vectara-agentic 0.2.0__tar.gz → 0.2.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of vectara-agentic might be problematic.
- {vectara_agentic-0.2.0/vectara_agentic.egg-info → vectara_agentic-0.2.2}/PKG-INFO +42 -15
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/README.md +29 -2
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/requirements.txt +12 -12
- vectara_agentic-0.2.2/tests/endpoint.py +47 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/tests/test_agent.py +39 -6
- vectara_agentic-0.2.2/tests/test_private_llm.py +67 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/tests/test_tools.py +32 -5
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/_prompts.py +3 -1
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/_version.py +1 -1
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/agent.py +123 -32
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/agent_config.py +9 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/tools.py +28 -7
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/tools_catalog.py +1 -1
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/types.py +1 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/utils.py +4 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2/vectara_agentic.egg-info}/PKG-INFO +42 -15
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic.egg-info/SOURCES.txt +2 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic.egg-info/requires.txt +12 -12
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/LICENSE +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/MANIFEST.in +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/setup.cfg +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/setup.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/tests/__init__.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/__init__.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/_callback.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/_observability.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/agent_endpoint.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/db_tools.py +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic.egg-info/dependency_links.txt +0 -0
- {vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic.egg-info/top_level.txt +0 -0

{vectara_agentic-0.2.0/vectara_agentic.egg-info → vectara_agentic-0.2.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: vectara_agentic
-Version: 0.2.0
+Version: 0.2.2
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,19 +16,19 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.12.
-Requires-Dist: llama-index-indices-managed-vectara==0.4.
+Requires-Dist: llama-index==0.12.22
+Requires-Dist: llama-index-indices-managed-vectara==0.4.1
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
-Requires-Dist: llama-index-agent-openai==0.4.
-Requires-Dist: llama-index-llms-openai==0.3.
-Requires-Dist: llama-index-llms-anthropic==0.6.
+Requires-Dist: llama-index-agent-openai==0.4.6
+Requires-Dist: llama-index-llms-openai==0.3.25
+Requires-Dist: llama-index-llms-anthropic==0.6.7
 Requires-Dist: llama-index-llms-together==0.3.1
 Requires-Dist: llama-index-llms-groq==0.3.1
-Requires-Dist: llama-index-llms-fireworks==0.3.
+Requires-Dist: llama-index-llms-fireworks==0.3.2
 Requires-Dist: llama-index-llms-cohere==0.4.0
-Requires-Dist: llama-index-llms-gemini==0.4.
-Requires-Dist: llama-index-llms-bedrock==0.3.
+Requires-Dist: llama-index-llms-gemini==0.4.11
+Requires-Dist: llama-index-llms-bedrock==0.3.4
 Requires-Dist: llama-index-tools-yahoo-finance==0.3.0
 Requires-Dist: llama-index-tools-arxiv==0.3.0
 Requires-Dist: llama-index-tools-database==0.3.0
@@ -38,8 +38,8 @@ Requires-Dist: llama-index-tools-neo4j==0.3.0
 Requires-Dist: llama-index-graph-stores-kuzu==0.6.0
 Requires-Dist: llama-index-tools-slack==0.3.0
 Requires-Dist: llama-index-tools-exa==0.3.0
-Requires-Dist: tavily-python==0.5.
-Requires-Dist: exa-py==1.8.
+Requires-Dist: tavily-python==0.5.1
+Requires-Dist: exa-py==1.8.9
 Requires-Dist: yahoo-finance==1.4.0
 Requires-Dist: openinference-instrumentation-llama-index==3.1.4
 Requires-Dist: opentelemetry-proto==1.26.0
@@ -50,8 +50,8 @@ Requires-Dist: tokenizers>=0.20
 Requires-Dist: pydantic==2.10.3
 Requires-Dist: retrying==1.3.4
 Requires-Dist: python-dotenv==1.0.1
-Requires-Dist: tiktoken==0.
-Requires-Dist:
+Requires-Dist: tiktoken==0.9.0
+Requires-Dist: cloudpickle>=3.1.1
 Requires-Dist: httpx==0.27.2
 Dynamic: author
 Dynamic: author-email
@@ -135,7 +135,7 @@ from vectara_agentic.tools import VectaraToolFactory
 vec_factory = VectaraToolFactory(
     vectara_api_key=os.environ['VECTARA_API_KEY'],
     vectara_customer_id=os.environ['VECTARA_CUSTOMER_ID'],
-
+    vectara_corpus_key=os.environ['VECTARA_CORPUS_KEY']
 )
 ```
 
@@ -315,6 +315,10 @@ def mult_func(x, y):
 mult_tool = ToolsFactory().create_tool(mult_func)
 ```
 
+Note: When you define your own Python functions as tools, implement them at the top module level,
+not as nested functions. Nested functions are not supported if you use serialization
+(dumps/loads or from_dict/to_dict).
+
 ## 🛠️ Configuration
 
 ## Configuring Vectara-agentic
@@ -352,10 +356,31 @@ If any of these are not provided, `AgentConfig` first tries to read the values f
 
 When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key`, `vectara_customer_id`, and `vectara_corpus_id` to the factory.
 
-If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY
+If not passed in, they will be taken from the environment variables (`VECTARA_API_KEY` and `VECTARA_CORPUS_KEY`). Note that `VECTARA_CORPUS_KEY` can be a single key or a comma-separated list of keys (if you want to query multiple corpora).
 
 These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.
 
+## Setting up a privately hosted LLM
+
+If you want to set up vectara-agentic to use your own self-hosted LLM endpoint, follow the example below:
+
+```python
+config = AgentConfig(
+    agent_type=AgentType.REACT,
+    main_llm_provider=ModelProvider.PRIVATE,
+    main_llm_model_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
+    private_llm_api_base="http://vllm-server.company.com/v1",
+    private_llm_api_key="TEST_API_KEY",
+)
+agent = Agent(agent_config=config, tools=tools, topic=topic,
+              custom_instructions=custom_instructions)
+```
+
+In this case we specify the main LLM provider as privately hosted, with Llama-3.1-8B as the model.
+- `ModelProvider.PRIVATE` specifies a privately hosted LLM.
+- `private_llm_api_base` specifies the API endpoint to use, and `private_llm_api_key`
+  specifies the private API key required to use this service.
+
 ## ℹ️ Additional Information
 
 ### About Custom Instructions for your Agent
@@ -376,6 +401,8 @@ The `Agent` class defines a few helpful methods to help you understand the inter
 
 The `Agent` class supports serialization. Use the `dumps()` to serialize and `loads()` to read back from a serialized stream.
 
+Note: due to cloudpickle limitations, if a tool contains Python `weakref` objects, serialization won't work and an exception will be raised.
+
 ### Observability
 
 vectara-agentic supports observability via the existing integration of LlamaIndex and Arize Phoenix.
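Given the serialization caveats above, here is a minimal round-trip sketch (assuming an `agent` built as in the README examples; `dumps()`/`loads()` are the method names from this diff):

```python
serialized = agent.dumps()                # string payload; tool functions are pickled via cloudpickle
agent_restored = agent.loads(serialized)  # mirrors how the test suite below round-trips agents
assert agent_restored == agent            # Agent equality compares tools and topic, among other fields
```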
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/README.md

@@ -68,7 +68,7 @@ from vectara_agentic.tools import VectaraToolFactory
 vec_factory = VectaraToolFactory(
     vectara_api_key=os.environ['VECTARA_API_KEY'],
     vectara_customer_id=os.environ['VECTARA_CUSTOMER_ID'],
-
+    vectara_corpus_key=os.environ['VECTARA_CORPUS_KEY']
 )
 ```
 
@@ -248,6 +248,10 @@ def mult_func(x, y):
 mult_tool = ToolsFactory().create_tool(mult_func)
 ```
 
+Note: When you define your own Python functions as tools, implement them at the top module level,
+not as nested functions. Nested functions are not supported if you use serialization
+(dumps/loads or from_dict/to_dict).
+
 ## 🛠️ Configuration
 
 ## Configuring Vectara-agentic
@@ -285,10 +289,31 @@ If any of these are not provided, `AgentConfig` first tries to read the values f
 
 When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key`, `vectara_customer_id`, and `vectara_corpus_id` to the factory.
 
-If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY
+If not passed in, they will be taken from the environment variables (`VECTARA_API_KEY` and `VECTARA_CORPUS_KEY`). Note that `VECTARA_CORPUS_KEY` can be a single key or a comma-separated list of keys (if you want to query multiple corpora).
 
 These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.
 
+## Setting up a privately hosted LLM
+
+If you want to set up vectara-agentic to use your own self-hosted LLM endpoint, follow the example below:
+
+```python
+config = AgentConfig(
+    agent_type=AgentType.REACT,
+    main_llm_provider=ModelProvider.PRIVATE,
+    main_llm_model_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
+    private_llm_api_base="http://vllm-server.company.com/v1",
+    private_llm_api_key="TEST_API_KEY",
+)
+agent = Agent(agent_config=config, tools=tools, topic=topic,
+              custom_instructions=custom_instructions)
+```
+
+In this case we specify the main LLM provider as privately hosted, with Llama-3.1-8B as the model.
+- `ModelProvider.PRIVATE` specifies a privately hosted LLM.
+- `private_llm_api_base` specifies the API endpoint to use, and `private_llm_api_key`
+  specifies the private API key required to use this service.
+
 ## ℹ️ Additional Information
 
 ### About Custom Instructions for your Agent
@@ -309,6 +334,8 @@ The `Agent` class defines a few helpful methods to help you understand the inter
 
 The `Agent` class supports serialization. Use the `dumps()` to serialize and `loads()` to read back from a serialized stream.
 
+Note: due to cloudpickle limitations, if a tool contains Python `weakref` objects, serialization won't work and an exception will be raised.
+
 ### Observability
 
 vectara-agentic supports observability via the existing integration of LlamaIndex and Arize Phoenix.
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/requirements.txt

@@ -1,16 +1,16 @@
-llama-index==0.12.
-llama-index-indices-managed-vectara==0.4.
+llama-index==0.12.22
+llama-index-indices-managed-vectara==0.4.1
 llama-index-agent-llm-compiler==0.3.0
 llama-index-agent-lats==0.3.0
-llama-index-agent-openai==0.4.
-llama-index-llms-openai==0.3.
-llama-index-llms-anthropic==0.6.
+llama-index-agent-openai==0.4.6
+llama-index-llms-openai==0.3.25
+llama-index-llms-anthropic==0.6.7
 llama-index-llms-together==0.3.1
 llama-index-llms-groq==0.3.1
-llama-index-llms-fireworks==0.3.
+llama-index-llms-fireworks==0.3.2
 llama-index-llms-cohere==0.4.0
-llama-index-llms-gemini==0.4.
-llama-index-llms-bedrock==0.3.
+llama-index-llms-gemini==0.4.11
+llama-index-llms-bedrock==0.3.4
 llama-index-tools-yahoo-finance==0.3.0
 llama-index-tools-arxiv==0.3.0
 llama-index-tools-database==0.3.0
@@ -20,8 +20,8 @@ llama-index-tools-neo4j==0.3.0
 llama-index-graph-stores-kuzu==0.6.0
 llama-index-tools-slack==0.3.0
 llama-index-tools-exa==0.3.0
-tavily-python==0.5.
-exa-py==1.8.
+tavily-python==0.5.1
+exa-py==1.8.9
 yahoo-finance==1.4.0
 openinference-instrumentation-llama-index==3.1.4
 opentelemetry-proto==1.26.0
@@ -32,6 +32,6 @@ tokenizers>=0.20
 pydantic==2.10.3
 retrying==1.3.4
 python-dotenv==1.0.1
-tiktoken==0.
-
+tiktoken==0.9.0
+cloudpickle>=3.1.1
 httpx==0.27.2
vectara_agentic-0.2.2/tests/endpoint.py

@@ -0,0 +1,47 @@
+from openai import OpenAI
+from flask import Flask, request, jsonify
+import logging
+from functools import wraps
+
+app = Flask(__name__)
+app.config['TESTING'] = True
+
+log = logging.getLogger('werkzeug')
+log.setLevel(logging.ERROR)
+
+# Set your OpenAI API key (ensure you've set this in your environment)
+
+EXPECTED_API_KEY = "TEST_API_KEY"
+
+def require_api_key(f):
+    @wraps(f)
+    def decorated_function(*args, **kwargs):
+        api_key = request.headers.get("Authorization").split("Bearer ")[-1]
+        if not api_key or api_key != EXPECTED_API_KEY:
+            return jsonify({"error": "Unauthorized"}), 401
+        return f(*args, **kwargs)
+    return decorated_function
+
+@app.before_request
+def log_request_info():
+    app.logger.info("Request received: %s %s", request.method, request.path)
+
+@app.route("/v1/chat/completions", methods=["POST"])
+@require_api_key
+def chat_completions():
+    app.logger.info("Received request on /v1/chat/completions")
+    data = request.get_json()
+    if not data:
+        return jsonify({"error": "Invalid JSON payload"}), 400
+
+    client = OpenAI()
+    try:
+        completion = client.chat.completions.create(**data)
+        return jsonify(completion.model_dump()), 200
+    except Exception as e:
+        return jsonify({"error": str(e)}), 400
+
+
+if __name__ == "__main__":
+    # Run on port 5000 by default; adjust as needed.
+    app.run(debug=True, port=5000, use_reloader=False)
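A quick manual smoke test for this mock endpoint (a sketch; it assumes the Flask app above is running on port 5000 and that OPENAI_API_KEY is set, since the endpoint proxies to OpenAI):

```python
import requests

resp = requests.post(
    "http://127.0.0.1:5000/v1/chat/completions",
    headers={"Authorization": "Bearer TEST_API_KEY"},  # must match EXPECTED_API_KEY above
    json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Say hi"}]},
)
print(resp.status_code, resp.json())
```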
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/tests/test_agent.py

@@ -6,6 +6,9 @@ from vectara_agentic.agent_config import AgentConfig
 from vectara_agentic.types import ModelProvider, ObserverType
 from vectara_agentic.tools import ToolsFactory
 
+def mult(x, y):
+    return x * y
+
 class TestAgentPackage(unittest.TestCase):
     def test_get_prompt(self):
         prompt_template = "{chat_topic} on {today} with {custom_instructions}"
@@ -21,9 +24,6 @@ class TestAgentPackage(unittest.TestCase):
         )
 
     def test_agent_init(self):
-        def mult(x, y):
-            return x * y
-
         tools = [ToolsFactory().create_tool(mult)]
         topic = "AI"
         custom_instructions = "Always do as your mother tells you!"
@@ -41,9 +41,6 @@ class TestAgentPackage(unittest.TestCase):
         )
 
     def test_agent_config(self):
-        def mult(x, y):
-            return x * y
-
         tools = [ToolsFactory().create_tool(mult)]
         topic = "AI topic"
         instructions = "Always do as your father tells you, if your mother agrees!"
@@ -77,6 +74,21 @@ class TestAgentPackage(unittest.TestCase):
             "50",
         )
 
+    def test_multiturn(self):
+        tools = [ToolsFactory().create_tool(mult)]
+        topic = "AI topic"
+        instructions = "Always do as your father tells you, if your mother agrees!"
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+        )
+
+        agent.chat("What is 5 times 10. Only give the answer, nothing else")
+        agent.chat("what is 3 times 7. Only give the answer, nothing else")
+        res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+        self.assertEqual(res.response, "1050")
+
     def test_from_corpus(self):
         agent = Agent.from_corpus(
             tool_name="RAG Tool",
@@ -99,8 +111,29 @@ class TestAgentPackage(unittest.TestCase):
         )
 
         agent_reloaded = agent.loads(agent.dumps())
+        agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
+
         self.assertIsInstance(agent_reloaded, Agent)
         self.assertEqual(agent, agent_reloaded)
+        self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
+
+        self.assertIsInstance(agent_reloaded, Agent)
+        self.assertEqual(agent, agent_reloaded_again)
+        self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
+
+    def test_chat_history(self):
+        tools = [ToolsFactory().create_tool(mult)]
+        topic = "AI topic"
+        instructions = "Always do as your father tells you, if your mother agrees!"
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            chat_history=[("What is 5 times 10", "50"), ("What is 3 times 7", "21")]
+        )
+
+        res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+        self.assertEqual(res.response, "1050")
 
 
 if __name__ == "__main__":
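Outside the test harness, the new `chat_history` argument seeds agent memory the same way (a sketch reusing the module-level `mult` tool defined above; the expected answer assumes the default OpenAI provider is configured):

```python
agent = Agent(
    tools=[ToolsFactory().create_tool(mult)],
    topic="math",
    custom_instructions="Answer concisely.",
    chat_history=[("What is 5 times 10", "50")],
)
res = agent.chat("Double the previous answer. Output only the number.")
print(res.response)  # expected: "100"
```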
vectara_agentic-0.2.2/tests/test_private_llm.py

@@ -0,0 +1,67 @@
+import os
+import unittest
+import subprocess
+import time
+import requests
+import signal
+
+from vectara_agentic.agent import Agent, AgentType
+from vectara_agentic.agent_config import AgentConfig
+from vectara_agentic.types import ModelProvider
+from vectara_agentic.tools import ToolsFactory
+
+class TestPrivateLLM(unittest.TestCase):
+
+    @classmethod
+    def setUp(cls):
+        # Start the Flask server as a subprocess
+        cls.flask_process = subprocess.Popen(
+            ['flask', 'run', '--port=5000'],
+            env={**os.environ, 'FLASK_APP': 'tests.endpoint:app', 'FLASK_ENV': 'development'},
+            stdout=None, stderr=None,
+        )
+        # Wait for the server to start
+        timeout = 10
+        url = 'http://127.0.0.1:5000/'
+        for _ in range(timeout):
+            try:
+                requests.get(url)
+                return
+            except requests.ConnectionError:
+                time.sleep(1)
+        raise RuntimeError(f"Failed to start Flask server at {url}")
+
+    @classmethod
+    def tearDown(cls):
+        # Terminate the Flask server
+        cls.flask_process.send_signal(signal.SIGINT)
+        cls.flask_process.wait()
+
+    def test_endpoint(self):
+        def mult(x, y):
+            return x * y
+
+        tools = [ToolsFactory().create_tool(mult)]
+        topic = "calculator"
+        custom_instructions = "you are an agent specializing in math, assisting a user."
+        config = AgentConfig(
+            agent_type=AgentType.REACT,
+            main_llm_provider=ModelProvider.PRIVATE,
+            main_llm_model_name="gpt-4o",
+            private_llm_api_base="http://127.0.0.1:5000/v1",
+            private_llm_api_key="TEST_API_KEY",
+        )
+        agent = Agent(agent_config=config, tools=tools, topic=topic,
+                      custom_instructions=custom_instructions)
+
+        # To run this test, you must have OPENAI_API_KEY in your environment
+        self.assertEqual(
+            agent.chat(
+                "What is 5 times 10. Only give the answer, nothing else"
+            ).response.replace("$", "\\$"),
+            "50",
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/tests/test_tools.py

@@ -1,8 +1,11 @@
 import unittest
 
+from pydantic import Field, BaseModel
+
 from vectara_agentic.tools import VectaraTool, VectaraToolFactory, ToolsFactory, ToolType
 from vectara_agentic.agent import Agent
-from 
+from vectara_agentic.agent_config import AgentConfig
+
 from llama_index.core.tools import FunctionTool
 
 
@@ -60,9 +63,6 @@ class TestToolsPackage(unittest.TestCase):
         vectara_corpus_key = "vectara-docs_1"
         vectara_api_key = "zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA"
 
-        class QueryToolArgs(BaseModel):
-            query: str = Field(description="The user query")
-
         agent = Agent.from_corpus(
             vectara_corpus_key=vectara_corpus_key,
             vectara_api_key=vectara_api_key,
@@ -72,7 +72,34 @@ class TestToolsPackage(unittest.TestCase):
             vectara_summarizer="mockingbird-1.0-2024-07-16"
         )
 
-        self.assertIn("Vectara is an end-to-end platform", agent.chat("What is Vectara?"))
+        self.assertIn("Vectara is an end-to-end platform", str(agent.chat("What is Vectara?")))
+
+    def test_class_method_as_tool(self):
+        class TestClass:
+            def __init__(self):
+                pass
+
+            def mult(self, x, y):
+                return x * y
+
+        test_class = TestClass()
+        tools = [ToolsFactory().create_tool(test_class.mult)]
+        topic = "AI topic"
+        instructions = "Always do as your father tells you, if your mother agrees!"
+        config = AgentConfig()
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config=config
+        )
+
+        self.assertEqual(
+            agent.chat(
+                "What is 5 times 10. Only give the answer, nothing else"
+            ).response.replace("$", "\\$"),
+            "50",
+        )
 
 
 if __name__ == "__main__":
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/_prompts.py

@@ -5,7 +5,9 @@ This file contains the prompt templates for the different types of agents.
 # General (shared) instructions
 GENERAL_INSTRUCTIONS = """
 - Use tools as your main source of information, do not respond without using a tool. Do not respond based on pre-trained knowledge.
--
+- Before responding to a user query that requires knowledge of the current date, call the 'get_current_date' tool to get the current date.
+  Never rely on previous knowledge of the current date.
+  Example queries that require the current date: "What is the revenue of Apple last october?" or "What was the stock price 5 days ago?".
 - When using a tool with arguments, simplify the query as much as possible if you use the tool with arguments.
   For example, if the original query is "revenue for apple in 2021", you can use the tool with a query "revenue" with arguments year=2021 and company=apple.
 - If a tool responds with "I do not have enough information", try one of the following:
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/agent.py

@@ -1,8 +1,9 @@
 """
 This module contains the Agent class for handling different types of agents and their interactions.
 """
-from typing import List, Callable, Optional, Dict, Any
+from typing import List, Callable, Optional, Dict, Any, Union, Tuple
 import os
+import re
 from datetime import date
 import time
 import json
@@ -10,12 +11,17 @@ import logging
 import traceback
 import asyncio
 
-import 
+from collections import Counter
+
+import cloudpickle as pickle
+
 from dotenv import load_dotenv
 
 from retrying import retry
 from pydantic import Field, create_model
 
+from llama_index.core.memory import ChatMemoryBuffer
+from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core.tools import FunctionTool
 from llama_index.core.agent import ReActAgent
 from llama_index.core.agent.react.formatter import ReActChatFormatter
@@ -24,7 +30,7 @@ from llama_index.agent.lats import LATSAgentWorker
 from llama_index.core.callbacks import CallbackManager, TokenCountingHandler
 from llama_index.core.callbacks.base_handler import BaseCallbackHandler
 from llama_index.agent.openai import OpenAIAgent
-
+
 from .types import AgentType, AgentStatusType, LLMRole, ToolType, AgentResponse, AgentStreamingResponse
 from .utils import get_llm, get_tokenizer_for_model
@@ -35,6 +41,21 @@ from .tools import VectaraToolFactory, VectaraTool, ToolsFactory
 from .tools_catalog import get_current_date
 from .agent_config import AgentConfig
 
+class IgnoreUnpickleableAttributeFilter(logging.Filter):
+    '''
+    Filter to ignore log messages that contain certain strings
+    '''
+    def filter(self, record):
+        msgs_to_ignore = [
+            "Removing unpickleable private attribute _chunking_tokenizer_fn",
+            "Removing unpickleable private attribute _split_fns",
+            "Removing unpickleable private attribute _sub_sentence_split_fns",
+        ]
+        return all(msg not in record.getMessage() for msg in msgs_to_ignore)
+
+
+logging.getLogger().addFilter(IgnoreUnpickleableAttributeFilter())
+
 logger = logging.getLogger("opentelemetry.exporter.otlp.proto.http.trace_exporter")
 logger.setLevel(logging.CRITICAL)
@@ -81,6 +102,34 @@ def _retry_if_exception(exception):
     return isinstance(exception, (TimeoutError))
 
 
+def get_field_type(field_schema: dict) -> Any:
+    """
+    Convert a JSON schema field definition to a Python type.
+    Handles 'type' and 'anyOf' cases.
+    """
+    json_type_to_python = {
+        "string": str,
+        "integer": int,
+        "boolean": bool,
+        "array": list,
+        "object": dict,
+        "number": float,
+    }
+    if "anyOf" in field_schema:
+        types = []
+        for option in field_schema["anyOf"]:
+            # If the option has a type, convert it; otherwise, use Any.
+            if "type" in option:
+                types.append(json_type_to_python.get(option["type"], Any))
+            else:
+                types.append(Any)
+        # Return a Union of the types. For example, Union[str, int]
+        return Union[tuple(types)]
+    elif "type" in field_schema:
+        return json_type_to_python.get(field_schema["type"], Any)
+    else:
+        return Any
+
 class Agent:
     """
     Agent class for handling different types of agents and their interactions.
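For reference, a quick illustration of what the `get_field_type` helper above returns for typical JSON-schema fragments (the results follow directly from the code as written):

```python
get_field_type({"type": "number"})                 # -> float
get_field_type({"anyOf": [{"type": "string"},
                          {"type": "integer"}]})   # -> typing.Union[str, int]
get_field_type({})                                 # -> typing.Any
```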
@@ -96,6 +145,8 @@ class Agent:
         agent_progress_callback: Optional[Callable[[AgentStatusType, str], None]] = None,
         query_logging_callback: Optional[Callable[[str, str], None]] = None,
         agent_config: Optional[AgentConfig] = None,
+        chat_history: Optional[list[Tuple[str, str]]] = None,
+        validate_tools: bool = False,
     ) -> None:
         """
         Initialize the agent with the specified type, tools, topic, and system message.
@@ -111,21 +162,52 @@ class Agent:
             query_logging_callback (Callable): A callback function the code calls upon completion of a query
             agent_config (AgentConfig, optional): The configuration of the agent.
                 Defaults to AgentConfig(), which reads from environment variables.
+            chat_history (Tuple[str, str], optional): A list of user/agent chat pairs to initialize the agent memory.
+            validate_tools (bool, optional): Whether to validate tool inconsistency with instructions.
+                Defaults to False.
         """
         self.agent_config = agent_config or AgentConfig()
         self.agent_type = self.agent_config.agent_type
-        self.tools = tools
+        self.tools = tools
+        if not any(tool.metadata.name == 'get_current_date' for tool in self.tools):
+            self.tools += [ToolsFactory().create_tool(get_current_date)]
         self.llm = get_llm(LLMRole.MAIN, config=self.agent_config)
         self._custom_instructions = custom_instructions
         self._topic = topic
         self.agent_progress_callback = agent_progress_callback if agent_progress_callback else update_func
         self.query_logging_callback = query_logging_callback
 
+        # Validate tools
+        # Check for:
+        # 1. multiple copies of the same tool
+        # 2. Instructions for using tools that do not exist
+        tool_names = [tool.metadata.name for tool in self.tools]
+        duplicates = [tool for tool, count in Counter(tool_names).items() if count > 1]
+        if duplicates:
+            raise ValueError(f"Duplicate tools detected: {', '.join(duplicates)}")
+
+        if validate_tools:
+            prompt = f'''
+            Given the following instructions, and a list of tool names,
+            Please identify tools mentioned in the instructions that do not exist in the list.
+            Instructions:
+            {self._custom_instructions}
+            Tool names: {', '.join(tool_names)}
+            Your response should include a comma separated list of tool names that do not exist in the list.
+            Your response should be an empty string if all tools mentioned in the instructions are in the list.
+            '''
+            llm = get_llm(LLMRole.MAIN, config=self.agent_config)
+            bad_tools = llm.complete(prompt).text.split(", ")
+            if bad_tools:
+                raise ValueError(f"The Agent custom instructions mention these invalid tools: {', '.join(bad_tools)}")
+
+        # Create token counters for the main and tool LLMs
         main_tok = get_tokenizer_for_model(role=LLMRole.MAIN)
         self.main_token_counter = TokenCountingHandler(tokenizer=main_tok) if main_tok else None
         tool_tok = get_tokenizer_for_model(role=LLMRole.TOOL)
         self.tool_token_counter = TokenCountingHandler(tokenizer=tool_tok) if tool_tok else None
 
+        # Setup callback manager
         callbacks: list[BaseCallbackHandler] = [AgentCallbackHandler(self.agent_progress_callback)]
         if self.main_token_counter:
             callbacks.append(self.main_token_counter)
@@ -135,7 +217,14 @@ class Agent:
         self.llm.callback_manager = callback_manager
         self.verbose = verbose
 
-
+        if chat_history:
+            msg_history = []
+            for text_pairs in chat_history:
+                msg_history.append(ChatMessage.from_str(content=text_pairs[0], role=MessageRole.USER))
+                msg_history.append(ChatMessage.from_str(content=text_pairs[1], role=MessageRole.ASSISTANT))
+            self.memory = ChatMemoryBuffer.from_defaults(token_limit=128000, chat_history=msg_history)
+        else:
+            self.memory = ChatMemoryBuffer.from_defaults(token_limit=128000)
         if self.agent_type == AgentType.REACT:
             prompt = _get_prompt(REACT_PROMPT_TEMPLATE, topic, custom_instructions)
             self.agent = ReActAgent.from_tools(
@@ -219,7 +308,10 @@ class Agent:
 
         # Compare tools
         if self.tools != other.tools:
-            print(
+            print(
+                "Comparison failed: tools differ."
+                f"(self.tools: {[t.metadata.name for t in self.tools]}, "
+                f"other.tools: {[t.metadata.name for t in other.tools]})")
             return False
 
         # Compare topic
@@ -263,6 +355,7 @@ class Agent:
         agent_progress_callback: Optional[Callable[[AgentStatusType, str], None]] = None,
         query_logging_callback: Optional[Callable[[str, str], None]] = None,
         agent_config: AgentConfig = AgentConfig(),
+        chat_history: Optional[list[Tuple[str, str]]] = None,
     ) -> "Agent":
         """
         Create an agent from tools, agent type, and language model.
@@ -277,6 +370,7 @@ class Agent:
         update_func (Callable): old name for agent_progress_callback. Will be deprecated in future.
         query_logging_callback (Callable): A callback function the code calls upon completion of a query
         agent_config (AgentConfig, optional): The configuration of the agent.
+        chat_history (Tuple[str, str], optional): A list of user/agent chat pairs to initialize the agent memory.
 
         Returns:
             Agent: An instance of the Agent class.
@@ -285,7 +379,8 @@ class Agent:
             tools=tools, topic=topic, custom_instructions=custom_instructions,
             verbose=verbose, agent_progress_callback=agent_progress_callback,
             query_logging_callback=query_logging_callback,
-            update_func=update_func, agent_config=agent_config
+            update_func=update_func, agent_config=agent_config,
+            chat_history=chat_history,
         )
 
     @classmethod
@@ -322,7 +417,7 @@ class Agent:
         vectara_temperature: Optional[float] = None,
         vectara_frequency_penalty: Optional[float] = None,
         vectara_presence_penalty: Optional[float] = None,
-        vectara_save_history: bool = 
+        vectara_save_history: bool = True,
     ) -> "Agent":
         """
         Create an agent from a single Vectara corpus
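The new duplicate-tool check in `Agent.__init__` fails fast; a sketch (reusing a trivial `mult` function like the one in the tests):

```python
tools = [ToolsFactory().create_tool(mult), ToolsFactory().create_tool(mult)]
Agent(tools=tools, topic="math", custom_instructions="Be brief.")
# raises ValueError: Duplicate tools detected: mult
```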
@@ -383,6 +478,10 @@ class Agent:
         ) # type: ignore
         query_args = create_model("QueryArgs", **field_definitions) # type: ignore
 
+        # tool name must be valid Python function name
+        if tool_name:
+            tool_name = re.sub(r"[^A-Za-z0-9_]", "_", tool_name)
+
         vectara_tool = vec_factory.create_rag_tool(
             tool_name=tool_name or f"vectara_{vectara_corpus_key}",
             tool_description=f"""
@@ -414,6 +513,7 @@ class Agent:
             presence_penalty=vectara_presence_penalty,
             save_history=vectara_save_history,
             include_citations=True,
+            verbose=verbose,
         )
 
         assistant_instructions = f"""
@@ -587,8 +687,8 @@ class Agent:
             "tool_type": tool.metadata.tool_type.value,
             "name": tool.metadata.name,
             "description": tool.metadata.description,
-            "fn": 
-            "async_fn": 
+            "fn": pickle.dumps(tool.fn).decode("latin-1") if tool.fn else None,  # Serialize fn
+            "async_fn": pickle.dumps(tool.async_fn).decode("latin-1")
             if tool.async_fn
             else None,  # Serialize async_fn
             "fn_schema": tool.metadata.fn_schema.model_json_schema()
@@ -599,7 +699,7 @@ class Agent:
 
         return {
             "agent_type": self.agent_type.value,
-            "memory": 
+            "memory": pickle.dumps(self.agent.memory).decode("latin-1"),
             "tools": tool_info,
             "topic": self._topic,
             "custom_instructions": self._custom_instructions,
@@ -613,39 +713,30 @@
         agent_config = AgentConfig.from_dict(data["agent_config"])
         tools = []
 
-        json_type_to_python = {
-            "string": str,
-            "integer": int,
-            "boolean": bool,
-            "array": list,
-            "object": dict,
-            "number": float,
-        }
-
         for tool_data in data["tools"]:
             # Recreate the dynamic model using the schema info
             if tool_data.get("fn_schema"):
                 field_definitions = {}
                 for field, values in tool_data["fn_schema"]["properties"].items():
+                    # Instead of checking for 'type', use the helper:
+                    field_type = get_field_type(values)
+                    # If there's a default value, include it.
                     if "default" in values:
                         field_definitions[field] = (
-                            
-                            Field(
-                            
-                                default=values["default"],
-                            ),
-                        ) # type: ignore
+                            field_type,
+                            Field(description=values.get("description", ""), default=values["default"]),
+                        )
                     else:
                         field_definitions[field] = (
-                            
-                            Field(description=values
-                        )
+                            field_type,
+                            Field(description=values.get("description", "")),
+                        )
                 query_args_model = create_model("QueryArgs", **field_definitions) # type: ignore
             else:
                 query_args_model = create_model("QueryArgs")
 
-            fn = 
-            async_fn = 
+            fn = pickle.loads(tool_data["fn"].encode("latin-1")) if tool_data["fn"] else None
+            async_fn = pickle.loads(tool_data["async_fn"].encode("latin-1")) if tool_data["async_fn"] else None
 
             tool = VectaraTool.from_defaults(
                 name=tool_data["name"],
@@ -664,7 +755,7 @@
             custom_instructions=data["custom_instructions"],
             verbose=data["verbose"],
         )
-        memory = 
+        memory = pickle.loads(data["memory"].encode("latin-1")) if data.get("memory") else None
         if memory:
             agent.agent.memory = memory
         return agent
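The tool-name sanitization added in `from_corpus` is just the regex shown in the hunk; for illustration:

```python
import re

re.sub(r"[^A-Za-z0-9_]", "_", "RAG Tool")      # -> "RAG_Tool" (the name used in tests/test_agent.py)
re.sub(r"[^A-Za-z0-9_]", "_", "my-corpus.v2")  # -> "my_corpus_v2"
```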
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/agent_config.py

@@ -44,6 +44,15 @@ class AgentConfig:
         default_factory=lambda: os.getenv("VECTARA_AGENTIC_TOOL_MODEL_NAME", "")
     )
 
+    # Params for Private LLM endpoint if used
+    private_llm_api_base: str = field(
+        default_factory=lambda: os.getenv("VECTARA_AGENTIC_PRIVATE_LLM_API_BASE",
+                                          "http://private-endpoint.company.com:5000/v1")
+    )
+    private_llm_api_key: str = field(
+        default_factory=lambda: os.getenv("VECTARA_AGENTIC_PRIVATE_LLM_API_KEY", "<private-api-key>")
+    )
+
     # Observer
     observer: ObserverType = field(
         default_factory=lambda: ObserverType(
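A minimal sketch of driving the new private-LLM fields through the environment instead of constructor arguments (the URL and key values are placeholders):

```python
import os
from vectara_agentic.agent_config import AgentConfig

# Both fields read these variables via their default_factory.
os.environ["VECTARA_AGENTIC_PRIVATE_LLM_API_BASE"] = "http://vllm-server.company.com/v1"
os.environ["VECTARA_AGENTIC_PRIVATE_LLM_API_KEY"] = "TEST_API_KEY"

config = AgentConfig()  # picks up both values without passing them explicitly
```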
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/tools.py

@@ -17,7 +17,6 @@ from llama_index.indices.managed.vectara import VectaraIndex
 from llama_index.core.utilities.sql_wrapper import SQLDatabase
 from llama_index.core.tools.types import ToolMetadata, ToolOutput
 
-
 from .types import ToolType
 from .tools_catalog import ToolsCatalog, get_bad_topics
 from .db_tools import DBLoadSampleData, DBLoadUniqueValues, DBLoadData
@@ -100,9 +99,14 @@ class VectaraTool(FunctionTool):
         fn_schema: Optional[Type[BaseModel]] = None,
         async_fn: Optional[AsyncCallable] = None,
         tool_metadata: Optional[ToolMetadata] = None,
+        callback: Optional[Callable[[Any], Any]] = None,
+        async_callback: Optional[AsyncCallable] = None,
         tool_type: ToolType = ToolType.QUERY,
     ) -> "VectaraTool":
-        tool = FunctionTool.from_defaults(
+        tool = FunctionTool.from_defaults(
+            fn, name, description, return_direct, fn_schema, async_fn, tool_metadata,
+            callback, async_callback
+        )
         vectara_tool = cls(tool_type=tool_type, fn=tool.fn, metadata=tool.metadata, async_fn=tool.async_fn)
         return vectara_tool
 
@@ -110,6 +114,9 @@ class VectaraTool(FunctionTool):
         if self.metadata.tool_type != other.metadata.tool_type:
             return False
 
+        if self.metadata.name != other.metadata.name or self.metadata.description != other.metadata.description:
+            return False
+
         # Check if fn_schema is an instance of a BaseModel or a class itself (metaclass)
         self_schema_dict = self.metadata.fn_schema.model_fields
         other_schema_dict = other.metadata.fn_schema.model_fields
@@ -252,7 +259,10 @@ def _build_filter_string(kwargs: Dict[str, Any], tool_args_type: Dict[str, dict]
             filter_parts.append(f"{prefix}.{key}='{val_str}'")
 
     filter_str = " AND ".join(filter_parts)
-
+    if fixed_filter and filter_str:
+        return f"({fixed_filter}) AND ({filter_str})"
+    else:
+        return fixed_filter or filter_str
 
 class VectaraToolFactory:
     """
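To make the new combination logic in `_build_filter_string` concrete, a standalone mirror of just the return expression:

```python
def combine(fixed_filter: str, filter_str: str) -> str:
    # Mirrors the branch added above: AND the two filters when both are present.
    if fixed_filter and filter_str:
        return f"({fixed_filter}) AND ({filter_str})"
    return fixed_filter or filter_str

combine("doc.lang='en'", "doc.year='2021'")  # -> "(doc.lang='en') AND (doc.year='2021')"
combine("", "doc.year='2021'")               # -> "doc.year='2021'"
```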
@@ -294,8 +304,10 @@ class VectaraToolFactory:
         mmr_diversity_bias: float = 0.2,
         udf_expression: str = None,
         rerank_chain: List[Dict] = None,
-        save_history: bool = 
+        save_history: bool = True,
         verbose: bool = False,
+        vectara_base_url: str = "https://api.vectara.io",
+        vectara_verify_ssl: bool = True,
     ) -> VectaraTool:
         """
         Creates a Vectara search/retrieval tool
@@ -327,6 +339,8 @@ class VectaraToolFactory:
             If using slingshot/multilingual_reranker_v1, it must be first in the list.
         save_history (bool, optional): Whether to save the query in history.
         verbose (bool, optional): Whether to print verbose output.
+        vectara_base_url (str, optional): The base URL for the Vectara API.
+        vectara_verify_ssl (bool, optional): Whether to verify SSL certificates for the Vectara API.
 
         Returns:
             VectaraTool: A VectaraTool object.
@@ -336,6 +350,8 @@ class VectaraToolFactory:
             vectara_api_key=self.vectara_api_key,
             vectara_corpus_key=self.vectara_corpus_key,
             x_source_str="vectara-agentic",
+            base_url=vectara_base_url,
+            verify_ssl=vectara_verify_ssl,
         )
 
         # Dynamically generate the search function
@@ -426,7 +442,7 @@ class VectaraToolFactory:
 
         # Create the tool function signature string
         fields = []
-        for name, field in tool_args_schema.
+        for name, field in tool_args_schema.model_fields.items():
             annotation = field.annotation
             type_name = annotation.__name__ if hasattr(annotation, '__name__') else str(annotation)
             fields.append(f"{name}: {type_name}")
@@ -476,6 +492,8 @@ class VectaraToolFactory:
         save_history: bool = False,
         fcs_threshold: float = 0.0,
         verbose: bool = False,
+        vectara_base_url: str = "https://api.vectara.io",
+        vectara_verify_ssl: bool = True,
     ) -> VectaraTool:
         """
         Creates a RAG (Retrieve and Generate) tool.
@@ -526,6 +544,8 @@ class VectaraToolFactory:
         fcs_threshold (float, optional): A threshold for factual consistency.
             If set above 0, the tool notifies the calling agent that it "cannot respond" if FCS is too low.
         verbose (bool, optional): Whether to print verbose output.
+        vectara_base_url (str, optional): The base URL for the Vectara API.
+        vectara_verify_ssl (bool, optional): Whether to verify SSL certificates for the Vectara API.
 
         Returns:
             VectaraTool: A VectaraTool object.
@@ -535,6 +555,8 @@ class VectaraToolFactory:
             vectara_api_key=self.vectara_api_key,
             vectara_corpus_key=self.vectara_corpus_key,
             x_source_str="vectara-agentic",
+            base_url=vectara_base_url,
+            verify_ssl=vectara_verify_ssl,
         )
 
         # Dynamically generate the RAG function
@@ -677,7 +699,7 @@ class VectaraToolFactory:
 
         # Create the tool function signature string
         fields = []
-        for name, field in tool_args_schema.
+        for name, field in tool_args_schema.model_fields.items():
             annotation = field.annotation
             type_name = annotation.__name__ if hasattr(annotation, '__name__') else str(annotation)
             fields.append(f"{name}: {type_name}")
@@ -743,7 +765,6 @@ class ToolsFactory:
 
         # Get the tool spec class or function from the module
         tool_spec = getattr(module, tool_spec_name)
-
        func_type = LI_packages[tool_package_name]
         tools = tool_spec(**kwargs).to_tool_list()
         vtools = []
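A sketch of pointing a tool at a non-default Vectara endpoint with the new parameters (the gateway URL is a placeholder, and the `tool_name`/`tool_description` arguments are assumed from how `from_corpus` calls this factory):

```python
vec_factory = VectaraToolFactory(
    vectara_api_key="<api-key>",
    vectara_corpus_key="<corpus-key>",
)
rag_tool = vec_factory.create_rag_tool(
    tool_name="ask_docs",
    tool_description="Answer questions from the documentation corpus.",
    vectara_base_url="https://vectara-gateway.company.com",  # placeholder gateway
    vectara_verify_ssl=False,  # e.g. for a self-signed certificate inside a VPC
)
```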
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic/utils.py

@@ -112,6 +112,10 @@ def get_llm(
     elif model_provider == ModelProvider.COHERE:
         from llama_index.llms.cohere import Cohere
         llm = Cohere(model=model_name, temperature=0)
+    elif model_provider == ModelProvider.PRIVATE:
+        from llama_index.llms.openai_like import OpenAILike
+        llm = OpenAILike(model=model_name, temperature=0, is_function_calling_model=True, is_chat_model=True,
+                         api_base=config.private_llm_api_base, api_key=config.private_llm_api_key)
     else:
         raise ValueError(f"Unknown LLM provider: {model_provider}")
     return llm
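For reference, a sketch of how the new `PRIVATE` branch gets exercised (the config mirrors the private-LLM example in the README above):

```python
from vectara_agentic.utils import get_llm
from vectara_agentic.types import LLMRole, ModelProvider
from vectara_agentic.agent_config import AgentConfig

config = AgentConfig(
    main_llm_provider=ModelProvider.PRIVATE,
    main_llm_model_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
    private_llm_api_base="http://vllm-server.company.com/v1",  # placeholder
    private_llm_api_key="TEST_API_KEY",
)
llm = get_llm(LLMRole.MAIN, config=config)  # returns an OpenAILike client
```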
{vectara_agentic-0.2.0 → vectara_agentic-0.2.2/vectara_agentic.egg-info}/PKG-INFO
(identical to the PKG-INFO changes shown above)

{vectara_agentic-0.2.0 → vectara_agentic-0.2.2}/vectara_agentic.egg-info/requires.txt
(identical to the requirements.txt changes shown above)
All remaining files are unchanged between 0.2.0 and 0.2.2: LICENSE, MANIFEST.in, setup.cfg, setup.py, tests/__init__.py, vectara_agentic/__init__.py, vectara_agentic/_callback.py, vectara_agentic/_observability.py, vectara_agentic/agent_endpoint.py, vectara_agentic/db_tools.py, vectara_agentic.egg-info/dependency_links.txt, and vectara_agentic.egg-info/top_level.txt.