vectara-agentic 0.2.16__tar.gz → 0.2.17__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vectara-agentic might be problematic.
- {vectara_agentic-0.2.16/vectara_agentic.egg-info → vectara_agentic-0.2.17}/PKG-INFO +59 -16
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/README.md +52 -11
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/requirements.txt +6 -4
- vectara_agentic-0.2.17/tests/test_workflow.py +134 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/_version.py +1 -1
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/agent.py +50 -28
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/sub_query_workflow.py +5 -2
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/tool_utils.py +2 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/tools.py +2 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/utils.py +1 -1
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17/vectara_agentic.egg-info}/PKG-INFO +59 -16
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic.egg-info/requires.txt +6 -4
- vectara_agentic-0.2.16/tests/test_workflow.py +0 -67
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/LICENSE +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/MANIFEST.in +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/setup.cfg +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/setup.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/__init__.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/endpoint.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_agent.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_agent_planning.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_agent_type.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_fallback.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_groq.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_private_llm.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_return_direct.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_serialization.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_tools.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/tests/test_vectara_llms.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/__init__.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/_callback.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/_observability.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/_prompts.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/agent_config.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/agent_endpoint.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/db_tools.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/llm_utils.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/tools_catalog.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic/types.py +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic.egg-info/SOURCES.txt +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic.egg-info/dependency_links.txt +0 -0
- {vectara_agentic-0.2.16 → vectara_agentic-0.2.17}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.16
+Version: 0.2.17
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,7 +16,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.12.
+Requires-Dist: llama-index==0.12.35
 Requires-Dist: llama-index-indices-managed-vectara==0.4.5
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
@@ -36,15 +36,17 @@ Requires-Dist: llama-index-tools-google==0.3.0
 Requires-Dist: llama-index-tools-tavily_research==0.3.0
 Requires-Dist: llama_index.tools.brave_search==0.3.0
 Requires-Dist: llama-index-tools-neo4j==0.3.0
+Requires-Dist: llama-index-tools-waii==0.3.0
 Requires-Dist: llama-index-graph-stores-kuzu==0.7.0
+Requires-Dist: llama-index-tools-salesforce==0.3.0
 Requires-Dist: llama-index-tools-slack==0.3.0
 Requires-Dist: llama-index-tools-exa==0.3.0
 Requires-Dist: llama-index-tools-wikipedia==0.3.0
 Requires-Dist: llama-index-tools-bing-search==0.3.0
-Requires-Dist: tavily-python==0.
-Requires-Dist: exa-py==1.12.
+Requires-Dist: tavily-python==0.7.2
+Requires-Dist: exa-py==1.12.1
 Requires-Dist: openinference-instrumentation-llama-index==4.2.1
-Requires-Dist: opentelemetry-proto
+Requires-Dist: opentelemetry-proto>=1.31.0
 Requires-Dist: arize-phoenix==8.26.1
 Requires-Dist: arize-phoenix-otel==0.9.2
 Requires-Dist: protobuf==5.29.3
@@ -365,51 +367,92 @@ vectara-agentic includes various other tools from LlamaIndex ToolSpecs:
 * Tavily Search: Real-time web search using [Tavily API](https://tavily.com/)
 ```python
 from vectara_agentic.tools_catalog import ToolsCatalog
-
+tools_factory = ToolsFactory()
+tavily_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="tavily_research",
+    tool_spec_name="TavilyToolSpec",
+    api_key=str(os.environ["TAVILY_API_KEY"]),
+)
 ```
 * EXA.AI: Advanced web search and data extraction
 ```python
-
+exa_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="exa.ai",
+    tool_spec_name="ExaToolSpec",
+    api_key=str(os.environ["EXA_API_KEY"]),
+)
 ```
 * Brave Search: Web search using Brave's search engine
 ```python
-
+brave_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="brave_search",
+    tool_spec_name="BraveSearchToolSpec",
+    api_key=str(os.environ["BRAVE_API_KEY"]),
+)
 ```
 
 * **Academic Tools**
 * arXiv: Search and retrieve academic papers
 ```python
-
+arxiv_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="arxiv",
+    tool_spec_name="ArxivToolSpec",
+)
 ```
 
-* **
+* **Database Tools**
 * Neo4j: Graph database integration
 ```python
-
+neo4j_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="neo4j",
+    tool_spec_name="Neo4jQueryToolSpec",
+)
 ```
 * Kuzu: Lightweight graph database
 ```python
-
+kuzu_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="kuzu",
+    tool_spec_name="KuzuGraphStore",
+)
+```
+* Waii: tools for natural langauge query of a relational database
+```python
+waii_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="waii",
+    tool_spec_name="WaiiToolSpec",
+)
 ```
 
 * **Google Tools**
 * Gmail: Read and send emails
 ```python
-
+gmail_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GmailToolSpec",
+)
 ```
 * Calendar: Manage calendar events
 ```python
-
+calendar_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GoogleCalendarToolSpec",
+)
 ```
 * Search: Google search integration
 ```python
-
+search_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GoogleSearchToolSpec",
+)
 ```
 
 * **Communication Tools**
 * Slack: Send messages and interact with Slack
 ```python
-
+slack_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="slack",
+    tool_spec_name="SlackToolSpec",
+)
 ```
 
 For detailed setup instructions and API key requirements, please refer the instructions on [LlamaIndex hub](https://llamahub.ai/?tab=tools) for the specific tool.
@@ -295,51 +295,92 @@ vectara-agentic includes various other tools from LlamaIndex ToolSpecs:
 * Tavily Search: Real-time web search using [Tavily API](https://tavily.com/)
 ```python
 from vectara_agentic.tools_catalog import ToolsCatalog
-
+tools_factory = ToolsFactory()
+tavily_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="tavily_research",
+    tool_spec_name="TavilyToolSpec",
+    api_key=str(os.environ["TAVILY_API_KEY"]),
+)
 ```
 * EXA.AI: Advanced web search and data extraction
 ```python
-
+exa_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="exa.ai",
+    tool_spec_name="ExaToolSpec",
+    api_key=str(os.environ["EXA_API_KEY"]),
+)
 ```
 * Brave Search: Web search using Brave's search engine
 ```python
-
+brave_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="brave_search",
+    tool_spec_name="BraveSearchToolSpec",
+    api_key=str(os.environ["BRAVE_API_KEY"]),
+)
 ```
 
 * **Academic Tools**
 * arXiv: Search and retrieve academic papers
 ```python
-
+arxiv_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="arxiv",
+    tool_spec_name="ArxivToolSpec",
+)
 ```
 
-* **
+* **Database Tools**
 * Neo4j: Graph database integration
 ```python
-
+neo4j_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="neo4j",
+    tool_spec_name="Neo4jQueryToolSpec",
+)
 ```
 * Kuzu: Lightweight graph database
 ```python
-
+kuzu_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="kuzu",
+    tool_spec_name="KuzuGraphStore",
+)
+```
+* Waii: tools for natural langauge query of a relational database
+```python
+waii_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="waii",
+    tool_spec_name="WaiiToolSpec",
+)
 ```
 
 * **Google Tools**
 * Gmail: Read and send emails
 ```python
-
+gmail_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GmailToolSpec",
+)
 ```
 * Calendar: Manage calendar events
 ```python
-
+calendar_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GoogleCalendarToolSpec",
+)
 ```
 * Search: Google search integration
 ```python
-
+search_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GoogleSearchToolSpec",
+)
 ```
 
 * **Communication Tools**
 * Slack: Send messages and interact with Slack
 ```python
-
+slack_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="slack",
+    tool_spec_name="SlackToolSpec",
+)
 ```
 
 For detailed setup instructions and API key requirements, please refer the instructions on [LlamaIndex hub](https://llamahub.ai/?tab=tools) for the specific tool.
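The README snippets added above (both in README.md and in the PKG-INFO copy) use `ToolsFactory` and `os` without showing their imports. A self-contained variant of the Tavily example, assuming `ToolsFactory` is imported from `vectara_agentic.tools` (as the new tests do) and that `TAVILY_API_KEY` is set in the environment, might look like:

```python
import os

from vectara_agentic.tools import ToolsFactory

# Assumes TAVILY_API_KEY is exported in the environment.
tools_factory = ToolsFactory()
tavily_tools = tools_factory.get_llama_index_tools(
    tool_package_name="tavily_research",
    tool_spec_name="TavilyToolSpec",
    api_key=str(os.environ["TAVILY_API_KEY"]),
)
```

The returned tools can then be passed to the `Agent` constructor alongside any other tools.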
@@ -1,4 +1,4 @@
-llama-index==0.12.
+llama-index==0.12.35
 llama-index-indices-managed-vectara==0.4.5
 llama-index-agent-llm-compiler==0.3.0
 llama-index-agent-lats==0.3.0
@@ -18,15 +18,17 @@ llama-index-tools-google==0.3.0
 llama-index-tools-tavily_research==0.3.0
 llama_index.tools.brave_search==0.3.0
 llama-index-tools-neo4j==0.3.0
+llama-index-tools-waii==0.3.0
 llama-index-graph-stores-kuzu==0.7.0
+llama-index-tools-salesforce==0.3.0
 llama-index-tools-slack==0.3.0
 llama-index-tools-exa==0.3.0
 llama-index-tools-wikipedia==0.3.0
 llama-index-tools-bing-search==0.3.0
-tavily-python==0.
-exa-py==1.12.
+tavily-python==0.7.2
+exa-py==1.12.1
 openinference-instrumentation-llama-index==4.2.1
-opentelemetry-proto
+opentelemetry-proto>=1.31.0
 arize-phoenix==8.26.1
 arize-phoenix-otel==0.9.2
 protobuf==5.29.3
@@ -0,0 +1,134 @@
+import unittest
+
+from pydantic import BaseModel
+
+from llama_index.core.workflow import WorkflowTimeoutError
+
+from vectara_agentic.agent import Agent
+from vectara_agentic.agent_config import AgentConfig
+from vectara_agentic.tools import ToolsFactory
+from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow, SequentialSubQuestionsWorkflow
+
+def mult(x: float, y: float):
+    """
+    Multiply two numbers.
+    """
+    return x * y
+
+def add(x: float, y: float):
+    """
+    Add two numbers.
+    """
+    return x + y
+
+class TestWorkflowPackage(unittest.IsolatedAsyncioTestCase):
+
+    async def test_sub_query_workflow(self):
+        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
+        topic = "AI topic"
+        instructions = "You are a helpful AI assistant."
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config = AgentConfig(),
+            workflow_cls = SubQuestionQueryWorkflow,
+        )
+
+        inputs = SubQuestionQueryWorkflow.InputsModel(
+            query="Compute 5 times 3, then add 7 to the result."
+        )
+        res = await agent.run(inputs=inputs)
+        self.assertIn("22", res.response)
+
+        inputs = SubQuestionQueryWorkflow.InputsModel(
+            query="what is the sum of 10 with 21, and the multiplication of 3 and 6?"
+        )
+        res = await agent.run(inputs=inputs)
+        self.assertIn("31", res.response)
+        self.assertIn("18", res.response)
+
+    async def test_seq_sub_query_workflow(self):
+        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
+        topic = "AI topic"
+        instructions = "You are a helpful AI assistant."
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config = AgentConfig(),
+            workflow_cls = SequentialSubQuestionsWorkflow,
+        )
+
+        inputs = SequentialSubQuestionsWorkflow.InputsModel(
+            query="Compute 5 times 3, then add 7 to the result."
+        )
+        res = await agent.run(inputs=inputs, verbose=True)
+        self.assertIn("22", res.response)
+
+class TestWorkflowFailure(unittest.IsolatedAsyncioTestCase):
+
+    async def test_workflow_failure(self):
+        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
+        topic = "AI topic"
+        instructions = "You are a helpful AI assistant."
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config = AgentConfig(),
+            workflow_cls = SubQuestionQueryWorkflow,
+            workflow_timeout = 1
+        )
+
+        inputs = SubQuestionQueryWorkflow.InputsModel(
+            query="Compute 5 times 3, then add 7 to the result."
+        )
+
+        res = None
+
+        try:
+            res = await agent.run(inputs=inputs)
+        except Exception as e:
+            self.assertIsInstance(e, WorkflowTimeoutError)
+
+        self.assertIsNone(res)
+
+    async def test_workflow_with_fail_class(self):
+        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
+        topic = "AI topic"
+        instructions = "You are a helpful AI assistant."
+
+        class SubQuestionQueryWorkflowWithFailClass(SubQuestionQueryWorkflow):
+            class OutputModelOnFail(BaseModel):
+                """
+                In case of failure, returns the user's original query
+                """
+                original_query: str
+
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config = AgentConfig(),
+            workflow_cls = SubQuestionQueryWorkflowWithFailClass,
+            workflow_timeout = 1
+        )
+
+        inputs = SubQuestionQueryWorkflow.InputsModel(
+            query="Compute 5 times 3, then add 7 to the result."
+        )
+
+        res = None
+
+        try:
+            res = await agent.run(inputs=inputs)
+        except Exception as e:
+            assert isinstance(e, WorkflowTimeoutError)
+
+        self.assertIsInstance(res, SubQuestionQueryWorkflowWithFailClass.OutputModelOnFail)
+        self.assertEqual(res.original_query, "Compute 5 times 3, then add 7 to the result.")
+
+
+if __name__ == "__main__":
+    unittest.main()
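The new tests/test_workflow.py shown above is self-contained unittest code (it ends with `unittest.main()`), so it should run with `python -m unittest tests.test_workflow`; note that it drives a real `Agent` through an LLM, so it presumably requires the same API credentials as the rest of the test suite, and the failure tests intentionally set `workflow_timeout = 1` to force the timeout path.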
@@ -35,7 +35,7 @@ from llama_index.core.callbacks.base_handler import BaseCallbackHandler
 from llama_index.agent.openai import OpenAIAgent
 from llama_index.core.agent.runner.base import AgentRunner
 from llama_index.core.agent.types import BaseAgent
-from llama_index.core.workflow import Workflow
+from llama_index.core.workflow import Workflow, Context
 
 from .types import (
     AgentType,
@@ -198,7 +198,7 @@ class Agent:
 
     def __init__(
         self,
-        tools:
+        tools: List[FunctionTool],
         topic: str = "general",
         custom_instructions: str = "",
         general_instructions: str = GENERAL_INSTRUCTIONS,
@@ -213,7 +213,7 @@ class Agent:
         fallback_agent_config: Optional[AgentConfig] = None,
         chat_history: Optional[list[Tuple[str, str]]] = None,
        validate_tools: bool = False,
-        workflow_cls: Workflow = None,
+        workflow_cls: Optional[Workflow] = None,
         workflow_timeout: int = 120,
     ) -> None:
         """
@@ -287,10 +287,10 @@ class Agent:
         A tool is invalid if it is mentioned in the instructions but not in the tools list.
         A tool's name must have at least two characters.
         Your response should be a comma-separated list of the invalid tools.
-        If
+        If no invalid tools exist, respond with "<OKAY>" (and nothing else).
         """
         llm = get_llm(LLMRole.MAIN, config=self.agent_config)
-        bad_tools_str = llm.complete(prompt).text
+        bad_tools_str = llm.complete(prompt).text.strip('\n')
         if bad_tools_str and bad_tools_str != "<OKAY>":
             bad_tools = [tool.strip() for tool in bad_tools_str.split(",")]
             numbered = ", ".join(
@@ -643,7 +643,7 @@ class Agent:
         validate_tools: bool = False,
         fallback_agent_config: Optional[AgentConfig] = None,
         chat_history: Optional[list[Tuple[str, str]]] = None,
-        workflow_cls: Workflow = None,
+        workflow_cls: Optional[Workflow] = None,
         workflow_timeout: int = 120,
     ) -> "Agent":
         """
@@ -712,8 +712,8 @@ class Agent:
         vectara_rerank_limit: Optional[int] = None,
         vectara_rerank_cutoff: Optional[float] = None,
         vectara_diversity_bias: float = 0.2,
-        vectara_udf_expression: str = None,
-        vectara_rerank_chain: List[Dict] = None,
+        vectara_udf_expression: Optional[str] = None,
+        vectara_rerank_chain: Optional[List[Dict]] = None,
         vectara_n_sentences_before: int = 2,
         vectara_n_sentences_after: int = 2,
         vectara_summary_num_results: int = 10,
@@ -1047,7 +1047,7 @@ class Agent:
                 time.sleep(1)
                 attempt += 1
 
-        return
+        return AgentStreamingResponse(
             response=(
                 f"For {orig_llm} LLM - failure can't be resolved after "
                 f"{max_attempts} attempts ({last_error})."
@@ -1059,7 +1059,11 @@ class Agent:
     # workflow will always get these arguments in the StartEvent: agent, tools, llm, verbose
     # the inputs argument comes from the call to run()
     #
-    async def run(
+    async def run(
+        self,
+        inputs: Any,
+        verbose: bool = False,
+    ) -> Any:
         """
         Run a workflow using the agent.
         workflow class must be provided in the agent constructor.
@@ -1067,7 +1071,7 @@ class Agent:
             inputs (Any): The inputs to the workflow.
             verbose (bool, optional): Whether to print verbose output. Defaults to False.
         Returns:
-            Any: The output of the workflow.
+            Any: The output or context of the workflow.
         """
         # Create workflow
         if self.workflow_cls:
@@ -1079,20 +1083,38 @@ class Agent:
         if not isinstance(inputs, self.workflow_cls.InputsModel):
             raise ValueError(f"Inputs must be an instance of {workflow.InputsModel}.")
 
-
-        result = await workflow.run(
-            agent=self,
-            tools=self.tools,
-            llm=self.llm,
-            verbose=verbose,
-            inputs=inputs,
-        )
-
-        # return output in the form of workflow.OutputsModel
+        workflow_context = Context(workflow=workflow)
         try:
-
-
-
+            # run workflow
+            result = await workflow.run(
+                ctx=workflow_context,
+                agent=self,
+                tools=self.tools,
+                llm=self.llm,
+                verbose=verbose,
+                inputs=inputs,
+            )
+
+            # return output in the form of workflow.OutputsModel(BaseModel)
+            try:
+                output = workflow.OutputsModel.model_validate(result)
+            except ValidationError as e:
+                raise ValueError(f"Failed to map workflow output to model: {e}") from e
+
+        except Exception as e:
+            outputs_model_on_fail_cls = getattr(workflow.__class__, "OutputModelOnFail", None)
+            if outputs_model_on_fail_cls:
+                model_fields = outputs_model_on_fail_cls.model_fields
+                input_dict = {
+                    key: await workflow_context.get(key, None)
+                    for key in model_fields
+                }
+
+                # return output in the form of workflow.OutputModelOnFail(BaseModel)
+                output = outputs_model_on_fail_cls.model_validate(input_dict)
+            else:
+                print(f"Vectara Agentic: Workflow failed with unexpected error: {e}")
+                raise type(e)(str(e)).with_traceback(e.__traceback__)
 
         return output
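The reworked `Agent.run()` above changes the failure contract for workflows: the run now executes with an explicit workflow `Context`, successful results are validated into `workflow.OutputsModel`, and if the run raises (for example with a `WorkflowTimeoutError`) and the workflow class defines an inner `OutputModelOnFail`, that model is populated from values stored in the context and returned instead of re-raising. A minimal sketch of how a caller might rely on this, based on the diff and the new test_workflow.py (the workflow subclass name, the toy `add` tool, and the one-second timeout are illustrative, not part of the package):

```python
import asyncio

from pydantic import BaseModel

from vectara_agentic.agent import Agent
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools import ToolsFactory
from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow


def add(x: float, y: float):
    """Add two numbers."""
    return x + y


class MyWorkflow(SubQuestionQueryWorkflow):
    class OutputModelOnFail(BaseModel):
        # Each field name is looked up in the workflow Context when a run fails.
        original_query: str


async def main():
    agent = Agent(
        tools=[ToolsFactory().create_tool(add)],
        topic="math",
        custom_instructions="You are a helpful AI assistant.",
        agent_config=AgentConfig(),
        workflow_cls=MyWorkflow,
        workflow_timeout=1,  # deliberately short, to exercise the fallback path
    )
    inputs = MyWorkflow.InputsModel(query="What is 10 plus 21?")
    result = await agent.run(inputs=inputs)
    # On success this is a MyWorkflow.OutputsModel; on timeout or failure it is a
    # MyWorkflow.OutputModelOnFail carrying the original query.
    print(result)


if __name__ == "__main__":
    asyncio.run(main())
```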
@@ -1117,12 +1139,12 @@ class Agent:
             fn_schema_serialized = {
                 "schema": (
                     fn_schema_cls.model_json_schema()
-                    if hasattr(fn_schema_cls, "model_json_schema")
+                    if fn_schema_cls and hasattr(fn_schema_cls, "model_json_schema")
                     else None
                 ),
                 "metadata": {
-                    "module": fn_schema_cls.__module__,
-                    "class": fn_schema_cls.__name__,
+                    "module": fn_schema_cls.__module__ if fn_schema_cls else None,
+                    "class": fn_schema_cls.__name__ if fn_schema_cls else None,
                 },
             }
         else:
@@ -1171,7 +1193,7 @@ class Agent:
             if data.get("fallback_agent_config")
             else None
         )
-        tools = []
+        tools: list[FunctionTool] = []
 
         for tool_data in data["tools"]:
             query_args_model = None
@@ -50,7 +50,7 @@ class SubQuestionQueryWorkflow(Workflow):
         answer: str
 
     @step
-    async def query(self, ctx: Context, ev: StartEvent) -> QueryEvent:
+    async def query(self, ctx: Context, ev: StartEvent) -> QueryEvent | None:
         """
         Given a user question, and a list of tools, output a list of relevant
         sub-questions, such that the answers to all the sub-questions put together
@@ -130,7 +130,10 @@ class SubQuestionQueryWorkflow(Workflow):
         if sub_questions is None:
             raise ValueError(f"Invalid LLM response format: {response_str}")
         if not sub_questions:
-
+            # If the LLM returns an empty list, we need to handle it gracefully
+            # We use the original query as a single question fallback
+            print("LLM returned empty sub-questions list")
+            sub_questions = [original_query]
 
         await ctx.set("sub_question_count", len(sub_questions))
         for question in sub_questions:
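With this change, an empty sub-question list from the LLM no longer falls through: the workflow logs a message and proceeds with the original query as the single sub-question.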
@@ -112,6 +112,7 @@ class VectaraTool(FunctionTool):
         tool_metadata: Optional[ToolMetadata] = None,
         callback: Optional[Callable[[Any], Any]] = None,
         async_callback: Optional[AsyncCallable] = None,
+        partial_params: Optional[Dict[str, Any]] = None,
         tool_type: ToolType = ToolType.QUERY,
     ) -> "VectaraTool":
         tool = FunctionTool.from_defaults(
@@ -124,6 +125,7 @@ class VectaraTool(FunctionTool):
             tool_metadata,
             callback,
             async_callback,
+            partial_params
         )
         vectara_tool = cls(
             tool_type=tool_type,
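The new `partial_params` argument defaults to `None` and is simply forwarded to `FunctionTool.from_defaults`, so `VectaraTool` now exposes whatever partial-parameter binding the underlying LlamaIndex factory supports; existing callers that do not pass it keep the previous behavior.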
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.16
+Version: 0.2.17
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,7 +16,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.12.
+Requires-Dist: llama-index==0.12.35
 Requires-Dist: llama-index-indices-managed-vectara==0.4.5
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
@@ -36,15 +36,17 @@ Requires-Dist: llama-index-tools-google==0.3.0
 Requires-Dist: llama-index-tools-tavily_research==0.3.0
 Requires-Dist: llama_index.tools.brave_search==0.3.0
 Requires-Dist: llama-index-tools-neo4j==0.3.0
+Requires-Dist: llama-index-tools-waii==0.3.0
 Requires-Dist: llama-index-graph-stores-kuzu==0.7.0
+Requires-Dist: llama-index-tools-salesforce==0.3.0
 Requires-Dist: llama-index-tools-slack==0.3.0
 Requires-Dist: llama-index-tools-exa==0.3.0
 Requires-Dist: llama-index-tools-wikipedia==0.3.0
 Requires-Dist: llama-index-tools-bing-search==0.3.0
-Requires-Dist: tavily-python==0.
-Requires-Dist: exa-py==1.12.
+Requires-Dist: tavily-python==0.7.2
+Requires-Dist: exa-py==1.12.1
 Requires-Dist: openinference-instrumentation-llama-index==4.2.1
-Requires-Dist: opentelemetry-proto
+Requires-Dist: opentelemetry-proto>=1.31.0
 Requires-Dist: arize-phoenix==8.26.1
 Requires-Dist: arize-phoenix-otel==0.9.2
 Requires-Dist: protobuf==5.29.3
@@ -365,51 +367,92 @@ vectara-agentic includes various other tools from LlamaIndex ToolSpecs:
 * Tavily Search: Real-time web search using [Tavily API](https://tavily.com/)
 ```python
 from vectara_agentic.tools_catalog import ToolsCatalog
-
+tools_factory = ToolsFactory()
+tavily_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="tavily_research",
+    tool_spec_name="TavilyToolSpec",
+    api_key=str(os.environ["TAVILY_API_KEY"]),
+)
 ```
 * EXA.AI: Advanced web search and data extraction
 ```python
-
+exa_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="exa.ai",
+    tool_spec_name="ExaToolSpec",
+    api_key=str(os.environ["EXA_API_KEY"]),
+)
 ```
 * Brave Search: Web search using Brave's search engine
 ```python
-
+brave_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="brave_search",
+    tool_spec_name="BraveSearchToolSpec",
+    api_key=str(os.environ["BRAVE_API_KEY"]),
+)
 ```
 
 * **Academic Tools**
 * arXiv: Search and retrieve academic papers
 ```python
-
+arxiv_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="arxiv",
+    tool_spec_name="ArxivToolSpec",
+)
 ```
 
-* **
+* **Database Tools**
 * Neo4j: Graph database integration
 ```python
-
+neo4j_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="neo4j",
+    tool_spec_name="Neo4jQueryToolSpec",
+)
 ```
 * Kuzu: Lightweight graph database
 ```python
-
+kuzu_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="kuzu",
+    tool_spec_name="KuzuGraphStore",
+)
+```
+* Waii: tools for natural langauge query of a relational database
+```python
+waii_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="waii",
+    tool_spec_name="WaiiToolSpec",
+)
 ```
 
 * **Google Tools**
 * Gmail: Read and send emails
 ```python
-
+gmail_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GmailToolSpec",
+)
 ```
 * Calendar: Manage calendar events
 ```python
-
+calendar_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GoogleCalendarToolSpec",
+)
 ```
 * Search: Google search integration
 ```python
-
+search_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="google",
+    tool_spec_name="GoogleSearchToolSpec",
+)
 ```
 
 * **Communication Tools**
 * Slack: Send messages and interact with Slack
 ```python
-
+slack_tools = tools_factory.get_llama_index_tools(
+    tool_package_name="slack",
+    tool_spec_name="SlackToolSpec",
+)
 ```
 
 For detailed setup instructions and API key requirements, please refer the instructions on [LlamaIndex hub](https://llamahub.ai/?tab=tools) for the specific tool.
@@ -1,4 +1,4 @@
-llama-index==0.12.
+llama-index==0.12.35
 llama-index-indices-managed-vectara==0.4.5
 llama-index-agent-llm-compiler==0.3.0
 llama-index-agent-lats==0.3.0
@@ -18,15 +18,17 @@ llama-index-tools-google==0.3.0
 llama-index-tools-tavily_research==0.3.0
 llama_index.tools.brave_search==0.3.0
 llama-index-tools-neo4j==0.3.0
+llama-index-tools-waii==0.3.0
 llama-index-graph-stores-kuzu==0.7.0
+llama-index-tools-salesforce==0.3.0
 llama-index-tools-slack==0.3.0
 llama-index-tools-exa==0.3.0
 llama-index-tools-wikipedia==0.3.0
 llama-index-tools-bing-search==0.3.0
-tavily-python==0.
-exa-py==1.12.
+tavily-python==0.7.2
+exa-py==1.12.1
 openinference-instrumentation-llama-index==4.2.1
-opentelemetry-proto
+opentelemetry-proto>=1.31.0
 arize-phoenix==8.26.1
 arize-phoenix-otel==0.9.2
 protobuf==5.29.3
@@ -1,67 +0,0 @@
-import unittest
-
-from vectara_agentic.agent import Agent
-from vectara_agentic.agent_config import AgentConfig
-from vectara_agentic.tools import ToolsFactory
-from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow, SequentialSubQuestionsWorkflow
-
-def mult(x: float, y: float):
-    """
-    Multiply two numbers.
-    """
-    return x * y
-
-def add(x: float, y: float):
-    """
-    Add two numbers.
-    """
-    return x + y
-
-class TestWorkflowPackage(unittest.IsolatedAsyncioTestCase):
-
-    async def test_sub_query_workflow(self):
-        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
-        topic = "AI topic"
-        instructions = "You are a helpful AI assistant."
-        agent = Agent(
-            tools=tools,
-            topic=topic,
-            custom_instructions=instructions,
-            agent_config = AgentConfig(),
-            workflow_cls = SubQuestionQueryWorkflow,
-        )
-
-        inputs = SubQuestionQueryWorkflow.InputsModel(
-            query="Compute 5 times 3, then add 7 to the result."
-        )
-        res = await agent.run(inputs=inputs)
-        self.assertIn("22", res.response)
-
-        inputs = SubQuestionQueryWorkflow.InputsModel(
-            query="what is the sum of 10 with 21, and the multiplication of 3 and 6?"
-        )
-        res = await agent.run(inputs=inputs)
-        self.assertIn("31", res.response)
-        self.assertIn("18", res.response)
-
-    async def test_seq_sub_query_workflow(self):
-        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
-        topic = "AI topic"
-        instructions = "You are a helpful AI assistant."
-        agent = Agent(
-            tools=tools,
-            topic=topic,
-            custom_instructions=instructions,
-            agent_config = AgentConfig(),
-            workflow_cls = SequentialSubQuestionsWorkflow,
-        )
-
-        inputs = SequentialSubQuestionsWorkflow.InputsModel(
-            query="Compute 5 times 3, then add 7 to the result."
-        )
-        res = await agent.run(inputs=inputs, verbose=True)
-        self.assertIn("22", res.response)
-
-
-if __name__ == "__main__":
-    unittest.main()

All other files are unchanged between 0.2.16 and 0.2.17.