vectara-agentic 0.4.6__tar.gz → 0.4.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vectara-agentic might be problematic; see the package registry's advisory page for more details.

Files changed (72)
  1. {vectara_agentic-0.4.6/vectara_agentic.egg-info → vectara_agentic-0.4.8}/PKG-INFO +32 -31
  2. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/README.md +3 -3
  3. vectara_agentic-0.4.8/requirements.txt +45 -0
  4. vectara_agentic-0.4.8/tests/test_bedrock.py +170 -0
  5. vectara_agentic-0.4.8/tests/test_gemini.py +121 -0
  6. vectara_agentic-0.4.8/tests/test_groq.py +288 -0
  7. vectara_agentic-0.4.8/tests/test_openai.py +261 -0
  8. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_tools.py +161 -0
  9. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/_version.py +1 -1
  10. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent.py +1 -1
  11. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/prompts.py +12 -11
  12. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/streaming.py +178 -195
  13. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/llm_utils.py +1 -1
  14. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/sub_query_workflow.py +31 -31
  15. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/tools.py +108 -4
  16. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8/vectara_agentic.egg-info}/PKG-INFO +32 -31
  17. vectara_agentic-0.4.8/vectara_agentic.egg-info/requires.txt +45 -0
  18. vectara_agentic-0.4.6/requirements.txt +0 -44
  19. vectara_agentic-0.4.6/tests/test_bedrock.py +0 -69
  20. vectara_agentic-0.4.6/tests/test_gemini.py +0 -57
  21. vectara_agentic-0.4.6/tests/test_groq.py +0 -103
  22. vectara_agentic-0.4.6/tests/test_openai.py +0 -160
  23. vectara_agentic-0.4.6/vectara_agentic.egg-info/requires.txt +0 -44
  24. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/LICENSE +0 -0
  25. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/MANIFEST.in +0 -0
  26. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/setup.cfg +0 -0
  27. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/setup.py +0 -0
  28. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/__init__.py +0 -0
  29. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/benchmark_models.py +0 -0
  30. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/conftest.py +0 -0
  31. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/endpoint.py +0 -0
  32. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/run_tests.py +0 -0
  33. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_agent.py +0 -0
  34. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_agent_fallback_memory.py +0 -0
  35. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_agent_memory_consistency.py +0 -0
  36. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_agent_type.py +0 -0
  37. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_api_endpoint.py +0 -0
  38. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_fallback.py +0 -0
  39. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_private_llm.py +0 -0
  40. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_react_error_handling.py +0 -0
  41. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_react_memory.py +0 -0
  42. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_react_streaming.py +0 -0
  43. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_react_workflow_events.py +0 -0
  44. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_return_direct.py +0 -0
  45. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_serialization.py +0 -0
  46. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_session_memory.py +0 -0
  47. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_streaming.py +0 -0
  48. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_together.py +0 -0
  49. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_vectara_llms.py +0 -0
  50. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_vhc.py +0 -0
  51. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/tests/test_workflow.py +0 -0
  52. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/__init__.py +0 -0
  53. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/_callback.py +0 -0
  54. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/_observability.py +0 -0
  55. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_config.py +0 -0
  56. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/__init__.py +0 -0
  57. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/factory.py +0 -0
  58. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/serialization.py +0 -0
  59. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/utils/__init__.py +0 -0
  60. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/utils/hallucination.py +0 -0
  61. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/utils/logging.py +0 -0
  62. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/utils/schemas.py +0 -0
  63. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_core/utils/tools.py +0 -0
  64. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/agent_endpoint.py +0 -0
  65. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/db_tools.py +0 -0
  66. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/tool_utils.py +0 -0
  67. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/tools_catalog.py +0 -0
  68. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/types.py +0 -0
  69. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic/utils.py +0 -0
  70. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic.egg-info/SOURCES.txt +0 -0
  71. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  72. {vectara_agentic-0.4.6 → vectara_agentic-0.4.8}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: vectara_agentic
3
- Version: 0.4.6
3
+ Version: 0.4.8
4
4
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
5
5
  Home-page: https://github.com/vectara/py-vectara-agentic
6
6
  Author: Ofer Mendelevitch
@@ -16,34 +16,35 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
16
  Requires-Python: >=3.10
17
17
  Description-Content-Type: text/markdown
18
18
  License-File: LICENSE
19
- Requires-Dist: llama-index==0.13.3
20
- Requires-Dist: llama-index-core==0.13.3
21
- Requires-Dist: llama-index-workflows==1.3.0
22
- Requires-Dist: llama-index-cli==0.5.0
23
- Requires-Dist: llama-index-indices-managed-vectara==0.5.0
24
- Requires-Dist: llama-index-llms-openai==0.5.4
25
- Requires-Dist: llama-index-llms-openai-like==0.5.0
26
- Requires-Dist: llama-index-llms-anthropic==0.8.5
27
- Requires-Dist: llama-index-llms-together==0.4.0
28
- Requires-Dist: llama-index-llms-groq==0.4.0
29
- Requires-Dist: llama-index-llms-cohere==0.6.0
30
- Requires-Dist: llama-index-llms-google-genai==0.3.0
19
+ Requires-Dist: llama-index==0.14.2
20
+ Requires-Dist: llama-index-core==0.14.2
21
+ Requires-Dist: llama-index-workflows==2.2.2
22
+ Requires-Dist: llama-index-cli==0.5.1
23
+ Requires-Dist: llama-index-indices-managed-vectara==0.5.1
24
+ Requires-Dist: llama-index-llms-openai==0.5.6
25
+ Requires-Dist: llama-index-llms-openai-like==0.5.1
26
+ Requires-Dist: llama-index-llms-anthropic==0.8.6
27
+ Requires-Dist: llama-index-llms-together==0.4.1
28
+ Requires-Dist: llama-index-llms-groq==0.4.1
29
+ Requires-Dist: llama-index-llms-cohere==0.6.1
30
+ Requires-Dist: llama-index-llms-google-genai==0.5.0
31
+ Requires-Dist: llama-index-llms-baseten==0.1.4
31
32
  Requires-Dist: google_genai>=1.31.0
32
- Requires-Dist: llama-index-llms-bedrock-converse==0.8.2
33
- Requires-Dist: llama-index-tools-yahoo-finance==0.4.0
34
- Requires-Dist: llama-index-tools-arxiv==0.4.0
35
- Requires-Dist: llama-index-tools-database==0.4.0
36
- Requires-Dist: llama-index-tools-google==0.6.0
37
- Requires-Dist: llama-index-tools-tavily_research==0.4.0
38
- Requires-Dist: llama_index.tools.brave_search==0.4.0
39
- Requires-Dist: llama-index-tools-neo4j==0.4.0
40
- Requires-Dist: llama-index-tools-waii==0.4.0
41
- Requires-Dist: llama-index-graph-stores-kuzu==0.9.0
42
- Requires-Dist: llama-index-tools-salesforce==0.4.0
43
- Requires-Dist: llama-index-tools-slack==0.4.0
44
- Requires-Dist: llama-index-tools-exa==0.4.0
45
- Requires-Dist: llama-index-tools-wikipedia==0.4.0
46
- Requires-Dist: llama-index-tools-bing-search==0.4.0
33
+ Requires-Dist: llama-index-llms-bedrock-converse==0.9.2
34
+ Requires-Dist: llama-index-tools-yahoo-finance==0.4.1
35
+ Requires-Dist: llama-index-tools-arxiv==0.4.1
36
+ Requires-Dist: llama-index-tools-database==0.4.1
37
+ Requires-Dist: llama-index-tools-google==0.6.2
38
+ Requires-Dist: llama-index-tools-tavily_research==0.4.1
39
+ Requires-Dist: llama_index.tools.brave_search==0.4.1
40
+ Requires-Dist: llama-index-tools-neo4j==0.4.1
41
+ Requires-Dist: llama-index-tools-waii==0.4.1
42
+ Requires-Dist: llama-index-graph-stores-kuzu==0.9.1
43
+ Requires-Dist: llama-index-tools-salesforce==0.4.1
44
+ Requires-Dist: llama-index-tools-slack==0.4.1
45
+ Requires-Dist: llama-index-tools-exa==0.4.1
46
+ Requires-Dist: llama-index-tools-wikipedia==0.4.1
47
+ Requires-Dist: llama-index-tools-bing-search==0.4.1
47
48
  Requires-Dist: openai>=1.99.3
48
49
  Requires-Dist: tavily-python>=0.7.10
49
50
  Requires-Dist: exa-py>=1.14.20
@@ -736,13 +737,13 @@ If you want to use `agent`, `tools`, `llm` or `verbose` in other events (that ar
736
737
  the `Context` of the Workflow as follows:
737
738
 
738
739
  ```python
739
- await ctx.set("agent", ev.agent)
740
+ await ctx.store.set("agent", ev.agent)
740
741
  ```
741
742
 
742
743
  and then in any other event you can pull that agent object with
743
744
 
744
745
  ```python
745
- agent = await ctx.get("agent")
746
+ agent = await ctx.store.get("agent")
746
747
  ```
747
748
 
748
749
  Similarly you can reuse the `llm`, `tools` or `verbose` arguments within other nodes in the workflow.
@@ -886,7 +887,7 @@ The `AgentConfig` object may include the following items:
886
887
  - `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` (default: `OPENAI`).
887
888
 
888
889
  > **Note:** Fireworks AI support has been removed. If you were using Fireworks, please migrate to one of the supported providers listed above.
889
- - `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1-mini, Gemini uses gemini-2.5-flash-lite).
890
+ - `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1-mini, Anthropic uses claude-sonnet-4-0, Gemini uses models/gemini-2.5-flash, Together.AI uses deepseek-ai/DeepSeek-V3, GROQ uses openai/gpt-oss-20b, Bedrock uses us.anthropic.claude-sonnet-4-20250514-v1:0, Cohere uses command-a-03-2025).
890
891
  - `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
891
892
  - `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
892
893
 
@@ -661,13 +661,13 @@ If you want to use `agent`, `tools`, `llm` or `verbose` in other events (that ar
661
661
  the `Context` of the Workflow as follows:
662
662
 
663
663
  ```python
664
- await ctx.set("agent", ev.agent)
664
+ await ctx.store.set("agent", ev.agent)
665
665
  ```
666
666
 
667
667
  and then in any other event you can pull that agent object with
668
668
 
669
669
  ```python
670
- agent = await ctx.get("agent")
670
+ agent = await ctx.store.get("agent")
671
671
  ```
672
672
 
673
673
  Similarly you can reuse the `llm`, `tools` or `verbose` arguments within other nodes in the workflow.
@@ -811,7 +811,7 @@ The `AgentConfig` object may include the following items:
811
811
  - `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` (default: `OPENAI`).
812
812
 
813
813
  > **Note:** Fireworks AI support has been removed. If you were using Fireworks, please migrate to one of the supported providers listed above.
814
- - `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1-mini, Gemini uses gemini-2.5-flash-lite).
814
+ - `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider: OpenAI uses gpt-4.1-mini, Anthropic uses claude-sonnet-4-0, Gemini uses models/gemini-2.5-flash, Together.AI uses deepseek-ai/DeepSeek-V3, GROQ uses openai/gpt-oss-20b, Bedrock uses us.anthropic.claude-sonnet-4-20250514-v1:0, Cohere uses command-a-03-2025).
815
815
  - `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
816
816
  - `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
817
817
 
@@ -0,0 +1,45 @@
1
+ llama-index==0.14.2
2
+ llama-index-core==0.14.2
3
+ llama-index-workflows==2.2.2
4
+ llama-index-cli==0.5.1
5
+ llama-index-indices-managed-vectara==0.5.1
6
+ llama-index-llms-openai==0.5.6
7
+ llama-index-llms-openai-like==0.5.1
8
+ llama-index-llms-anthropic==0.8.6
9
+ llama-index-llms-together==0.4.1
10
+ llama-index-llms-groq==0.4.1
11
+ llama-index-llms-cohere==0.6.1
12
+ llama-index-llms-google-genai==0.5.0
13
+ llama-index-llms-baseten==0.1.4
14
+ google_genai>=1.31.0
15
+ llama-index-llms-bedrock-converse==0.9.2
16
+ llama-index-tools-yahoo-finance==0.4.1
17
+ llama-index-tools-arxiv==0.4.1
18
+ llama-index-tools-database==0.4.1
19
+ llama-index-tools-google==0.6.2
20
+ llama-index-tools-tavily_research==0.4.1
21
+ llama_index.tools.brave_search==0.4.1
22
+ llama-index-tools-neo4j==0.4.1
23
+ llama-index-tools-waii==0.4.1
24
+ llama-index-graph-stores-kuzu==0.9.1
25
+ llama-index-tools-salesforce==0.4.1
26
+ llama-index-tools-slack==0.4.1
27
+ llama-index-tools-exa==0.4.1
28
+ llama-index-tools-wikipedia==0.4.1
29
+ llama-index-tools-bing-search==0.4.1
30
+ openai>=1.99.3
31
+ tavily-python>=0.7.10
32
+ exa-py>=1.14.20
33
+ openinference-instrumentation-llama-index==4.3.4
34
+ opentelemetry-proto>=1.31.0
35
+ arize-phoenix==10.9.1
36
+ arize-phoenix-otel==0.10.3
37
+ protobuf==5.29.5
38
+ tokenizers>=0.20
39
+ pydantic>=2.11.5
40
+ pandas==2.2.3
41
+ retrying==1.4.2
42
+ python-dotenv==1.0.1
43
+ cloudpickle>=3.1.1
44
+ httpx==0.28.1
45
+ commonmark==0.9.1
@@ -0,0 +1,170 @@
1
+ # Suppress external dependency warnings before any other imports
2
+ import warnings
3
+
4
+ warnings.simplefilter("ignore", DeprecationWarning)
5
+
6
+ import unittest
7
+ import threading
8
+
9
+ from vectara_agentic.agent import Agent
10
+ from vectara_agentic.tools import ToolsFactory
11
+ from vectara_agentic.tools_catalog import ToolsCatalog
12
+
13
+ import nest_asyncio
14
+
15
+ nest_asyncio.apply()
16
+
17
+ from conftest import (
18
+ mult,
19
+ add,
20
+ fc_config_bedrock,
21
+ STANDARD_TEST_TOPIC,
22
+ STANDARD_TEST_INSTRUCTIONS,
23
+ )
24
+
25
+ ARIZE_LOCK = threading.Lock()
26
+
27
+
28
+ class TestBedrock(unittest.IsolatedAsyncioTestCase):
29
+
30
+ async def test_multiturn(self):
31
+ with ARIZE_LOCK:
32
+ tools = [ToolsFactory().create_tool(mult)]
33
+ agent = Agent(
34
+ tools=tools,
35
+ topic=STANDARD_TEST_TOPIC,
36
+ custom_instructions=STANDARD_TEST_INSTRUCTIONS,
37
+ agent_config=fc_config_bedrock,
38
+ )
39
+
40
+ # First calculation: 5 * 10 = 50
41
+ stream1 = await agent.astream_chat(
42
+ "What is 5 times 10. Only give the answer, nothing else"
43
+ )
44
+ # Consume the stream
45
+ async for chunk in stream1.async_response_gen():
46
+ pass
47
+ _ = await stream1.aget_response()
48
+
49
+ # Second calculation: 3 * 7 = 21
50
+ stream2 = await agent.astream_chat(
51
+ "what is 3 times 7. Only give the answer, nothing else"
52
+ )
53
+ # Consume the stream
54
+ async for chunk in stream2.async_response_gen():
55
+ pass
56
+ _ = await stream2.aget_response()
57
+
58
+ # Final calculation: 50 * 21 = 1050
59
+ stream3 = await agent.astream_chat(
60
+ "multiply the results of the last two questions. Output only the answer."
61
+ )
62
+ # Consume the stream
63
+ async for chunk in stream3.async_response_gen():
64
+ pass
65
+ response3 = await stream3.aget_response()
66
+
67
+ self.assertEqual(response3.response, "1050")
68
+
69
+ async def test_claude_sonnet_4_multi_tool_chain(self):
70
+ """Test Claude Sonnet 4 with complex multi-step reasoning chain using multiple tools via Bedrock."""
71
+ with ARIZE_LOCK:
72
+ # Use Bedrock config (Claude Sonnet 4)
73
+ tools_catalog = ToolsCatalog(fc_config_bedrock)
74
+ tools = [
75
+ ToolsFactory().create_tool(mult),
76
+ ToolsFactory().create_tool(add),
77
+ ToolsFactory().create_tool(tools_catalog.summarize_text),
78
+ ToolsFactory().create_tool(tools_catalog.rephrase_text),
79
+ ]
80
+
81
+ agent = Agent(
82
+ agent_config=fc_config_bedrock,
83
+ tools=tools,
84
+ topic=STANDARD_TEST_TOPIC,
85
+ custom_instructions="You are a mathematical reasoning agent that explains your work step by step.",
86
+ )
87
+
88
+ # Complex multi-step reasoning task
89
+ complex_query = (
90
+ "Perform this calculation step by step: "
91
+ "First multiply 5 by 9, then add 13 to that result, "
92
+ "then multiply the new result by 2. "
93
+ "After getting the final number, summarize the entire mathematical process "
94
+ "with expertise in 'mathematics education', "
95
+ "then rephrase that summary as a 10-year-old would explain it."
96
+ )
97
+
98
+ print("\n🔍 Starting Claude Sonnet 4 multi-tool chain test (Bedrock)")
99
+ print(f"📝 Query: {complex_query}")
100
+ print("🌊 Streaming response:\n" + "="*50)
101
+
102
+ stream = await agent.astream_chat(complex_query)
103
+
104
+ # Capture streaming deltas and tool calls
105
+ streaming_deltas = []
106
+ tool_calls_made = []
107
+ full_response = ""
108
+
109
+ async for chunk in stream.async_response_gen():
110
+ if chunk and chunk.strip():
111
+ streaming_deltas.append(chunk)
112
+ full_response += chunk
113
+ # Display each streaming delta
114
+ print(f"📡 Delta: {repr(chunk)}")
115
+
116
+ # Track tool calls in the stream
117
+ if "mult" in chunk.lower():
118
+ if "mult" not in [call["tool"] for call in tool_calls_made]:
119
+ tool_calls_made.append({"tool": "mult", "order": len(tool_calls_made) + 1})
120
+ print(f"🔧 Tool call detected: mult (#{len(tool_calls_made)})")
121
+ if "add" in chunk.lower():
122
+ if "add" not in [call["tool"] for call in tool_calls_made]:
123
+ tool_calls_made.append({"tool": "add", "order": len(tool_calls_made) + 1})
124
+ print(f"🔧 Tool call detected: add (#{len(tool_calls_made)})")
125
+ if "summarize" in chunk.lower():
126
+ if "summarize_text" not in [call["tool"] for call in tool_calls_made]:
127
+ tool_calls_made.append({"tool": "summarize_text", "order": len(tool_calls_made) + 1})
128
+ print(f"🔧 Tool call detected: summarize_text (#{len(tool_calls_made)})")
129
+ if "rephrase" in chunk.lower():
130
+ if "rephrase_text" not in [call["tool"] for call in tool_calls_made]:
131
+ tool_calls_made.append({"tool": "rephrase_text", "order": len(tool_calls_made) + 1})
132
+ print(f"🔧 Tool call detected: rephrase_text (#{len(tool_calls_made)})")
133
+
134
+ response = await stream.aget_response()
135
+
136
+ print("="*50)
137
+ print(f"✅ Streaming completed. Total deltas: {len(streaming_deltas)}")
138
+ print(f"🔧 Tool calls made: {[call['tool'] for call in tool_calls_made]}")
139
+ print(f"📄 Final response length: {len(response.response)} chars")
140
+ print(f"🎯 Final response: {response.response}")
141
+
142
+ # Validate tool usage sequence
143
+ tools_used = [call["tool"] for call in tool_calls_made]
144
+ print(f"🧪 Tools used in order: {tools_used}")
145
+
146
+ # Check that at least multiplication happened (basic requirement)
147
+ self.assertIn("mult", tools_used, f"Expected multiplication tool to be used. Tools used: {tools_used}")
148
+
149
+ # Check for mathematical results in the full response or streaming deltas
150
+ # Expected: 5*9=45, 45+13=58, 58*2=116
151
+ expected_intermediate_results = ["45", "58", "116"]
152
+ all_text = (full_response + " " + response.response).lower()
153
+ math_results_found = sum(1 for result in expected_intermediate_results
154
+ if result in all_text)
155
+
156
+ print(f"🔢 Mathematical results found: {math_results_found}/3 expected")
157
+ print(f"🔍 Full text searched: {all_text[:200]}...")
158
+
159
+ # More lenient assertion - just check that some mathematical progress was made
160
+ self.assertGreaterEqual(math_results_found, 1,
161
+ f"Expected at least 1 mathematical result. Found {math_results_found}. "
162
+ f"Full text: {all_text}")
163
+
164
+ # Verify that streaming actually produced content
165
+ self.assertGreater(len(streaming_deltas), 0, "Expected streaming deltas to be produced")
166
+ self.assertGreater(len(response.response.strip()), 0, "Expected non-empty final response")
167
+
168
+
169
+ if __name__ == "__main__":
170
+ unittest.main()
@@ -0,0 +1,121 @@
1
+ # Suppress external dependency warnings before any other imports
2
+ import warnings
3
+
4
+ warnings.simplefilter("ignore", DeprecationWarning)
5
+
6
+ import unittest
7
+
8
+ from vectara_agentic.agent import Agent
9
+ from vectara_agentic.tools import ToolsFactory
10
+ from vectara_agentic.tools_catalog import ToolsCatalog
11
+
12
+
13
+ import nest_asyncio
14
+
15
+ nest_asyncio.apply()
16
+
17
+ from conftest import (
18
+ mult,
19
+ add,
20
+ fc_config_gemini,
21
+ STANDARD_TEST_TOPIC,
22
+ STANDARD_TEST_INSTRUCTIONS,
23
+ )
24
+
25
+
26
+ class TestGEMINI(unittest.TestCase):
27
+ def test_gemini(self):
28
+ tools = [ToolsFactory().create_tool(mult)]
29
+
30
+ agent = Agent(
31
+ agent_config=fc_config_gemini,
32
+ tools=tools,
33
+ topic=STANDARD_TEST_TOPIC,
34
+ custom_instructions=STANDARD_TEST_INSTRUCTIONS,
35
+ )
36
+ _ = agent.chat("What is 5 times 10. Only give the answer, nothing else")
37
+ _ = agent.chat("what is 3 times 7. Only give the answer, nothing else")
38
+ res = agent.chat(
39
+ "what is the result of multiplying the results of the last two multiplications. Only give the answer, nothing else."
40
+ )
41
+ self.assertIn("1050", res.response)
42
+
43
+ def test_gemini_single_prompt(self):
44
+ tools = [ToolsFactory().create_tool(mult)]
45
+
46
+ agent = Agent(
47
+ agent_config=fc_config_gemini,
48
+ tools=tools,
49
+ topic=STANDARD_TEST_TOPIC,
50
+ custom_instructions=STANDARD_TEST_INSTRUCTIONS,
51
+ )
52
+ res = agent.chat(
53
+ "First, multiply 5 by 10. Then, multiply 3 by 7. Finally, multiply the results of the first two calculations."
54
+ )
55
+ self.assertIn("1050", res.response)
56
+
57
+ def test_gemini_25_flash_multi_tool_chain(self):
58
+ """Test Gemini 2.5 Flash with complex multi-step reasoning chain using multiple tools."""
59
+ # Use Gemini config (Gemini 2.5 Flash)
60
+ tools_catalog = ToolsCatalog(fc_config_gemini)
61
+ tools = [
62
+ ToolsFactory().create_tool(mult),
63
+ ToolsFactory().create_tool(add),
64
+ ToolsFactory().create_tool(tools_catalog.summarize_text),
65
+ ToolsFactory().create_tool(tools_catalog.rephrase_text),
66
+ ]
67
+
68
+ agent = Agent(
69
+ agent_config=fc_config_gemini,
70
+ tools=tools,
71
+ topic=STANDARD_TEST_TOPIC,
72
+ custom_instructions="You are a mathematical reasoning agent that explains your work step by step.",
73
+ )
74
+
75
+ # Complex multi-step reasoning task
76
+ complex_query = (
77
+ "Perform this calculation step by step: "
78
+ "First multiply 3 by 8, then add 14 to that result, "
79
+ "then multiply the new result by 3. "
80
+ "After getting the final number, summarize the entire mathematical process "
81
+ "with expertise in 'mathematics education', "
82
+ "then rephrase that summary as a 10-year-old would explain it."
83
+ )
84
+
85
+ print("\n🔍 Starting Gemini 2.5 Flash multi-tool chain test")
86
+ print(f"📝 Query: {complex_query}")
87
+
88
+ # Note: Gemini tests use synchronous chat, not async streaming
89
+ response = agent.chat(complex_query)
90
+
91
+ print(f"🎯 Final response: {response.response}")
92
+ print(f"📄 Final response length: {len(response.response)} chars")
93
+
94
+ # Check for mathematical results in the response
95
+ # Expected: 3*8=24, 24+14=38, 38*3=114
96
+ expected_intermediate_results = ["24", "38", "114"]
97
+ response_text = response.response.lower()
98
+ math_results_found = sum(1 for result in expected_intermediate_results
99
+ if result in response_text)
100
+
101
+ print(f"🔢 Mathematical results found: {math_results_found}/3 expected")
102
+ print(f"🔍 Response text searched: {response_text[:200]}...")
103
+
104
+ # More lenient assertion - just check that some mathematical progress was made
105
+ self.assertGreaterEqual(math_results_found, 1,
106
+ f"Expected at least 1 mathematical result. Found {math_results_found}. "
107
+ f"Response: {response.response}")
108
+
109
+ # Verify response has content and mentions math concepts
110
+ self.assertGreater(len(response.response.strip()), 50, "Expected substantial response content")
111
+
112
+ # Check for indications of multi-tool usage (math, summary, or explanation content)
113
+ multi_tool_indicators = ["calculate", "multiply", "add", "summary", "explain", "mathematical", "process"]
114
+ indicators_found = sum(1 for indicator in multi_tool_indicators
115
+ if indicator in response_text)
116
+ self.assertGreaterEqual(indicators_found, 3,
117
+ f"Expected multiple tool usage indicators. Found {indicators_found}: {response.response}")
118
+
119
+
120
+ if __name__ == "__main__":
121
+ unittest.main()