vectara-agentic 0.2.18__tar.gz → 0.2.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (44)
  1. {vectara_agentic-0.2.18/vectara_agentic.egg-info → vectara_agentic-0.2.20}/PKG-INFO +16 -10
  2. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/README.md +5 -1
  3. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/requirements.txt +10 -8
  4. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_agent.py +1 -0
  5. vectara_agentic-0.2.20/tests/test_bedrock.py +42 -0
  6. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/_version.py +1 -1
  7. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/llm_utils.py +15 -4
  8. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20/vectara_agentic.egg-info}/PKG-INFO +16 -10
  9. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic.egg-info/SOURCES.txt +1 -0
  10. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic.egg-info/requires.txt +10 -8
  11. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/LICENSE +0 -0
  12. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/MANIFEST.in +0 -0
  13. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/setup.cfg +0 -0
  14. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/setup.py +0 -0
  15. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/__init__.py +0 -0
  16. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/endpoint.py +0 -0
  17. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_agent_planning.py +0 -0
  18. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_agent_type.py +0 -0
  19. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_api_endpoint.py +0 -0
  20. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_fallback.py +0 -0
  21. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_gemini.py +0 -0
  22. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_groq.py +0 -0
  23. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_private_llm.py +0 -0
  24. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_return_direct.py +0 -0
  25. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_serialization.py +0 -0
  26. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_tools.py +0 -0
  27. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_vectara_llms.py +0 -0
  28. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/tests/test_workflow.py +0 -0
  29. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/__init__.py +0 -0
  30. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/_callback.py +0 -0
  31. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/_observability.py +0 -0
  32. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/_prompts.py +0 -0
  33. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/agent.py +0 -0
  34. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/agent_config.py +0 -0
  35. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/agent_endpoint.py +0 -0
  36. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/db_tools.py +0 -0
  37. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/sub_query_workflow.py +0 -0
  38. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/tool_utils.py +0 -0
  39. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/tools.py +0 -0
  40. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/tools_catalog.py +0 -0
  41. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/types.py +0 -0
  42. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic/utils.py +0 -0
  43. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  44. {vectara_agentic-0.2.18 → vectara_agentic-0.2.20}/vectara_agentic.egg-info/top_level.txt +0 -0
--- vectara_agentic-0.2.18/vectara_agentic.egg-info/PKG-INFO
+++ vectara_agentic-0.2.20/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: vectara_agentic
- Version: 0.2.18
+ Version: 0.2.20
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
  Home-page: https://github.com/vectara/py-vectara-agentic
  Author: Ofer Mendelevitch
@@ -16,20 +16,21 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: llama-index==0.12.35
+ Requires-Dist: llama-index==0.12.37
+ Requires-Dist: llama-index-core==0.12.37
  Requires-Dist: llama-index-indices-managed-vectara==0.4.5
- Requires-Dist: llama-index-agent-llm-compiler==0.3.0
+ Requires-Dist: llama-index-agent-llm-compiler==0.3.1
  Requires-Dist: llama-index-agent-lats==0.3.0
  Requires-Dist: llama-index-agent-openai==0.4.8
- Requires-Dist: llama-index-llms-openai==0.3.42
+ Requires-Dist: llama-index-llms-openai==0.3.44
  Requires-Dist: llama-index-llms-openai-like>=0.3.5
- Requires-Dist: llama-index-llms-anthropic==0.6.10
+ Requires-Dist: llama-index-llms-anthropic==0.6.19
  Requires-Dist: llama-index-llms-together==0.3.1
  Requires-Dist: llama-index-llms-groq==0.3.1
  Requires-Dist: llama-index-llms-fireworks==0.3.2
  Requires-Dist: llama-index-llms-cohere==0.4.1
- Requires-Dist: llama-index-llms-google-genai==0.1.12
- Requires-Dist: llama-index-llms-bedrock==0.3.8
+ Requires-Dist: llama-index-llms-google-genai==0.1.14
+ Requires-Dist: llama-index-llms-bedrock-converse==0.6.0
  Requires-Dist: llama-index-tools-yahoo-finance==0.3.0
  Requires-Dist: llama-index-tools-arxiv==0.3.0
  Requires-Dist: llama-index-tools-database==0.3.0
@@ -44,8 +45,9 @@ Requires-Dist: llama-index-tools-slack==0.3.0
  Requires-Dist: llama-index-tools-exa==0.3.0
  Requires-Dist: llama-index-tools-wikipedia==0.3.0
  Requires-Dist: llama-index-tools-bing-search==0.3.0
- Requires-Dist: tavily-python==0.7.2
- Requires-Dist: exa-py==1.12.1
+ Requires-Dist: openai>=1.82.0
+ Requires-Dist: tavily-python==0.7.3
+ Requires-Dist: exa-py==1.13.1
  Requires-Dist: openinference-instrumentation-llama-index==4.2.1
  Requires-Dist: opentelemetry-proto>=1.31.0
  Requires-Dist: arize-phoenix==8.26.1
@@ -144,7 +146,11 @@ Check out our example AI assistants:
  - [Vectara account](https://console.vectara.com/signup/?utm_source=github&utm_medium=code&utm_term=DevRel&utm_content=vectara-agentic&utm_campaign=github-code-DevRel-vectara-agentic)
  - A Vectara corpus with an [API key](https://docs.vectara.com/docs/api-keys)
  - [Python 3.10 or higher](https://www.python.org/downloads/)
- - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Bedrock, Cohere, GEMINI or GROQ, if you choose to use them)
+ - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Cohere, GEMINI or GROQ, if you choose to use them).
+   To use AWS Bedrock, make sure that
+   * The Bedrock models you need are enabled on your account
+   * Your environment includes `AWS_PROFILE` with your AWS profile name.
+   * Your environment includes `AWS_REGION` set to the region where you want to consume the AWS Bedrock services (defaults to us-east-2)

  ### Installation

--- vectara_agentic-0.2.18/README.md
+++ vectara_agentic-0.2.20/README.md
@@ -71,7 +71,11 @@ Check out our example AI assistants:
  - [Vectara account](https://console.vectara.com/signup/?utm_source=github&utm_medium=code&utm_term=DevRel&utm_content=vectara-agentic&utm_campaign=github-code-DevRel-vectara-agentic)
  - A Vectara corpus with an [API key](https://docs.vectara.com/docs/api-keys)
  - [Python 3.10 or higher](https://www.python.org/downloads/)
- - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Bedrock, Cohere, GEMINI or GROQ, if you choose to use them)
+ - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Cohere, GEMINI or GROQ, if you choose to use them).
+   To use AWS Bedrock, make sure that
+   * The Bedrock models you need are enabled on your account
+   * Your environment includes `AWS_PROFILE` with your AWS profile name.
+   * Your environment includes `AWS_REGION` set to the region where you want to consume the AWS Bedrock services (defaults to us-east-2)

  ### Installation

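The new Bedrock prerequisites above amount to two environment variables plus an `AgentConfig` that selects the Bedrock provider. A minimal sketch, assuming `Agent` accepts an `agent_config` argument; the provider selection mirrors the new `tests/test_bedrock.py`, and the profile/region values are placeholders:

```python
import os

from vectara_agentic.agent import Agent, AgentType
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools import ToolsFactory
from vectara_agentic.types import ModelProvider

# Placeholders: use your own AWS profile name and a region where the
# Bedrock models you need are enabled (the package defaults to us-east-2).
os.environ["AWS_PROFILE"] = "my-aws-profile"
os.environ["AWS_REGION"] = "us-west-2"

bedrock_config = AgentConfig(
    agent_type=AgentType.FUNCTION_CALLING,
    main_llm_provider=ModelProvider.BEDROCK,
    tool_llm_provider=ModelProvider.BEDROCK,
)

def add(x: float, y: float) -> float:
    "Add two numbers"
    return x + y

agent = Agent(
    tools=[ToolsFactory().create_tool(add)],
    topic="arithmetic",
    custom_instructions="Answer concisely.",
    agent_config=bedrock_config,  # assumed keyword; hedged, not shown in this diff
)
print(agent.chat("What is 2 plus 3?").response)
```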
--- vectara_agentic-0.2.18/requirements.txt
+++ vectara_agentic-0.2.20/requirements.txt
@@ -1,17 +1,18 @@
- llama-index==0.12.35
+ llama-index==0.12.37
+ llama-index-core==0.12.37
  llama-index-indices-managed-vectara==0.4.5
- llama-index-agent-llm-compiler==0.3.0
+ llama-index-agent-llm-compiler==0.3.1
  llama-index-agent-lats==0.3.0
  llama-index-agent-openai==0.4.8
- llama-index-llms-openai==0.3.42
+ llama-index-llms-openai==0.3.44
  llama-index-llms-openai-like>=0.3.5
- llama-index-llms-anthropic==0.6.10
+ llama-index-llms-anthropic==0.6.19
  llama-index-llms-together==0.3.1
  llama-index-llms-groq==0.3.1
  llama-index-llms-fireworks==0.3.2
  llama-index-llms-cohere==0.4.1
- llama-index-llms-google-genai==0.1.12
- llama-index-llms-bedrock==0.3.8
+ llama-index-llms-google-genai==0.1.14
+ llama-index-llms-bedrock-converse==0.6.0
  llama-index-tools-yahoo-finance==0.3.0
  llama-index-tools-arxiv==0.3.0
  llama-index-tools-database==0.3.0
@@ -26,8 +27,9 @@ llama-index-tools-slack==0.3.0
  llama-index-tools-exa==0.3.0
  llama-index-tools-wikipedia==0.3.0
  llama-index-tools-bing-search==0.3.0
- tavily-python==0.7.2
- exa-py==1.12.1
+ openai>=1.82.0
+ tavily-python==0.7.3
+ exa-py==1.13.1
  openinference-instrumentation-llama-index==4.2.1
  opentelemetry-proto>=1.31.0
  arize-phoenix==8.26.1
--- vectara_agentic-0.2.18/tests/test_agent.py
+++ vectara_agentic-0.2.20/tests/test_agent.py
@@ -11,6 +11,7 @@ from vectara_agentic._prompts import GENERAL_INSTRUCTIONS


  def mult(x: float, y: float) -> float:
+     "Multiply two numbers"
      return x * y


--- /dev/null
+++ vectara_agentic-0.2.20/tests/test_bedrock.py
@@ -0,0 +1,42 @@
+ import unittest
+
+ from vectara_agentic.agent import Agent, AgentType
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.tools import ToolsFactory
+ from vectara_agentic.types import ModelProvider
+
+ import nest_asyncio
+ nest_asyncio.apply()
+
+
+ def mult(x: float, y: float) -> float:
+     "Multiply two numbers"
+     return x * y
+
+
+ fc_config_bedrock = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.BEDROCK,
+     tool_llm_provider=ModelProvider.BEDROCK,
+ )
+
+ class TestBedrock(unittest.TestCase):
+
+     def test_multiturn(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+         self.assertEqual(res.response, "1050")
+
+
+ if __name__ == "__main__":
+     unittest.main()
--- vectara_agentic-0.2.18/vectara_agentic/_version.py
+++ vectara_agentic-0.2.20/vectara_agentic/_version.py
@@ -1,4 +1,4 @@
  """
  Define the version of the package.
  """
- __version__ = "0.2.18"
+ __version__ = "0.2.20"
--- vectara_agentic-0.2.18/vectara_agentic/llm_utils.py
+++ vectara_agentic-0.2.20/vectara_agentic/llm_utils.py
@@ -1,7 +1,9 @@
  """
  Utilities for the Vectara agentic.
  """
+
  from typing import Tuple, Callable, Optional
+ import os
  from functools import lru_cache
  import tiktoken

@@ -14,11 +16,11 @@ from .agent_config import AgentConfig

  provider_to_default_model_name = {
      ModelProvider.OPENAI: "gpt-4o",
-     ModelProvider.ANTHROPIC: "claude-3-7-sonnet-latest",
+     ModelProvider.ANTHROPIC: "claude-sonnet-4-20250514",
      ModelProvider.TOGETHER: "Qwen/Qwen2.5-72B-Instruct-Turbo",
      ModelProvider.GROQ: "meta-llama/llama-4-scout-17b-16e-instruct",
      ModelProvider.FIREWORKS: "accounts/fireworks/models/firefunction-v2",
-     ModelProvider.BEDROCK: "anthropic.claude-3-7-sonnet-20250219-v1:0",
+     ModelProvider.BEDROCK: "us.anthropic.claude-sonnet-4-20250514-v1:0",
      ModelProvider.COHERE: "command-a-03-2025",
      ModelProvider.GEMINI: "models/gemini-2.0-flash",
  }
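Because the Anthropic and Bedrock defaults changed in the table above, callers that relied on the previous models may want to pin them explicitly. A hedged sketch, assuming `AgentConfig` also accepts `main_llm_model_name`/`tool_llm_model_name` overrides (those field names are not shown in this diff); the model id is the previous Bedrock default:

```python
from vectara_agentic.agent import AgentType
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.types import ModelProvider

# Assumed field names (main_llm_model_name / tool_llm_model_name); the provider
# and agent_type values mirror tests/test_bedrock.py.
config = AgentConfig(
    agent_type=AgentType.FUNCTION_CALLING,
    main_llm_provider=ModelProvider.BEDROCK,
    main_llm_model_name="anthropic.claude-3-7-sonnet-20250219-v1:0",
    tool_llm_provider=ModelProvider.BEDROCK,
    tool_llm_model_name="anthropic.claude-3-7-sonnet-20250219-v1:0",
)
```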
@@ -136,9 +138,18 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:

          llm = Fireworks(model=model_name, temperature=0, max_tokens=max_tokens)
      elif model_provider == ModelProvider.BEDROCK:
-         from llama_index.llms.bedrock import Bedrock
+         from llama_index.llms.bedrock_converse import BedrockConverse
+
+         aws_profile_name = os.getenv("AWS_PROFILE", None)
+         aws_region = os.getenv("AWS_REGION", "us-east-2")

-         llm = Bedrock(model=model_name, temperature=0, max_tokens=max_tokens)
+         llm = BedrockConverse(
+             model=model_name,
+             temperature=0,
+             max_tokens=max_tokens,
+             profile_name=aws_profile_name,
+             region_name=aws_region,
+         )
      elif model_provider == ModelProvider.COHERE:
          from llama_index.llms.cohere import Cohere

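The Bedrock branch above is the core behavioural change in this release: `Bedrock` is replaced by `BedrockConverse`, with the AWS profile and region read from the environment. A standalone sketch of the equivalent construction outside the package, using only the arguments visible in the diff (the `max_tokens` value here is a placeholder; vectara-agentic computes it per role):

```python
import os

from llama_index.llms.bedrock_converse import BedrockConverse

# Same construction get_llm() now performs for ModelProvider.BEDROCK:
# AWS_PROFILE/AWS_REGION come from the environment, with us-east-2 as the fallback region.
llm = BedrockConverse(
    model="us.anthropic.claude-sonnet-4-20250514-v1:0",  # new default Bedrock model
    temperature=0,
    max_tokens=4096,  # placeholder value
    profile_name=os.getenv("AWS_PROFILE", None),
    region_name=os.getenv("AWS_REGION", "us-east-2"),
)
print(llm.complete("Reply with the single word: ready"))
```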
--- vectara_agentic-0.2.18/PKG-INFO
+++ vectara_agentic-0.2.20/vectara_agentic.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: vectara_agentic
- Version: 0.2.18
+ Version: 0.2.20
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
  Home-page: https://github.com/vectara/py-vectara-agentic
  Author: Ofer Mendelevitch
@@ -16,20 +16,21 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: llama-index==0.12.35
+ Requires-Dist: llama-index==0.12.37
+ Requires-Dist: llama-index-core==0.12.37
  Requires-Dist: llama-index-indices-managed-vectara==0.4.5
- Requires-Dist: llama-index-agent-llm-compiler==0.3.0
+ Requires-Dist: llama-index-agent-llm-compiler==0.3.1
  Requires-Dist: llama-index-agent-lats==0.3.0
  Requires-Dist: llama-index-agent-openai==0.4.8
- Requires-Dist: llama-index-llms-openai==0.3.42
+ Requires-Dist: llama-index-llms-openai==0.3.44
  Requires-Dist: llama-index-llms-openai-like>=0.3.5
- Requires-Dist: llama-index-llms-anthropic==0.6.10
+ Requires-Dist: llama-index-llms-anthropic==0.6.19
  Requires-Dist: llama-index-llms-together==0.3.1
  Requires-Dist: llama-index-llms-groq==0.3.1
  Requires-Dist: llama-index-llms-fireworks==0.3.2
  Requires-Dist: llama-index-llms-cohere==0.4.1
- Requires-Dist: llama-index-llms-google-genai==0.1.12
- Requires-Dist: llama-index-llms-bedrock==0.3.8
+ Requires-Dist: llama-index-llms-google-genai==0.1.14
+ Requires-Dist: llama-index-llms-bedrock-converse==0.6.0
  Requires-Dist: llama-index-tools-yahoo-finance==0.3.0
  Requires-Dist: llama-index-tools-arxiv==0.3.0
  Requires-Dist: llama-index-tools-database==0.3.0
@@ -44,8 +45,9 @@ Requires-Dist: llama-index-tools-slack==0.3.0
  Requires-Dist: llama-index-tools-exa==0.3.0
  Requires-Dist: llama-index-tools-wikipedia==0.3.0
  Requires-Dist: llama-index-tools-bing-search==0.3.0
- Requires-Dist: tavily-python==0.7.2
- Requires-Dist: exa-py==1.12.1
+ Requires-Dist: openai>=1.82.0
+ Requires-Dist: tavily-python==0.7.3
+ Requires-Dist: exa-py==1.13.1
  Requires-Dist: openinference-instrumentation-llama-index==4.2.1
  Requires-Dist: opentelemetry-proto>=1.31.0
  Requires-Dist: arize-phoenix==8.26.1
@@ -144,7 +146,11 @@ Check out our example AI assistants:
  - [Vectara account](https://console.vectara.com/signup/?utm_source=github&utm_medium=code&utm_term=DevRel&utm_content=vectara-agentic&utm_campaign=github-code-DevRel-vectara-agentic)
  - A Vectara corpus with an [API key](https://docs.vectara.com/docs/api-keys)
  - [Python 3.10 or higher](https://www.python.org/downloads/)
- - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Bedrock, Cohere, GEMINI or GROQ, if you choose to use them)
+ - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Cohere, GEMINI or GROQ, if you choose to use them).
+   To use AWS Bedrock, make sure that
+   * The Bedrock models you need are enabled on your account
+   * Your environment includes `AWS_PROFILE` with your AWS profile name.
+   * Your environment includes `AWS_REGION` set to the region where you want to consume the AWS Bedrock services (defaults to us-east-2)

  ### Installation

--- vectara_agentic-0.2.18/vectara_agentic.egg-info/SOURCES.txt
+++ vectara_agentic-0.2.20/vectara_agentic.egg-info/SOURCES.txt
@@ -9,6 +9,7 @@ tests/test_agent.py
  tests/test_agent_planning.py
  tests/test_agent_type.py
  tests/test_api_endpoint.py
+ tests/test_bedrock.py
  tests/test_fallback.py
  tests/test_gemini.py
  tests/test_groq.py
--- vectara_agentic-0.2.18/vectara_agentic.egg-info/requires.txt
+++ vectara_agentic-0.2.20/vectara_agentic.egg-info/requires.txt
@@ -1,17 +1,18 @@
- llama-index==0.12.35
+ llama-index==0.12.37
+ llama-index-core==0.12.37
  llama-index-indices-managed-vectara==0.4.5
- llama-index-agent-llm-compiler==0.3.0
+ llama-index-agent-llm-compiler==0.3.1
  llama-index-agent-lats==0.3.0
  llama-index-agent-openai==0.4.8
- llama-index-llms-openai==0.3.42
+ llama-index-llms-openai==0.3.44
  llama-index-llms-openai-like>=0.3.5
- llama-index-llms-anthropic==0.6.10
+ llama-index-llms-anthropic==0.6.19
  llama-index-llms-together==0.3.1
  llama-index-llms-groq==0.3.1
  llama-index-llms-fireworks==0.3.2
  llama-index-llms-cohere==0.4.1
- llama-index-llms-google-genai==0.1.12
- llama-index-llms-bedrock==0.3.8
+ llama-index-llms-google-genai==0.1.14
+ llama-index-llms-bedrock-converse==0.6.0
  llama-index-tools-yahoo-finance==0.3.0
  llama-index-tools-arxiv==0.3.0
  llama-index-tools-database==0.3.0
@@ -26,8 +27,9 @@ llama-index-tools-slack==0.3.0
  llama-index-tools-exa==0.3.0
  llama-index-tools-wikipedia==0.3.0
  llama-index-tools-bing-search==0.3.0
- tavily-python==0.7.2
- exa-py==1.12.1
+ openai>=1.82.0
+ tavily-python==0.7.3
+ exa-py==1.13.1
  openinference-instrumentation-llama-index==4.2.1
  opentelemetry-proto>=1.31.0
  arize-phoenix==8.26.1