vectara-agentic 0.2.21__tar.gz → 0.2.22__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vectara-agentic might be problematic.

Files changed (45)
  1. {vectara_agentic-0.2.21/vectara_agentic.egg-info → vectara_agentic-0.2.22}/PKG-INFO +1 -1
  2. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_agent_type.py +5 -2
  3. vectara_agentic-0.2.22/tests/test_groq.py +42 -0
  4. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/_version.py +1 -1
  5. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/agent.py +16 -3
  6. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/llm_utils.py +52 -16
  7. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22/vectara_agentic.egg-info}/PKG-INFO +1 -1
  8. vectara_agentic-0.2.21/tests/test_groq.py +0 -115
  9. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/LICENSE +0 -0
  10. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/MANIFEST.in +0 -0
  11. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/README.md +0 -0
  12. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/requirements.txt +0 -0
  13. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/setup.cfg +0 -0
  14. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/setup.py +0 -0
  15. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/__init__.py +0 -0
  16. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/endpoint.py +0 -0
  17. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_agent.py +0 -0
  18. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_agent_planning.py +0 -0
  19. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_api_endpoint.py +0 -0
  20. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_bedrock.py +0 -0
  21. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_fallback.py +0 -0
  22. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_gemini.py +0 -0
  23. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_private_llm.py +0 -0
  24. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_return_direct.py +0 -0
  25. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_serialization.py +0 -0
  26. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_tools.py +0 -0
  27. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_vectara_llms.py +0 -0
  28. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_workflow.py +0 -0
  29. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/__init__.py +0 -0
  30. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/_callback.py +0 -0
  31. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/_observability.py +0 -0
  32. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/_prompts.py +0 -0
  33. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/agent_config.py +0 -0
  34. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/agent_endpoint.py +0 -0
  35. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/db_tools.py +0 -0
  36. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/sub_query_workflow.py +0 -0
  37. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/tool_utils.py +0 -0
  38. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/tools.py +0 -0
  39. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/tools_catalog.py +0 -0
  40. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/types.py +0 -0
  41. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/utils.py +0 -0
  42. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic.egg-info/SOURCES.txt +0 -0
  43. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  44. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic.egg-info/requires.txt +0 -0
  45. {vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic.egg-info/top_level.txt +0 -0

{vectara_agentic-0.2.21/vectara_agentic.egg-info → vectara_agentic-0.2.22}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.21
+Version: 0.2.22
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch

{vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/tests/test_agent_type.py
@@ -19,10 +19,13 @@ react_config_anthropic = AgentConfig(
     tool_llm_provider=ModelProvider.ANTHROPIC,
 )
 
+# React with Google does not work with Gemini 2.5-flash
 react_config_gemini = AgentConfig(
     agent_type=AgentType.REACT,
     main_llm_provider=ModelProvider.GEMINI,
+    main_llm_model_name="models/gemini-2.0-flash",
     tool_llm_provider=ModelProvider.GEMINI,
+    tool_llm_model_name="models/gemini-2.0-flash",
 )
 
 react_config_together = AgentConfig(
@@ -97,7 +100,7 @@ class TestAgentType(unittest.TestCase):
         )
         agent.chat("What is 5 times 10. Only give the answer, nothing else")
         agent.chat("what is 3 times 7. Only give the answer, nothing else")
-        res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+        res = agent.chat("what is the result of multiplying the results of the last two multiplications. Only give the answer, nothing else.")
         self.assertIn("1050", res.response)
 
         agent = Agent(
@@ -108,7 +111,7 @@ class TestAgentType(unittest.TestCase):
         )
         agent.chat("What is 5 times 10. Only give the answer, nothing else")
         agent.chat("what is 3 times 7. Only give the answer, nothing else")
-        res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+        res = agent.chat("what is the result of multiplying the results of the last two multiplications. Only give the answer, nothing else.")
         self.assertIn("1050", res.response)
 
     def test_together(self):

vectara_agentic-0.2.22/tests/test_groq.py (new file)
@@ -0,0 +1,42 @@
+import unittest
+
+from vectara_agentic.agent import Agent, AgentType
+from vectara_agentic.agent_config import AgentConfig
+from vectara_agentic.tools import ToolsFactory
+from vectara_agentic.types import ModelProvider
+
+import nest_asyncio
+nest_asyncio.apply()
+
+def mult(x: float, y: float) -> float:
+    "Multiply two numbers"
+    return x * y
+
+
+fc_config_groq = AgentConfig(
+    agent_type=AgentType.FUNCTION_CALLING,
+    main_llm_provider=ModelProvider.GROQ,
+    tool_llm_provider=ModelProvider.GROQ,
+)
+
+
+class TestGROQ(unittest.TestCase):
+
+    def test_multiturn(self):
+        tools = [ToolsFactory().create_tool(mult)]
+        topic = "AI topic"
+        instructions = "Always do as your father tells you, if your mother agrees!"
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+        )
+
+        agent.chat("What is 5 times 10. Only give the answer, nothing else")
+        agent.chat("what is 3 times 7. Only give the answer, nothing else")
+        res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+        self.assertEqual(res.response, "1050")
+
+
+if __name__ == "__main__":
+    unittest.main()

{vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/_version.py
@@ -1,4 +1,4 @@
 """
 Define the version of the package.
 """
-__version__ = "0.2.21"
+__version__ = "0.2.22"

{vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/agent.py
@@ -1126,9 +1126,14 @@ class Agent:
         return json.dumps(self.to_dict())
 
     @classmethod
-    def loads(cls, data: str) -> "Agent":
+    def loads(
+        cls,
+        data: str,
+        agent_progress_callback: Optional[Callable[[AgentStatusType, str], None]] = None,
+        query_logging_callback: Optional[Callable[[str, str], None]] = None
+    ) -> "Agent":
         """Create an Agent instance from a JSON string."""
-        return cls.from_dict(json.loads(data))
+        return cls.from_dict(json.loads(data), agent_progress_callback, query_logging_callback)
 
     def to_dict(self) -> Dict[str, Any]:
         """Serialize the Agent instance to a dictionary."""
@@ -1185,7 +1190,12 @@ class Agent:
         }
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "Agent":
+    def from_dict(
+        cls,
+        data: Dict[str, Any],
+        agent_progress_callback: Optional[Callable] = None,
+        query_logging_callback: Optional[Callable] = None
+    ) -> "Agent":
         """Create an Agent instance from a dictionary."""
         agent_config = AgentConfig.from_dict(data["agent_config"])
         fallback_agent_config = (
@@ -1286,6 +1296,8 @@ class Agent:
             verbose=data["verbose"],
             fallback_agent_config=fallback_agent_config,
             workflow_cls=data["workflow_cls"],
+            agent_progress_callback=agent_progress_callback,
+            query_logging_callback=query_logging_callback,
         )
         memory = (
             pickle.loads(data["memory"].encode("latin-1"))
@@ -1294,4 +1306,5 @@ class Agent:
         )
         if memory:
             agent.agent.memory = memory
+            agent.memory = memory
         return agent
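
The new optional parameters on loads and from_dict let callers re-attach runtime hooks when restoring a serialized agent, since callbacks are not part of the serialized state. A minimal sketch of the round trip, assuming `agent` is an existing Agent instance; the two callback bodies below are illustrative, not part of the package:

from vectara_agentic.agent import Agent

def on_progress(status, msg: str) -> None:
    # Illustrative progress hook: print each agent status update.
    print(f"[{status}] {msg}")

def on_query(query: str, response: str) -> None:
    # Illustrative logging hook: record each query/response pair.
    print(f"Q: {query} -> A: {response}")

data = agent.dumps()  # JSON string produced by Agent.dumps()
restored = Agent.loads(
    data,
    agent_progress_callback=on_progress,  # optional, new in 0.2.22
    query_logging_callback=on_query,      # optional, new in 0.2.22
)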

{vectara_agentic-0.2.21 → vectara_agentic-0.2.22}/vectara_agentic/llm_utils.py
@@ -11,18 +11,54 @@ from llama_index.core.llms import LLM
 from llama_index.llms.openai import OpenAI
 from llama_index.llms.anthropic import Anthropic
 
+# Optional provider imports with graceful fallback
+try:
+    from llama_index.llms.google_genai import GoogleGenAI
+except ImportError:
+    GoogleGenAI = None
+
+try:
+    from llama_index.llms.together import TogetherLLM
+except ImportError:
+    TogetherLLM = None
+
+try:
+    from llama_index.llms.groq import Groq
+except ImportError:
+    Groq = None
+
+try:
+    from llama_index.llms.fireworks import Fireworks
+except ImportError:
+    Fireworks = None
+
+try:
+    from llama_index.llms.bedrock_converse import BedrockConverse
+except ImportError:
+    BedrockConverse = None
+
+try:
+    from llama_index.llms.cohere import Cohere
+except ImportError:
+    Cohere = None
+
+try:
+    from llama_index.llms.openai_like import OpenAILike
+except ImportError:
+    OpenAILike = None
+
 from .types import LLMRole, AgentType, ModelProvider
 from .agent_config import AgentConfig
 
 provider_to_default_model_name = {
     ModelProvider.OPENAI: "gpt-4.1",
     ModelProvider.ANTHROPIC: "claude-sonnet-4-20250514",
-    ModelProvider.TOGETHER: "Qwen/Qwen2.5-72B-Instruct-Turbo",
+    ModelProvider.TOGETHER: "meta-llama/Llama-4-Scout-17B-16E-Instruct",
     ModelProvider.GROQ: "deepseek-r1-distill-llama-70b",
     ModelProvider.FIREWORKS: "accounts/fireworks/models/firefunction-v2",
     ModelProvider.BEDROCK: "us.anthropic.claude-sonnet-4-20250514-v1:0",
     ModelProvider.COHERE: "command-a-03-2025",
-    ModelProvider.GEMINI: "models/gemini-2.0-flash",
+    ModelProvider.GEMINI: "models/gemini-2.5-flash",
 }
 
 DEFAULT_MODEL_PROVIDER = ModelProvider.OPENAI
@@ -106,8 +142,8 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             max_tokens=max_tokens,
         )
     elif model_provider == ModelProvider.GEMINI:
-        from llama_index.llms.google_genai import GoogleGenAI
-
+        if GoogleGenAI is None:
+            raise ImportError("google_genai not available. Install with: pip install llama-index-llms-google-genai")
         llm = GoogleGenAI(
             model=model_name,
             temperature=0,
@@ -116,8 +152,8 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             max_tokens=max_tokens,
         )
     elif model_provider == ModelProvider.TOGETHER:
-        from llama_index.llms.together import TogetherLLM
-
+        if TogetherLLM is None:
+            raise ImportError("together not available. Install with: pip install llama-index-llms-together")
         llm = TogetherLLM(
             model=model_name,
             temperature=0,
@@ -125,8 +161,8 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             max_tokens=max_tokens,
         )
     elif model_provider == ModelProvider.GROQ:
-        from llama_index.llms.groq import Groq
-
+        if Groq is None:
+            raise ImportError("groq not available. Install with: pip install llama-index-llms-groq")
         llm = Groq(
             model=model_name,
             temperature=0,
@@ -134,12 +170,12 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             max_tokens=max_tokens,
         )
     elif model_provider == ModelProvider.FIREWORKS:
-        from llama_index.llms.fireworks import Fireworks
-
+        if Fireworks is None:
+            raise ImportError("fireworks not available. Install with: pip install llama-index-llms-fireworks")
         llm = Fireworks(model=model_name, temperature=0, max_tokens=max_tokens)
     elif model_provider == ModelProvider.BEDROCK:
-        from llama_index.llms.bedrock_converse import BedrockConverse
-
+        if BedrockConverse is None:
+            raise ImportError("bedrock_converse not available. Install with: pip install llama-index-llms-bedrock")
         aws_profile_name = os.getenv("AWS_PROFILE", None)
         aws_region = os.getenv("AWS_REGION", "us-east-2")
 
@@ -151,12 +187,12 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             region_name=aws_region,
         )
     elif model_provider == ModelProvider.COHERE:
-        from llama_index.llms.cohere import Cohere
-
+        if Cohere is None:
+            raise ImportError("cohere not available. Install with: pip install llama-index-llms-cohere")
        llm = Cohere(model=model_name, temperature=0, max_tokens=max_tokens)
     elif model_provider == ModelProvider.PRIVATE:
-        from llama_index.llms.openai_like import OpenAILike
-
+        if OpenAILike is None:
+            raise ImportError("openai_like not available. Install with: pip install llama-index-llms-openai-like")
         llm = OpenAILike(
             model=model_name,
             temperature=0,
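
With provider SDK imports now optional, a missing package surfaces as an ImportError raised from get_llm at call time, carrying an install hint, rather than a module-level import failure. A minimal sketch of how a caller might handle this; the retry with a default AgentConfig (which uses the OpenAI default provider) is an illustrative choice, not prescribed by the package:

from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.llm_utils import get_llm
from vectara_agentic.types import LLMRole, ModelProvider

config = AgentConfig(main_llm_provider=ModelProvider.GROQ)
try:
    llm = get_llm(LLMRole.MAIN, config=config)
except ImportError as err:
    # e.g. "groq not available. Install with: pip install llama-index-llms-groq"
    print(f"Provider package missing: {err}")
    llm = get_llm(LLMRole.MAIN, config=AgentConfig())  # fall back to the default provider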

{vectara_agentic-0.2.21 → vectara_agentic-0.2.22/vectara_agentic.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.21
+Version: 0.2.22
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch

vectara_agentic-0.2.21/tests/test_groq.py (deleted)
@@ -1,115 +0,0 @@
-import unittest
-
-from pydantic import Field, BaseModel
-
-from vectara_agentic.agent import Agent, AgentType
-from vectara_agentic.agent_config import AgentConfig
-from vectara_agentic.tools import VectaraToolFactory
-from vectara_agentic.types import ModelProvider
-
-
-import nest_asyncio
-nest_asyncio.apply()
-
-tickers = {
-    "C": "Citigroup",
-    "COF": "Capital One",
-    "JPM": "JPMorgan Chase",
-    "AAPL": "Apple Computer",
-    "GOOG": "Google",
-    "AMZN": "Amazon",
-    "SNOW": "Snowflake",
-    "TEAM": "Atlassian",
-    "TSLA": "Tesla",
-    "NVDA": "Nvidia",
-    "MSFT": "Microsoft",
-    "AMD": "Advanced Micro Devices",
-    "INTC": "Intel",
-    "NFLX": "Netflix",
-    "STT": "State Street",
-    "BK": "Bank of New York Mellon",
-}
-years = list(range(2015, 2025))
-
-
-def mult(x: float, y: float) -> float:
-    "Multiply two numbers"
-    return x * y
-
-
-def get_company_info() -> list[str]:
-    """
-    Returns a dictionary of companies you can query about. Always check this before using any other tool.
-    The output is a dictionary of valid ticker symbols mapped to company names.
-    You can use this to identify the companies you can query about, and their ticker information.
-    """
-    return tickers
-
-
-def get_valid_years() -> list[str]:
-    """
-    Returns a list of the years for which financial reports are available.
-    Always check this before using any other tool.
-    """
-    return years
-
-
-fc_config_groq = AgentConfig(
-    agent_type=AgentType.FUNCTION_CALLING,
-    main_llm_provider=ModelProvider.GROQ,
-    tool_llm_provider=ModelProvider.GROQ,
-)
-
-
-class TestGROQ(unittest.TestCase):
-
-    def test_tool_with_many_arguments(self):
-
-        vectara_corpus_key = "vectara-docs_1"
-        vectara_api_key = "zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA"
-        vec_factory = VectaraToolFactory(vectara_corpus_key, vectara_api_key)
-
-        class QueryToolArgs(BaseModel):
-            arg1: str = Field(description="the first argument", examples=["val1"])
-            arg2: str = Field(description="the second argument", examples=["val2"])
-            arg3: str = Field(description="the third argument", examples=["val3"])
-            arg4: str = Field(description="the fourth argument", examples=["val4"])
-            arg5: str = Field(description="the fifth argument", examples=["val5"])
-            arg6: str = Field(description="the sixth argument", examples=["val6"])
-            arg7: str = Field(description="the seventh argument", examples=["val7"])
-            arg8: str = Field(description="the eighth argument", examples=["val8"])
-            arg9: str = Field(description="the ninth argument", examples=["val9"])
-            arg10: str = Field(description="the tenth argument", examples=["val10"])
-            arg11: str = Field(description="the eleventh argument", examples=["val11"])
-            arg12: str = Field(description="the twelfth argument", examples=["val12"])
-            arg13: str = Field(
-                description="the thirteenth argument", examples=["val13"]
-            )
-            arg14: str = Field(
-                description="the fourteenth argument", examples=["val14"]
-            )
-            arg15: str = Field(description="the fifteenth argument", examples=["val15"])
-
-        query_tool_1 = vec_factory.create_rag_tool(
-            tool_name="rag_tool",
-            tool_description="""
-            A dummy tool that takes 15 arguments and returns a response (str) to the user query based on the data in this corpus.
-            We are using this tool to test the tool factory works and does not crash with OpenAI.
-            """,
-            tool_args_schema=QueryToolArgs,
-        )
-
-        agent = Agent(
-            tools=[query_tool_1],
-            topic="Sample topic",
-            custom_instructions="Call the tool with 15 arguments",
-            agent_config=fc_config_groq,
-        )
-        res = agent.chat("What is the stock price?")
-        self.assertTrue(
-            any(sub in str(res) for sub in ["I don't know", "I do not have", "please specify which company"])
-        )
-
-
-if __name__ == "__main__":
-    unittest.main()