vectara-agentic 0.2.16__py3-none-any.whl → 0.2.18__py3-none-any.whl
This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of vectara-agentic might be problematic.
- tests/test_api_endpoint.py +126 -0
- tests/test_gemini.py +115 -0
- tests/test_groq.py +0 -7
- tests/test_tools.py +9 -33
- tests/test_workflow.py +67 -0
- vectara_agentic/_version.py +1 -1
- vectara_agentic/agent.py +50 -28
- vectara_agentic/agent_endpoint.py +213 -23
- vectara_agentic/llm_utils.py +0 -17
- vectara_agentic/sub_query_workflow.py +5 -2
- vectara_agentic/tool_utils.py +3 -43
- vectara_agentic/tools.py +2 -0
- vectara_agentic/utils.py +1 -1
- {vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/METADATA +62 -18
- {vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/RECORD +18 -16
- {vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/WHEEL +1 -1
- {vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/licenses/LICENSE +0 -0
- {vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/top_level.txt +0 -0
tests/test_api_endpoint.py ADDED
@@ -0,0 +1,126 @@
+import unittest
+from uuid import UUID
+
+from fastapi.testclient import TestClient
+
+# Adjust this import to point at the file where you put create_app
+from vectara_agentic.agent_endpoint import create_app
+from vectara_agentic.agent import Agent
+from vectara_agentic.agent_config import AgentConfig
+
+
+class DummyAgent(Agent):
+    def __init__(self):
+        # satisfy Agent.__init__(tools: ...)
+        super().__init__(tools=[])
+
+    def chat(self, message: str) -> str:
+        return f"Echo: {message}"
+
+class APITestCase(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.agent = DummyAgent()
+        # Override only the endpoint_api_key, leave everything else as default
+        cls.config = AgentConfig(endpoint_api_key="testkey")
+        app = create_app(cls.agent, cls.config)
+        cls.client = TestClient(app)
+        cls.headers = {"X-API-Key": cls.config.endpoint_api_key}
+
+    def test_chat_success(self):
+        r = self.client.get("/chat", params={"message": "hello"}, headers=self.headers)
+        self.assertEqual(r.status_code, 200)
+        self.assertEqual(r.json(), {"response": "Echo: hello"})
+
+    def test_chat_empty_message(self):
+        r = self.client.get("/chat", params={"message": ""}, headers=self.headers)
+        self.assertEqual(r.status_code, 400)
+        self.assertIn("No message provided", r.json()["detail"])
+
+    def test_chat_unauthorized(self):
+        r = self.client.get("/chat", params={"message": "hello"}, headers={"X-API-Key": "bad"})
+        self.assertEqual(r.status_code, 403)
+
+    def test_completions_success(self):
+        payload = {"model": "m1", "prompt": "test"}
+        r = self.client.post("/v1/completions", json=payload, headers=self.headers)
+        self.assertEqual(r.status_code, 200)
+        data = r.json()
+
+        # ID prefix + valid UUID check
+        self.assertTrue(data["id"].startswith("cmpl-"))
+        UUID(data["id"].split("-", 1)[1])
+
+        self.assertEqual(data["model"], "m1")
+        self.assertEqual(data["choices"][0]["text"], "Echo: test")
+        # prompt_tokens=1, completion_tokens=2 ("Echo:", "test")
+        self.assertEqual(data["usage"]["prompt_tokens"], 1)
+        self.assertEqual(data["usage"]["completion_tokens"], 2)
+
+    def test_completions_no_prompt(self):
+        payload = {"model": "m1"}  # missing prompt
+        r = self.client.post("/v1/completions", json=payload, headers=self.headers)
+        self.assertEqual(r.status_code, 400)
+        self.assertIn("`prompt` is required", r.json()["detail"])
+
+    def test_completions_unauthorized(self):
+        payload = {"model": "m1", "prompt": "hi"}
+        r = self.client.post("/v1/completions", json=payload, headers={"X-API-Key": "bad"})
+        self.assertEqual(r.status_code, 403)
+
+    def test_chat_completion_success(self):
+        payload = {
+            "model": "m1",
+            "messages": [{"role": "user", "content": "hello"}]
+        }
+        r = self.client.post("/v1/chat", json=payload, headers=self.headers)
+        self.assertEqual(r.status_code, 200)
+        data = r.json()
+
+        # ID prefix + valid UUID check
+        self.assertTrue(data["id"].startswith("chatcmpl-"))
+        UUID(data["id"].split("-", 1)[1])
+
+        self.assertEqual(data["model"], "m1")
+        self.assertEqual(data["choices"][0]["message"]["content"], "Echo: hello")
+
+        # prompt_tokens=1, completion_tokens=2 ("Echo:", "hello")
+        self.assertEqual(data["usage"]["prompt_tokens"], 1)
+        self.assertEqual(data["usage"]["completion_tokens"], 2)
+
+    def test_chat_completion_multiple_user_messages(self):
+        payload = {
+            "model": "m1",
+            "messages": [
+                {"role": "system", "content": "ignore me"},
+                {"role": "user", "content": "foo"},
+                {"role": "assistant", "content": "pong"},
+                {"role": "user", "content": "bar"}
+            ]
+        }
+        r = self.client.post("/v1/chat", json=payload, headers=self.headers)
+        self.assertEqual(r.status_code, 200)
+        data = r.json()
+
+        # Should concatenate only user messages: "foo bar"
+        self.assertEqual(data["choices"][0]["message"]["content"], "Echo: foo bar")
+        self.assertEqual(data["usage"]["prompt_tokens"], 2)  # "foo","bar"
+        self.assertEqual(data["usage"]["completion_tokens"], 3)  # "Echo:","foo","bar"
+
+    def test_chat_completion_no_messages(self):
+        payload = {"model": "m1", "messages": []}
+        r = self.client.post("/v1/chat", json=payload, headers=self.headers)
+        self.assertEqual(r.status_code, 400)
+        self.assertIn("`messages` is required", r.json()["detail"])
+
+    def test_chat_completion_unauthorized(self):
+        payload = {
+            "model": "m1",
+            "messages": [{"role": "user", "content": "oops"}]
+        }
+        r = self.client.post("/v1/chat", json=payload, headers={"X-API-Key": "bad"})
+        self.assertEqual(r.status_code, 403)
+
+
+if __name__ == "__main__":
+    unittest.main()
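The tests above exercise the new HTTP surface end to end. As a minimal sketch of serving the same app outside the test harness (the empty tools list and the key value are assumptions mirroring the DummyAgent setup, not a recommended deployment):

```python
# Minimal sketch: serve the endpoints exercised by the tests above.
import uvicorn

from vectara_agentic.agent import Agent
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.agent_endpoint import create_app

agent = Agent(tools=[])  # assumption: a real deployment passes real tools/instructions
config = AgentConfig(endpoint_api_key="testkey")  # compared against the X-API-Key header
app = create_app(agent, config)
uvicorn.run(app, host="127.0.0.1", port=8000)
```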
tests/test_gemini.py ADDED
@@ -0,0 +1,115 @@
+import unittest
+
+from pydantic import Field, BaseModel
+
+from vectara_agentic.agent import Agent, AgentType
+from vectara_agentic.agent_config import AgentConfig
+from vectara_agentic.tools import VectaraToolFactory
+from vectara_agentic.types import ModelProvider
+
+
+import nest_asyncio
+nest_asyncio.apply()
+
+tickers = {
+    "C": "Citigroup",
+    "COF": "Capital One",
+    "JPM": "JPMorgan Chase",
+    "AAPL": "Apple Computer",
+    "GOOG": "Google",
+    "AMZN": "Amazon",
+    "SNOW": "Snowflake",
+    "TEAM": "Atlassian",
+    "TSLA": "Tesla",
+    "NVDA": "Nvidia",
+    "MSFT": "Microsoft",
+    "AMD": "Advanced Micro Devices",
+    "INTC": "Intel",
+    "NFLX": "Netflix",
+    "STT": "State Street",
+    "BK": "Bank of New York Mellon",
+}
+years = list(range(2015, 2025))
+
+
+def mult(x: float, y: float) -> float:
+    "Multiply two numbers"
+    return x * y
+
+
+def get_company_info() -> list[str]:
+    """
+    Returns a dictionary of companies you can query about. Always check this before using any other tool.
+    The output is a dictionary of valid ticker symbols mapped to company names.
+    You can use this to identify the companies you can query about, and their ticker information.
+    """
+    return tickers
+
+
+def get_valid_years() -> list[str]:
+    """
+    Returns a list of the years for which financial reports are available.
+    Always check this before using any other tool.
+    """
+    return years
+
+
+fc_config_gemini = AgentConfig(
+    agent_type=AgentType.FUNCTION_CALLING,
+    main_llm_provider=ModelProvider.GEMINI,
+    tool_llm_provider=ModelProvider.GEMINI,
+)
+
+
+class TestGEMINI(unittest.TestCase):
+
+    def test_tool_with_many_arguments(self):
+
+        vectara_corpus_key = "vectara-docs_1"
+        vectara_api_key = "zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA"
+        vec_factory = VectaraToolFactory(vectara_corpus_key, vectara_api_key)
+
+        class QueryToolArgs(BaseModel):
+            arg1: str = Field(description="the first argument", examples=["val1"])
+            arg2: str = Field(description="the second argument", examples=["val2"])
+            arg3: str = Field(description="the third argument", examples=["val3"])
+            arg4: str = Field(description="the fourth argument", examples=["val4"])
+            arg5: str = Field(description="the fifth argument", examples=["val5"])
+            arg6: str = Field(description="the sixth argument", examples=["val6"])
+            arg7: str = Field(description="the seventh argument", examples=["val7"])
+            arg8: str = Field(description="the eighth argument", examples=["val8"])
+            arg9: str = Field(description="the ninth argument", examples=["val9"])
+            arg10: str = Field(description="the tenth argument", examples=["val10"])
+            arg11: str = Field(description="the eleventh argument", examples=["val11"])
+            arg12: str = Field(description="the twelfth argument", examples=["val12"])
+            arg13: str = Field(
+                description="the thirteenth argument", examples=["val13"]
+            )
+            arg14: str = Field(
+                description="the fourteenth argument", examples=["val14"]
+            )
+            arg15: str = Field(description="the fifteenth argument", examples=["val15"])
+
+        query_tool_1 = vec_factory.create_rag_tool(
+            tool_name="rag_tool",
+            tool_description="""
+            A dummy tool that takes 15 arguments and returns a response (str) to the user query based on the data in this corpus.
+            We are using this tool to test the tool factory works and does not crash with OpenAI.
+            """,
+            tool_args_schema=QueryToolArgs,
+        )
+
+        agent = Agent(
+            tools=[query_tool_1],
+            topic="Sample topic",
+            custom_instructions="Call the tool with 15 arguments",
+            agent_config=fc_config_gemini,
+        )
+        res = agent.chat("What is the stock price?")
+        self.assertTrue(
+            any(sub in str(res) for sub in ["I don't know", "I do not have"])
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
tests/test_groq.py CHANGED
@@ -54,13 +54,6 @@ def get_valid_years() -> list[str]:
     return years
 
 
-config_gemini = AgentConfig(
-    agent_type=AgentType.FUNCTION_CALLING,
-    main_llm_provider=ModelProvider.GEMINI,
-    tool_llm_provider=ModelProvider.GEMINI,
-)
-
-
 fc_config_groq = AgentConfig(
     agent_type=AgentType.FUNCTION_CALLING,
     main_llm_provider=ModelProvider.GROQ,
tests/test_tools.py CHANGED
@@ -251,17 +251,19 @@ class TestToolsPackage(unittest.TestCase):
             arg11: str = Field(description="the eleventh argument", examples=["val11"])
             arg12: str = Field(description="the twelfth argument", examples=["val12"])
             arg13: str = Field(description="the thirteenth argument", examples=["val13"])
+            arg14: str = Field(description="the fourteenth argument", examples=["val14"])
+            arg15: str = Field(description="the fifteenth argument", examples=["val15"])
 
         query_tool_1 = vec_factory.create_rag_tool(
             tool_name="rag_tool",
             tool_description="""
-            A dummy tool that takes
+            A dummy tool that takes 15 arguments and returns a response (str) to the user query based on the data in this corpus.
             We are using this tool to test the tool factory works and does not crash with OpenAI.
             """,
             tool_args_schema=QueryToolArgs,
         )
 
-        # Test with
+        # Test with 15 arguments to make sure no issues occur
         config = AgentConfig(
             agent_type=AgentType.OPENAI
         )
@@ -272,9 +274,9 @@ class TestToolsPackage(unittest.TestCase):
             agent_config=config,
         )
         res = agent.chat("What is the stock price for Yahoo on 12/31/22?")
-        self.
+        self.assertNotIn("maximum length of 1024 characters", str(res))
 
-        # Same test but with GROQ
+        # Same test but with GROQ, should not have this limit
         config = AgentConfig(
             agent_type=AgentType.FUNCTION_CALLING,
             main_llm_provider=ModelProvider.GROQ,
@@ -283,13 +285,13 @@ class TestToolsPackage(unittest.TestCase):
         agent = Agent(
             tools=[query_tool_1],
             topic="Sample topic",
-            custom_instructions="Call the tool with
+            custom_instructions="Call the tool with 15 arguments for GROQ",
             agent_config=config,
         )
         res = agent.chat("What is the stock price?")
         self.assertNotIn("maximum length of 1024 characters", str(res))
 
-        # Same test but with ANTHROPIC
+        # Same test but with ANTHROPIC, should not have this limit
        config = AgentConfig(
            agent_type=AgentType.FUNCTION_CALLING,
            main_llm_provider=ModelProvider.ANTHROPIC,
@@ -298,38 +300,12 @@ class TestToolsPackage(unittest.TestCase):
         agent = Agent(
             tools=[query_tool_1],
             topic="Sample topic",
-            custom_instructions="Call the tool with
+            custom_instructions="Call the tool with 15 arguments for ANTHROPIC",
             agent_config=config,
         )
         res = agent.chat("What is the stock price?")
-        # ANTHROPIC does not have that 1024 limit
         self.assertIn("stock price", str(res))
 
-        # But using Compact_docstring=True, we can pass 13 arguments successfully.
-        vec_factory = VectaraToolFactory(
-            vectara_corpus_key, vectara_api_key, compact_docstring=True
-        )
-        query_tool_2 = vec_factory.create_rag_tool(
-            tool_name="rag_tool",
-            tool_description="""
-            A dummy tool that takes 15 arguments and returns a response (str) to the user query based on the data in this corpus.
-            We are using this tool to test the tool factory works and doesn not crash with OpenAI.
-            """,
-            tool_args_schema=QueryToolArgs,
-        )
-
-        config = AgentConfig()
-        agent = Agent(
-            tools=[query_tool_2],
-            topic="Sample topic",
-            custom_instructions="Call the tool with 15 arguments",
-            agent_config=config,
-        )
-        res = agent.chat("What is the stock price?")
-        self.assertTrue(
-            any(sub in str(res) for sub in ["I don't know", "stock price"])
-        )
-
     def test_public_repo(self):
         vectara_corpus_key = "vectara-docs_1"
         vectara_api_key = "zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA"
tests/test_workflow.py CHANGED
@@ -1,5 +1,9 @@
 import unittest
 
+from pydantic import BaseModel
+
+from llama_index.core.workflow import WorkflowTimeoutError
+
 from vectara_agentic.agent import Agent
 from vectara_agentic.agent_config import AgentConfig
 from vectara_agentic.tools import ToolsFactory
@@ -62,6 +66,69 @@ class TestWorkflowPackage(unittest.IsolatedAsyncioTestCase):
         res = await agent.run(inputs=inputs, verbose=True)
         self.assertIn("22", res.response)
 
+class TestWorkflowFailure(unittest.IsolatedAsyncioTestCase):
+
+    async def test_workflow_failure(self):
+        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
+        topic = "AI topic"
+        instructions = "You are a helpful AI assistant."
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config = AgentConfig(),
+            workflow_cls = SubQuestionQueryWorkflow,
+            workflow_timeout = 1
+        )
+
+        inputs = SubQuestionQueryWorkflow.InputsModel(
+            query="Compute 5 times 3, then add 7 to the result."
+        )
+
+        res = None
+
+        try:
+            res = await agent.run(inputs=inputs)
+        except Exception as e:
+            self.assertIsInstance(e, WorkflowTimeoutError)
+
+        self.assertIsNone(res)
+
+    async def test_workflow_with_fail_class(self):
+        tools = [ToolsFactory().create_tool(mult)] + [ToolsFactory().create_tool(add)]
+        topic = "AI topic"
+        instructions = "You are a helpful AI assistant."
+
+        class SubQuestionQueryWorkflowWithFailClass(SubQuestionQueryWorkflow):
+            class OutputModelOnFail(BaseModel):
+                """
+                In case of failure, returns the user's original query
+                """
+                original_query: str
+
+        agent = Agent(
+            tools=tools,
+            topic=topic,
+            custom_instructions=instructions,
+            agent_config = AgentConfig(),
+            workflow_cls = SubQuestionQueryWorkflowWithFailClass,
+            workflow_timeout = 1
+        )
+
+        inputs = SubQuestionQueryWorkflow.InputsModel(
+            query="Compute 5 times 3, then add 7 to the result."
+        )
+
+        res = None
+
+        try:
+            res = await agent.run(inputs=inputs)
+        except Exception as e:
+            assert isinstance(e, WorkflowTimeoutError)
+
+        self.assertIsInstance(res, SubQuestionQueryWorkflowWithFailClass.OutputModelOnFail)
+        self.assertEqual(res.original_query, "Compute 5 times 3, then add 7 to the result.")
+
 
 if __name__ == "__main__":
     unittest.main()
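For context, a hedged sketch of what these failure tests configure: `workflow_timeout` bounds the run, and the deliberately short 1-second timeout is what forces `WorkflowTimeoutError` (the empty tools list below is a placeholder for the `mult`/`add` tools the tests actually build):

```python
# Sketch of the timeout path exercised above; real tool setup is assumed.
import asyncio
from vectara_agentic.agent import Agent
from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow

async def main():
    agent = Agent(
        tools=[],  # placeholder; the tests pass mult/add tools here
        topic="AI topic",
        custom_instructions="You are a helpful AI assistant.",
        workflow_cls=SubQuestionQueryWorkflow,
        workflow_timeout=1,  # seconds; short enough to force WorkflowTimeoutError
    )
    inputs = SubQuestionQueryWorkflow.InputsModel(
        query="Compute 5 times 3, then add 7 to the result."
    )
    return await agent.run(inputs=inputs)

asyncio.run(main())
```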
vectara_agentic/_version.py CHANGED
vectara_agentic/agent.py CHANGED
@@ -35,7 +35,7 @@ from llama_index.core.callbacks.base_handler import BaseCallbackHandler
 from llama_index.agent.openai import OpenAIAgent
 from llama_index.core.agent.runner.base import AgentRunner
 from llama_index.core.agent.types import BaseAgent
-from llama_index.core.workflow import Workflow
+from llama_index.core.workflow import Workflow, Context
 
 from .types import (
     AgentType,
@@ -198,7 +198,7 @@ class Agent:
 
     def __init__(
         self,
-        tools:
+        tools: List[FunctionTool],
         topic: str = "general",
         custom_instructions: str = "",
         general_instructions: str = GENERAL_INSTRUCTIONS,
@@ -213,7 +213,7 @@ class Agent:
         fallback_agent_config: Optional[AgentConfig] = None,
         chat_history: Optional[list[Tuple[str, str]]] = None,
         validate_tools: bool = False,
-        workflow_cls: Workflow = None,
+        workflow_cls: Optional[Workflow] = None,
         workflow_timeout: int = 120,
     ) -> None:
         """
@@ -287,10 +287,10 @@ class Agent:
             A tool is invalid if it is mentioned in the instructions but not in the tools list.
             A tool's name must have at least two characters.
             Your response should be a comma-separated list of the invalid tools.
-            If
+            If no invalid tools exist, respond with "<OKAY>" (and nothing else).
             """
             llm = get_llm(LLMRole.MAIN, config=self.agent_config)
-            bad_tools_str = llm.complete(prompt).text
+            bad_tools_str = llm.complete(prompt).text.strip('\n')
             if bad_tools_str and bad_tools_str != "<OKAY>":
                 bad_tools = [tool.strip() for tool in bad_tools_str.split(",")]
                 numbered = ", ".join(
@@ -643,7 +643,7 @@ class Agent:
         validate_tools: bool = False,
         fallback_agent_config: Optional[AgentConfig] = None,
         chat_history: Optional[list[Tuple[str, str]]] = None,
-        workflow_cls: Workflow = None,
+        workflow_cls: Optional[Workflow] = None,
         workflow_timeout: int = 120,
     ) -> "Agent":
         """
@@ -712,8 +712,8 @@ class Agent:
         vectara_rerank_limit: Optional[int] = None,
         vectara_rerank_cutoff: Optional[float] = None,
         vectara_diversity_bias: float = 0.2,
-        vectara_udf_expression: str = None,
-        vectara_rerank_chain: List[Dict] = None,
+        vectara_udf_expression: Optional[str] = None,
+        vectara_rerank_chain: Optional[List[Dict]] = None,
         vectara_n_sentences_before: int = 2,
         vectara_n_sentences_after: int = 2,
         vectara_summary_num_results: int = 10,
@@ -1047,7 +1047,7 @@ class Agent:
                 time.sleep(1)
                 attempt += 1
 
-        return
+        return AgentStreamingResponse(
             response=(
                 f"For {orig_llm} LLM - failure can't be resolved after "
                 f"{max_attempts} attempts ({last_error})."
@@ -1059,7 +1059,11 @@ class Agent:
     # workflow will always get these arguments in the StartEvent: agent, tools, llm, verbose
     # the inputs argument comes from the call to run()
     #
-    async def run(
+    async def run(
+        self,
+        inputs: Any,
+        verbose: bool = False,
+    ) -> Any:
         """
         Run a workflow using the agent.
         workflow class must be provided in the agent constructor.
@@ -1067,7 +1071,7 @@ class Agent:
             inputs (Any): The inputs to the workflow.
             verbose (bool, optional): Whether to print verbose output. Defaults to False.
         Returns:
-            Any: The output of the workflow.
+            Any: The output or context of the workflow.
         """
         # Create workflow
         if self.workflow_cls:
@@ -1079,20 +1083,38 @@ class Agent:
         if not isinstance(inputs, self.workflow_cls.InputsModel):
             raise ValueError(f"Inputs must be an instance of {workflow.InputsModel}.")
 
-
-        result = await workflow.run(
-            agent=self,
-            tools=self.tools,
-            llm=self.llm,
-            verbose=verbose,
-            inputs=inputs,
-        )
-
-        # return output in the form of workflow.OutputsModel
+        workflow_context = Context(workflow=workflow)
         try:
-
-
-
+            # run workflow
+            result = await workflow.run(
+                ctx=workflow_context,
+                agent=self,
+                tools=self.tools,
+                llm=self.llm,
+                verbose=verbose,
+                inputs=inputs,
+            )
+
+            # return output in the form of workflow.OutputsModel(BaseModel)
+            try:
+                output = workflow.OutputsModel.model_validate(result)
+            except ValidationError as e:
+                raise ValueError(f"Failed to map workflow output to model: {e}") from e
+
+        except Exception as e:
+            outputs_model_on_fail_cls = getattr(workflow.__class__, "OutputModelOnFail", None)
+            if outputs_model_on_fail_cls:
+                model_fields = outputs_model_on_fail_cls.model_fields
+                input_dict = {
+                    key: await workflow_context.get(key, None)
+                    for key in model_fields
+                }
+
+                # return output in the form of workflow.OutputModelOnFail(BaseModel)
+                output = outputs_model_on_fail_cls.model_validate(input_dict)
+            else:
+                print(f"Vectara Agentic: Workflow failed with unexpected error: {e}")
+                raise type(e)(str(e)).with_traceback(e.__traceback__)
 
         return output
 
@@ -1117,12 +1139,12 @@ class Agent:
             fn_schema_serialized = {
                 "schema": (
                     fn_schema_cls.model_json_schema()
-                    if hasattr(fn_schema_cls, "model_json_schema")
+                    if fn_schema_cls and hasattr(fn_schema_cls, "model_json_schema")
                     else None
                 ),
                 "metadata": {
-                    "module": fn_schema_cls.__module__,
-                    "class": fn_schema_cls.__name__,
+                    "module": fn_schema_cls.__module__ if fn_schema_cls else None,
+                    "class": fn_schema_cls.__name__ if fn_schema_cls else None,
                 },
             }
         else:
@@ -1171,7 +1193,7 @@ class Agent:
             if data.get("fallback_agent_config")
             else None
         )
-        tools = []
+        tools: list[FunctionTool] = []
 
         for tool_data in data["tools"]:
             query_args_model = None
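The reworked `Agent.run()` above adds a failure-recovery hook: on any workflow exception it looks for an `OutputModelOnFail` class on the workflow and fills its fields from the workflow `Context`. A minimal sketch of opting in (the subclass name is an assumption; field names must match keys stored in the context, as the workflow tests demonstrate):

```python
# Sketch: a workflow that returns the user's original query when it fails.
from pydantic import BaseModel
from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow

class MyWorkflow(SubQuestionQueryWorkflow):
    class OutputModelOnFail(BaseModel):
        # each field is read back from the workflow Context on failure
        original_query: str
```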
vectara_agentic/agent_endpoint.py CHANGED
@@ -1,62 +1,252 @@
 """
-
+agent_endpoint.py
 """
+
 import logging
-
+import time
+import uuid
+from typing import Any, List, Literal, Optional, Union
+
+from fastapi import Depends, FastAPI, HTTPException
 from fastapi.security.api_key import APIKeyHeader
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 import uvicorn
 
 from .agent import Agent
 from .agent_config import AgentConfig
-api_key_header = APIKeyHeader(name="X-API-Key")
 
 class ChatRequest(BaseModel):
-    """
-
-    """
+    """Request schema for the /chat endpoint."""
+
     message: str
 
 
+class CompletionRequest(BaseModel):
+    """Request schema for the /v1/completions endpoint."""
+
+    model: str
+    prompt: Optional[Union[str, List[str]]] = None
+    max_tokens: Optional[int] = Field(16, ge=1)
+    temperature: Optional[float] = Field(1.0, ge=0.0, le=2.0)
+    top_p: Optional[float] = Field(1.0, ge=0.0, le=1.0)
+    n: Optional[int] = Field(1, ge=1)
+    stop: Optional[Union[str, List[str]]] = None
+
+
+class Choice(BaseModel):
+    """Choice schema returned in CompletionResponse."""
+
+    text: str
+    index: int
+    logprobs: Optional[Any] = None
+    finish_reason: Literal["stop", "length", "error", None]
+
+
+class CompletionUsage(BaseModel):
+    """Token usage details in CompletionResponse."""
+
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+
+
+class CompletionResponse(BaseModel):
+    """Response schema for the /v1/completions endpoint."""
+
+    id: str
+    object: Literal["text_completion"]
+    created: int
+    model: str
+    choices: List[Choice]
+    usage: CompletionUsage
+
+
+class ChatMessage(BaseModel):
+    """Schema for individual chat messages in ChatCompletionRequest."""
+    role: Literal["system", "user", "assistant"]
+    content: str
+
+
+class ChatCompletionRequest(BaseModel):
+    """Request schema for the /v1/chat endpoint."""
+    model: str
+    messages: List[ChatMessage]
+    temperature: Optional[float] = Field(1.0, ge=0.0, le=2.0)
+    top_p: Optional[float] = Field(1.0, ge=0.0, le=1.0)
+    n: Optional[int] = Field(1, ge=1)
+
+
+class ChatCompletionChoice(BaseModel):
+    """Choice schema returned in ChatCompletionResponse."""
+    index: int
+    message: ChatMessage
+    finish_reason: Literal["stop", "length", "error", None]
+
+
+class ChatCompletionResponse(BaseModel):
+    """Response schema for the /v1/chat endpoint."""
+    id: str
+    object: Literal["chat.completion"]
+    created: int
+    model: str
+    choices: List[ChatCompletionChoice]
+    usage: CompletionUsage
+
+
 def create_app(agent: Agent, config: AgentConfig) -> FastAPI:
     """
-    Create
+    Create and configure the FastAPI app.
+
+    Args:
+        agent (Agent): The agent instance to handle chat/completion.
+        config (AgentConfig): Configuration containing the API key.
+
+    Returns:
+        FastAPI: Configured FastAPI application.
     """
     app = FastAPI()
     logger = logging.getLogger("uvicorn.error")
     logging.basicConfig(level=logging.INFO)
-    endpoint_api_key = config.endpoint_api_key
 
-
-
-
-
-
+    api_key_header = APIKeyHeader(name="X-API-Key")
+
+    async def _verify_api_key(api_key: str = Depends(api_key_header)):
+        """
+        Dependency that verifies the X-API-Key header.
+
+        Raises:
+            HTTPException(403): If the provided key does not match.
+
+        Returns:
+            bool: True if key is valid.
+        """
+        if api_key != config.endpoint_api_key:
             raise HTTPException(status_code=403, detail="Unauthorized")
+        return True
+
+    @app.get(
+        "/chat", summary="Chat with the agent", dependencies=[Depends(_verify_api_key)]
+    )
+    async def chat(message: str):
+        """
+        Handle GET /chat requests.
 
+        Args:
+            message (str): The user's message to the agent.
+
+        Returns:
+            dict: Contains the agent's response under 'response'.
+
+        Raises:
+            HTTPException(400): If message is empty.
+            HTTPException(500): On internal errors.
+        """
         if not message:
-            logger.error("No message provided in the request")
             raise HTTPException(status_code=400, detail="No message provided")
+        try:
+            res = agent.chat(message)
+            return {"response": res}
+        except Exception as e:
+            raise HTTPException(status_code=500, detail="Internal server error") from e
+
+    @app.post(
+        "/v1/completions",
+        response_model=CompletionResponse,
+        dependencies=[Depends(_verify_api_key)],
+    )
+    async def completions(req: CompletionRequest):
+        """
+        Handle POST /v1/completions requests.
+
+        Args:
+            req (CompletionRequest): The completion request payload.
+
+        Returns:
+            CompletionResponse: The generated completion and usage stats.
+
+        Raises:
+            HTTPException(400): If prompt is missing.
+            HTTPException(500): On internal errors.
+        """
+        if not req.prompt:
+            raise HTTPException(status_code=400, detail="`prompt` is required")
+        raw = req.prompt if isinstance(req.prompt, str) else req.prompt[0]
+        try:
+            start = time.time()
+            text = agent.chat(raw)
+            logger.info(f"Agent returned in {time.time()-start:.2f}s")
+        except Exception as e:
+            raise HTTPException(status_code=500, detail="Internal server error") from e
+
+        p_tokens = len(raw.split())
+        c_tokens = len(text.split())
+
+        return CompletionResponse(
+            id=f"cmpl-{uuid.uuid4()}",
+            object="text_completion",
+            created=int(time.time()),
+            model=req.model,
+            choices=[Choice(text=text, index=0, logprobs=None, finish_reason="stop")],
+            usage=CompletionUsage(
+                prompt_tokens=p_tokens,
+                completion_tokens=c_tokens,
+                total_tokens=p_tokens + c_tokens,
+            ),
+        )
+
+    @app.post(
+        "/v1/chat",
+        response_model=ChatCompletionResponse,
+        dependencies=[Depends(_verify_api_key)],
+    )
+    async def chat_completion(req: ChatCompletionRequest):
+        if not req.messages:
+            raise HTTPException(status_code=400, detail="`messages` is required")
+
+        # concatenate all user messages into a single prompt
+        raw = " ".join(m.content for m in req.messages if m.role == "user")
 
         try:
-
-
-
+            start = time.time()
+            text = agent.chat(raw)
+            logger.info(f"Agent returned in {time.time()-start:.2f}s")
         except Exception as e:
-            logger.error(f"Error during agent processing: {e}")
             raise HTTPException(status_code=500, detail="Internal server error") from e
 
+        p_tokens = len(raw.split())
+        c_tokens = len(text.split())
+
+        return ChatCompletionResponse(
+            id=f"chatcmpl-{uuid.uuid4()}",
+            object="chat.completion",
+            created=int(time.time()),
+            model=req.model,
+            choices=[
+                ChatCompletionChoice(
+                    index=0,
+                    message=ChatMessage(role="assistant", content=text),
+                    finish_reason="stop",
+                )
+            ],
+            usage=CompletionUsage(
+                prompt_tokens=p_tokens,
+                completion_tokens=c_tokens,
+                total_tokens=p_tokens + c_tokens,
+            ),
+        )
+
     return app
 
 
-def start_app(agent: Agent, host=
+def start_app(agent: Agent, host="0.0.0.0", port=8000):
     """
-
+    Launch the FastAPI application using Uvicorn.
 
     Args:
-
-
+        agent (Agent): The agent instance for request handling.
+        host (str, optional): Host interface. Defaults to "0.0.0.0".
+        port (int, optional): Port number. Defaults to 8000.
     """
     app = create_app(agent, config=AgentConfig())
     uvicorn.run(app, host=host, port=port)
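Since the new routes mirror the OpenAI completions/chat request and response shapes, any HTTP client can call them. A hedged example against a locally running app (the URL and key value are assumptions):

```python
# Example request against the new /v1/chat route (assumed local server and key).
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat",
    headers={"X-API-Key": "testkey"},
    json={"model": "m1", "messages": [{"role": "user", "content": "hello"}]},
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```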
vectara_agentic/llm_utils.py CHANGED
@@ -1,7 +1,6 @@
 """
 Utilities for the Vectara agentic.
 """
-from types import MethodType
 from typing import Tuple, Callable, Optional
 from functools import lru_cache
 import tiktoken
@@ -12,7 +11,6 @@ from llama_index.llms.anthropic import Anthropic
 
 from .types import LLMRole, AgentType, ModelProvider
 from .agent_config import AgentConfig
-from .tool_utils import _updated_openai_prepare_chat_with_tools
 
 provider_to_default_model_name = {
     ModelProvider.OPENAI: "gpt-4o",
@@ -124,11 +122,6 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             is_function_calling_model=True,
             max_tokens=max_tokens,
         )
-        # pylint: disable=protected-access
-        llm._prepare_chat_with_tools = MethodType(
-            _updated_openai_prepare_chat_with_tools,
-            llm,
-        )
     elif model_provider == ModelProvider.GROQ:
         from llama_index.llms.groq import Groq
 
@@ -138,11 +131,6 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             is_function_calling_model=True,
             max_tokens=max_tokens,
         )
-        # pylint: disable=protected-access
-        llm._prepare_chat_with_tools = MethodType(
-            _updated_openai_prepare_chat_with_tools,
-            llm,
-        )
     elif model_provider == ModelProvider.FIREWORKS:
         from llama_index.llms.fireworks import Fireworks
 
@@ -167,11 +155,6 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             api_key=config.private_llm_api_key,
             max_tokens=max_tokens,
         )
-        # pylint: disable=protected-access
-        llm._prepare_chat_with_tools = MethodType(
-            _updated_openai_prepare_chat_with_tools,
-            llm,
-        )
 
     else:
         raise ValueError(f"Unknown LLM provider: {model_provider}")
vectara_agentic/sub_query_workflow.py CHANGED
@@ -50,7 +50,7 @@ class SubQuestionQueryWorkflow(Workflow):
         answer: str
 
     @step
-    async def query(self, ctx: Context, ev: StartEvent) -> QueryEvent:
+    async def query(self, ctx: Context, ev: StartEvent) -> QueryEvent | None:
         """
         Given a user question, and a list of tools, output a list of relevant
         sub-questions, such that the answers to all the sub-questions put together
@@ -130,7 +130,10 @@ class SubQuestionQueryWorkflow(Workflow):
         if sub_questions is None:
             raise ValueError(f"Invalid LLM response format: {response_str}")
         if not sub_questions:
-
+            # If the LLM returns an empty list, we need to handle it gracefully
+            # We use the original query as a single question fallback
+            print("LLM returned empty sub-questions list")
+            sub_questions = [original_query]
 
         await ctx.set("sub_question_count", len(sub_questions))
         for question in sub_questions:
vectara_agentic/tool_utils.py CHANGED
@@ -7,7 +7,7 @@ import re
 
 from typing import (
     Callable, List, Dict, Any, Optional, Union, Type, Tuple,
-
+    get_origin, get_args
 )
 from pydantic import BaseModel, create_model
 from pydantic_core import PydanticUndefined
@@ -17,52 +17,10 @@ from llama_index.core.tools.function_tool import AsyncCallable
 from llama_index.core.tools.types import ToolMetadata, ToolOutput
 from llama_index.core.workflow.context import Context
 
-from llama_index.core.tools.types import BaseTool
-from llama_index.core.base.llms.types import ChatMessage, MessageRole
-from llama_index.llms.openai.utils import resolve_tool_choice
-
 from .types import ToolType
 from .utils import is_float
 
 
-def _updated_openai_prepare_chat_with_tools(
-    self,
-    tools: Sequence["BaseTool"],
-    user_msg: Optional[Union[str, ChatMessage]] = None,
-    chat_history: Optional[List[ChatMessage]] = None,
-    verbose: bool = False,
-    allow_parallel_tool_calls: bool = False,
-    tool_choice: Union[str, dict] = "auto",
-    strict: Optional[bool] = None,
-    **kwargs: Any,
-) -> Dict[str, Any]:
-    """Predict and call the tool."""
-    tool_specs = [tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools]
-
-    # if strict is passed in, use, else default to the class-level attribute, else default to True`
-    strict = strict if strict is not None else self.strict
-
-    if self.metadata.is_function_calling_model:
-        for tool_spec in tool_specs:
-            if tool_spec["type"] == "function":
-                tool_spec["function"]["strict"] = strict
-                # in current openai 1.40.0 it is always false.
-                tool_spec["function"]["parameters"]["additionalProperties"] = False
-
-    if isinstance(user_msg, str):
-        user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
-
-    messages = chat_history or []
-    if user_msg:
-        messages.append(user_msg)
-
-    return {
-        "messages": messages,
-        "tools": tool_specs or None,
-        "tool_choice": resolve_tool_choice(tool_choice) if tool_specs else None,
-        **kwargs,
-    }
-
 class VectaraToolMetadata(ToolMetadata):
     """
     A subclass of ToolMetadata adding the tool_type attribute.
@@ -112,6 +70,7 @@ class VectaraTool(FunctionTool):
         tool_metadata: Optional[ToolMetadata] = None,
         callback: Optional[Callable[[Any], Any]] = None,
         async_callback: Optional[AsyncCallable] = None,
+        partial_params: Optional[Dict[str, Any]] = None,
         tool_type: ToolType = ToolType.QUERY,
     ) -> "VectaraTool":
         tool = FunctionTool.from_defaults(
@@ -124,6 +83,7 @@ class VectaraTool(FunctionTool):
             tool_metadata,
             callback,
             async_callback,
+            partial_params
         )
         vectara_tool = cls(
             tool_type=tool_type,
vectara_agentic/tools.py CHANGED
vectara_agentic/utils.py CHANGED
{vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.
+Version: 0.2.18
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,12 +16,13 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.12.
+Requires-Dist: llama-index==0.12.35
 Requires-Dist: llama-index-indices-managed-vectara==0.4.5
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
-Requires-Dist: llama-index-agent-openai==0.4.
-Requires-Dist: llama-index-llms-openai==0.3.
+Requires-Dist: llama-index-agent-openai==0.4.8
+Requires-Dist: llama-index-llms-openai==0.3.42
+Requires-Dist: llama-index-llms-openai-like>=0.3.5
 Requires-Dist: llama-index-llms-anthropic==0.6.10
 Requires-Dist: llama-index-llms-together==0.3.1
 Requires-Dist: llama-index-llms-groq==0.3.1
@@ -36,15 +37,17 @@ Requires-Dist: llama-index-tools-google==0.3.0
 Requires-Dist: llama-index-tools-tavily_research==0.3.0
 Requires-Dist: llama_index.tools.brave_search==0.3.0
 Requires-Dist: llama-index-tools-neo4j==0.3.0
+Requires-Dist: llama-index-tools-waii==0.3.0
 Requires-Dist: llama-index-graph-stores-kuzu==0.7.0
+Requires-Dist: llama-index-tools-salesforce==0.3.0
 Requires-Dist: llama-index-tools-slack==0.3.0
 Requires-Dist: llama-index-tools-exa==0.3.0
 Requires-Dist: llama-index-tools-wikipedia==0.3.0
 Requires-Dist: llama-index-tools-bing-search==0.3.0
-Requires-Dist: tavily-python==0.
-Requires-Dist: exa-py==1.12.
+Requires-Dist: tavily-python==0.7.2
+Requires-Dist: exa-py==1.12.1
 Requires-Dist: openinference-instrumentation-llama-index==4.2.1
-Requires-Dist: opentelemetry-proto
+Requires-Dist: opentelemetry-proto>=1.31.0
 Requires-Dist: arize-phoenix==8.26.1
 Requires-Dist: arize-phoenix-otel==0.9.2
 Requires-Dist: protobuf==5.29.3
@@ -365,51 +368,92 @@ vectara-agentic includes various other tools from LlamaIndex ToolSpecs:
 * Tavily Search: Real-time web search using [Tavily API](https://tavily.com/)
   ```python
   from vectara_agentic.tools_catalog import ToolsCatalog
-
+  tools_factory = ToolsFactory()
+  tavily_tools = tools_factory.get_llama_index_tools(
+      tool_package_name="tavily_research",
+      tool_spec_name="TavilyToolSpec",
+      api_key=str(os.environ["TAVILY_API_KEY"]),
+  )
   ```
 * EXA.AI: Advanced web search and data extraction
   ```python
-
+  exa_tools = tools_factory.get_llama_index_tools(
+      tool_package_name="exa.ai",
+      tool_spec_name="ExaToolSpec",
+      api_key=str(os.environ["EXA_API_KEY"]),
+  )
   ```
 * Brave Search: Web search using Brave's search engine
   ```python
-
+  brave_tools = tools_factory.get_llama_index_tools(
+      tool_package_name="brave_search",
+      tool_spec_name="BraveSearchToolSpec",
+      api_key=str(os.environ["BRAVE_API_KEY"]),
+  )
   ```
 
 * **Academic Tools**
   * arXiv: Search and retrieve academic papers
    ```python
-
+    arxiv_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="arxiv",
+        tool_spec_name="ArxivToolSpec",
+    )
    ```
 
-* **
+* **Database Tools**
  * Neo4j: Graph database integration
    ```python
-
+    neo4j_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="neo4j",
+        tool_spec_name="Neo4jQueryToolSpec",
+    )
    ```
  * Kuzu: Lightweight graph database
    ```python
-
+    kuzu_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="kuzu",
+        tool_spec_name="KuzuGraphStore",
+    )
+    ```
+  * Waii: tools for natural language query of a relational database
+    ```python
+    waii_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="waii",
+        tool_spec_name="WaiiToolSpec",
+    )
    ```
 
 * **Google Tools**
  * Gmail: Read and send emails
    ```python
-
+    gmail_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="google",
+        tool_spec_name="GmailToolSpec",
+    )
    ```
  * Calendar: Manage calendar events
    ```python
-
+    calendar_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="google",
+        tool_spec_name="GoogleCalendarToolSpec",
+    )
    ```
  * Search: Google search integration
    ```python
-
+    search_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="google",
+        tool_spec_name="GoogleSearchToolSpec",
+    )
    ```
 
 * **Communication Tools**
  * Slack: Send messages and interact with Slack
    ```python
-
+    slack_tools = tools_factory.get_llama_index_tools(
+        tool_package_name="slack",
+        tool_spec_name="SlackToolSpec",
+    )
    ```
 
 For detailed setup instructions and API key requirements, please refer to the instructions on [LlamaIndex hub](https://llamahub.ai/?tab=tools) for the specific tool.
{vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/RECORD CHANGED
@@ -3,32 +3,34 @@ tests/endpoint.py,sha256=frnpdZQpnuQNNKNYgAn2rFTarNG8MCJaNA77Bw_W22A,1420
 tests/test_agent.py,sha256=o5U3K1AJllsSDvucrgFJPQRdAmHPq3LCuFpsnECUTFk,5483
 tests/test_agent_planning.py,sha256=JwEebGooROAvsQ9JZoaH6KEcrSyv1F0lL4TD4FjP8a8,2213
 tests/test_agent_type.py,sha256=mWo-pTQNDj4fWFPETm5jnb7Y5N48aW35keTVvxdIaCc,7173
+tests/test_api_endpoint.py,sha256=M9YGFCy_Jphzq9JznP4ftHqxZ_yu6dgWdX1jRvdsORA,5002
 tests/test_fallback.py,sha256=M5YD7NHZ0joVU1frYIr9_OiRAIje5mrXrYVcekzlyGs,2829
-tests/
+tests/test_gemini.py,sha256=QUBYWhZkX9AjnhPn5qa7sREf6YHZWeJEmYzKwVC23Io,4081
+tests/test_groq.py,sha256=5RA6uFC6qra-Do55f6HUotk3EQqOosw0GjOGiHDBS4o,4071
 tests/test_private_llm.py,sha256=CY-_rCpxGUuxnZ3ypkodw5Jj-sJCNdh6rLbCvULwuJI,2247
 tests/test_return_direct.py,sha256=Y_K_v88eS_kJfxE6A0Yghma0nUT8u6COitj0SNnZGNs,1523
 tests/test_serialization.py,sha256=Ed23GN2zhSJNdPFrVK4aqLkOhJKviczR_o0t-r9TuRI,4762
-tests/test_tools.py,sha256=
+tests/test_tools.py,sha256=sCgV74LZSRU1zKBhv_emUNe1ZmWIeGVrelNXpd9UV1c,14872
 tests/test_vectara_llms.py,sha256=m-fDAamJR1I5IdV0IpXuTegerTUNCVRm27lsHd4wQjg,2367
-tests/test_workflow.py,sha256=
+tests/test_workflow.py,sha256=06NvgUQMzPb2b2mrxtVo7xribZEDQM1LdcXNJdiOfPc,4391
 vectara_agentic/__init__.py,sha256=2GLDS3U6KckK-dBRl9v_x1kSV507gEhjOfuMmmu0Qxg,850
 vectara_agentic/_callback.py,sha256=ron49t1t-ox-736WaXzrZ99vhN4NI9bMiHFyj0iIPqg,13062
 vectara_agentic/_observability.py,sha256=UbJxiOJFOdLq3b1t0-Y7swMC3BzJu3IOlTUM-c1oUk8,4328
 vectara_agentic/_prompts.py,sha256=vAb02oahA7GKRgLOsDGqgKl-BLBop2AjOlCTgLrf3M4,9694
-vectara_agentic/_version.py,sha256=
-vectara_agentic/agent.py,sha256=
+vectara_agentic/_version.py,sha256=6bWhPhOhATgGaKBsmcgPdRKvZBguw3zZOhD6CJaNJPs,66
+vectara_agentic/agent.py,sha256=zJ7ucFf8jc0VO4mTFqujfwREz2B-rJCpIgCJKAtNlEk,54884
 vectara_agentic/agent_config.py,sha256=E-rtYMcpoGxnEAyy8231bizo2n0uGQ2qWxuSgTEfwdQ,4327
-vectara_agentic/agent_endpoint.py,sha256=
+vectara_agentic/agent_endpoint.py,sha256=PzIN7HhEHv8Mq_Zo5cZ2xYrgdv2AN6kx6dc_2AJq28I,7497
 vectara_agentic/db_tools.py,sha256=Kfz6n-rSj5TQEbAiJnWGmqWtcwB0A5GpxD7d1UwGzlc,11194
-vectara_agentic/llm_utils.py,sha256=
-vectara_agentic/sub_query_workflow.py,sha256=
-vectara_agentic/tool_utils.py,sha256=
-vectara_agentic/tools.py,sha256=
+vectara_agentic/llm_utils.py,sha256=_dkxA9DcBwyIzg-BOTi7NwZZhhjV5G2cnaVlzd9J7do,5687
+vectara_agentic/sub_query_workflow.py,sha256=cPeossVPFajpSAwy45fSXhTXbQOfzv_l66pxSa4molM,12366
+vectara_agentic/tool_utils.py,sha256=jv98vCMYb9afFa-HaPxI2A8BXxplfQRv2Z9b5w7ztZc,18919
+vectara_agentic/tools.py,sha256=2_9YBqszFqYDpvlTIZfdfplRKffe660jQRxp0akM-cE,32918
 vectara_agentic/tools_catalog.py,sha256=cAN_kDOWZUoW4GNFwY5GdS6ImMUQNnF2sggx9OGK9Cg,4906
 vectara_agentic/types.py,sha256=HcS7vR8P2v2xQTlOc6ZFV2vvlr3OpzSNWhtcLMxqUZc,1792
-vectara_agentic/utils.py,sha256=
-vectara_agentic-0.2.
-vectara_agentic-0.2.
-vectara_agentic-0.2.
-vectara_agentic-0.2.
-vectara_agentic-0.2.
+vectara_agentic/utils.py,sha256=R9HitEG5K3Q_p2M_teosT181OUxkhs1-hnj98qDYGbE,2545
+vectara_agentic-0.2.18.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vectara_agentic-0.2.18.dist-info/METADATA,sha256=FO_4pmBWrl_-7DWPWF32HwOHbbicHdB86YvRi4cA67Y,29946
+vectara_agentic-0.2.18.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+vectara_agentic-0.2.18.dist-info/top_level.txt,sha256=Y7TQTFdOYGYodQRltUGRieZKIYuzeZj2kHqAUpfCUfg,22
+vectara_agentic-0.2.18.dist-info/RECORD,,
{vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/licenses/LICENSE: File without changes
{vectara_agentic-0.2.16.dist-info → vectara_agentic-0.2.18.dist-info}/top_level.txt: File without changes