vectara-agentic 0.3.3__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff shows the changes between publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- tests/__init__.py +7 -0
- tests/conftest.py +312 -0
- tests/endpoint.py +54 -17
- tests/run_tests.py +111 -0
- tests/test_agent.py +10 -5
- tests/test_agent_type.py +82 -143
- tests/test_api_endpoint.py +4 -0
- tests/test_bedrock.py +4 -0
- tests/test_fallback.py +4 -0
- tests/test_gemini.py +28 -45
- tests/test_groq.py +4 -0
- tests/test_private_llm.py +11 -2
- tests/test_return_direct.py +6 -2
- tests/test_serialization.py +4 -0
- tests/test_streaming.py +88 -0
- tests/test_tools.py +10 -82
- tests/test_vectara_llms.py +4 -0
- tests/test_vhc.py +66 -0
- tests/test_workflow.py +4 -0
- vectara_agentic/__init__.py +27 -4
- vectara_agentic/_callback.py +65 -67
- vectara_agentic/_observability.py +30 -30
- vectara_agentic/_version.py +1 -1
- vectara_agentic/agent.py +375 -848
- vectara_agentic/agent_config.py +15 -14
- vectara_agentic/agent_core/__init__.py +22 -0
- vectara_agentic/agent_core/factory.py +501 -0
- vectara_agentic/{_prompts.py → agent_core/prompts.py} +3 -35
- vectara_agentic/agent_core/serialization.py +345 -0
- vectara_agentic/agent_core/streaming.py +495 -0
- vectara_agentic/agent_core/utils/__init__.py +34 -0
- vectara_agentic/agent_core/utils/hallucination.py +202 -0
- vectara_agentic/agent_core/utils/logging.py +52 -0
- vectara_agentic/agent_core/utils/prompt_formatting.py +56 -0
- vectara_agentic/agent_core/utils/schemas.py +87 -0
- vectara_agentic/agent_core/utils/tools.py +125 -0
- vectara_agentic/agent_endpoint.py +4 -6
- vectara_agentic/db_tools.py +37 -12
- vectara_agentic/llm_utils.py +41 -42
- vectara_agentic/sub_query_workflow.py +9 -14
- vectara_agentic/tool_utils.py +138 -83
- vectara_agentic/tools.py +36 -21
- vectara_agentic/tools_catalog.py +16 -16
- vectara_agentic/types.py +98 -6
- {vectara_agentic-0.3.3.dist-info → vectara_agentic-0.4.0.dist-info}/METADATA +69 -30
- vectara_agentic-0.4.0.dist-info/RECORD +50 -0
- tests/test_agent_planning.py +0 -64
- tests/test_hhem.py +0 -100
- vectara_agentic/hhem.py +0 -82
- vectara_agentic-0.3.3.dist-info/RECORD +0 -39
- {vectara_agentic-0.3.3.dist-info → vectara_agentic-0.4.0.dist-info}/WHEEL +0 -0
- {vectara_agentic-0.3.3.dist-info → vectara_agentic-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {vectara_agentic-0.3.3.dist-info → vectara_agentic-0.4.0.dist-info}/top_level.txt +0 -0
vectara_agentic/db_tools.py
CHANGED
@@ -6,6 +6,7 @@ It makes the following adjustments:
 * Makes sure the load_data method returns a list of text values from the database (and not Document[] objects).
 * Limits the returned rows to self.max_rows.
 """
+
 from typing import Any, Optional, List, Awaitable, Callable
 import asyncio
 from inspect import signature
@@ -24,15 +25,20 @@ from llama_index.core.tools.utils import create_schema_from_function

 AsyncCallable = Callable[..., Awaitable[Any]]

+
 class DatabaseTools:
     """Database tools for vectara-agentic
     This class provides a set of tools to interact with a database.
     It allows you to load data, list tables, describe tables, and load unique values.
     It also provides a method to load sample data from a specified table.
     """
+
     spec_functions = [
-        "load_data",
-        "
+        "load_data",
+        "load_sample_data",
+        "list_tables",
+        "describe_tables",
+        "load_unique_values",
     ]

     def __init__(
@@ -61,7 +67,7 @@ class DatabaseTools:
         elif uri:
             self.uri = uri
             self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
-        elif
+        elif scheme and host and port and user and password and dbname:
             uri = f"{scheme}://{user}:{password}@{host}:{port}/{dbname}"
             self.uri = uri
             self.sql_database = SQLDatabase.from_uri(uri, *args, **kwargs)
@@ -76,7 +82,8 @@ class DatabaseTools:
         self._metadata.reflect(bind=self.sql_database.engine)

     def _get_metadata_from_fn_name(
-        self,
+        self,
+        fn_name: str,
     ) -> Optional[ToolMetadata]:
         """Return map from function name.

@@ -87,7 +94,9 @@ class DatabaseTools:
             func = getattr(self, fn_name)
         except AttributeError:
             return None
-        name =
+        name = (
+            self.tool_name_prefix + "_" + fn_name if self.tool_name_prefix else fn_name
+        )
         docstring = func.__doc__ or ""
         description = f"{name}{signature(func)}\n{docstring}"
         fn_schema = create_schema_from_function(fn_name, getattr(self, fn_name))
@@ -118,7 +127,9 @@ class DatabaseTools:
         try:
             count_rows = self._load_data(count_query)
         except Exception as e:
-            return [
+            return [
+                f"Error ({str(e)}) occurred while counting number of rows, check your query."
+            ]
         num_rows = int(count_rows[0].text)
         if num_rows > self.max_rows:
             return [
@@ -128,7 +139,9 @@ class DatabaseTools:
         try:
             res = self._load_data(sql_query)
         except Exception as e:
-            return [
+            return [
+                f"Error ({str(e)}) occurred while executing the query {sql_query}, check your query."
+            ]
         return [d.text for d in res]

     def load_sample_data(self, table_name: str, num_rows: int = 25) -> Any:
@@ -149,7 +162,9 @@ class DatabaseTools:
         try:
             res = self._load_data(f"SELECT * FROM {table_name} LIMIT {num_rows}")
         except Exception as e:
-            return [
+            return [
+                f"Error ({str(e)}) occurred while loading sample data for table {table_name}"
+            ]
         return [d.text for d in res]

     def list_tables(self) -> List[str]:
@@ -179,7 +194,11 @@ class DatabaseTools:
         table_schemas = []
         for table_name in table_names:
             table = next(
-                (
+                (
+                    table
+                    for table in self._metadata.sorted_tables
+                    if table.name == table_name
+                ),
                 None,
             )
             if table is None:
@@ -188,7 +207,9 @@ class DatabaseTools:
             table_schemas.append(f"{schema}\n")
         return "\n".join(table_schemas)

-    def load_unique_values(
+    def load_unique_values(
+        self, table_name: str, columns: list[str], num_vals: int = 200
+    ) -> Any:
         """
         Fetches the first num_vals unique values from the specified columns of the database table.

@@ -209,10 +230,14 @@ class DatabaseTools:
         res = {}
         try:
             for column in columns:
-                unique_vals = self._load_data(
+                unique_vals = self._load_data(
+                    f'SELECT DISTINCT "{column}" FROM {table_name} LIMIT {num_vals}'
+                )
                 res[column] = [d.text for d in unique_vals]
         except Exception as e:
-            return {
+            return {
+                f"Error ({str(e)}) occurred while loading unique values for table {table_name}"
+            }
         return res

     def to_tool_list(self) -> List[FunctionTool]:
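As a rough illustration of the reworked DatabaseTools surface, the sketch below shows one plausible way to build the tool list. It is not taken from the package docs: the constructor keyword names (uri, tool_name_prefix, max_rows) are inferred from the attributes referenced in the hunks above and may differ in the actual API.

from vectara_agentic.db_tools import DatabaseTools

# Sketch only: constructor keywords are inferred from self.uri, self.tool_name_prefix,
# and self.max_rows as used above; check the package docs for the real signature.
db_tools = DatabaseTools(
    uri="postgresql://user:password@localhost:5432/mydb",  # or scheme/host/port/user/password/dbname
    tool_name_prefix="sales_db",
    max_rows=500,
)

# Each name in spec_functions becomes a FunctionTool the agent can call.
tools = db_tools.to_tool_list()
print([t.metadata.name for t in tools])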
vectara_agentic/llm_utils.py
CHANGED
@@ -2,10 +2,10 @@
 Utilities for the Vectara agentic.
 """

-from typing import Tuple,
+from typing import Tuple, Optional
 import os
 from functools import lru_cache
-import
+import hashlib

 from llama_index.core.llms import LLM
 from llama_index.llms.openai import OpenAI
@@ -13,7 +13,7 @@ from llama_index.llms.anthropic import Anthropic

 # LLM provider imports are now lazy-loaded in get_llm() function

-from .types import LLMRole,
+from .types import LLMRole, ModelProvider
 from .agent_config import AgentConfig

 provider_to_default_model_name = {
@@ -21,7 +21,6 @@ provider_to_default_model_name = {
     ModelProvider.ANTHROPIC: "claude-sonnet-4-20250514",
     ModelProvider.TOGETHER: "deepseek-ai/DeepSeek-V3",
     ModelProvider.GROQ: "deepseek-r1-distill-llama-70b",
-    ModelProvider.FIREWORKS: "accounts/fireworks/models/firefunction-v2",
     ModelProvider.BEDROCK: "us.anthropic.claude-sonnet-4-20250514-v1:0",
     ModelProvider.COHERE: "command-a-03-2025",
     ModelProvider.GEMINI: "models/gemini-2.5-flash",
@@ -29,6 +28,30 @@ provider_to_default_model_name = {

 DEFAULT_MODEL_PROVIDER = ModelProvider.OPENAI

+# Manual cache for LLM instances to handle mutable AgentConfig objects
+_llm_cache = {}
+
+
+def _create_llm_cache_key(role: LLMRole, config: Optional[AgentConfig] = None) -> str:
+    """Create a hash-based cache key for LLM instances."""
+    if config is None:
+        config = AgentConfig()
+
+    # Extract only the relevant config parameters for the cache key
+    cache_data = {
+        "role": role.value,
+        "main_llm_provider": config.main_llm_provider.value,
+        "main_llm_model_name": config.main_llm_model_name,
+        "tool_llm_provider": config.tool_llm_provider.value,
+        "tool_llm_model_name": config.tool_llm_model_name,
+        "private_llm_api_base": config.private_llm_api_base,
+        "private_llm_api_key": config.private_llm_api_key,
+    }
+
+    # Create a stable hash from the cache data
+    cache_str = str(sorted(cache_data.items()))
+    return hashlib.md5(cache_str.encode()).hexdigest()
+

 @lru_cache(maxsize=None)
 def _get_llm_params_for_role(
@@ -54,42 +77,20 @@ def _get_llm_params_for_role(
         model_provider
     )

-    # If the agent type is OpenAI, check that the main LLM provider is also OpenAI.
-    if role == LLMRole.MAIN and config.agent_type == AgentType.OPENAI:
-        if model_provider != ModelProvider.OPENAI:
-            raise ValueError(
-                "OpenAI agent requested but main model provider is not OpenAI."
-            )
-
     return model_provider, model_name


-@lru_cache(maxsize=None)
-def get_tokenizer_for_model(
-    role: LLMRole, config: Optional[AgentConfig] = None
-) -> Optional[Callable]:
-    """
-    Get the tokenizer for the specified model, as determined by the role & config.
-    """
-    model_name = "Unknown model"
-    try:
-        model_provider, model_name = _get_llm_params_for_role(role, config)
-        if model_provider == ModelProvider.OPENAI:
-            return tiktoken.encoding_for_model("gpt-4o").encode
-        if model_provider == ModelProvider.ANTHROPIC:
-            return Anthropic().tokenizer
-    except Exception:
-        print(f"Error getting tokenizer for model {model_name}, ignoring")
-        return None
-    return None
-
-
-@lru_cache(maxsize=None)
 def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
     """
     Get the LLM for the specified role, using the provided config
     or a default if none is provided.
+
+    Uses a cache based on configuration parameters to avoid repeated LLM instantiation.
     """
+    # Check cache first
+    cache_key = _create_llm_cache_key(role, config)
+    if cache_key in _llm_cache:
+        return _llm_cache[cache_key]
     model_provider, model_name = _get_llm_params_for_role(role, config)
     max_tokens = (
         16384
@@ -107,7 +108,7 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             model=model_name,
             temperature=0,
             is_function_calling_model=True,
-            strict=
+            strict=False,
             max_tokens=max_tokens,
             pydantic_program_mode="openai",
         )
@@ -128,7 +129,6 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             model=model_name,
             temperature=0,
             is_function_calling_model=True,
-            allow_parallel_tool_calls=True,
             max_tokens=max_tokens,
         )
     elif model_provider == ModelProvider.TOGETHER:
@@ -157,14 +157,6 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             is_function_calling_model=True,
             max_tokens=max_tokens,
         )
-    elif model_provider == ModelProvider.FIREWORKS:
-        try:
-            from llama_index.llms.fireworks import Fireworks
-        except ImportError as e:
-            raise ImportError(
-                "fireworks not available. Install with: pip install llama-index-llms-fireworks"
-            ) from e
-        llm = Fireworks(model=model_name, temperature=0, max_tokens=max_tokens)
     elif model_provider == ModelProvider.BEDROCK:
         try:
             from llama_index.llms.bedrock_converse import BedrockConverse
@@ -197,6 +189,10 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
             raise ImportError(
                 "openai_like not available. Install with: pip install llama-index-llms-openai-like"
             ) from e
+        if not config or not config.private_llm_api_base or not config.private_llm_api_key:
+            raise ValueError(
+                "Private LLM requires both private_llm_api_base and private_llm_api_key to be set in AgentConfig."
+            )
         llm = OpenAILike(
             model=model_name,
             temperature=0,
@@ -209,4 +205,7 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:

     else:
         raise ValueError(f"Unknown LLM provider: {model_provider}")
+
+    # Cache the created LLM instance
+    _llm_cache[cache_key] = llm
     return llm
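To make the new caching behavior concrete, here is a minimal sketch of what it implies for callers, assuming default AgentConfig values and valid provider credentials in the environment; it only illustrates the mechanism added above and is not part of the package.

from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.llm_utils import get_llm
from vectara_agentic.types import LLMRole

# Sketch only: two configs whose provider/model/private-endpoint fields match
# hash to the same cache key, so the second call returns the first instance
# from _llm_cache instead of constructing a new LLM object.
cfg_a = AgentConfig()
cfg_b = AgentConfig()

llm_1 = get_llm(LLMRole.MAIN, cfg_a)
llm_2 = get_llm(LLMRole.MAIN, cfg_b)
assert llm_1 is llm_2  # served from the cache, not re-instantiated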
vectara_agentic/sub_query_workflow.py
CHANGED
@@ -5,6 +5,8 @@ that takes a user question and a list of tools, and outputs a list of sub-questi

 import re
 import json
+import logging
+
 from pydantic import BaseModel, Field

 from llama_index.core.workflow import (
@@ -70,7 +72,6 @@ class SubQuestionQueryWorkflow(Workflow):

         query = ev.inputs.query
         await ctx.set("original_query", query)
-        print(f"Query is {query}")

         required_attrs = ["agent", "llm", "tools"]
         for attr in required_attrs:
@@ -114,9 +115,6 @@ class SubQuestionQueryWorkflow(Workflow):
             """,
         )

-        if await ctx.get("verbose"):
-            print(f"Sub-questions are {response}")
-
         response_str = str(response)
         if not response_str:
             raise ValueError(
@@ -139,7 +137,6 @@ class SubQuestionQueryWorkflow(Workflow):
         if not sub_questions:
             # If the LLM returns an empty list, we need to handle it gracefully
             # We use the original query as a single question fallback
-            print("LLM returned empty sub-questions list")
             sub_questions = [original_query]

         await ctx.set("sub_question_count", len(sub_questions))
@@ -154,7 +151,7 @@ class SubQuestionQueryWorkflow(Workflow):
         Given a sub-question, return the answer to the sub-question, using the agent.
         """
         if await ctx.get("verbose"):
-
+            logging.info(f"Sub-question is {ev.question}")
         agent = await ctx.get("agent")
         question = ev.question
         response = await agent.achat(question)
@@ -188,14 +185,13 @@ class SubQuestionQueryWorkflow(Workflow):
         {answers}
         """
         if await ctx.get("verbose"):
-
+            logging.info(f"Final prompt is {prompt}")

         llm = await ctx.get("llm")
         response = llm.complete(prompt)

         if await ctx.get("verbose"):
-
-
+            logging.info(f"Final response is {response}")
         return StopEvent(result=self.OutputsModel(response=str(response)))


@@ -274,8 +270,7 @@ class SequentialSubQuestionsWorkflow(Workflow):

         original_query = await ctx.get("original_query")
         if ev.verbose:
-
-
+            logging.info(f"Query is {original_query}")
         llm = await ctx.get("llm")
         response = llm.complete(
             f"""
@@ -326,7 +321,7 @@ class SequentialSubQuestionsWorkflow(Workflow):

         await ctx.set("sub_questions", sub_questions)
         if await ctx.get("verbose"):
-
+            logging.info(f"Sub-questions are {sub_questions}")

         return self.QueryEvent(question=sub_questions[0], prev_answer="", num=0)

@@ -338,7 +333,7 @@ class SequentialSubQuestionsWorkflow(Workflow):
         Given a sub-question, return the answer to the sub-question, using the agent.
         """
         if await ctx.get("verbose"):
-
+            logging.info(f"Sub-question is {ev.question}")
         agent = await ctx.get("agent")
         sub_questions = await ctx.get("sub_questions")
         question = ev.question
@@ -353,7 +348,7 @@ class SequentialSubQuestionsWorkflow(Workflow):
         response = await agent.achat(question)
         answer = response.response
         if await ctx.get("verbose"):
-
+            logging.info(f"Answer is {answer}")

         if ev.num + 1 < len(sub_questions):
             await ctx.set("qna", await ctx.get("qna", []) + [(question, answer)])
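Since verbose output in these workflows now goes through logging.info instead of print, the caller controls whether it is shown. A minimal sketch of surfacing those messages with the standard library, configuring the root logger here purely as an example:

import logging

# Sketch only: enable INFO level so the sub-question / prompt / answer traces
# emitted by the workflows above are visible; a named logger or handler could
# be used instead of the root logger.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
)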