vectara-agentic 0.4.6__py3-none-any.whl → 0.4.8__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of vectara-agentic might be problematic.

@@ -42,6 +42,35 @@ def get_event_id(event) -> str:
 
     return str(uuid.uuid4())
 
+
+def is_tool_related_event(event) -> bool:
+    """
+    Determine if an event is actually tool-related and should be tracked.
+
+    This should only return True for events that represent actual tool calls or tool outputs,
+    not for streaming text deltas or other LLM response events.
+
+    Args:
+        event: The stream event to check
+
+    Returns:
+        bool: True if this event should be tracked for tool purposes
+    """
+    # Track explicit tool events from LlamaIndex workflow
+    if isinstance(event, (ToolCall, ToolCallResult)):
+        return True
+
+    has_tool_id = getattr(event, "tool_id", None)
+    has_tool_name = getattr(event, "tool_name", None)
+    has_delta = getattr(event, "delta", None)
+
+    # Some providers don't emit ToolCall/ToolCallResult; avoid treating deltas as tool events
+    if (has_tool_id or has_tool_name) and not has_delta:
+        return True
+
+    return False
+
+
 class StreamingResponseAdapter:
     """
     Adapter class that provides a LlamaIndex-compatible streaming response interface.
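The helper above is now a module-level function shared by both stream handlers, replacing the duplicated `_is_tool_related_event` methods that are removed later in this diff. A minimal usage sketch, assuming `is_tool_related_event` is in scope (its module path is not shown here) and using made-up stand-in objects rather than LlamaIndex's real event types:

# Illustrative only: SimpleNamespace stands in for real LlamaIndex events.
from types import SimpleNamespace

tool_call = SimpleNamespace(tool_id="call-1", tool_name="search", delta=None)
text_delta = SimpleNamespace(delta="partial answer text")

tool_events = [ev for ev in (tool_call, text_delta) if is_tool_related_event(ev)]
# Only tool_call survives: it carries tool_id/tool_name and no delta,
# while the streaming text delta is ignored.
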
@@ -90,25 +119,6 @@ class StreamingResponseAdapter:
         """
         return AgentResponse(response=self.response, metadata=self.metadata)
 
-    def wait_for_completion(self) -> None:
-        """
-        Wait for post-processing to complete and update metadata.
-        This should be called after streaming finishes but before accessing metadata.
-        """
-        if self.post_process_task and not self.post_process_task.done():
-            return
-        if self.post_process_task and self.post_process_task.done():
-            try:
-                final_response = self.post_process_task.result()
-                if hasattr(final_response, "metadata") and final_response.metadata:
-                    # Update our metadata from the completed task
-                    self.metadata.update(final_response.metadata)
-            except Exception as e:
-                logging.error(
-                    f"Error during post-processing: {e}. "
-                    "Ensure the post-processing task is correctly implemented."
-                )
-
 
 def extract_response_text_from_chat_message(response_text: Any) -> str:
     """
@@ -234,9 +244,8 @@ def create_stream_post_processing_task(
     async def _safe_post_process():
         try:
             return await _post_process()
-        except Exception:
-            traceback.print_exc()
-            # Return empty response on error
+        except Exception as e:
+            logging.error(f"Error {e} occurred during post-processing: {traceback.format_exc()}")
             return AgentResponse(response="", metadata={})
 
     return asyncio.create_task(_safe_post_process())
@@ -244,204 +253,208 @@ def create_stream_post_processing_task(
 
 class FunctionCallingStreamHandler:
     """
-    Handles streaming for function calling agents with proper event processing.
+    Streaming handler for function-calling agents with strict "no leaks" gating.
+
+    Core ideas:
+    - Buffer tokens PER LLM STEP.
+    - Commit the buffer ONLY if that step ends with AgentOutput.tool_calls == [].
+    - Drop the buffer if the step triggers tool calls (planning/tool-selection).
+    - Track pending tool results; handle multi-round (tool -> read -> tool -> ...) loops.
+    - Support return_direct tools (tool output is the final answer, no synthesis step).
+    - Optional optimistic streaming with rollback token for nicer UX.
     """
 
-    def __init__(self, agent_instance, handler, prompt: str):
+    def __init__(
+        self,
+        agent_instance,
+        handler,
+        prompt: str,
+        *,
+        stream_policy: str = "final_only",  # "final_only" | "optimistic_live"
+        rollback_token: str = "[[__rollback_current_step__]]",  # UI control signal (optional)
+    ):
         self.agent_instance = agent_instance
-        self.handler = handler
+        self.handler = handler  # awaitable; also has .stream_events()
         self.prompt = prompt
+
+        self.stream_policy = stream_policy
+        self.rollback_token = rollback_token
+
+        # Plumbing for your existing adapter/post-processing
         self.final_response_container = {"resp": None}
         self.stream_complete_event = asyncio.Event()
 
     async def process_stream_events(self) -> AsyncIterator[str]:
         """
-        Process streaming events and yield text tokens.
+        Process streaming events and yield only valid, final tokens.
 
-        Yields:
-            str: Text tokens from the streaming response
+        Contract:
+        - Never surface "planning" tokens (tool arguments, scratchpads, etc).
+        - Only surface tokens produced in the last, post-tool LLM step,
+          or a return_direct tool's output.
         """
-        had_tool_calls = False
-        transitioned_to_prose = False
+        # Step-scoped state
+        step_buffer: list[str] = []
+        step_has_tool_calls = False
+
+        # Run-scoped state
+        pending_tools = 0
+        committed_any_text = False
+
+        def _reset_step():
+            nonlocal step_has_tool_calls
+            step_buffer.clear()
+            step_has_tool_calls = False
 
         async for ev in self.handler.stream_events():
-            # Store tool outputs for VHC regardless of progress callback
+            # ---- 1) Capture tool outputs for downstream logging/telemetry ----
            if isinstance(ev, ToolCallResult):
                 if hasattr(self.agent_instance, "_add_tool_output"):
                     # pylint: disable=W0212
-                    self.agent_instance._add_tool_output(
-                        ev.tool_name, str(ev.tool_output)
-                    )
+                    self.agent_instance._add_tool_output(ev.tool_name, str(ev.tool_output))
+
+                pending_tools = max(0, pending_tools - 1)
 
-            # Handle progress callbacks if available
+                # Return-direct short-circuit: surface tool output as the final answer
+                if getattr(ev, "return_direct", False):
+                    yield str(ev.tool_output)
+                    committed_any_text = True
+                    # Do not early-break; keep draining events safely.
+
+            # ---- 2) Progress callback plumbing (safe and optional) ----
             if self.agent_instance.agent_progress_callback:
-                # Only track events that are actual tool-related events
-                if self._is_tool_related_event(ev):
+                if is_tool_related_event(ev):
                     try:
                         event_id = get_event_id(ev)
                         await self._handle_progress_callback(ev, event_id)
-                    except ValueError as e:
-                        logging.warning(f"Skipping event due to missing ID: {e}")
-                        continue
+                    except Exception as e:
+                        logging.warning(f"[progress-callback] skipping event: {e}")
 
-            # Process streaming text events
+            # ---- 3) Step boundaries & gating logic ----
+            # New step starts: clear per-step state
+            if isinstance(ev, AgentInput):
+                _reset_step()
+                continue
+
+            # Streaming deltas (provisional)
             if hasattr(ev, "__class__") and "AgentStream" in str(ev.__class__):
-                if hasattr(ev, "tool_calls") and ev.tool_calls:
-                    had_tool_calls = True
-                elif (
-                    hasattr(ev, "tool_calls")
-                    and not ev.tool_calls
-                    and had_tool_calls
-                    and not transitioned_to_prose
-                ):
-                    yield "\n\n"
-                    transitioned_to_prose = True
-                    if hasattr(ev, "delta"):
-                        yield ev.delta
-                elif (
-                    hasattr(ev, "tool_calls")
-                    and not ev.tool_calls
-                    and hasattr(ev, "delta")
-                ):
-                    yield ev.delta
+                # If the model is constructing a function call, LlamaIndex will attach tool_calls here
+                if getattr(ev, "tool_calls", None):
+                    step_has_tool_calls = True
 
-        # When stream is done, await the handler to get the final response
+                delta = getattr(ev, "delta", None)
+                if not delta:
+                    continue
+
+                # Always buffer first
+                step_buffer.append(delta)
+
+                # Optional "optimistic" UX: show live typing but be ready to roll it back
+                if self.stream_policy == "optimistic_live" and pending_tools == 0 and not step_has_tool_calls:
+                    yield delta
+
+                continue
+
+            # Step end: decide to commit or drop
+            if isinstance(ev, AgentOutput):
+                n_calls = len(getattr(ev, "tool_calls", []) or [])
+
+                if n_calls == 0:
+                    # Final text step -> commit
+                    if self.stream_policy == "final_only":
+                        # We held everything; now stream it out in order.
+                        for chunk in step_buffer:
+                            yield chunk
+                        # In optimistic mode, UI already saw these chunks live.
+
+                    committed_any_text = committed_any_text or bool(step_buffer)
+                    _reset_step()
+
+                else:
+                    # Planning/tool step -> drop buffer
+                    if self.stream_policy == "optimistic_live" and step_buffer:
+                        # Tell the UI to roll back the ephemeral message
+                        # (only if your frontend supports it)
+                        yield self.rollback_token
+
+                    _reset_step()
+                    pending_tools += n_calls
+
+                continue
+
+        # ---- 4) Finish: await the underlying handler for the final result ----
         try:
             self.final_response_container["resp"] = await self.handler
         except Exception as e:
             error_str = str(e).lower()
             if "rate limit" in error_str or "429" in error_str:
-                logging.error(f"🔍 [RATE_LIMIT_ERROR] Rate limit exceeded: {e}")
+                logging.error(f"[RATE_LIMIT_ERROR] {e}")
                 self.final_response_container["resp"] = AgentResponse(
                     response="Rate limit exceeded. Please try again later.",
                     source_nodes=[],
                     metadata={"error_type": "rate_limit", "original_error": str(e)},
                 )
             else:
-                logging.error(f"🔍 [STREAM_ERROR] Error processing stream events: {e}")
-                logging.error(
-                    f"🔍 [STREAM_ERROR] Full traceback: {traceback.format_exc()}"
-                )
+                logging.error(f"[STREAM_ERROR] {e}")
                 self.final_response_container["resp"] = AgentResponse(
                     response="Response completion Error",
                     source_nodes=[],
                     metadata={"error_type": "general", "original_error": str(e)},
                 )
         finally:
-            # Signal that stream processing is complete
+            # If nothing was ever committed and we ended right after a tool,
+            # assume that tool's output is the "final answer" (common with return_direct).
             self.stream_complete_event.set()
 
-    def _is_tool_related_event(self, event) -> bool:
+    async def _handle_progress_callback(self, event, event_id: str):
         """
-        Determine if an event is actually tool-related and should be tracked.
-
-        This should only return True for events that represent actual tool calls or tool outputs,
-        not for streaming text deltas or other LLM response events.
-
-        Args:
-            event: The stream event to check
-
-        Returns:
-            bool: True if this event should be tracked for tool purposes
+        Fan out progress events to the user's callback (sync or async). Mirrors your existing logic.
         """
-        # Track explicit tool events from LlamaIndex workflow
-        if isinstance(event, (ToolCall, ToolCallResult)):
-            return True
-
-        has_tool_id = hasattr(event, "tool_id") and event.tool_id
-        has_delta = hasattr(event, "delta") and event.delta
-        has_tool_name = hasattr(event, "tool_name") and event.tool_name
-
-        # We're not seeing ToolCall/ToolCallResult events in the stream, so let's be more liberal
-        # but still avoid streaming deltas
-        if (has_tool_id or has_tool_name) and not has_delta:
-            return True
+        cb = self.agent_instance.agent_progress_callback
+        is_async = asyncio.iscoroutinefunction(cb)
 
-        # Everything else (streaming deltas, agent outputs, workflow events, etc.)
-        # should NOT be tracked as tool events
-        return False
-
-    async def _handle_progress_callback(self, event, event_id: str):
-        """Handle progress callback events for different event types with proper context propagation."""
         try:
             if isinstance(event, ToolCall):
-                # Check if callback is async or sync
-                if asyncio.iscoroutinefunction(
-                    self.agent_instance.agent_progress_callback
-                ):
-                    await self.agent_instance.agent_progress_callback(
-                        status_type=AgentStatusType.TOOL_CALL,
-                        msg={
-                            "tool_name": event.tool_name,
-                            "arguments": json.dumps(event.tool_kwargs),
-                        },
-                        event_id=event_id,
-                    )
+                payload = {
+                    "tool_name": event.tool_name,
+                    "arguments": json.dumps(getattr(event, "tool_kwargs", {})),
+                }
+                if is_async:
+                    await cb(status_type=AgentStatusType.TOOL_CALL, msg=payload, event_id=event_id)
                 else:
-                    # For sync callbacks, ensure we call them properly
-                    self.agent_instance.agent_progress_callback(
-                        status_type=AgentStatusType.TOOL_CALL,
-                        msg={
-                            "tool_name": event.tool_name,
-                            "arguments": json.dumps(event.tool_kwargs),
-                        },
-                        event_id=event_id,
-                    )
+                    cb(status_type=AgentStatusType.TOOL_CALL, msg=payload, event_id=event_id)
 
             elif isinstance(event, ToolCallResult):
-                # Check if callback is async or sync
-                if asyncio.iscoroutinefunction(
-                    self.agent_instance.agent_progress_callback
-                ):
-                    await self.agent_instance.agent_progress_callback(
-                        status_type=AgentStatusType.TOOL_OUTPUT,
-                        msg={
-                            "tool_name": event.tool_name,
-                            "content": str(event.tool_output),
-                        },
-                        event_id=event_id,
-                    )
+                payload = {
+                    "tool_name": event.tool_name,
+                    "content": str(event.tool_output),
+                }
+                if is_async:
+                    await cb(status_type=AgentStatusType.TOOL_OUTPUT, msg=payload, event_id=event_id)
                 else:
-                    self.agent_instance.agent_progress_callback(
-                        status_type=AgentStatusType.TOOL_OUTPUT,
-                        msg={
-                            "tool_name": event.tool_name,
-                            "content": str(event.tool_output),
-                        },
-                        event_id=event_id,
-                    )
+                    cb(status_type=AgentStatusType.TOOL_OUTPUT, msg=payload, event_id=event_id)
 
             elif isinstance(event, AgentInput):
-                self.agent_instance.agent_progress_callback(
-                    status_type=AgentStatusType.AGENT_UPDATE,
-                    msg={"content": f"Agent input: {event.input}"},
-                    event_id=event_id,
-                )
+                payload = {"content": f"Agent input: {getattr(event, 'input', '')}"}
+                if is_async:
+                    await cb(status_type=AgentStatusType.AGENT_UPDATE, msg=payload, event_id=event_id)
+                else:
+                    cb(status_type=AgentStatusType.AGENT_UPDATE, msg=payload, event_id=event_id)
 
             elif isinstance(event, AgentOutput):
-                self.agent_instance.agent_progress_callback(
-                    status_type=AgentStatusType.AGENT_UPDATE,
-                    msg={"content": f"Agent output: {event.response}"},
-                    event_id=event_id,
-                )
+                payload = {"content": f"Agent output: {getattr(event, 'response', '')}"}
+                if is_async:
+                    await cb(status_type=AgentStatusType.AGENT_UPDATE, msg=payload, event_id=event_id)
+                else:
+                    cb(status_type=AgentStatusType.AGENT_UPDATE, msg=payload, event_id=event_id)
 
         except Exception as e:
+            logging.error(f"[progress-callback] Exception: {e}")
+            logging.error(traceback.format_exc())
 
-            logging.error(f"Exception in progress callback: {e}")
-            logging.error(f"Traceback: {traceback.format_exc()}")
-            # Continue execution despite callback errors
-
-    def create_streaming_response(
-        self, user_metadata: Dict[str, Any]
-    ) -> "StreamingResponseAdapter":
+    def create_streaming_response(self, user_metadata: Dict[str, Any]) -> "StreamingResponseAdapter":
         """
-        Create a StreamingResponseAdapter with proper post-processing.
-
-        Args:
-            user_metadata: User metadata dictionary to update
-
-        Returns:
-            StreamingResponseAdapter: Configured streaming adapter
+        Build the adapter with post-processing wired in.
         """
         post_process_task = create_stream_post_processing_task(
             self.stream_complete_event,
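The gating described in the new docstring (buffer tokens per LLM step, commit only when the step ends without tool calls, drop otherwise) can be illustrated with a small standalone sketch that has no LlamaIndex dependencies; the function and the step tuples below are invented for illustration only:

# Toy model of the per-step gating: buffer each step's deltas, commit the buffer
# only when the step's final output carries no tool calls, otherwise drop it.
def gate_steps(steps):
    """steps: list of (deltas, n_tool_calls) pairs, one pair per LLM step."""
    committed = []
    for deltas, n_tool_calls in steps:
        step_buffer = list(deltas)            # always buffer first
        if n_tool_calls == 0:
            committed.extend(step_buffer)     # final text step -> commit
        # else: planning/tool-selection step -> buffer is dropped
    return "".join(committed)

# Step 1 only plans a tool call (its tokens never reach the user);
# step 2 produces the final answer, which is streamed out.
print(gate_steps([(['{"city": "Paris"}'], 1), (["It is ", "sunny."], 0)]))
# -> "It is sunny."
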
@@ -453,8 +466,8 @@ class FunctionCallingStreamHandler:
 
         return StreamingResponseAdapter(
             async_response_gen=self.process_stream_events,
-            response="",  # will be filled post-stream
-            metadata={},
+            response="",  # will be set by post-processing
+            metadata={},  # will be set by post-processing
             post_process_task=post_process_task,
         )
 
@@ -492,7 +505,7 @@ class ReActStreamHandler:
             # Handle progress callbacks if available - this is the key missing piece!
             if self.agent_instance.agent_progress_callback:
                 # Only track events that are actual tool-related events
-                if self._is_tool_related_event(event):
+                if is_tool_related_event(event):
                     try:
                         # Get event ID from LlamaIndex event
                         event_id = get_event_id(event)
@@ -590,10 +603,10 @@ class ReActStreamHandler:
             self.final_response_container["resp"] = await self.handler
         except Exception as e:
             logging.error(
-                f"🔍 [REACT_STREAM_ERROR] Error processing ReAct stream events: {e}"
+                f"[REACT_STREAM_ERROR] Error processing ReAct stream events: {e}"
             )
             logging.error(
-                f"🔍 [REACT_STREAM_ERROR] Full traceback: {traceback.format_exc()}"
+                f"[REACT_STREAM_ERROR] Full traceback: {traceback.format_exc()}"
             )
             self.final_response_container["resp"] = AgentResponse(
                 response="ReAct Response completion Error", source_nodes=[], metadata={}
@@ -602,36 +615,6 @@ class ReActStreamHandler:
             # Signal that stream processing is complete
             self.stream_complete_event.set()
 
-    def _is_tool_related_event(self, event) -> bool:
-        """
-        Determine if an event is actually tool-related and should be tracked.
-
-        This should only return True for events that represent actual tool calls or tool outputs,
-        not for streaming text deltas or other LLM response events.
-
-        Args:
-            event: The stream event to check
-
-        Returns:
-            bool: True if this event should be tracked for tool purposes
-        """
-        # Track explicit tool events from LlamaIndex workflow
-        if isinstance(event, (ToolCall, ToolCallResult)):
-            return True
-
-        has_tool_id = hasattr(event, "tool_id") and event.tool_id
-        has_delta = hasattr(event, "delta") and event.delta
-        has_tool_name = hasattr(event, "tool_name") and event.tool_name
-
-        # We're not seeing ToolCall/ToolCallResult events in the stream, so let's be more liberal
-        # but still avoid streaming deltas
-        if (has_tool_id or has_tool_name) and not has_delta:
-            return True
-
-        # Everything else (streaming deltas, agent outputs, workflow events, etc.)
-        # should NOT be tracked as tool events
-        return False
-
     def create_streaming_response(
         self, user_metadata: Dict[str, Any]
     ) -> "StreamingResponseAdapter":
@@ -182,7 +182,7 @@ def get_llm(role: LLMRole, config: Optional[AgentConfig] = None) -> LLM:
         ) from e
     additional_kwargs = {"seed": 42}
     if model_name in [
-        "deepseek-ai/DeepSeek-V3.1", "openai/gpt-oss-120b",
+        "deepseek-ai/DeepSeek-V3.1",
         "deepseek-ai/DeepSeek-R1", "Qwen/Qwen3-235B-A22B-Thinking-2507"
         "openai/gpt-oss-120b", "openai/gpt-oss-20b",
     ]:
@@ -72,7 +72,7 @@ class SubQuestionQueryWorkflow(Workflow):
             raise ValueError(f"Expected inputs to be of type {self.InputsModel}")
 
         query = ev.inputs.query
-        await ctx.set("original_query", query)
+        await ctx.store.set("original_query", query)
 
         required_attrs = ["agent", "llm", "tools"]
         for attr in required_attrs:
@@ -81,15 +81,15 @@ class SubQuestionQueryWorkflow(Workflow):
                     f"{attr.capitalize()} not provided to workflow Start Event."
                 )
 
-        await ctx.set("agent", ev.agent)
-        await ctx.set("llm", ev.llm)
-        await ctx.set("tools", ev.tools)
-        await ctx.set("verbose", getattr(ev, "verbose", False))
+        await ctx.store.set("agent", ev.agent)
+        await ctx.store.set("llm", ev.llm)
+        await ctx.store.set("tools", ev.tools)
+        await ctx.store.set("verbose", getattr(ev, "verbose", False))
 
         chat_history = [str(msg) for msg in ev.agent.memory.get()]
 
-        llm = await ctx.get("llm")
-        original_query = await ctx.get("original_query")
+        llm = await ctx.store.get("llm")
+        original_query = await ctx.store.get("original_query")
         response = llm.complete(
             f"""
             Given a user question, and a list of tools, output a list of
@@ -140,7 +140,7 @@ class SubQuestionQueryWorkflow(Workflow):
             # We use the original query as a single question fallback
             sub_questions = [original_query]
 
-        await ctx.set("sub_question_count", len(sub_questions))
+        await ctx.store.set("sub_question_count", len(sub_questions))
         for question in sub_questions:
             ctx.send_event(self.QueryEvent(question=question))
 
@@ -151,13 +151,13 @@ class SubQuestionQueryWorkflow(Workflow):
         """
         Given a sub-question, return the answer to the sub-question, using the agent.
         """
-        if await ctx.get("verbose"):
+        if await ctx.store.get("verbose"):
             logging.info(f"Sub-question is {ev.question}")
-        agent = await ctx.get("agent")
+        agent = await ctx.store.get("agent")
         question = ev.question
         response = await agent.achat(question)
         answer = str(response)
-        await ctx.set("qna", await ctx.get("qna", []) + [(question, answer)])
+        await ctx.store.set("qna", await ctx.store.get("qna", []) + [(question, answer)])
         return self.AnswerEvent(question=question, answer=answer)
 
     @step
@@ -166,7 +166,7 @@ class SubQuestionQueryWorkflow(Workflow):
         Given a list of answers to sub-questions, combine them into a single answer.
         """
         ready = ctx.collect_events(
-            ev, [self.AnswerEvent] * await ctx.get("sub_question_count")
+            ev, [self.AnswerEvent] * await ctx.store.get("sub_question_count")
         )
         if ready is None:
             return None
@@ -180,18 +180,18 @@ class SubQuestionQueryWorkflow(Workflow):
         each of which has been answered. Combine the answers to all the sub-questions
         into a single answer to the original question.
 
-        Original question: {await ctx.get('original_query')}
+        Original question: {await ctx.store.get('original_query')}
 
         Sub-questions and answers:
         {answers}
         """
-        if await ctx.get("verbose"):
+        if await ctx.store.get("verbose"):
             logging.info(f"Final prompt is {prompt}")
 
-        llm = await ctx.get("llm")
+        llm = await ctx.store.get("llm")
         response = llm.complete(prompt)
 
-        if await ctx.get("verbose"):
+        if await ctx.store.get("verbose"):
             logging.info(f"Final response is {response}")
         return StopEvent(result=self.OutputsModel(response=str(response)))
 
@@ -246,33 +246,33 @@ class SequentialSubQuestionsWorkflow(Workflow):
             raise ValueError(f"Expected inputs to be of type {self.InputsModel}")
         if hasattr(ev, "inputs"):
             query = ev.inputs.query
-            await ctx.set("original_query", query)
+            await ctx.store.set("original_query", query)
 
         if hasattr(ev, "agent"):
-            await ctx.set("agent", ev.agent)
+            await ctx.store.set("agent", ev.agent)
         else:
             raise ValueError("Agent not provided to workflow Start Event.")
         chat_history = [str(msg) for msg in ev.agent.memory.get()]
 
         if hasattr(ev, "llm"):
-            await ctx.set("llm", ev.llm)
+            await ctx.store.set("llm", ev.llm)
         else:
             raise ValueError("LLM not provided to workflow Start Event.")
 
         if hasattr(ev, "tools"):
-            await ctx.set("tools", ev.tools)
+            await ctx.store.set("tools", ev.tools)
         else:
             raise ValueError("Tools not provided to workflow Start Event.")
 
         if hasattr(ev, "verbose"):
-            await ctx.set("verbose", ev.verbose)
+            await ctx.store.set("verbose", ev.verbose)
         else:
-            await ctx.set("verbose", False)
+            await ctx.store.set("verbose", False)
 
-        original_query = await ctx.get("original_query")
+        original_query = await ctx.store.get("original_query")
         if ev.verbose:
             logging.info(f"Query is {original_query}")
-        llm = await ctx.get("llm")
+        llm = await ctx.store.get("llm")
         response = llm.complete(
             f"""
             Given a user question, and a list of tools, output a list of
@@ -320,8 +320,8 @@ class SequentialSubQuestionsWorkflow(Workflow):
 
         sub_questions = response_obj.get("sub_questions")
 
-        await ctx.set("sub_questions", sub_questions)
-        if await ctx.get("verbose"):
+        await ctx.store.set("sub_questions", sub_questions)
+        if await ctx.store.get("verbose"):
             logging.info(f"Sub-questions are {sub_questions}")
 
         return self.QueryEvent(question=sub_questions[0], prev_answer="", num=0)
@@ -333,10 +333,10 @@ class SequentialSubQuestionsWorkflow(Workflow):
         """
         Given a sub-question, return the answer to the sub-question, using the agent.
         """
-        if await ctx.get("verbose"):
+        if await ctx.store.get("verbose"):
             logging.info(f"Sub-question is {ev.question}")
-        agent = await ctx.get("agent")
-        sub_questions = await ctx.get("sub_questions")
+        agent = await ctx.store.get("agent")
+        sub_questions = await ctx.store.get("sub_questions")
         question = ev.question
         if ev.prev_answer:
             prev_question = sub_questions[ev.num - 1]
@@ -348,11 +348,11 @@ class SequentialSubQuestionsWorkflow(Workflow):
         else:
             response = await agent.achat(question)
             answer = response.response
-        if await ctx.get("verbose"):
+        if await ctx.store.get("verbose"):
             logging.info(f"Answer is {answer}")
 
         if ev.num + 1 < len(sub_questions):
-            await ctx.set("qna", await ctx.get("qna", []) + [(question, answer)])
+            await ctx.store.set("qna", await ctx.store.get("qna", []) + [(question, answer)])
             return self.QueryEvent(
                 question=sub_questions[ev.num + 1],
                 prev_answer=answer,
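The workflow hunks above all make the same mechanical change: per-run state moves from `ctx.set`/`ctx.get` to the `ctx.store` API of newer llama-index-core workflow contexts. A minimal sketch of the migrated pattern in an illustrative workflow step (the workflow and its keys are invented; only the `ctx.store.set`/`ctx.store.get` calls mirror the change):

from llama_index.core.workflow import Context, StartEvent, StopEvent, Workflow, step

class EchoWorkflow(Workflow):
    @step
    async def start(self, ctx: Context, ev: StartEvent) -> StopEvent:
        # was: await ctx.set("original_query", ev.query)
        await ctx.store.set("original_query", ev.query)
        # was: await ctx.get("original_query")
        original_query = await ctx.store.get("original_query")
        return StopEvent(result=original_query)

# Usage sketch: result = await EchoWorkflow().run(query="hello")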