agnt5-0.2.8a7-cp310-abi3-macosx_11_0_arm64.whl → agnt5-0.2.8a8-cp310-abi3-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agnt5 might be problematic; see the registry's advisory page for more details.

agnt5/client.py CHANGED
@@ -43,6 +43,8 @@ class Client:
43
43
  component: str,
44
44
  input_data: Optional[Dict[str, Any]] = None,
45
45
  component_type: str = "function",
46
+ session_id: Optional[str] = None,
47
+ user_id: Optional[str] = None,
46
48
  ) -> Dict[str, Any]:
47
49
  """Execute a component synchronously and wait for the result.
48
50
 
@@ -52,6 +54,8 @@ class Client:
52
54
  component: Name of the component to execute
53
55
  input_data: Input data for the component (will be sent as JSON body)
54
56
  component_type: Type of component - "function", "workflow", "agent", "tool" (default: "function")
57
+ session_id: Session identifier for multi-turn conversations (optional)
58
+ user_id: User identifier for user-scoped memory (optional)
55
59
 
56
60
  Returns:
57
61
  Dictionary containing the component's output
@@ -68,6 +72,12 @@ class Client:
68
72
  # Workflow execution (explicit)
69
73
  result = client.run("order_fulfillment", {"order_id": "123"}, component_type="workflow")
70
74
 
75
+ # Multi-turn conversation with session
76
+ result = client.run("chat", {"message": "Hello"}, session_id="session-123")
77
+
78
+ # User-scoped memory
79
+ result = client.run("assistant", {"message": "Help me"}, user_id="user-456")
80
+
71
81
  # No input data
72
82
  result = client.run("get_status")
73
83
  ```
@@ -78,11 +88,18 @@ class Client:
78
88
  # Build URL with component type
79
89
  url = urljoin(self.gateway_url + "/", f"v1/run/{component_type}/{component}")
80
90
 
91
+ # Build headers with memory scoping identifiers
92
+ headers = {"Content-Type": "application/json"}
93
+ if session_id:
94
+ headers["X-Session-ID"] = session_id
95
+ if user_id:
96
+ headers["X-User-ID"] = user_id
97
+
81
98
  # Make request
82
99
  response = self._client.post(
83
100
  url,
84
101
  json=input_data,
85
- headers={"Content-Type": "application/json"},
102
+ headers=headers,
86
103
  )
87
104
 
88
105
  # Handle errors
agnt5/context.py CHANGED
@@ -2,11 +2,19 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
+ import contextvars
5
6
  import logging
6
7
  from typing import Any, Awaitable, Callable, Dict, List, Optional, TypeVar, Union
7
8
 
8
9
  T = TypeVar("T")
9
10
 
11
+ # Task-local context variable for automatic context propagation
12
+ # This is NOT a global variable - contextvars provide task-isolated storage
13
+ # Each asyncio task gets its own independent copy, preventing cross-contamination
14
+ _current_context: contextvars.ContextVar[Optional["Context"]] = contextvars.ContextVar(
15
+ "_current_context", default=None
16
+ )
17
+
10
18
 
11
19
  class _CorrelationFilter(logging.Filter):
12
20
  """Inject correlation IDs (run_id, trace_id, span_id) into every log record."""
@@ -81,4 +89,90 @@ class Context:
81
89
  return self._logger
82
90
 
83
91
 
92
+ def get_current_context() -> Optional[Context]:
93
+ """
94
+ Get the current execution context from task-local storage.
95
+
96
+ This function retrieves the context that was set by the nearest enclosing
97
+ decorator (@function, @workflow) or Agent.run() call in the current asyncio task.
98
+
99
+ Returns:
100
+ Current Context if available (WorkflowContext, FunctionContext, AgentContext),
101
+ None if no context is set (e.g., running outside AGNT5 execution)
102
+
103
+ Example:
104
+ >>> ctx = get_current_context()
105
+ >>> if ctx:
106
+ ... ctx.logger.info("Logging from anywhere in the call stack!")
107
+ ... runtime = ctx._runtime_context # Access tracing context
108
+
109
+ Note:
110
+ This uses Python's contextvars which provide task-local (NOT global) storage.
111
+ Each asyncio task has its own isolated context, preventing cross-contamination
112
+ between concurrent executions.
113
+ """
114
+ return _current_context.get()
115
+
116
+
117
+ def set_current_context(ctx: Context) -> contextvars.Token:
118
+ """
119
+ Set the current execution context in task-local storage.
120
+
121
+ This is typically called by decorators and framework code, not by user code.
122
+ Returns a token that can be used to reset the context to its previous value.
123
+
124
+ Args:
125
+ ctx: Context to set as current
126
+
127
+ Returns:
128
+ Token for resetting the context later (use with contextvars.Token.reset())
129
+
130
+ Example:
131
+ >>> token = set_current_context(my_context)
132
+ >>> try:
133
+ ... # Context is available via get_current_context()
134
+ ... do_work()
135
+ >>> finally:
136
+ ... _current_context.reset(token) # Restore previous context
137
+
138
+ Note:
139
+ Always use try/finally to ensure context is properly reset, even if
140
+ an exception occurs. This prevents context leakage between executions.
141
+ """
142
+ return _current_context.set(ctx)
143
+
144
+
145
+ def get_workflow_context() -> Optional["WorkflowContext"]:
146
+ """
147
+ Get the WorkflowContext from the current context or its parent chain.
148
+
149
+ This function traverses the context hierarchy to find a WorkflowContext,
150
+ which is needed for emitting workflow checkpoints from nested contexts
151
+ like AgentContext or FunctionContext.
152
+
153
+ Returns:
154
+ WorkflowContext if found in the context chain, None otherwise
155
+
156
+ Example:
157
+ >>> # Inside an agent called from a workflow
158
+ >>> workflow_ctx = get_workflow_context()
159
+ >>> if workflow_ctx:
160
+ ... workflow_ctx._send_checkpoint("workflow.lm.started", {...})
161
+ """
162
+ from .workflow import WorkflowContext
163
+
164
+ ctx = get_current_context()
165
+
166
+ # Traverse up the context chain looking for WorkflowContext
167
+ while ctx is not None:
168
+ if isinstance(ctx, WorkflowContext):
169
+ return ctx
170
+ # Check if this context has a parent_context attribute
171
+ if hasattr(ctx, 'parent_context'):
172
+ ctx = ctx.parent_context
173
+ else:
174
+ break
175
+
176
+ return None
177
+
84
178
 
agnt5/exceptions.py CHANGED
@@ -65,6 +65,8 @@ class WaitingForUserInputException(AGNT5Error):
65
65
  input_type: Type of input ("text", "approval", or "choice")
66
66
  options: List of options for approval/choice inputs
67
67
  checkpoint_state: Current workflow state for resume
68
+ agent_context: Optional agent execution state for agent-level HITL
69
+ Contains: agent_name, iteration, messages, tool_results, pending_tool_call, etc.
68
70
  """
69
71
 
70
72
  def __init__(
@@ -73,6 +75,7 @@ class WaitingForUserInputException(AGNT5Error):
73
75
  input_type: str,
74
76
  options: Optional[List[Dict]],
75
77
  checkpoint_state: Dict,
78
+ agent_context: Optional[Dict] = None,
76
79
  ) -> None:
77
80
  """Initialize WaitingForUserInputException.
78
81
 
@@ -81,9 +84,19 @@ class WaitingForUserInputException(AGNT5Error):
81
84
  input_type: Type of input - "text", "approval", or "choice"
82
85
  options: List of option dicts (for approval/choice)
83
86
  checkpoint_state: Workflow state snapshot for resume
87
+ agent_context: Optional agent execution state for resuming agents
88
+ Required fields when provided:
89
+ - agent_name: Name of the agent that paused
90
+ - iteration: Current iteration number (0-indexed)
91
+ - messages: LLM conversation history as list of dicts
92
+ - tool_results: Partial tool results for current iteration
93
+ - pending_tool_call: The HITL tool call awaiting response
94
+ - all_tool_calls: All tool calls made so far
95
+ - model_config: Model settings for resume
84
96
  """
85
97
  super().__init__(f"Waiting for user input: {question}")
86
98
  self.question = question
87
99
  self.input_type = input_type
88
100
  self.options = options or []
89
101
  self.checkpoint_state = checkpoint_state
102
+ self.agent_context = agent_context
agnt5/function.py CHANGED
@@ -10,7 +10,7 @@ from typing import Any, Awaitable, Callable, Dict, Optional, TypeVar, Union, cas
10
10
 
11
11
  from ._retry_utils import execute_with_retry, parse_backoff_policy, parse_retry_policy
12
12
  from ._schema_utils import extract_function_metadata, extract_function_schemas
13
- from .context import Context
13
+ from .context import Context, set_current_context
14
14
  from .exceptions import RetryError
15
15
  from .types import BackoffPolicy, BackoffType, FunctionConfig, HandlerFunc, RetryPolicy
16
16
 
@@ -292,16 +292,23 @@ def function(
292
292
  )
293
293
  func_args = args
294
294
 
295
- # Execute with retry
296
- return await execute_with_retry(
297
- handler_func,
298
- ctx,
299
- config.retries or RetryPolicy(),
300
- config.backoff or BackoffPolicy(),
301
- needs_context,
302
- *func_args,
303
- **kwargs,
304
- )
295
+ # Set context in task-local storage for automatic propagation
296
+ token = set_current_context(ctx)
297
+ try:
298
+ # Execute with retry
299
+ return await execute_with_retry(
300
+ handler_func,
301
+ ctx,
302
+ config.retries or RetryPolicy(),
303
+ config.backoff or BackoffPolicy(),
304
+ needs_context,
305
+ *func_args,
306
+ **kwargs,
307
+ )
308
+ finally:
309
+ # Always reset context to prevent leakage
310
+ from .context import _current_context
311
+ _current_context.reset(token)
305
312
 
306
313
  # Store config on wrapper for introspection
307
314
  wrapper._agnt5_config = config # type: ignore
agnt5/lm.py CHANGED
@@ -39,6 +39,7 @@ from enum import Enum
39
39
  from typing import Any, AsyncIterator, Dict, List, Optional
40
40
 
41
41
  from ._schema_utils import detect_format_type
42
+ from .context import get_current_context
42
43
 
43
44
  try:
44
45
  from ._core import LanguageModel as RustLanguageModel
@@ -366,12 +367,55 @@ class _LanguageModel(LanguageModel):
366
367
  # Serialize tool_choice to JSON for Rust
367
368
  kwargs["tool_choice"] = json.dumps(request.tool_choice.value)
368
369
 
369
- # Call Rust implementation - it returns a proper Python coroutine now
370
- # Using pyo3-async-runtimes for truly async HTTP calls without blocking
371
- rust_response = await self._rust_lm.generate(prompt=prompt, **kwargs)
370
+ # Pass runtime_context for proper trace linking
371
+ # Try to get from current context if available
372
+ current_ctx = get_current_context()
373
+ if current_ctx and hasattr(current_ctx, '_runtime_context') and current_ctx._runtime_context:
374
+ kwargs["runtime_context"] = current_ctx._runtime_context
375
+
376
+ # Emit checkpoint if called within a workflow context
377
+ from .context import get_workflow_context
378
+ workflow_ctx = get_workflow_context()
379
+ if workflow_ctx:
380
+ workflow_ctx._send_checkpoint("workflow.lm.started", {
381
+ "model": model,
382
+ "provider": self._provider,
383
+ "temperature": kwargs.get("temperature"),
384
+ "max_tokens": kwargs.get("max_tokens"),
385
+ })
372
386
 
373
- # Convert Rust response to Python
374
- return self._convert_response(rust_response)
387
+ try:
388
+ # Call Rust implementation - it returns a proper Python coroutine now
389
+ # Using pyo3-async-runtimes for truly async HTTP calls without blocking
390
+ rust_response = await self._rust_lm.generate(prompt=prompt, **kwargs)
391
+
392
+ # Convert Rust response to Python
393
+ response = self._convert_response(rust_response)
394
+
395
+ # Emit completion checkpoint with usage stats
396
+ if workflow_ctx:
397
+ usage_dict = None
398
+ if response.usage:
399
+ usage_dict = {
400
+ "prompt_tokens": response.usage.prompt_tokens,
401
+ "completion_tokens": response.usage.completion_tokens,
402
+ "total_tokens": response.usage.total_tokens,
403
+ }
404
+ workflow_ctx._send_checkpoint("workflow.lm.completed", {
405
+ "model": model,
406
+ "usage": usage_dict,
407
+ })
408
+
409
+ return response
410
+ except Exception as e:
411
+ # Emit error checkpoint for observability
412
+ if workflow_ctx:
413
+ workflow_ctx._send_checkpoint("workflow.lm.error", {
414
+ "model": model,
415
+ "error": str(e),
416
+ "error_type": type(e).__name__,
417
+ })
418
+ raise
375
419
 
376
420
  async def stream(self, request: GenerateRequest) -> AsyncIterator[str]:
377
421
  """Stream completion from LLM.
@@ -427,14 +471,43 @@ class _LanguageModel(LanguageModel):
427
471
  # Serialize tool_choice to JSON for Rust
428
472
  kwargs["tool_choice"] = json.dumps(request.tool_choice.value)
429
473
 
430
- # Call Rust implementation - it returns a proper Python coroutine now
431
- # Using pyo3-async-runtimes for truly async streaming without blocking
432
- rust_chunks = await self._rust_lm.stream(prompt=prompt, **kwargs)
474
+ # Emit checkpoint if called within a workflow context
475
+ from .context import get_workflow_context
476
+ workflow_ctx = get_workflow_context()
477
+ if workflow_ctx:
478
+ workflow_ctx._send_checkpoint("workflow.lm.started", {
479
+ "model": model,
480
+ "provider": self._provider,
481
+ "temperature": kwargs.get("temperature"),
482
+ "max_tokens": kwargs.get("max_tokens"),
483
+ "streaming": True,
484
+ })
433
485
 
434
- # Yield each chunk
435
- for chunk in rust_chunks:
436
- if chunk.text:
437
- yield chunk.text
486
+ try:
487
+ # Call Rust implementation - it returns a proper Python coroutine now
488
+ # Using pyo3-async-runtimes for truly async streaming without blocking
489
+ rust_chunks = await self._rust_lm.stream(prompt=prompt, **kwargs)
490
+
491
+ # Yield each chunk
492
+ for chunk in rust_chunks:
493
+ if chunk.text:
494
+ yield chunk.text
495
+
496
+ # Emit completion checkpoint after streaming finishes
497
+ if workflow_ctx:
498
+ workflow_ctx._send_checkpoint("workflow.lm.completed", {
499
+ "model": model,
500
+ "streaming": True,
501
+ })
502
+ except Exception as e:
503
+ # Emit error checkpoint for observability
504
+ if workflow_ctx:
505
+ workflow_ctx._send_checkpoint("workflow.lm.error", {
506
+ "model": model,
507
+ "error": str(e),
508
+ "error_type": type(e).__name__,
509
+ })
510
+ raise
438
511
 
439
512
  def _build_prompt_messages(self, request: GenerateRequest) -> List[Dict[str, str]]:
440
513
  """Build structured message list for Rust.
@@ -603,8 +676,12 @@ async def generate(
603
676
  response_schema=response_schema_json,
604
677
  )
605
678
 
679
+ # Checkpoints are emitted by _LanguageModel.generate() internally
680
+ # to avoid duplication. No need to emit them here.
681
+
606
682
  # Generate and return
607
- return await lm.generate(request)
683
+ result = await lm.generate(request)
684
+ return result
608
685
 
609
686
 
610
687
  async def stream(
@@ -700,6 +777,37 @@ async def stream(
700
777
  config=config,
701
778
  )
702
779
 
703
- # Stream and yield chunks
704
- async for chunk in lm.stream(request):
705
- yield chunk
780
+ # Emit checkpoint if called within a workflow context
781
+ from .context import get_workflow_context
782
+
783
+ workflow_ctx = get_workflow_context()
784
+ if workflow_ctx:
785
+ workflow_ctx._send_checkpoint("workflow.lm.started", {
786
+ "model": model,
787
+ "provider": provider,
788
+ "max_tokens": max_tokens,
789
+ "temperature": temperature,
790
+ "streaming": True,
791
+ })
792
+
793
+ try:
794
+ # Stream and yield chunks
795
+ async for chunk in lm.stream(request):
796
+ yield chunk
797
+
798
+ # Emit completion checkpoint (note: no usage stats for streaming)
799
+ if workflow_ctx:
800
+ workflow_ctx._send_checkpoint("workflow.lm.completed", {
801
+ "model": model,
802
+ "streaming": True,
803
+ })
804
+ except Exception as e:
805
+ # Emit error checkpoint for observability
806
+ if workflow_ctx:
807
+ workflow_ctx._send_checkpoint("workflow.lm.error", {
808
+ "model": model,
809
+ "error": str(e),
810
+ "error_type": type(e).__name__,
811
+ "streaming": True,
812
+ })
813
+ raise
agnt5/tool.py CHANGED
@@ -13,7 +13,7 @@ from typing import Any, Awaitable, Callable, Dict, List, Optional, TypeVar, get_
13
13
 
14
14
  from docstring_parser import parse as parse_docstring
15
15
 
16
- from .context import Context
16
+ from .context import Context, set_current_context
17
17
  from .exceptions import ConfigurationError
18
18
  from ._telemetry import setup_module_logger
19
19
 
@@ -229,26 +229,61 @@ class Tool:
229
229
  f"Tool '{self.name}' requires confirmation but confirmation is not yet implemented"
230
230
  )
231
231
 
232
- # Create span for tool execution with trace linking
233
- from ._core import create_span
232
+ # Emit checkpoint if called within a workflow context
233
+ from .context import get_workflow_context
234
234
 
235
- logger.debug(f"Invoking tool '{self.name}' with args: {list(kwargs.keys())}")
236
-
237
- # Create span with runtime_context for parent-child span linking
238
- with create_span(
239
- self.name,
240
- "tool",
241
- ctx._runtime_context if hasattr(ctx, "_runtime_context") else None,
242
- {
235
+ workflow_ctx = get_workflow_context()
236
+ if workflow_ctx:
237
+ workflow_ctx._send_checkpoint("workflow.tool.started", {
243
238
  "tool.name": self.name,
244
- "tool.args": ",".join(kwargs.keys()),
245
- },
246
- ) as span:
247
- # Handler is already async (validated in tool() decorator)
248
- result = await self.handler(ctx, **kwargs)
249
-
250
- logger.debug(f"Tool '{self.name}' completed successfully")
251
- return result
239
+ "tool.args": list(kwargs.keys()),
240
+ })
241
+
242
+ # Set context in task-local storage for automatic propagation to nested calls
243
+ token = set_current_context(ctx)
244
+ try:
245
+ try:
246
+ # Create span for tool execution with trace linking
247
+ from ._core import create_span
248
+
249
+ logger.debug(f"Invoking tool '{self.name}' with args: {list(kwargs.keys())}")
250
+
251
+ # Create span with runtime_context for parent-child span linking
252
+ with create_span(
253
+ self.name,
254
+ "tool",
255
+ ctx._runtime_context if hasattr(ctx, "_runtime_context") else None,
256
+ {
257
+ "tool.name": self.name,
258
+ "tool.args": ",".join(kwargs.keys()),
259
+ },
260
+ ) as span:
261
+ # Handler is already async (validated in tool() decorator)
262
+ result = await self.handler(ctx, **kwargs)
263
+
264
+ logger.debug(f"Tool '{self.name}' completed successfully")
265
+
266
+ # Emit completion checkpoint
267
+ if workflow_ctx:
268
+ workflow_ctx._send_checkpoint("workflow.tool.completed", {
269
+ "tool.name": self.name,
270
+ "tool.success": True,
271
+ })
272
+
273
+ return result
274
+ except Exception as e:
275
+ # Emit error checkpoint for observability
276
+ if workflow_ctx:
277
+ workflow_ctx._send_checkpoint("workflow.tool.error", {
278
+ "tool.name": self.name,
279
+ "error": str(e),
280
+ "error_type": type(e).__name__,
281
+ })
282
+ raise
283
+ finally:
284
+ # Always reset context to prevent leakage
285
+ from .context import _current_context
286
+ _current_context.reset(token)
252
287
 
253
288
  def get_schema(self) -> Dict[str, Any]:
254
289
  """
@@ -448,17 +483,18 @@ class AskUserTool(Tool):
448
483
  ```
449
484
  """
450
485
 
451
- def __init__(self, context: "WorkflowContext"): # type: ignore
486
+ def __init__(self, context: Optional["WorkflowContext"] = None): # type: ignore
452
487
  """
453
488
  Initialize AskUserTool.
454
489
 
455
490
  Args:
456
- context: Workflow context with wait_for_user capability
491
+ context: Optional workflow context with wait_for_user capability.
492
+ If not provided, will attempt to get from task-local contextvar.
457
493
  """
458
494
  # Import here to avoid circular dependency
459
495
  from .workflow import WorkflowContext
460
496
 
461
- if not isinstance(context, WorkflowContext):
497
+ if context is not None and not isinstance(context, WorkflowContext):
462
498
  raise ConfigurationError(
463
499
  "AskUserTool requires a WorkflowContext. "
464
500
  "This tool can only be used within workflows."
@@ -477,13 +513,35 @@ class AskUserTool(Tool):
477
513
  Ask user a question and wait for their response.
478
514
 
479
515
  Args:
480
- ctx: Execution context (unused, required by Tool signature)
516
+ ctx: Execution context (may contain WorkflowContext via contextvar)
481
517
  question: Question to ask the user
482
518
 
483
519
  Returns:
484
520
  User's text response
485
521
  """
486
- return await self.context.wait_for_user(question, input_type="text")
522
+ # Import here to avoid circular dependency
523
+ from .workflow import WorkflowContext
524
+ from .context import get_current_context
525
+
526
+ # Use explicit context if provided during __init__
527
+ workflow_ctx = self.context
528
+
529
+ # If not provided, try to get from task-local contextvar
530
+ if workflow_ctx is None:
531
+ current = get_current_context()
532
+ if isinstance(current, WorkflowContext):
533
+ workflow_ctx = current
534
+ elif hasattr(current, '_workflow_entity'):
535
+ # Current context has workflow entity (is WorkflowContext)
536
+ workflow_ctx = current # type: ignore
537
+
538
+ if workflow_ctx is None:
539
+ raise ConfigurationError(
540
+ "AskUserTool requires WorkflowContext. "
541
+ "Either pass context to __init__ or ensure tool is used within a workflow."
542
+ )
543
+
544
+ return await workflow_ctx.wait_for_user(question, input_type="text")
487
545
 
488
546
 
489
547
  class RequestApprovalTool(Tool):
@@ -516,17 +574,18 @@ class RequestApprovalTool(Tool):
516
574
  ```
517
575
  """
518
576
 
519
- def __init__(self, context: "WorkflowContext"): # type: ignore
577
+ def __init__(self, context: Optional["WorkflowContext"] = None): # type: ignore
520
578
  """
521
579
  Initialize RequestApprovalTool.
522
580
 
523
581
  Args:
524
- context: Workflow context with wait_for_user capability
582
+ context: Optional workflow context with wait_for_user capability.
583
+ If not provided, will attempt to get from task-local contextvar.
525
584
  """
526
585
  # Import here to avoid circular dependency
527
586
  from .workflow import WorkflowContext
528
587
 
529
- if not isinstance(context, WorkflowContext):
588
+ if context is not None and not isinstance(context, WorkflowContext):
530
589
  raise ConfigurationError(
531
590
  "RequestApprovalTool requires a WorkflowContext. "
532
591
  "This tool can only be used within workflows."
@@ -545,19 +604,41 @@ class RequestApprovalTool(Tool):
545
604
  Request approval from user for an action.
546
605
 
547
606
  Args:
548
- ctx: Execution context (unused, required by Tool signature)
607
+ ctx: Execution context (may contain WorkflowContext via contextvar)
549
608
  action: The action requiring approval
550
609
  details: Additional details about the action
551
610
 
552
611
  Returns:
553
612
  "approve" or "reject" based on user's decision
554
613
  """
614
+ # Import here to avoid circular dependency
615
+ from .workflow import WorkflowContext
616
+ from .context import get_current_context
617
+
618
+ # Use explicit context if provided during __init__
619
+ workflow_ctx = self.context
620
+
621
+ # If not provided, try to get from task-local contextvar
622
+ if workflow_ctx is None:
623
+ current = get_current_context()
624
+ if isinstance(current, WorkflowContext):
625
+ workflow_ctx = current
626
+ elif hasattr(current, '_workflow_entity'):
627
+ # Current context has workflow entity (is WorkflowContext)
628
+ workflow_ctx = current # type: ignore
629
+
630
+ if workflow_ctx is None:
631
+ raise ConfigurationError(
632
+ "RequestApprovalTool requires WorkflowContext. "
633
+ "Either pass context to __init__ or ensure tool is used within a workflow."
634
+ )
635
+
555
636
  question = f"Action: {action}"
556
637
  if details:
557
638
  question += f"\n\nDetails:\n{details}"
558
639
  question += "\n\nDo you approve?"
559
640
 
560
- return await self.context.wait_for_user(
641
+ return await workflow_ctx.wait_for_user(
561
642
  question,
562
643
  input_type="approval",
563
644
  options=[