agnt5-0.2.8a4-cp310-abi3-manylinux_2_34_x86_64.whl → agnt5-0.2.8a6-cp310-abi3-manylinux_2_34_x86_64.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of agnt5 might be problematic.

agnt5/__init__.py CHANGED
@@ -26,9 +26,10 @@ from .exceptions import (
  ExecutionError,
  RetryError,
  StateError,
+ WaitingForUserInputException,
  )
  from .function import FunctionRegistry, function
- from .tool import Tool, ToolRegistry, tool
+ from .tool import AskUserTool, RequestApprovalTool, Tool, ToolRegistry, tool
  from .types import BackoffPolicy, BackoffType, FunctionConfig, RetryPolicy, WorkflowConfig
  from .version import _get_version
  from .worker import Worker
@@ -62,6 +63,8 @@ __all__ = [
  "tool",
  "Tool",
  "ToolRegistry",
+ "AskUserTool",
+ "RequestApprovalTool",
  "agent",
  "Agent",
  "AgentRegistry",
@@ -81,6 +84,7 @@ __all__ = [
  "RetryError",
  "StateError",
  "CheckpointError",
+ "WaitingForUserInputException",
  "RunError",
  # Language Model (Simplified API)
  "lm",
agnt5/_core.abi3.so CHANGED
Binary file
agnt5/_telemetry.py CHANGED
@@ -73,10 +73,17 @@ class OpenTelemetryHandler(logging.Handler):
  exc_text = self.formatException(record.exc_info)
  message = f"{message}\n{exc_text}"
 
+ # Extract correlation IDs from LogRecord attributes (added by _CorrelationFilter)
+ # These ensure logs can be correlated with distributed traces in observability backends
+ trace_id = getattr(record, 'trace_id', None)
+ span_id = getattr(record, 'span_id', None)
+ run_id = getattr(record, 'run_id', None)
+
  # Forward to Rust tracing system
  # Rust side will:
  # - Add to current span context (inherits invocation.id)
- # - Send to OTLP exporter
+ # - Attach correlation IDs as span attributes for OTLP export
+ # - Send to OTLP exporter with trace context
  # - Print to console via fmt layer
  self._log_from_python(
  level=record.levelname,
@@ -84,7 +91,10 @@ class OpenTelemetryHandler(logging.Handler):
  target=record.name,
  module_path=record.module,
  filename=record.pathname,
- line=record.lineno
+ line=record.lineno,
+ trace_id=trace_id,
+ span_id=span_id,
+ run_id=run_id,
  )
  except Exception:
  # Don't let logging errors crash the application
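
The handler now forwards trace_id, span_id, and run_id when they are present as LogRecord attributes. The _CorrelationFilter referenced in the comments is not part of this diff; the sketch below shows, as an assumption, how a standard logging.Filter could attach those attributes so the getattr calls above find them.

```python
import logging

class CorrelationFilter(logging.Filter):
    """Hypothetical stand-in for the _CorrelationFilter mentioned above: it stamps each
    LogRecord with the attributes OpenTelemetryHandler reads via getattr(record, ..., None)."""

    def __init__(self, trace_id: str, span_id: str, run_id: str) -> None:
        super().__init__()
        self.trace_id, self.span_id, self.run_id = trace_id, span_id, run_id

    def filter(self, record: logging.LogRecord) -> bool:
        # Attach correlation IDs; returning True keeps the record.
        record.trace_id = self.trace_id
        record.span_id = self.span_id
        record.run_id = self.run_id
        return True

logger = logging.getLogger("agnt5.example")
logger.addHandler(logging.StreamHandler())
logger.addFilter(CorrelationFilter("4bf92f3577b34da6", "00f067aa0ba902b7", "run-42"))
logger.warning("correlated log line")  # record now carries trace_id/span_id/run_id
```
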
agnt5/agent.py CHANGED
@@ -9,7 +9,8 @@ from __future__ import annotations
  import functools
  import json
  import logging
- from typing import Any, Callable, Dict, List, Optional
+ import time
+ from typing import Any, Callable, Dict, List, Optional, Union
 
  from .context import Context
  from . import lm
@@ -85,7 +86,6 @@ class AgentContext(Context):
  if state_manager:
  # Explicit state adapter provided (parameter name kept for backward compat)
  self._state_adapter = state_manager
- logger.debug(f"AgentContext using provided state adapter")
  elif parent_context:
  # Try to inherit state adapter from parent
  try:
@@ -93,38 +93,30 @@
  if hasattr(parent_context, '_workflow_entity'):
  # WorkflowContext - get state adapter from worker context
  self._state_adapter = _get_state_adapter()
- logger.debug(f"AgentContext inheriting state from WorkflowContext")
  elif hasattr(parent_context, '_state_adapter'):
  # Parent AgentContext - share state adapter
  self._state_adapter = parent_context._state_adapter
- logger.debug(f"AgentContext inheriting state from parent AgentContext")
  elif hasattr(parent_context, '_state_manager'):
  # Backward compatibility: parent has old _state_manager
  self._state_adapter = parent_context._state_manager
- logger.debug(f"AgentContext inheriting state from parent (legacy)")
  else:
  # FunctionContext or base Context - create new state adapter
  self._state_adapter = EntityStateAdapter()
- logger.debug(f"AgentContext created new state adapter (parent has no state)")
  except RuntimeError as e:
  # _get_state_adapter() failed (not in worker context) - create standalone
  self._state_adapter = EntityStateAdapter()
- logger.debug(f"AgentContext created standalone state adapter (not in worker context)")
  else:
  # Try to get from worker context first
  try:
  self._state_adapter = _get_state_adapter()
- logger.debug(f"AgentContext got state adapter from worker context")
  except RuntimeError as e:
  # Standalone - create new state adapter
  self._state_adapter = EntityStateAdapter()
- logger.debug(f"AgentContext created standalone state adapter")
 
  # Conversation key for state storage (used for in-memory state)
  self._conversation_key = f"agent:{agent_name}:{self._session_id}:messages"
  # Entity key for database persistence (without :messages suffix to match API expectations)
  self._entity_key = f"agent:{agent_name}:{self._session_id}"
- logger.debug(f"AgentContext initialized - session_id={self._session_id}")
 
  @property
  def state(self):
@@ -173,15 +165,12 @@
  if isinstance(session_data, dict) and "messages" in session_data:
  # New format with session metadata
  messages_data = session_data["messages"]
- logger.debug(f"Loaded {len(messages_data)} messages from session {entity_key}")
  elif isinstance(session_data, list):
  # Old format - just messages array
  messages_data = session_data
- logger.debug(f"Loaded {len(messages_data)} messages (legacy format)")
  else:
  # No messages found
  messages_data = []
- logger.debug(f"No conversation history found for {entity_key}")
 
  # Convert dict representations back to Message objects
  messages = []
@@ -214,17 +203,15 @@
  Args:
  messages: List of Message objects to persist
  """
- logger.debug(f"Saving {len(messages)} messages to conversation history")
-
  # Convert Message objects to dict for JSON serialization
  messages_data = []
  for msg in messages:
  messages_data.append({
  "role": msg.role.value if hasattr(msg.role, 'value') else str(msg.role),
- "content": msg.content
+ "content": msg.content,
+ "timestamp": time.time() # Add timestamp for each message
  })
 
- import time
  entity_type = "AgentSession"
  entity_key = self._entity_key
 
@@ -359,16 +346,23 @@ class Handoff:
  ```python
  specialist = Agent(name="specialist", ...)
 
- # Create handoff configuration
- handoff_to_specialist = Handoff(
- agent=specialist,
- description="Transfer to specialist for detailed analysis"
+ # Simple: Pass agent directly (auto-wrapped with defaults)
+ coordinator = Agent(
+ name="coordinator",
+ handoffs=[specialist] # Agent auto-converted to Handoff
  )
 
- # Use in coordinator agent
+ # Advanced: Use Handoff for custom configuration
  coordinator = Agent(
  name="coordinator",
- handoffs=[handoff_to_specialist]
+ handoffs=[
+ Handoff(
+ agent=specialist,
+ description="Custom description for LLM",
+ tool_name="custom_transfer_name",
+ pass_full_history=False
+ )
+ ]
  )
  ```
  """
@@ -446,7 +440,6 @@ class AgentRegistry:
  if agent.name in _AGENT_REGISTRY:
  logger.warning(f"Overwriting existing agent '{agent.name}'")
  _AGENT_REGISTRY[agent.name] = agent
- logger.debug(f"Registered agent '{agent.name}'")
 
  @staticmethod
  def get(name: str) -> Optional["Agent"]:
@@ -462,7 +455,6 @@
  def clear() -> None:
  """Clear all registered agents."""
  _AGENT_REGISTRY.clear()
- logger.debug("Cleared agent registry")
 
 
  class AgentResult:
@@ -527,7 +519,7 @@ class Agent:
  model: Any, # Can be string like "openai/gpt-4o-mini" OR LanguageModel instance
  instructions: str,
  tools: Optional[List[Any]] = None,
- handoffs: Optional[List[Handoff]] = None,
+ handoffs: Optional[List[Union["Agent", Handoff]]] = None, # Accept Agent or Handoff instances
  temperature: float = 0.7,
  max_tokens: Optional[int] = None,
  top_p: Optional[float] = None,
@@ -542,7 +534,7 @@
  model: Model string with provider prefix (e.g., "openai/gpt-4o-mini") OR LanguageModel instance
  instructions: System instructions for the agent
  tools: List of tools available to the agent (functions, Tool instances, or Agent instances)
- handoffs: List of Handoff configurations for agent-to-agent delegation
+ handoffs: List of handoff configurations - can be Agent instances (auto-wrapped) or Handoff instances for custom config
  temperature: LLM temperature (0.0 to 1.0)
  max_tokens: Maximum tokens to generate
  top_p: Nucleus sampling parameter
@@ -572,8 +564,18 @@
  else:
  raise TypeError(f"model must be a string or LanguageModel instance, got {type(model)}")
 
- # Store handoffs for building handoff tools
- self.handoffs = handoffs or []
+ # Normalize handoffs: convert Agent instances to Handoff instances
+ self.handoffs: List[Handoff] = []
+ if handoffs:
+ for handoff_item in handoffs:
+ if isinstance(handoff_item, Agent):
+ # Auto-wrap Agent in Handoff with sensible defaults
+ self.handoffs.append(Handoff(agent=handoff_item))
+ logger.info(f"Auto-wrapped agent '{handoff_item.name}' in Handoff for '{self.name}'")
+ elif isinstance(handoff_item, Handoff):
+ self.handoffs.append(handoff_item)
+ else:
+ raise TypeError(f"handoffs must contain Agent or Handoff instances, got {type(handoff_item)}")
 
  # Build tool registry (includes regular tools, agent-as-tools, and handoff tools)
  self.tools: Dict[str, Tool] = {}
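
With this change, `handoffs` accepts either `Agent` instances (auto-wrapped with defaults) or explicit `Handoff` objects. A brief usage sketch; the `from agnt5.agent import Handoff` import path, the agent names, and configured model credentials are assumptions, while the keyword arguments come from the docstring above.

```python
# Sketch of the new handoff ergonomics (hypothetical agents; assumes provider credentials are configured).
from agnt5 import Agent
from agnt5.agent import Handoff  # import path assumed from this diff

billing = Agent(name="billing", model="openai/gpt-4o-mini",
                instructions="Handle billing questions.")
refunds = Agent(name="refunds", model="openai/gpt-4o-mini",
                instructions="Handle refund requests.")

support = Agent(
    name="support",
    model="openai/gpt-4o-mini",
    instructions="Route customers to the right specialist.",
    handoffs=[
        billing,  # Agent instance: auto-wrapped as Handoff(agent=billing)
        Handoff(agent=refunds,
                description="Escalate refund decisions",
                pass_full_history=False),  # explicit Handoff keeps custom config
    ],
)
```
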
agnt5/exceptions.py CHANGED
@@ -1,5 +1,7 @@
  """AGNT5 SDK exceptions and error types."""
 
+ from typing import Dict, List, Optional
+
 
  class AGNT5Error(Exception):
  """Base exception for all AGNT5 SDK errors."""
@@ -44,3 +46,44 @@ class NotImplementedError(AGNT5Error):
  """Raised when a feature is not yet implemented."""
 
  pass
+
+
+ class WaitingForUserInputException(AGNT5Error):
+ """Raised when workflow needs to pause for user input.
+
+ This exception is used internally by ctx.wait_for_user() to signal
+ that a workflow execution should pause and wait for user input.
+
+ The platform catches this exception and:
+ 1. Saves the workflow checkpoint state
+ 2. Returns awaiting_user_input status to the client
+ 3. Presents the question and options to the user
+ 4. Resumes execution when user responds
+
+ Attributes:
+ question: The question to ask the user
+ input_type: Type of input ("text", "approval", or "choice")
+ options: List of options for approval/choice inputs
+ checkpoint_state: Current workflow state for resume
+ """
+
+ def __init__(
+ self,
+ question: str,
+ input_type: str,
+ options: Optional[List[Dict]],
+ checkpoint_state: Dict,
+ ) -> None:
+ """Initialize WaitingForUserInputException.
+
+ Args:
+ question: Question to ask the user
+ input_type: Type of input - "text", "approval", or "choice"
+ options: List of option dicts (for approval/choice)
+ checkpoint_state: Workflow state snapshot for resume
+ """
+ super().__init__(f"Waiting for user input: {question}")
+ self.question = question
+ self.input_type = input_type
+ self.options = options or []
+ self.checkpoint_state = checkpoint_state
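
A minimal sketch of the pause signal in isolation, using only the constructor fields defined above; the example values are illustrative:

```python
# Construct and catch the new pause exception (fields as defined in this diff).
from agnt5.exceptions import WaitingForUserInputException

try:
    raise WaitingForUserInputException(
        question="Deploy to production?",
        input_type="approval",
        options=[{"id": "approve", "label": "Approve"},
                 {"id": "reject", "label": "Reject"}],
        checkpoint_state={"last_step": "build"},
    )
except WaitingForUserInputException as e:
    # The caller gets everything needed to pause now and resume later.
    print(e.question, e.input_type, e.options, e.checkpoint_state)
```
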
agnt5/tool.py CHANGED
@@ -416,3 +416,152 @@ def tool(
  if _func is None:
  return decorator
  return decorator(_func)
+
+
+ # ============================================================================
+ # Built-in Human-in-the-Loop Tools
+ # ============================================================================
+
+ class AskUserTool(Tool):
+ """
+ Built-in tool that agents can use to request text input from users.
+
+ This tool pauses the workflow execution and waits for the user to provide
+ a text response. The workflow resumes when the user submits their input.
+
+ Example:
+ ```python
+ from agnt5 import Agent, workflow, WorkflowContext
+ from agnt5.tool import AskUserTool
+
+ @workflow(chat=True)
+ async def agent_with_hitl(ctx: WorkflowContext, query: str) -> dict:
+ agent = Agent(
+ name="research_agent",
+ model="openai/gpt-4o-mini",
+ instructions="You are a research assistant.",
+ tools=[AskUserTool(ctx)]
+ )
+
+ result = await agent.run(query, context=ctx)
+ return {"response": result.output}
+ ```
+ """
+
+ def __init__(self, context: "WorkflowContext"): # type: ignore
+ """
+ Initialize AskUserTool.
+
+ Args:
+ context: Workflow context with wait_for_user capability
+ """
+ # Import here to avoid circular dependency
+ from .workflow import WorkflowContext
+
+ if not isinstance(context, WorkflowContext):
+ raise ConfigurationError(
+ "AskUserTool requires a WorkflowContext. "
+ "This tool can only be used within workflows."
+ )
+
+ super().__init__(
+ name="ask_user",
+ description="Ask the user a question and wait for their text response",
+ handler=self._handler,
+ auto_schema=True
+ )
+ self.context = context
+
+ async def _handler(self, ctx: Context, question: str) -> str:
+ """
+ Ask user a question and wait for their response.
+
+ Args:
+ ctx: Execution context (unused, required by Tool signature)
+ question: Question to ask the user
+
+ Returns:
+ User's text response
+ """
+ return await self.context.wait_for_user(question, input_type="text")
+
+
+ class RequestApprovalTool(Tool):
+ """
+ Built-in tool that agents can use to request approval from users.
+
+ This tool pauses the workflow execution and presents an approval request
+ to the user with approve/reject options. The workflow resumes when the
+ user makes a decision.
+
+ Example:
+ ```python
+ from agnt5 import Agent, workflow, WorkflowContext
+ from agnt5.tool import RequestApprovalTool
+
+ @workflow(chat=True)
+ async def deployment_agent(ctx: WorkflowContext, changes: dict) -> dict:
+ agent = Agent(
+ name="deploy_agent",
+ model="openai/gpt-4o-mini",
+ instructions="You help deploy code changes safely.",
+ tools=[RequestApprovalTool(ctx)]
+ )
+
+ result = await agent.run(
+ f"Review and deploy these changes: {changes}",
+ context=ctx
+ )
+ return {"response": result.output}
+ ```
+ """
+
+ def __init__(self, context: "WorkflowContext"): # type: ignore
+ """
+ Initialize RequestApprovalTool.
+
+ Args:
+ context: Workflow context with wait_for_user capability
+ """
+ # Import here to avoid circular dependency
+ from .workflow import WorkflowContext
+
+ if not isinstance(context, WorkflowContext):
+ raise ConfigurationError(
+ "RequestApprovalTool requires a WorkflowContext. "
+ "This tool can only be used within workflows."
+ )
+
+ super().__init__(
+ name="request_approval",
+ description="Request user approval for an action before proceeding",
+ handler=self._handler,
+ auto_schema=True
+ )
+ self.context = context
+
+ async def _handler(self, ctx: Context, action: str, details: str = "") -> str:
+ """
+ Request approval from user for an action.
+
+ Args:
+ ctx: Execution context (unused, required by Tool signature)
+ action: The action requiring approval
+ details: Additional details about the action
+
+ Returns:
+ "approve" or "reject" based on user's decision
+ """
+ question = f"Action: {action}"
+ if details:
+ question += f"\n\nDetails:\n{details}"
+ question += "\n\nDo you approve?"
+
+ return await self.context.wait_for_user(
+ question,
+ input_type="approval",
+ options=[
+ {"id": "approve", "label": "Approve"},
+ {"id": "reject", "label": "Reject"}
+ ]
+ )
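
The docstrings above show each tool on its own; here is a short sketch combining both built-in HITL tools in one workflow agent (the workflow name, agent name, and model string are illustrative):

```python
# Sketch: one agent that can both ask clarifying questions and request approval.
from agnt5 import Agent, workflow, WorkflowContext
from agnt5.tool import AskUserTool, RequestApprovalTool

@workflow(chat=True)
async def ops_assistant(ctx: WorkflowContext, request: str) -> dict:
    agent = Agent(
        name="ops_assistant",
        model="openai/gpt-4o-mini",
        instructions="Clarify requirements with ask_user, then request_approval before acting.",
        tools=[AskUserTool(ctx), RequestApprovalTool(ctx)],
    )
    result = await agent.run(request, context=ctx)
    return {"response": result.output}
```
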
agnt5/worker.py CHANGED
@@ -5,6 +5,7 @@ from __future__ import annotations
  import asyncio
  import contextvars
  import logging
+ import uuid
  from typing import Any, Dict, List, Optional
 
  from .function import FunctionRegistry
@@ -611,7 +612,6 @@ class Worker:
  elif component_type == "function":
  function_config = FunctionRegistry.get(component_name)
  if function_config:
- logger.info(f"🔥 WORKER: Received request for function: {component_name}")
  # Return coroutine, don't await it
  return self._execute_function(function_config, input_data, request)
 
@@ -636,7 +636,6 @@
  from ._core import PyExecuteComponentResponse
 
  exec_start = time.time()
- logger.info(f"🔥 WORKER: Executing function {config.name}")
 
  try:
  # Parse input data
@@ -654,26 +653,15 @@
  runtime_context=request.runtime_context,
  )
 
- # Create span for function execution with trace linking
- from ._core import create_span
-
- with create_span(
- config.name,
- "function",
- request.runtime_context,
- {
- "function.name": config.name,
- "service.name": self.service_name,
- },
- ) as span:
- # Execute function
- if input_dict:
- result = config.handler(ctx, **input_dict)
- else:
- result = config.handler(ctx)
-
- # Debug: Log what type result is
- logger.info(f"🔥 WORKER: Function result type: {type(result).__name__}, isasyncgen: {inspect.isasyncgen(result)}, iscoroutine: {inspect.iscoroutine(result)}")
+ # Execute function directly - Rust bridge handles tracing
+ # Note: Removed Python-level span creation to avoid duplicate spans.
+ # The Rust worker bridge (sdk-python/rust-src/worker.rs:413-659) already
+ # creates a comprehensive OpenTelemetry span with all necessary attributes.
+ # See DUPLICATE_SPANS_FIX.md for details.
+ if input_dict:
+ result = config.handler(ctx, **input_dict)
+ else:
+ result = config.handler(ctx)
 
  # Note: Removed flush_telemetry_py() call here - it was causing 2-second blocking delay!
  # The batch span processor handles flushing automatically with 5s timeout
@@ -758,16 +746,31 @@
  """Execute a workflow handler with automatic replay support."""
  import json
  from .workflow import WorkflowEntity, WorkflowContext
- from .entity import _get_state_adapter
+ from .entity import _get_state_adapter, _entity_state_adapter_ctx
+ from .exceptions import WaitingForUserInputException
  from ._core import PyExecuteComponentResponse
 
+ # Set entity state adapter in context so workflows can use Entities
+ _entity_state_adapter_ctx.set(self._entity_state_adapter)
+
  try:
  # Parse input data
  input_dict = json.loads(input_data.decode("utf-8")) if input_data else {}
 
+ # Extract or generate session_id for multi-turn conversation support (for chat workflows)
+ # If session_id is provided, the workflow can maintain conversation context
+ session_id = input_dict.get("session_id")
+
+ if not session_id:
+ session_id = str(uuid.uuid4())
+ logger.info(f"Created new workflow session: {session_id}")
+ else:
+ logger.info(f"Using existing workflow session: {session_id}")
+
  # Parse replay data from request metadata for crash recovery
  completed_steps = {}
  initial_state = {}
+ user_response = None
 
  if hasattr(request, 'metadata') and request.metadata:
  # Parse completed steps for replay
@@ -790,6 +793,11 @@
  except json.JSONDecodeError:
  logger.warning("Failed to parse workflow_state from metadata")
 
+ # Check for user response (workflow resume after pause)
+ if "user_response" in request.metadata:
+ user_response = request.metadata["user_response"]
+ logger.info(f"▶️ Resuming workflow with user response: {user_response}")
+
  # Create WorkflowEntity for state management
  workflow_entity = WorkflowEntity(run_id=f"{self.service_name}:{config.name}")
 
@@ -798,6 +806,11 @@
  workflow_entity._completed_steps = completed_steps
  logger.debug(f"Loaded {len(completed_steps)} completed steps into workflow entity")
 
+ # Inject user response if resuming from pause
+ if user_response:
+ workflow_entity.inject_user_response(user_response)
+ logger.debug(f"Injected user response into workflow entity")
+
  if initial_state:
  # Load initial state into entity's state adapter
  state_adapter = _get_state_adapter()
@@ -816,23 +829,14 @@
  runtime_context=request.runtime_context,
  )
 
- # Create span for workflow execution with trace linking
- from ._core import create_span
-
- with create_span(
- config.name,
- "workflow",
- request.runtime_context,
- {
- "workflow.name": config.name,
- "service.name": self.service_name,
- },
- ) as span:
- # Execute workflow
- if input_dict:
- result = await config.handler(ctx, **input_dict)
- else:
- result = await config.handler(ctx)
+ # Execute workflow directly - Rust bridge handles tracing
+ # Note: Removed Python-level span creation to avoid duplicate spans.
+ # The Rust worker bridge creates comprehensive OpenTelemetry spans.
+ # See DUPLICATE_SPANS_FIX.md for details.
+ if input_dict:
+ result = await config.handler(ctx, **input_dict)
+ else:
+ result = await config.handler(ctx)
 
  # Note: Removed flush_telemetry_py() call here - it was causing 2-second blocking delay!
  # The batch span processor handles flushing automatically with 5s timeout
@@ -860,13 +864,70 @@
 
  logger.info(f"Workflow completed successfully with {len(step_events)} steps")
 
+ # Add session_id to metadata for multi-turn conversation support
+ metadata["session_id"] = session_id
+
  return PyExecuteComponentResponse(
  invocation_id=request.invocation_id,
  success=True,
  output_data=output_data,
  state_update=None, # Not used for workflows (use metadata instead)
  error_message=None,
- metadata=metadata if metadata else None, # Include step events + state
+ metadata=metadata if metadata else None, # Include step events + state + session_id
+ is_chunk=False,
+ done=True,
+ chunk_index=0,
+ )
+
+ except WaitingForUserInputException as e:
+ # Workflow paused for user input
+ logger.info(f"⏸️ Workflow paused waiting for user input: {e.question}")
+
+ # Collect metadata for pause state
+ # Note: All metadata values must be strings for Rust FFI
+ pause_metadata = {
+ "status": "awaiting_user_input",
+ "question": e.question,
+ "input_type": e.input_type,
+ }
+
+ # Add optional fields only if they exist
+ if e.options:
+ pause_metadata["options"] = json.dumps(e.options)
+ if e.checkpoint_state:
+ pause_metadata["checkpoint_state"] = json.dumps(e.checkpoint_state)
+ if session_id:
+ pause_metadata["session_id"] = session_id
+
+ # Add step events to pause metadata for durability
+ step_events = ctx._workflow_entity._step_events
+ if step_events:
+ pause_metadata["step_events"] = json.dumps(step_events)
+ logger.debug(f"Paused workflow has {len(step_events)} recorded steps")
+
+ # Add current workflow state to pause metadata
+ if hasattr(ctx, '_workflow_entity') and ctx._workflow_entity._state is not None:
+ if ctx._workflow_entity._state.has_changes():
+ state_snapshot = ctx._workflow_entity._state.get_state_snapshot()
+ pause_metadata["workflow_state"] = json.dumps(state_snapshot)
+ logger.debug(f"Paused workflow state snapshot: {state_snapshot}")
+
+ # Return "success" with awaiting_user_input metadata
+ # The output contains the question details for the client
+ output = {
+ "question": e.question,
+ "input_type": e.input_type,
+ "options": e.options,
+ }
+ output_data = json.dumps(output).encode("utf-8")
+
+ return PyExecuteComponentResponse(
+ invocation_id=request.invocation_id,
+ success=True, # This is a valid pause state, not an error
+ output_data=output_data,
+ state_update=None,
+ error_message=None,
+ metadata=pause_metadata,
  is_chunk=False,
  done=True,
  chunk_index=0,
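
The pause/resume contract implied by this handler, sketched as plain data; the metadata keys come from the code above, while the client/transport side and the example values are assumptions:

```python
import json

# 1. On pause, the worker's response metadata advertises the question (all values are strings).
pause_metadata = {
    "status": "awaiting_user_input",
    "question": "Action: deploy\n\nDo you approve?",
    "input_type": "approval",
    "options": json.dumps([{"id": "approve", "label": "Approve"},
                           {"id": "reject", "label": "Reject"}]),
    "session_id": "3f6c0d2e-0000-4000-8000-000000000000",  # echoed so the conversation can continue
}

# 2. A hypothetical caller resumes by re-invoking the workflow with the same session_id and a
#    user_response entry, which the worker injects via WorkflowEntity.inject_user_response().
resume_request_metadata = {
    "session_id": pause_metadata["session_id"],
    "user_response": "approve",
}
print(resume_request_metadata)
```
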
agnt5/workflow.py CHANGED
@@ -7,7 +7,7 @@ import functools
  import inspect
  import logging
  import uuid
- from typing import Any, Callable, Dict, Optional, TypeVar, cast
+ from typing import Any, Callable, Dict, List, Optional, TypeVar, cast
 
  from ._schema_utils import extract_function_metadata, extract_function_schemas
  from .context import Context
@@ -254,6 +254,86 @@ class WorkflowContext(Context):
 
  return result
 
+ async def wait_for_user(
+ self,
+ question: str,
+ input_type: str = "text",
+ options: Optional[List[Dict]] = None
+ ) -> str:
+ """
+ Pause workflow execution and wait for user input.
+
+ On replay (even after worker crash), resumes from this point
+ with the user's response. This method enables human-in-the-loop
+ workflows by pausing execution and waiting for user interaction.
+
+ Args:
+ question: Question to ask the user
+ input_type: Type of input - "text", "approval", or "choice"
+ options: For approval/choice, list of option dicts with 'id' and 'label'
+
+ Returns:
+ User's response string
+
+ Raises:
+ WaitingForUserInputException: When no cached response exists (first call)
+
+ Example (text input):
+ ```python
+ city = await ctx.wait_for_user("Which city?")
+ ```
+
+ Example (approval):
+ ```python
+ decision = await ctx.wait_for_user(
+ "Approve this action?",
+ input_type="approval",
+ options=[
+ {"id": "approve", "label": "Approve"},
+ {"id": "reject", "label": "Reject"}
+ ]
+ )
+ ```
+
+ Example (choice):
+ ```python
+ model = await ctx.wait_for_user(
+ "Which model?",
+ input_type="choice",
+ options=[
+ {"id": "gpt4", "label": "GPT-4"},
+ {"id": "claude", "label": "Claude"}
+ ]
+ )
+ ```
+ """
+ from .exceptions import WaitingForUserInputException
+
+ # Generate unique step name for this user input request
+ # Using run_id ensures uniqueness across workflow execution
+ response_key = f"user_response:{self.run_id}"
+
+ # Check if we already have the user's response (replay scenario)
+ if self._workflow_entity.has_completed_step(response_key):
+ response = self._workflow_entity.get_completed_step(response_key)
+ self._logger.info("🔄 Replaying user response from checkpoint")
+ return response
+
+ # No response yet - pause execution
+ # Collect current workflow state for checkpoint
+ checkpoint_state = {}
+ if hasattr(self._workflow_entity, '_state') and self._workflow_entity._state is not None:
+ checkpoint_state = self._workflow_entity._state.get_state_snapshot()
+
+ self._logger.info(f"⏸️ Pausing workflow for user input: {question}")
+
+ raise WaitingForUserInputException(
+ question=question,
+ input_type=input_type,
+ options=options,
+ checkpoint_state=checkpoint_state
+ )
+
 
  # ============================================================================
  # WorkflowEntity: Entity specialized for workflow execution state
@@ -337,6 +417,26 @@ class WorkflowEntity(Entity):
  """Check if step has been completed."""
  return step_name in self._completed_steps
 
+ def inject_user_response(self, response: str) -> None:
+ """
+ Inject user response as a completed step for workflow resume.
+
+ This method is called by the worker when resuming a paused workflow
+ with the user's response. It stores the response as if it was a
+ completed step, allowing wait_for_user() to retrieve it on replay.
+
+ Args:
+ response: User's response to inject
+
+ Example:
+ # Platform resumes workflow with user response
+ workflow_entity.inject_user_response("yes")
+ # On replay, wait_for_user() returns "yes" from cache
+ """
+ response_key = f"user_response:{self.run_id}"
+ self._completed_steps[response_key] = response
+ logger.info(f"Injected user response for {self.run_id}: {response}")
+
  @property
  def state(self) -> "WorkflowState":
  """
@@ -408,6 +508,14 @@ class WorkflowState(EntityState):
  "deleted": True
  })
 
+ def has_changes(self) -> bool:
+ """Check if any state changes have been tracked."""
+ return len(self._workflow_entity._state_changes) > 0
+
+ def get_state_snapshot(self) -> Dict[str, Any]:
+ """Get current state as a snapshot dictionary."""
+ return dict(self._state)
+
 
  class WorkflowRegistry:
  """Registry for workflow handlers."""
agnt5-0.2.8a6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agnt5
- Version: 0.2.8a4
+ Version: 0.2.8a6
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Programming Language :: Python :: 3
agnt5-0.2.8a6.dist-info/RECORD CHANGED
@@ -1,22 +1,22 @@
- agnt5-0.2.8a4.dist-info/METADATA,sha256=Dp8yJo_dx5B79OM0ptGgR80TjCUbyovpSc4Pt57dfuY,996
- agnt5-0.2.8a4.dist-info/WHEEL,sha256=AdMozAxftELsa3nYun92mL1tYO-R1ewuDPju53zvoK0,107
- agnt5/__init__.py,sha256=ACkK91EPdnv5tYip09QCZ9rfV4iBKzNjGfYVLJD1XGg,2045
+ agnt5-0.2.8a6.dist-info/METADATA,sha256=st25oz0S3w9locE4xf85jnN25ZtanIvYJJXTnckQ0QM,996
+ agnt5-0.2.8a6.dist-info/WHEEL,sha256=AdMozAxftELsa3nYun92mL1tYO-R1ewuDPju53zvoK0,107
+ agnt5/__init__.py,sha256=liMb9egh56qvgY4Xvs9s7grOzF3lXSE8-nIksJLNAy4,2195
  agnt5/_compat.py,sha256=BGuy3v5VDOHVa5f3Z-C22iMN19lAt0mPmXwF3qSSWxI,369
- agnt5/_core.abi3.so,sha256=CNmJC1AYLy8dABLguVfaWM6YqaTswjRnUZwM_07MXyk,15859720
+ agnt5/_core.abi3.so,sha256=6xzh1gJ7PTU3-7Tt1Wnh9S_x-XWNTNj5tERSW58SeFQ,15809776
  agnt5/_retry_utils.py,sha256=loHsWY5BR4wZy57IzcDEjQAy88DHVwVIr25Cn1d9GPA,5801
  agnt5/_schema_utils.py,sha256=MR67RW757T4Oq2Jqf4kB61H_b51zwaf3CLWELnkngRo,9572
- agnt5/_telemetry.py,sha256=bIY9AvBRjJBTHoBPbfR6X1OgaiUf-T0vCoi0_snsWXA,5957
- agnt5/agent.py,sha256=aBrhtPaUAHOHv3-h_Yb2UMqFHertr1P2hJ7fA_4IXcw,43225
+ agnt5/_telemetry.py,sha256=gx4TQNqxeQwpyPWvgVVknPjx5NbenmR6E-DGCj18Ssw,6510
+ agnt5/agent.py,sha256=ccV0rLj_9eF_G2VfrY8GWDBbNBadmDAYPrBJ6hMeuTE,43089
  agnt5/client.py,sha256=kXksazgxdVXWaG9OkjJA4cWruNtcS-ENhtnkrIdw-Nk,23212
  agnt5/context.py,sha256=S2OzPkhn_jnqSWfT21mSYOux8vHaLKQxcAvggZDHQek,2378
  agnt5/entity.py,sha256=AlHmSHVxQD5EYBvkmERKUkwv0ERrKaT8rvRK611hv_I,28941
- agnt5/exceptions.py,sha256=mZ0q-NK6OKhYxgwBJpIbgpgzk-CJaFIHDbp1EE-pS7I,925
+ agnt5/exceptions.py,sha256=2YB7o6B0FBW2S7x47HnV-HaaEYVSsjRDAdZ9_MSD8Tw,2431
  agnt5/function.py,sha256=f1vaAlJRwuo8cxCOGEd8XPido00mOhlPS8UJJx-6hJI,11041
  agnt5/lm.py,sha256=9dFjd6eQ3f3lFZe7H7rWZherYiP_58MT1F5xpwD8PCg,23195
- agnt5/tool.py,sha256=uc4L-Q9QyLzQDe-MZKk2Wo3o5e-mK8tfaQwVDgQdouQ,13133
+ agnt5/tool.py,sha256=dkShd97Y1cwSOUnTwvL2gr0CW-usRlaq4frki9kREXI,18008
  agnt5/tracing.py,sha256=Mh2-OfnQM61lM_P8gxJstafdsUA8Gxoo1lP-Joxhub8,5980
  agnt5/types.py,sha256=Zb71ZMwvrt1p4SH18cAKunp2y5tao_W5_jGYaPDejQo,2840
  agnt5/version.py,sha256=rOq1mObLihnnKgKqBrwZA0zwOPudEKVFcW1a48ynkqc,573
- agnt5/worker.py,sha256=NflbueeL2LT8NGywTQnEv1r-N8f54AENWcZARJ5wO8o,47975
- agnt5/workflow.py,sha256=3s9CY6a4UkJZ9YyHv2SAkY3UeCVBlfVi7jxJMFi8Dhg,19488
- agnt5-0.2.8a4.dist-info/RECORD,,
+ agnt5/worker.py,sha256=jRguDRF0uTjYAmmAgmn3GtdEvIxgTuUoIOsme3ba6mU,51172
+ agnt5/workflow.py,sha256=cDxKK21mLrsgjLGmosinhKnAme30CoQqs_fUFvRsyI0,23456
+ agnt5-0.2.8a6.dist-info/RECORD,,