agnt5 0.3.2a1__cp310-abi3-manylinux_2_34_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agnt5 might be problematic.

agnt5/events.py ADDED
@@ -0,0 +1,567 @@
+ """Streaming event types for AGNT5 SDK.
+
+ These events are sent over SSE connections from gateway to clients
+ during component execution (agents, workflows, functions).
+
+ Event types align with the journal event taxonomy defined in:
+ platform/pkg/repository/dto/journal_event.go
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field, asdict
+ from enum import Enum
+ from typing import Any, Dict, List, Optional, Union
+ import json
+ import time
+
+
+ class EventType(str, Enum):
+     """All streaming event types."""
+
+     # Run lifecycle
+     RUN_STARTED = "run.started"
+     RUN_COMPLETED = "run.completed"
+     RUN_FAILED = "run.failed"
+     RUN_PAUSED = "run.paused"
+     RUN_RESUMED = "run.resumed"
+     RUN_CANCELLED = "run.cancelled"
+     RUN_TIMEOUT = "run.timeout"
+
+     # LM streaming lifecycle
+     LM_STREAM_STARTED = "lm.stream.started"
+     LM_STREAM_COMPLETED = "lm.stream.completed"
+     LM_STREAM_FAILED = "lm.stream.failed"
+
+     # LM thinking content blocks (extended thinking / chain-of-thought)
+     LM_THINKING_START = "lm.thinking.start"
+     LM_THINKING_DELTA = "lm.thinking.delta"
+     LM_THINKING_STOP = "lm.thinking.stop"
+
+     # LM message content blocks (assistant output)
+     LM_MESSAGE_START = "lm.message.start"
+     LM_MESSAGE_DELTA = "lm.message.delta"
+     LM_MESSAGE_STOP = "lm.message.stop"
+
+     # LM tool call content blocks (LLM deciding to call a tool)
+     LM_TOOL_CALL_START = "lm.tool_call.start"
+     LM_TOOL_CALL_DELTA = "lm.tool_call.delta"
+     LM_TOOL_CALL_STOP = "lm.tool_call.stop"
+
+     # User code output streaming
+     OUTPUT_START = "output.start"
+     OUTPUT_DELTA = "output.delta"
+     OUTPUT_STOP = "output.stop"
+
+     # Progress indicators
+     PROGRESS_UPDATE = "progress.update"
+
+     # Tool execution events
+     TOOL_INVOKED = "tool.invoked"
+     TOOL_COMPLETED = "tool.completed"
+     TOOL_FAILED = "tool.failed"
+
+     # Agent events
+     AGENT_STARTED = "agent.started"
+     AGENT_COMPLETED = "agent.completed"
+     AGENT_FAILED = "agent.failed"
+     AGENT_ITERATION_STARTED = "agent.iteration.started"
+     AGENT_ITERATION_COMPLETED = "agent.iteration.completed"
+     AGENT_MAX_ITERATIONS = "agent.max_iterations.reached"
+     AGENT_TOOL_CALL_STARTED = "agent.tool_call.started"
+     AGENT_TOOL_CALL_COMPLETED = "agent.tool_call.completed"
+
+     # HITL: Approval events
+     APPROVAL_REQUESTED = "approval.requested"
+     APPROVAL_APPROVED = "approval.approved"
+     APPROVAL_REJECTED = "approval.rejected"
+     APPROVAL_EXPIRED = "approval.expired"
+
+     # HITL: Input events
+     INPUT_REQUESTED = "input.requested"
+     INPUT_PROVIDED = "input.provided"
+     INPUT_EXPIRED = "input.expired"
+
+     # HITL: Feedback events
+     FEEDBACK_REQUESTED = "feedback.requested"
+     FEEDBACK_PROVIDED = "feedback.provided"
+     FEEDBACK_EXPIRED = "feedback.expired"
+
+     # Workflow step events
+     WORKFLOW_STEP_STARTED = "workflow.step.started"
+     WORKFLOW_STEP_COMPLETED = "workflow.step.completed"
+     WORKFLOW_STEP_FAILED = "workflow.step.failed"
+
+
+ # --- Data payloads for streaming events ---
+
+
+ @dataclass
+ class ContentBlockStart:
+     """Start of a content block (thinking, message, tool_call, output)."""
+
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"index": self.index}
+
+
+ @dataclass
+ class ContentBlockDelta:
+     """Delta (incremental content) for a content block."""
+
+     content: str
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"content": self.content, "index": self.index}
+
+
+ @dataclass
+ class ContentBlockStop:
+     """End of a content block."""
+
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"index": self.index}
+
+
+ @dataclass
+ class ToolCallStart:
+     """Start of a tool call block."""
+
+     id: str
+     name: str
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"id": self.id, "name": self.name, "index": self.index}
+
+
+ @dataclass
+ class ToolCallDelta:
+     """Delta for tool call input arguments."""
+
+     input_delta: str
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"input_delta": self.input_delta, "index": self.index}
+
+
+ @dataclass
+ class ToolCallStop:
+     """End of a tool call block with complete input."""
+
+     id: str
+     name: str
+     input: Dict[str, Any]
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"id": self.id, "name": self.name, "input": self.input, "index": self.index}
+
+
+ @dataclass
+ class ProgressUpdate:
+     """Progress indicator update."""
+
+     message: Optional[str] = None
+     percent: Optional[float] = None
+     current: Optional[int] = None
+     total: Optional[int] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         d: Dict[str, Any] = {}
+         if self.message is not None:
+             d["message"] = self.message
+         if self.percent is not None:
+             d["percent"] = self.percent
+         if self.current is not None:
+             d["current"] = self.current
+         if self.total is not None:
+             d["total"] = self.total
+         return d
+
+
+ @dataclass
+ class TokenUsage:
+     """Token usage statistics."""
+
+     input_tokens: int
+     output_tokens: int
+     total_tokens: int
+     thinking_tokens: Optional[int] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         d = {
+             "input_tokens": self.input_tokens,
+             "output_tokens": self.output_tokens,
+             "total_tokens": self.total_tokens,
+         }
+         if self.thinking_tokens is not None:
+             d["thinking_tokens"] = self.thinking_tokens
+         return d
+
+
+ @dataclass
+ class ErrorDetail:
+     """Error details."""
+
+     code: str
+     message: str
+     details: Optional[Dict[str, Any]] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         d = {"code": self.code, "message": self.message}
+         if self.details:
+             d["details"] = self.details
+         return d
+
+
+ # --- Event class ---
+
+
+ @dataclass
+ class Event:
+     """A streaming event with typed data payload.
+
+     This is the primary class for emitting events during streaming execution.
+     Events are serialized and sent via the gRPC response stream to the gateway,
+     which then emits them as SSE events to clients.
+
+     For delta events (message_delta, thinking_delta, etc.), data should be
+     the raw content value. The gateway wraps it with {"content": <data>, "index": ...}.
+     For other events, data is typically a dict with structured information.
+     """
+
+     event_type: EventType
+     data: Any  # Raw value for deltas, dict for structured events
+     content_index: int = 0
+     sequence: int = 0
+     source_timestamp_ns: int = field(default_factory=time.time_ns)
+
+     def to_response_fields(self) -> Dict[str, Any]:
+         """Convert to fields for ExecuteComponentResponse proto.
+
+         Returns dict with:
+         - event_type: str
+         - content_index: int
+         - sequence: int
+         - output_data: bytes (JSON-encoded data)
+         """
+         return {
+             "event_type": self.event_type.value,
+             "content_index": self.content_index,
+             "sequence": self.sequence,
+             "output_data": json.dumps(self.data).encode("utf-8") if self.data else b"",
+         }
+
+     @classmethod
+     def thinking_start(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a thinking.start event."""
+         return cls(
+             event_type=EventType.LM_THINKING_START,
+             data=ContentBlockStart(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def thinking_delta(cls, content: str, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a thinking.delta event.
+
+         Note: data is just the content string. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.LM_THINKING_DELTA,
+             data=content,  # Raw content, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def thinking_stop(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a thinking.stop event."""
+         return cls(
+             event_type=EventType.LM_THINKING_STOP,
+             data=ContentBlockStop(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def message_start(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a message.start event."""
+         return cls(
+             event_type=EventType.LM_MESSAGE_START,
+             data=ContentBlockStart(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def message_delta(cls, content: str, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a message.delta event.
+
+         Note: data is just the content string. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.LM_MESSAGE_DELTA,
+             data=content,  # Raw content, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def message_stop(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a message.stop event."""
+         return cls(
+             event_type=EventType.LM_MESSAGE_STOP,
+             data=ContentBlockStop(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def tool_call_start(
+         cls, id: str, name: str, index: int = 0, sequence: int = 0
+     ) -> "Event":
+         """Create a tool_call.start event."""
+         return cls(
+             event_type=EventType.LM_TOOL_CALL_START,
+             data=ToolCallStart(id=id, name=name, index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def tool_call_delta(cls, input_delta: str, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a tool_call.delta event.
+
+         Note: data is just the input_delta string. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.LM_TOOL_CALL_DELTA,
+             data=input_delta,  # Raw input delta, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def tool_call_stop(
+         cls, id: str, name: str, input: Dict[str, Any], index: int = 0, sequence: int = 0
+     ) -> "Event":
+         """Create a tool_call.stop event."""
+         return cls(
+             event_type=EventType.LM_TOOL_CALL_STOP,
+             data=ToolCallStop(id=id, name=name, input=input, index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def output_start(cls, index: int = 0, sequence: int = 0, content_type: str = "text/plain") -> "Event":
+         """Create an output.start event for user code streaming."""
+         return cls(
+             event_type=EventType.OUTPUT_START,
+             data={"index": index, "content_type": content_type},
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def output_delta(cls, content: Any, index: int = 0, sequence: int = 0) -> "Event":
+         """Create an output.delta event for user code streaming.
+
+         Note: data is just the content value. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.OUTPUT_DELTA,
+             data=content,  # Raw content, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def output_stop(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create an output.stop event for user code streaming."""
+         return cls(
+             event_type=EventType.OUTPUT_STOP,
+             data={"index": index},
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def progress(
+         cls,
+         message: Optional[str] = None,
+         percent: Optional[float] = None,
+         current: Optional[int] = None,
+         total: Optional[int] = None,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create a progress.update event."""
+         return cls(
+             event_type=EventType.PROGRESS_UPDATE,
+             data=ProgressUpdate(
+                 message=message, percent=percent, current=current, total=total
+             ).to_dict(),
+             content_index=0,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def run_completed(cls, output: Any, usage: Optional[TokenUsage] = None, sequence: int = 0) -> "Event":
+         """Create a run.completed event."""
+         data: Dict[str, Any] = {"output": output}
+         if usage:
+             data["usage"] = usage.to_dict()
+         return cls(
+             event_type=EventType.RUN_COMPLETED,
+             data=data,
+             content_index=0,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def run_failed(cls, error: ErrorDetail, sequence: int = 0) -> "Event":
+         """Create a run.failed event."""
+         return cls(
+             event_type=EventType.RUN_FAILED,
+             data={"error": error.to_dict()},
+             content_index=0,
+             sequence=sequence,
+         )
+
+     # --- Agent events ---
+
+     @classmethod
+     def agent_started(
+         cls,
+         agent_name: str,
+         model: str,
+         tools: Optional[List[str]] = None,
+         max_iterations: int = 10,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.started event."""
+         return cls(
+             event_type=EventType.AGENT_STARTED,
+             data={
+                 "agent_name": agent_name,
+                 "model": model,
+                 "tools": tools or [],
+                 "max_iterations": max_iterations,
+             },
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_completed(
+         cls,
+         output: str,
+         iterations: int = 1,
+         tool_calls: Optional[List[Dict[str, Any]]] = None,
+         handoff_to: Optional[str] = None,
+         max_iterations_reached: bool = False,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.completed event."""
+         return cls(
+             event_type=EventType.AGENT_COMPLETED,
+             data={
+                 "output": output,
+                 "iterations": iterations,
+                 "tool_calls": tool_calls or [],
+                 "handoff_to": handoff_to,
+                 "max_iterations_reached": max_iterations_reached,
+             },
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_failed(
+         cls,
+         error: str,
+         error_type: str,
+         agent_name: Optional[str] = None,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.failed event."""
+         return cls(
+             event_type=EventType.AGENT_FAILED,
+             data={
+                 "error": error,
+                 "error_type": error_type,
+                 "agent_name": agent_name,
+             },
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_tool_call_started(
+         cls,
+         tool_name: str,
+         arguments: str,
+         tool_call_id: Optional[str] = None,
+         content_index: int = 0,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.tool_call.started event.
+
+         Args:
+             tool_name: Name of the tool being called
+             arguments: JSON-encoded arguments string
+             tool_call_id: Optional unique ID for this tool call (from LLM)
+             content_index: Index for parallel tool calls (0-based)
+             sequence: Event sequence number
+         """
+         return cls(
+             event_type=EventType.AGENT_TOOL_CALL_STARTED,
+             data={
+                 "tool_name": tool_name,
+                 "arguments": arguments,
+                 "tool_call_id": tool_call_id,
+             },
+             content_index=content_index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_tool_call_completed(
+         cls,
+         tool_name: str,
+         result: Any,
+         error: Optional[str] = None,
+         tool_call_id: Optional[str] = None,
+         content_index: int = 0,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.tool_call.completed event.
+
+         Args:
+             tool_name: Name of the tool that was called
+             result: The tool's return value (JSON-serializable)
+             error: Error message if tool failed
+             tool_call_id: Optional unique ID for this tool call (from LLM)
+             content_index: Index for parallel tool calls (must match started event)
+             sequence: Event sequence number
+         """
+         return cls(
+             event_type=EventType.AGENT_TOOL_CALL_COMPLETED,
+             data={
+                 "tool_name": tool_name,
+                 "result": result,
+                 "error": error,
+                 "tool_call_id": tool_call_id,
+             },
+             content_index=content_index,
+             sequence=sequence,
+         )
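
For orientation, here is a minimal usage sketch (not part of the package diff) showing how the factory methods above compose a short message stream and how each event is serialized before being handed to the gRPC response stream. It assumes `Event` and `TokenUsage` are importable from `agnt5.events` as defined in this file.

    # Illustrative sketch only, not published package content.
    from agnt5.events import Event, TokenUsage

    # A small message stream followed by the final run.completed event.
    events = [
        Event.message_start(index=0, sequence=0),
        Event.message_delta("Hello, ", index=0, sequence=1),
        Event.message_delta("world!", index=0, sequence=2),
        Event.message_stop(index=0, sequence=3),
        Event.run_completed(
            output="Hello, world!",
            usage=TokenUsage(input_tokens=12, output_tokens=4, total_tokens=16),
            sequence=4,
        ),
    ]

    for event in events:
        # to_response_fields() yields event_type, content_index, sequence,
        # and output_data (JSON-encoded bytes) for the proto response.
        fields = event.to_response_fields()
        print(fields["event_type"], fields["sequence"], fields["output_data"])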
agnt5/exceptions.py ADDED
@@ -0,0 +1,110 @@
+ """AGNT5 SDK exceptions and error types."""
+
+ from typing import Dict, List, Optional
+
+
+ class AGNT5Error(Exception):
+     """Base exception for all AGNT5 SDK errors."""
+
+     pass
+
+
+ class ConfigurationError(AGNT5Error):
+     """Raised when SDK configuration is invalid."""
+
+     pass
+
+
+ class ExecutionError(AGNT5Error):
+     """Raised when function or workflow execution fails."""
+
+     pass
+
+
+ class RetryError(ExecutionError):
+     """Raised when a function exceeds maximum retry attempts."""
+
+     def __init__(self, message: str, attempts: int, last_error: Exception) -> None:
+         super().__init__(message)
+         self.attempts = attempts
+         self.last_error = last_error
+
+
+ class StateError(AGNT5Error):
+     """Raised when state operations fail."""
+
+     pass
+
+
+ class CheckpointError(AGNT5Error):
+     """Raised when checkpoint operations fail."""
+
+     pass
+
+
+ class NotImplementedError(AGNT5Error):
+     """Raised when a feature is not yet implemented."""
+
+     pass
+
+
+ class WaitingForUserInputException(BaseException):
+     """Raised when workflow needs to pause for user input.
+
+     This exception is used internally by ctx.wait_for_user() to signal
+     that a workflow execution should pause and wait for user input.
+
+     IMPORTANT: This inherits from BaseException (not Exception) to prevent
+     accidental catching by broad `except Exception:` blocks. This is the same
+     pattern Python uses for KeyboardInterrupt and SystemExit.
+
+     The platform catches this exception and:
+     1. Saves the workflow checkpoint state
+     2. Returns awaiting_user_input status to the client
+     3. Presents the question and options to the user
+     4. Resumes execution when user responds
+
+     Attributes:
+         question: The question to ask the user
+         input_type: Type of input ("text", "approval", or "choice")
+         options: List of options for approval/choice inputs
+         checkpoint_state: Current workflow state for resume
+         pause_index: The index of this pause point (for multi-step HITL)
+         agent_context: Optional agent execution state for agent-level HITL
+             Contains: agent_name, iteration, messages, tool_results, pending_tool_call, etc.
+     """
+
+     def __init__(
+         self,
+         question: str,
+         input_type: str,
+         options: Optional[List[Dict]],
+         checkpoint_state: Dict,
+         pause_index: int = 0,
+         agent_context: Optional[Dict] = None,
+     ) -> None:
+         """Initialize WaitingForUserInputException.
+
+         Args:
+             question: Question to ask the user
+             input_type: Type of input - "text", "approval", or "choice"
+             options: List of option dicts (for approval/choice)
+             checkpoint_state: Workflow state snapshot for resume
+             pause_index: Index of this pause point (0-indexed, for multi-step HITL)
+             agent_context: Optional agent execution state for resuming agents
+                 Required fields when provided:
+                 - agent_name: Name of the agent that paused
+                 - iteration: Current iteration number (0-indexed)
+                 - messages: LLM conversation history as list of dicts
+                 - tool_results: Partial tool results for current iteration
+                 - pending_tool_call: The HITL tool call awaiting response
+                 - all_tool_calls: All tool calls made so far
+                 - model_config: Model settings for resume
+         """
+         super().__init__(f"Waiting for user input: {question}")
+         self.question = question
+         self.input_type = input_type
+         self.options = options or []
+         self.checkpoint_state = checkpoint_state
+         self.pause_index = pause_index
+         self.agent_context = agent_context
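
For orientation, here is a minimal sketch (not part of the package diff) of how `WaitingForUserInputException` is raised and handled along the lines of the docstring above. The `ask_for_approval` helper and the handling code are hypothetical placeholders; only the exception's constructor and attributes come from this file.

    # Illustrative sketch only, not published package content.
    from agnt5.exceptions import WaitingForUserInputException

    def ask_for_approval(checkpoint_state: dict) -> None:
        # Inside workflow code: pause until the user approves or rejects.
        raise WaitingForUserInputException(
            question="Deploy to production?",
            input_type="approval",
            options=[{"label": "Approve"}, {"label": "Reject"}],
            checkpoint_state=checkpoint_state,
            pause_index=0,
        )

    try:
        ask_for_approval({"step": "deploy", "attempt": 1})
    except WaitingForUserInputException as pause:
        # Platform-side handling: persist the checkpoint and surface the question.
        # A bare `except Exception:` would not catch this, since it derives
        # from BaseException by design.
        print("awaiting_user_input:", pause.question, pause.input_type, pause.pause_index)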