agnt5-0.3.0a8-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of agnt5 has been flagged as potentially problematic.

agnt5/events.py ADDED
@@ -0,0 +1,566 @@
+ """Streaming event types for AGNT5 SDK.
+
+ These events are sent over SSE connections from gateway to clients
+ during component execution (agents, workflows, functions).
+
+ Event types align with the journal event taxonomy defined in:
+ platform/pkg/repository/dto/journal_event.go
+ """
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field, asdict
+ from enum import Enum
+ from typing import Any, Dict, List, Optional, Union
+ import json
+ import time
+
+
+ class EventType(str, Enum):
+     """All streaming event types."""
+
+     # Run lifecycle
+     RUN_STARTED = "run.started"
+     RUN_COMPLETED = "run.completed"
+     RUN_FAILED = "run.failed"
+     RUN_PAUSED = "run.paused"
+     RUN_RESUMED = "run.resumed"
+     RUN_CANCELLED = "run.cancelled"
+     RUN_TIMEOUT = "run.timeout"
+
+     # LM streaming lifecycle
+     LM_STREAM_STARTED = "lm.stream.started"
+     LM_STREAM_COMPLETED = "lm.stream.completed"
+     LM_STREAM_FAILED = "lm.stream.failed"
+
+     # LM thinking content blocks (extended thinking / chain-of-thought)
+     LM_THINKING_START = "lm.thinking.start"
+     LM_THINKING_DELTA = "lm.thinking.delta"
+     LM_THINKING_STOP = "lm.thinking.stop"
+
+     # LM message content blocks (assistant output)
+     LM_MESSAGE_START = "lm.message.start"
+     LM_MESSAGE_DELTA = "lm.message.delta"
+     LM_MESSAGE_STOP = "lm.message.stop"
+
+     # LM tool call content blocks (LLM deciding to call a tool)
+     LM_TOOL_CALL_START = "lm.tool_call.start"
+     LM_TOOL_CALL_DELTA = "lm.tool_call.delta"
+     LM_TOOL_CALL_STOP = "lm.tool_call.stop"
+
+     # User code output streaming
+     OUTPUT_START = "output.start"
+     OUTPUT_DELTA = "output.delta"
+     OUTPUT_STOP = "output.stop"
+
+     # Progress indicators
+     PROGRESS_UPDATE = "progress.update"
+
+     # Tool execution events
+     TOOL_INVOKED = "tool.invoked"
+     TOOL_COMPLETED = "tool.completed"
+     TOOL_FAILED = "tool.failed"
+
+     # Agent events
+     AGENT_STARTED = "agent.started"
+     AGENT_COMPLETED = "agent.completed"
+     AGENT_FAILED = "agent.failed"
+     AGENT_ITERATION_STARTED = "agent.iteration.started"
+     AGENT_ITERATION_COMPLETED = "agent.iteration.completed"
+     AGENT_MAX_ITERATIONS = "agent.max_iterations.reached"
+     AGENT_TOOL_CALL_STARTED = "agent.tool_call.started"
+     AGENT_TOOL_CALL_COMPLETED = "agent.tool_call.completed"
+
+     # HITL: Approval events
+     APPROVAL_REQUESTED = "approval.requested"
+     APPROVAL_APPROVED = "approval.approved"
+     APPROVAL_REJECTED = "approval.rejected"
+     APPROVAL_EXPIRED = "approval.expired"
+
+     # HITL: Input events
+     INPUT_REQUESTED = "input.requested"
+     INPUT_PROVIDED = "input.provided"
+     INPUT_EXPIRED = "input.expired"
+
+     # HITL: Feedback events
+     FEEDBACK_REQUESTED = "feedback.requested"
+     FEEDBACK_PROVIDED = "feedback.provided"
+     FEEDBACK_EXPIRED = "feedback.expired"
+
+     # Workflow step events
+     WORKFLOW_STEP_STARTED = "workflow.step.started"
+     WORKFLOW_STEP_COMPLETED = "workflow.step.completed"
+     WORKFLOW_STEP_FAILED = "workflow.step.failed"
+
+
+ # --- Data payloads for streaming events ---
+
+
+ @dataclass
+ class ContentBlockStart:
+     """Start of a content block (thinking, message, tool_call, output)."""
+
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"index": self.index}
+
+
+ @dataclass
+ class ContentBlockDelta:
+     """Delta (incremental content) for a content block."""
+
+     content: str
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"content": self.content, "index": self.index}
+
+
+ @dataclass
+ class ContentBlockStop:
+     """End of a content block."""
+
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"index": self.index}
+
+
+ @dataclass
+ class ToolCallStart:
+     """Start of a tool call block."""
+
+     id: str
+     name: str
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"id": self.id, "name": self.name, "index": self.index}
+
+
+ @dataclass
+ class ToolCallDelta:
+     """Delta for tool call input arguments."""
+
+     input_delta: str
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"input_delta": self.input_delta, "index": self.index}
+
+
+ @dataclass
+ class ToolCallStop:
+     """End of a tool call block with complete input."""
+
+     id: str
+     name: str
+     input: Dict[str, Any]
+     index: int = 0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {"id": self.id, "name": self.name, "input": self.input, "index": self.index}
+
+
+ @dataclass
+ class ProgressUpdate:
+     """Progress indicator update."""
+
+     message: Optional[str] = None
+     percent: Optional[float] = None
+     current: Optional[int] = None
+     total: Optional[int] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         d: Dict[str, Any] = {}
+         if self.message is not None:
+             d["message"] = self.message
+         if self.percent is not None:
+             d["percent"] = self.percent
+         if self.current is not None:
+             d["current"] = self.current
+         if self.total is not None:
+             d["total"] = self.total
+         return d
+
+
+ @dataclass
+ class TokenUsage:
+     """Token usage statistics."""
+
+     input_tokens: int
+     output_tokens: int
+     total_tokens: int
+     thinking_tokens: Optional[int] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         d = {
+             "input_tokens": self.input_tokens,
+             "output_tokens": self.output_tokens,
+             "total_tokens": self.total_tokens,
+         }
+         if self.thinking_tokens is not None:
+             d["thinking_tokens"] = self.thinking_tokens
+         return d
+
+
+ @dataclass
+ class ErrorDetail:
+     """Error details."""
+
+     code: str
+     message: str
+     details: Optional[Dict[str, Any]] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         d = {"code": self.code, "message": self.message}
+         if self.details:
+             d["details"] = self.details
+         return d
+
+
+ # --- Event class ---
+
+
+ @dataclass
+ class Event:
+     """A streaming event with typed data payload.
+
+     This is the primary class for emitting events during streaming execution.
+     Events are serialized and sent via the gRPC response stream to the gateway,
+     which then emits them as SSE events to clients.
+
+     For delta events (message_delta, thinking_delta, etc.), data should be
+     the raw content value. The gateway wraps it with {"content": <data>, "index": ...}.
+     For other events, data is typically a dict with structured information.
+     """
+
+     event_type: EventType
+     data: Any  # Raw value for deltas, dict for structured events
+     content_index: int = 0
+     sequence: int = 0
+
+     def to_response_fields(self) -> Dict[str, Any]:
+         """Convert to fields for ExecuteComponentResponse proto.
+
+         Returns dict with:
+         - event_type: str
+         - content_index: int
+         - sequence: int
+         - output_data: bytes (JSON-encoded data)
+         """
+         return {
+             "event_type": self.event_type.value,
+             "content_index": self.content_index,
+             "sequence": self.sequence,
+             "output_data": json.dumps(self.data).encode("utf-8") if self.data else b"",
+         }
+
+     @classmethod
+     def thinking_start(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a thinking.start event."""
+         return cls(
+             event_type=EventType.LM_THINKING_START,
+             data=ContentBlockStart(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def thinking_delta(cls, content: str, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a thinking.delta event.
+
+         Note: data is just the content string. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.LM_THINKING_DELTA,
+             data=content,  # Raw content, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def thinking_stop(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a thinking.stop event."""
+         return cls(
+             event_type=EventType.LM_THINKING_STOP,
+             data=ContentBlockStop(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def message_start(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a message.start event."""
+         return cls(
+             event_type=EventType.LM_MESSAGE_START,
+             data=ContentBlockStart(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def message_delta(cls, content: str, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a message.delta event.
+
+         Note: data is just the content string. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.LM_MESSAGE_DELTA,
+             data=content,  # Raw content, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def message_stop(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a message.stop event."""
+         return cls(
+             event_type=EventType.LM_MESSAGE_STOP,
+             data=ContentBlockStop(index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def tool_call_start(
+         cls, id: str, name: str, index: int = 0, sequence: int = 0
+     ) -> "Event":
+         """Create a tool_call.start event."""
+         return cls(
+             event_type=EventType.LM_TOOL_CALL_START,
+             data=ToolCallStart(id=id, name=name, index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def tool_call_delta(cls, input_delta: str, index: int = 0, sequence: int = 0) -> "Event":
+         """Create a tool_call.delta event.
+
+         Note: data is just the input_delta string. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.LM_TOOL_CALL_DELTA,
+             data=input_delta,  # Raw input delta, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def tool_call_stop(
+         cls, id: str, name: str, input: Dict[str, Any], index: int = 0, sequence: int = 0
+     ) -> "Event":
+         """Create a tool_call.stop event."""
+         return cls(
+             event_type=EventType.LM_TOOL_CALL_STOP,
+             data=ToolCallStop(id=id, name=name, input=input, index=index).to_dict(),
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def output_start(cls, index: int = 0, sequence: int = 0, content_type: str = "text/plain") -> "Event":
+         """Create an output.start event for user code streaming."""
+         return cls(
+             event_type=EventType.OUTPUT_START,
+             data={"index": index, "content_type": content_type},
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def output_delta(cls, content: Any, index: int = 0, sequence: int = 0) -> "Event":
+         """Create an output.delta event for user code streaming.
+
+         Note: data is just the content value. Gateway wraps it with
+         {"content": <data>, "index": content_index}.
+         """
+         return cls(
+             event_type=EventType.OUTPUT_DELTA,
+             data=content,  # Raw content, gateway adds wrapper
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def output_stop(cls, index: int = 0, sequence: int = 0) -> "Event":
+         """Create an output.stop event for user code streaming."""
+         return cls(
+             event_type=EventType.OUTPUT_STOP,
+             data={"index": index},
+             content_index=index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def progress(
+         cls,
+         message: Optional[str] = None,
+         percent: Optional[float] = None,
+         current: Optional[int] = None,
+         total: Optional[int] = None,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create a progress.update event."""
+         return cls(
+             event_type=EventType.PROGRESS_UPDATE,
+             data=ProgressUpdate(
+                 message=message, percent=percent, current=current, total=total
+             ).to_dict(),
+             content_index=0,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def run_completed(cls, output: Any, usage: Optional[TokenUsage] = None, sequence: int = 0) -> "Event":
+         """Create a run.completed event."""
+         data: Dict[str, Any] = {"output": output}
+         if usage:
+             data["usage"] = usage.to_dict()
+         return cls(
+             event_type=EventType.RUN_COMPLETED,
+             data=data,
+             content_index=0,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def run_failed(cls, error: ErrorDetail, sequence: int = 0) -> "Event":
+         """Create a run.failed event."""
+         return cls(
+             event_type=EventType.RUN_FAILED,
+             data={"error": error.to_dict()},
+             content_index=0,
+             sequence=sequence,
+         )
+
+     # --- Agent events ---
+
+     @classmethod
+     def agent_started(
+         cls,
+         agent_name: str,
+         model: str,
+         tools: Optional[List[str]] = None,
+         max_iterations: int = 10,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.started event."""
+         return cls(
+             event_type=EventType.AGENT_STARTED,
+             data={
+                 "agent_name": agent_name,
+                 "model": model,
+                 "tools": tools or [],
+                 "max_iterations": max_iterations,
+             },
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_completed(
+         cls,
+         output: str,
+         iterations: int = 1,
+         tool_calls: Optional[List[Dict[str, Any]]] = None,
+         handoff_to: Optional[str] = None,
+         max_iterations_reached: bool = False,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.completed event."""
+         return cls(
+             event_type=EventType.AGENT_COMPLETED,
+             data={
+                 "output": output,
+                 "iterations": iterations,
+                 "tool_calls": tool_calls or [],
+                 "handoff_to": handoff_to,
+                 "max_iterations_reached": max_iterations_reached,
+             },
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_failed(
+         cls,
+         error: str,
+         error_type: str,
+         agent_name: Optional[str] = None,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.failed event."""
+         return cls(
+             event_type=EventType.AGENT_FAILED,
+             data={
+                 "error": error,
+                 "error_type": error_type,
+                 "agent_name": agent_name,
+             },
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_tool_call_started(
+         cls,
+         tool_name: str,
+         arguments: str,
+         tool_call_id: Optional[str] = None,
+         content_index: int = 0,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.tool_call.started event.
+
+         Args:
+             tool_name: Name of the tool being called
+             arguments: JSON-encoded arguments string
+             tool_call_id: Optional unique ID for this tool call (from LLM)
+             content_index: Index for parallel tool calls (0-based)
+             sequence: Event sequence number
+         """
+         return cls(
+             event_type=EventType.AGENT_TOOL_CALL_STARTED,
+             data={
+                 "tool_name": tool_name,
+                 "arguments": arguments,
+                 "tool_call_id": tool_call_id,
+             },
+             content_index=content_index,
+             sequence=sequence,
+         )
+
+     @classmethod
+     def agent_tool_call_completed(
+         cls,
+         tool_name: str,
+         result: Any,
+         error: Optional[str] = None,
+         tool_call_id: Optional[str] = None,
+         content_index: int = 0,
+         sequence: int = 0,
+     ) -> "Event":
+         """Create an agent.tool_call.completed event.
+
+         Args:
+             tool_name: Name of the tool that was called
+             result: The tool's return value (JSON-serializable)
+             error: Error message if tool failed
+             tool_call_id: Optional unique ID for this tool call (from LLM)
+             content_index: Index for parallel tool calls (must match started event)
+             sequence: Event sequence number
+         """
+         return cls(
+             event_type=EventType.AGENT_TOOL_CALL_COMPLETED,
+             data={
+                 "tool_name": tool_name,
+                 "result": result,
+                 "error": error,
+                 "tool_call_id": tool_call_id,
+             },
+             content_index=content_index,
+             sequence=sequence,
+         )
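
The factory methods above amount to a small builder API for the stream: open a content block, push raw deltas, close it, then finish the run. Below is a minimal sketch of how a worker might emit one assistant message as a start/delta/stop sequence and serialize each event with to_response_fields(); the stream_reply helper, its emit callback, and the token counts are illustrative assumptions, not part of the SDK.

from agnt5.events import Event, TokenUsage

def stream_reply(chunks, emit):
    """Hypothetical helper: emit a message block chunk by chunk, then finish the run."""
    seq = 0
    parts = []
    emit(Event.message_start(index=0, sequence=seq).to_response_fields())
    for chunk in chunks:
        seq += 1
        parts.append(chunk)
        # Delta payloads stay raw; the gateway wraps them as {"content": ..., "index": ...}.
        emit(Event.message_delta(chunk, index=0, sequence=seq).to_response_fields())
    emit(Event.message_stop(index=0, sequence=seq + 1).to_response_fields())
    usage = TokenUsage(input_tokens=12, output_tokens=34, total_tokens=46)  # placeholder numbers
    emit(Event.run_completed(output="".join(parts), usage=usage, sequence=seq + 2).to_response_fields())

# Example: print the serialized response fields for a two-chunk reply.
stream_reply(["Hello, ", "world"], print)

Each emitted dict carries event_type, content_index, sequence, and JSON-encoded output_data, matching the fields to_response_fields() documents for the ExecuteComponentResponse proto.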
agnt5/exceptions.py ADDED
@@ -0,0 +1,102 @@
+ """AGNT5 SDK exceptions and error types."""
+
+ from typing import Dict, List, Optional
+
+
+ class AGNT5Error(Exception):
+     """Base exception for all AGNT5 SDK errors."""
+
+     pass
+
+
+ class ConfigurationError(AGNT5Error):
+     """Raised when SDK configuration is invalid."""
+
+     pass
+
+
+ class ExecutionError(AGNT5Error):
+     """Raised when function or workflow execution fails."""
+
+     pass
+
+
+ class RetryError(ExecutionError):
+     """Raised when a function exceeds maximum retry attempts."""
+
+     def __init__(self, message: str, attempts: int, last_error: Exception) -> None:
+         super().__init__(message)
+         self.attempts = attempts
+         self.last_error = last_error
+
+
+ class StateError(AGNT5Error):
+     """Raised when state operations fail."""
+
+     pass
+
+
+ class CheckpointError(AGNT5Error):
+     """Raised when checkpoint operations fail."""
+
+     pass
+
+
+ class NotImplementedError(AGNT5Error):
+     """Raised when a feature is not yet implemented."""
+
+     pass
+
+
+ class WaitingForUserInputException(AGNT5Error):
+     """Raised when workflow needs to pause for user input.
+
+     This exception is used internally by ctx.wait_for_user() to signal
+     that a workflow execution should pause and wait for user input.
+
+     The platform catches this exception and:
+     1. Saves the workflow checkpoint state
+     2. Returns awaiting_user_input status to the client
+     3. Presents the question and options to the user
+     4. Resumes execution when user responds
+
+     Attributes:
+         question: The question to ask the user
+         input_type: Type of input ("text", "approval", or "choice")
+         options: List of options for approval/choice inputs
+         checkpoint_state: Current workflow state for resume
+         agent_context: Optional agent execution state for agent-level HITL
+             Contains: agent_name, iteration, messages, tool_results, pending_tool_call, etc.
+     """
+
+     def __init__(
+         self,
+         question: str,
+         input_type: str,
+         options: Optional[List[Dict]],
+         checkpoint_state: Dict,
+         agent_context: Optional[Dict] = None,
+     ) -> None:
+         """Initialize WaitingForUserInputException.
+
+         Args:
+             question: Question to ask the user
+             input_type: Type of input - "text", "approval", or "choice"
+             options: List of option dicts (for approval/choice)
+             checkpoint_state: Workflow state snapshot for resume
+             agent_context: Optional agent execution state for resuming agents
+                 Required fields when provided:
+                 - agent_name: Name of the agent that paused
+                 - iteration: Current iteration number (0-indexed)
+                 - messages: LLM conversation history as list of dicts
+                 - tool_results: Partial tool results for current iteration
+                 - pending_tool_call: The HITL tool call awaiting response
+                 - all_tool_calls: All tool calls made so far
+                 - model_config: Model settings for resume
+         """
+         super().__init__(f"Waiting for user input: {question}")
+         self.question = question
+         self.input_type = input_type
+         self.options = options or []
+         self.checkpoint_state = checkpoint_state
+         self.agent_context = agent_context
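
WaitingForUserInputException carries everything the platform needs to pause a run and resume it later, per the docstring above. The sketch below raises and inspects it directly; the request_approval helper and the shapes of the options and checkpoint_state dicts are illustrative assumptions, not part of the SDK.

from agnt5.exceptions import WaitingForUserInputException

def request_approval(plan: str) -> None:
    # Hypothetical helper: pause the run until a human approves the plan.
    raise WaitingForUserInputException(
        question=f"Approve this plan? {plan}",
        input_type="approval",
        options=[{"label": "Approve"}, {"label": "Reject"}],  # assumed option shape
        checkpoint_state={"plan": plan, "step": "await_approval"},  # assumed state shape
    )

try:
    request_approval("archive 3 stale records")
except WaitingForUserInputException as exc:
    # In production the platform persists exc.checkpoint_state and surfaces the
    # question and options to the user; here we just inspect the fields.
    print(exc.question, exc.input_type, exc.options)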