grasp_agents 0.5.5__py3-none-any.whl → 0.5.8__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -15,7 +15,7 @@ from openai.types.chat.chat_completion_chunk import (
  from openai.types.chat.chat_completion_token_logprob import (
      ChatCompletionTokenLogprob as OpenAITokenLogprob,
  )
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, ValidationError, field_validator

  from ..errors import CombineCompletionChunksError
  from .completion import Completion, CompletionChoice, FinishReason, Usage
@@ -38,13 +38,77 @@ class CompletionChunkDeltaToolCall(BaseModel):
  class CompletionChunkChoiceDelta(BaseModel):
      content: str | None = None
      refusal: str | None = None
-     role: Role | None
-     tool_calls: list[CompletionChunkDeltaToolCall] | None
+     role: Role | None = None
+     tool_calls: list[CompletionChunkDeltaToolCall] | None = None
      reasoning_content: str | None = None
      thinking_blocks: list[ThinkingBlock | RedactedThinkingBlock] | None = None
      annotations: list[LiteLLMAnnotation] | None = None
      provider_specific_fields: dict[str, Any] | None = None

+     @property
+     def thinking_delta(self) -> "CompletionChunkChoiceDelta | None":
+         return (
+             CompletionChunkChoiceDelta(
+                 reasoning_content=self.reasoning_content,
+                 thinking_blocks=self.thinking_blocks,
+                 role=self.role,
+                 provider_specific_fields=self.provider_specific_fields,
+             )
+             if self.reasoning_content or self.thinking_blocks
+             else None
+         )
+
+     @property
+     def tool_call_deltas(self) -> "list[CompletionChunkChoiceDelta] | None":
+         return (
+             [
+                 CompletionChunkChoiceDelta(
+                     tool_calls=[tool_call],
+                     role=self.role,
+                     provider_specific_fields=self.provider_specific_fields,
+                 )
+                 for tool_call in self.tool_calls
+             ]
+             if self.tool_calls
+             else None
+         )
+
+     @property
+     def response_delta(self) -> "CompletionChunkChoiceDelta | None":
+         return (
+             CompletionChunkChoiceDelta(
+                 content=self.content,
+                 role=self.role,
+                 provider_specific_fields=self.provider_specific_fields,
+             )
+             if self.content
+             else None
+         )
+
+     @property
+     def annotations_delta(self) -> "CompletionChunkChoiceDelta | None":
+         return (
+             CompletionChunkChoiceDelta(
+                 annotations=self.annotations,
+                 role=self.role,
+                 provider_specific_fields=self.provider_specific_fields,
+             )
+             if self.annotations
+             else None
+         )
+
+     @property
+     def refusal_delta(self) -> "CompletionChunkChoiceDelta | None":
+         return (
+             CompletionChunkChoiceDelta(
+                 refusal=self.refusal,
+                 role=self.role,
+                 provider_specific_fields=self.provider_specific_fields,
+             )
+             if self.refusal
+             else None
+         )
+

  class CompletionChunkChoice(BaseModel):
      delta: CompletionChunkChoiceDelta
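For orientation (illustrative, not part of the diff): the new properties on CompletionChunkChoiceDelta expose a mixed streaming delta as per-modality slices, each carrying only one kind of content plus the shared role and provider fields. A minimal sketch, assuming the classes above are in scope and that `tool_call` is a placeholder CompletionChunkDeltaToolCall instance:

    delta = CompletionChunkChoiceDelta(
        content="partial text",
        tool_calls=[tool_call],  # placeholder tool-call delta
    )
    assert delta.response_delta is not None    # carries only the text content
    assert delta.tool_call_deltas is not None  # one sub-delta per tool call
    assert delta.thinking_delta is None        # no reasoning content in this delta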
@@ -66,6 +130,241 @@ class CompletionChunk(BaseModel):
      response_ms: float | None = None
      hidden_params: dict[str, Any] | None = None

+     def split_into_specialized(
+         self,
+     ) -> "list[CompletionChunk]":
+         if len(self.choices) != 1:
+             raise ValidationError(
+                 "CompletionChunk must have exactly one choice for specialization."
+             )
+         delta = self.choices[0].delta
+
+         specialized_chunks: list[CompletionChunk] = []
+
+         thinking_delta = delta.thinking_delta
+         tool_call_deltas = delta.tool_call_deltas
+         response_delta = delta.response_delta
+         annotations_delta = delta.annotations_delta
+         refusal_delta = delta.refusal_delta
+
+         if thinking_delta is not None:
+             new_choice = self.choices[0].model_copy(update={"delta": thinking_delta})
+             new_chunk = self.model_copy(update={"choices": [new_choice]})
+             specialized_chunks.append(
+                 ThinkingChunk.model_validate(new_chunk.model_dump())
+             )
+
+         if tool_call_deltas:
+             for delta_tool_call in tool_call_deltas:
+                 new_choice = self.choices[0].model_copy(
+                     update={"delta": delta_tool_call}
+                 )
+                 new_chunk = self.model_copy(update={"choices": [new_choice]})
+                 specialized_chunks.append(
+                     ToolCallChunk.model_validate(new_chunk.model_dump())
+                 )
+
+         if response_delta is not None:
+             new_choice = self.choices[0].model_copy(update={"delta": response_delta})
+             new_chunk = self.model_copy(update={"choices": [new_choice]})
+             specialized_chunks.append(
+                 ResponseChunk.model_validate(new_chunk.model_dump())
+             )
+
+         if annotations_delta is not None:
+             new_choice = self.choices[0].model_copy(update={"delta": annotations_delta})
+             new_chunk = self.model_copy(update={"choices": [new_choice]})
+             specialized_chunks.append(
+                 AnnotationsChunk.model_validate(new_chunk.model_dump())
+             )
+
+         if refusal_delta is not None:
+             new_choice = self.choices[0].model_copy(update={"delta": refusal_delta})
+             new_chunk = self.model_copy(update={"choices": [new_choice]})
+             specialized_chunks.append(
+                 RefusalChunk.model_validate(new_chunk.model_dump())
+             )
+
+         return specialized_chunks
+
+
+ class ResponseChunk(CompletionChunk):
+     @field_validator("choices")
+     @classmethod
+     def validate_response_chunk(
+         cls, choices: list[CompletionChunkChoice]
+     ) -> list[CompletionChunkChoice]:
+         if len(choices) != 1:
+             raise ValidationError("ResponseChunk must have exactly one choice.")
+
+         delta = choices[0].delta
+
+         if not delta.content:
+             raise ValidationError("ResponseChunk must have content in deltas.")
+
+         if (
+             delta.reasoning_content is not None
+             or delta.thinking_blocks is not None
+             or delta.tool_calls is not None
+             or delta.refusal is not None
+             or delta.annotations is not None
+         ):
+             raise ValidationError(
+                 "ResponseChunk should not have reasoning content, thinking blocks, "
+                 "tool calls, refusal, or annotations in deltas."
+             )
+
+         return choices
+
+     @property
+     def response(self) -> str:
+         assert self.choices[0].delta.content
+         return self.choices[0].delta.content
+
+
+ class ThinkingChunk(CompletionChunk):
+     @field_validator("choices")
+     @classmethod
+     def validate_thinking_chunk(
+         cls, choices: list[CompletionChunkChoice]
+     ) -> list[CompletionChunkChoice]:
+         if len(choices) != 1:
+             raise ValidationError("ThinkingChunk must have exactly one choice.")
+
+         delta = choices[0].delta
+
+         if not (delta.thinking_blocks or delta.reasoning_content):
+             raise ValidationError(
+                 "ThinkingChunk must have reasoning content or "
+                 "at least one thinking block."
+             )
+         if (
+             delta.content is not None
+             or delta.tool_calls is not None
+             or delta.refusal is not None
+             or delta.annotations is not None
+         ):
+             raise ValidationError(
+                 "ThinkingChunk should not have content, tool calls, "
+                 "refusal, or annotations in deltas."
+             )
+
+         return choices
+
+     @property
+     def thinking(self) -> str | list[ThinkingBlock | RedactedThinkingBlock]:
+         delta = self.choices[0].delta
+         if delta.reasoning_content:
+             return delta.reasoning_content
+         if delta.thinking_blocks:
+             return delta.thinking_blocks
+         raise ValueError("ThinkingChunk has no reasoning_content or thinking_blocks")
+
+
+ class ToolCallChunk(CompletionChunk):
+     @field_validator("choices")
+     @classmethod
+     def validate_tool_call_chunk(
+         cls, choices: list[CompletionChunkChoice]
+     ) -> list[CompletionChunkChoice]:
+         if len(choices) != 1:
+             raise ValidationError("ToolCallChunk must have exactly one choice.")
+
+         delta = choices[0].delta
+
+         if not delta.tool_calls:
+             raise ValidationError("ToolCallChunk must have tool calls in deltas.")
+         if len(delta.tool_calls) != 1:
+             raise ValidationError(
+                 "ToolCallChunk must have exactly one tool call in deltas."
+             )
+
+         if (
+             delta.reasoning_content is not None
+             or delta.thinking_blocks is not None
+             or delta.content is not None
+             or delta.refusal is not None
+             or delta.annotations is not None
+         ):
+             raise ValidationError(
+                 "ToolCallChunk should not have reasoning content, thinking blocks, "
+                 "content, refusal, or annotations in deltas."
+             )
+
+         return choices
+
+     @property
+     def tool_call(self) -> CompletionChunkDeltaToolCall:
+         assert self.choices[0].delta.tool_calls is not None
+         return self.choices[0].delta.tool_calls[0]
+
+
+ class AnnotationsChunk(CompletionChunk):
+     @field_validator("choices")
+     @classmethod
+     def validate_annotations_chunk(
+         cls, choices: list[CompletionChunkChoice]
+     ) -> list[CompletionChunkChoice]:
+         if len(choices) != 1:
+             raise ValidationError("AnnotationsChunk must have exactly one choice.")
+
+         delta = choices[0].delta
+
+         if not delta.annotations:
+             raise ValidationError("AnnotationsChunk must have annotations in deltas.")
+
+         if (
+             delta.reasoning_content is not None
+             or delta.thinking_blocks is not None
+             or delta.content is not None
+             or delta.tool_calls is not None
+             or delta.refusal is not None
+         ):
+             raise ValidationError(
+                 "AnnotationsChunk should not have reasoning content, thinking blocks, "
+                 "content, tool calls, or refusal in deltas."
+             )
+
+         return choices
+
+     @property
+     def annotations(self) -> list[LiteLLMAnnotation]:
+         assert self.choices[0].delta.annotations is not None
+         return self.choices[0].delta.annotations
+
+
+ class RefusalChunk(CompletionChunk):
+     @field_validator("choices")
+     @classmethod
+     def validate_refusal_chunk(
+         cls, choices: list[CompletionChunkChoice]
+     ) -> list[CompletionChunkChoice]:
+         if len(choices) != 1:
+             raise ValidationError("RefusalChunk must have exactly one choice.")
+
+         delta = choices[0].delta
+
+         if not delta.refusal:
+             raise ValidationError("RefusalChunk must have refusal in deltas.")
+
+         if (
+             delta.reasoning_content is not None
+             or delta.thinking_blocks is not None
+             or delta.content is not None
+             or delta.tool_calls is not None
+             or delta.annotations is not None
+         ):
+             raise ValidationError(
+                 "RefusalChunk should not have reasoning content, thinking blocks, "
+                 "content, tool calls, or annotations in deltas."
+             )
+
+         return choices
+
+     @property
+     def refusal(self) -> str | None:
+         return self.choices[0].delta.refusal
+

  def combine_completion_chunks(chunks: list[CompletionChunk]) -> Completion:
      if not chunks:
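For orientation (illustrative, not part of the diff): split_into_specialized fans a single-choice chunk out into typed sub-chunks, each re-validated by the corresponding class. A minimal sketch, assuming `chunk` is a CompletionChunk with exactly one choice:

    for sub in chunk.split_into_specialized():
        if isinstance(sub, ThinkingChunk):
            print("thinking:", sub.thinking)
        elif isinstance(sub, ToolCallChunk):
            print("tool call delta:", sub.tool_call)
        elif isinstance(sub, ResponseChunk):
            print("text:", sub.response)
        elif isinstance(sub, AnnotationsChunk):
            print("annotations:", sub.annotations)
        elif isinstance(sub, RefusalChunk):
            print("refusal:", sub.refusal)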
@@ -1,14 +1,28 @@
  import time
  from enum import StrEnum
- from typing import Any, Generic, Literal, TypeVar
+ from typing import Any, Generic, Literal, TypeVar, get_args
  from uuid import uuid4

  from pydantic import BaseModel, ConfigDict, Field

  from ..packet import Packet
  from .completion import Completion
- from .completion_chunk import CompletionChunk
- from .message import AssistantMessage, SystemMessage, ToolCall, ToolMessage, UserMessage
+ from .completion_chunk import (
+     AnnotationsChunk,
+     CompletionChunk,
+     RefusalChunk,
+     ResponseChunk,
+     ThinkingChunk,
+     ToolCallChunk,
+ )
+ from .message import (
+     AssistantMessage,
+     MessageBase,
+     SystemMessage,
+     ToolCall,
+     ToolMessage,
+     UserMessage,
+ )


  class EventSourceType(StrEnum):
@@ -29,69 +43,68 @@ class EventType(StrEnum):
      GEN_MSG = "gen_message"

      COMP = "completion"
+     COMP_START = "completion_start"
+     COMP_END = "completion_end"
+
      COMP_CHUNK = "completion_chunk"
+     THINK_CHUNK = "thinking_chunk"
+     RESP_CHUNK = "response_chunk"
+     TOOL_CALL_CHUNK = "tool_call_chunk"
+     ANNOT_CHUNK = "annotations_chunk"
+     REFUSAL_CHUNK = "refusal_chunk"
+
+     RESP_START = "response_start"
+     RESP_END = "response_end"
+     THINK_START = "thinking_start"
+     THINK_END = "thinking_end"
+     TOOL_CALL_START = "tool_call_start"
+     TOOL_CALL_END = "tool_call_end"
+     ANNOT_START = "annotations_start"
+     ANNOT_END = "annotations_end"
+
      LLM_ERR = "llm_error"

-     PROC_START = "processor_start"
      PACKET_OUT = "packet_output"
      PAYLOAD_OUT = "payload_output"
      PROC_FINISH = "processor_finish"
      PROC_ERR = "processor_error"
+     PROC_START = "processor_start"
+     PROC_END = "processor_end"

      WORKFLOW_RES = "workflow_result"
      RUN_RES = "run_result"

-     # COMP_THINK_CHUNK = "completion_thinking_chunk"
-     # COMP_RESP_CHUNK = "completion_response_chunk"

+ _T_co = TypeVar("_T_co", covariant=True)
+ _M_co = TypeVar("_M_co", covariant=True, bound=MessageBase)
+ _C_co = TypeVar("_C_co", covariant=True, bound=CompletionChunk)

- _T = TypeVar("_T")

-
- class Event(BaseModel, Generic[_T], frozen=True):
+ class Event(BaseModel, Generic[_T_co], frozen=True):
      type: EventType
      source: EventSourceType
      id: str = Field(default_factory=lambda: str(uuid4()))
      created: int = Field(default_factory=lambda: int(time.time()))
      proc_name: str | None = None
      call_id: str | None = None
-     data: _T
+     data: _T_co


- class CompletionEvent(Event[Completion], frozen=True):
-     type: Literal[EventType.COMP] = EventType.COMP
-     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+ class DummyEvent(Event[Any], frozen=True):
+     type: Literal[EventType.PAYLOAD_OUT] = EventType.PAYLOAD_OUT
+     source: Literal[EventSourceType.PROC] = EventSourceType.PROC
+     data: Any = None


- class CompletionChunkEvent(Event[CompletionChunk], frozen=True):
-     type: Literal[EventType.COMP_CHUNK] = EventType.COMP_CHUNK
-     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+ # Non-streamed completion events


- class LLMStreamingErrorData(BaseModel):
-     error: Exception
-     model_name: str | None = None
-     model_id: str | None = None
-
-     model_config = ConfigDict(arbitrary_types_allowed=True)
-
-
- class LLMStreamingErrorEvent(Event[LLMStreamingErrorData], frozen=True):
-     type: Literal[EventType.LLM_ERR] = EventType.LLM_ERR
+ class CompletionEvent(Event[Completion], frozen=True):
+     type: Literal[EventType.COMP] = EventType.COMP
      source: Literal[EventSourceType.LLM] = EventSourceType.LLM


- # class CompletionThinkingChunkEvent(Event[CompletionChunk], frozen=True):
- #     type: Literal[EventType.COMP_THINK_CHUNK] = EventType.COMP_THINK_CHUNK
- #     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
-
-
- # class CompletionResponseChunkEvent(Event[CompletionChunk], frozen=True):
- #     type: Literal[EventType.COMP_RESP_CHUNK] = EventType.COMP_RESP_CHUNK
- #     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
-
-
- class MessageEvent(Event[_T], Generic[_T], frozen=True):
+ class MessageEvent(Event[_M_co], Generic[_M_co], frozen=True):
      pass

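For orientation (illustrative, not part of the diff): the type parameters are now covariant, so an event parametrized with a more specific payload can be passed where a more general one is expected by a type checker. A minimal sketch, assuming AssistantMessage subclasses MessageBase:

    def describe(event: MessageEvent[MessageBase]) -> str:
        # Also accepts MessageEvent[AssistantMessage] etc., because _M_co is covariant.
        return f"{event.type}: {event.data!r}"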
@@ -120,13 +133,207 @@ class ToolCallEvent(Event[ToolCall], frozen=True):
      source: Literal[EventSourceType.AGENT] = EventSourceType.AGENT


+ # Streamed completion events
+
+ StreamedCompletionEventTypes = Literal[
+     EventType.COMP_CHUNK,
+     EventType.COMP_START,
+     EventType.COMP_END,
+     EventType.RESP_CHUNK,
+     EventType.RESP_START,
+     EventType.RESP_END,
+     EventType.THINK_CHUNK,
+     EventType.THINK_START,
+     EventType.THINK_END,
+     EventType.TOOL_CALL_CHUNK,
+     EventType.TOOL_CALL_START,
+     EventType.TOOL_CALL_END,
+     EventType.ANNOT_CHUNK,
+     EventType.ANNOT_START,
+     EventType.ANNOT_END,
+     EventType.REFUSAL_CHUNK,
+ ]
+
+
+ class CompletionChunkEvent(Event[_C_co], Generic[_C_co], frozen=True):
+     type: StreamedCompletionEventTypes = EventType.COMP_CHUNK
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     data: _C_co
+
+     def split_into_specialized(
+         self,
+     ) -> "list[CompletionChunkEvent[Any]]":
+         specialized_events: list[CompletionChunkEvent[Any]] = []
+
+         for sub_chunk in self.data.split_into_specialized():
+             if isinstance(sub_chunk, ResponseChunk):
+                 new_event = self.model_copy(
+                     update={"data": sub_chunk, "type": EventType.RESP_CHUNK}
+                 )
+                 specialized_events.append(
+                     ResponseChunkEvent.model_validate(new_event.model_dump())
+                 )
+             if isinstance(sub_chunk, ThinkingChunk):
+                 new_event = self.model_copy(
+                     update={"data": sub_chunk, "type": EventType.THINK_CHUNK}
+                 )
+                 specialized_events.append(
+                     ThinkingChunkEvent.model_validate(new_event.model_dump())
+                 )
+             if isinstance(sub_chunk, ToolCallChunk):
+                 new_event = self.model_copy(
+                     update={"data": sub_chunk, "type": EventType.TOOL_CALL_CHUNK}
+                 )
+                 specialized_events.append(
+                     ToolCallChunkEvent.model_validate(new_event.model_dump())
+                 )
+             if isinstance(sub_chunk, AnnotationsChunk):
+                 new_event = self.model_copy(
+                     update={"data": sub_chunk, "type": EventType.ANNOT_CHUNK}
+                 )
+                 specialized_events.append(
+                     AnnotationsChunkEvent.model_validate(new_event.model_dump())
+                 )
+             if isinstance(sub_chunk, RefusalChunk):
+                 new_event = self.model_copy(
+                     update={"data": sub_chunk, "type": EventType.REFUSAL_CHUNK}
+                 )
+                 specialized_events.append(
+                     RefusalChunkEvent.model_validate(new_event.model_dump())
+                 )
+
+         return specialized_events
+
+
+ class ResponseChunkEvent(CompletionChunkEvent[ResponseChunk], frozen=True):
+     type: Literal[EventType.RESP_CHUNK] = EventType.RESP_CHUNK
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+
+
+ class ThinkingChunkEvent(CompletionChunkEvent[ThinkingChunk], frozen=True):
+     type: Literal[EventType.THINK_CHUNK] = EventType.THINK_CHUNK
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+
+
+ class ToolCallChunkEvent(CompletionChunkEvent[ToolCallChunk], frozen=True):
+     type: Literal[EventType.TOOL_CALL_CHUNK] = EventType.TOOL_CALL_CHUNK
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+
+
+ class AnnotationsChunkEvent(CompletionChunkEvent[AnnotationsChunk], frozen=True):
+     type: Literal[EventType.ANNOT_CHUNK] = EventType.ANNOT_CHUNK
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+
+
+ class RefusalChunkEvent(CompletionChunkEvent[RefusalChunk], frozen=True):
+     type: Literal[EventType.REFUSAL_CHUNK] = EventType.REFUSAL_CHUNK
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+
+
+ START_END_MAP: dict[EventType, list[EventType]] = {
+     EventType.COMP_CHUNK: [EventType.COMP_START, EventType.COMP_END],
+     EventType.RESP_CHUNK: [EventType.RESP_START, EventType.RESP_END],
+     EventType.THINK_CHUNK: [EventType.THINK_START, EventType.THINK_END],
+     EventType.TOOL_CALL_CHUNK: [EventType.TOOL_CALL_START, EventType.TOOL_CALL_END],
+     EventType.ANNOT_CHUNK: [EventType.ANNOT_START, EventType.ANNOT_END],
+ }
+
+
+ class LLMStateChangeEvent(CompletionChunkEvent[_C_co], Generic[_C_co], frozen=True):
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: bool = True
+
+     @classmethod
+     def from_chunk_event(
+         cls, event: CompletionChunkEvent[CompletionChunk]
+     ) -> "LLMStateChangeEvent[_C_co]":
+         _type = get_args(cls.model_fields["type"].annotation)[0]
+         return cls(**event.model_copy(update={"type": _type}).model_dump())
+
+
+ class CompletionStartEvent(LLMStateChangeEvent[CompletionChunk], frozen=True):
+     type: Literal[EventType.COMP_START] = EventType.COMP_START
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[True] = True
+
+
+ class CompletionEndEvent(LLMStateChangeEvent[CompletionChunk], frozen=True):
+     type: Literal[EventType.COMP_END] = EventType.COMP_END
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[False] = False
+
+
+ class ResponseStartEvent(LLMStateChangeEvent[ResponseChunk], frozen=True):
+     type: Literal[EventType.RESP_START] = EventType.RESP_START
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[True] = True
+
+
+ class ResponseEndEvent(LLMStateChangeEvent[ResponseChunk], frozen=True):
+     type: Literal[EventType.RESP_END] = EventType.RESP_END
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[False] = False
+
+
+ class ThinkingStartEvent(LLMStateChangeEvent[ThinkingChunk], frozen=True):
+     type: Literal[EventType.THINK_START] = EventType.THINK_START
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[True] = True
+
+
+ class ThinkingEndEvent(LLMStateChangeEvent[ThinkingChunk], frozen=True):
+     type: Literal[EventType.THINK_END] = EventType.THINK_END
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[False] = False
+
+
+ class ToolCallStartEvent(LLMStateChangeEvent[ToolCallChunk], frozen=True):
+     type: Literal[EventType.TOOL_CALL_START] = EventType.TOOL_CALL_START
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[True] = True
+
+
+ class ToolCallEndEvent(LLMStateChangeEvent[ToolCallChunk], frozen=True):
+     type: Literal[EventType.TOOL_CALL_END] = EventType.TOOL_CALL_END
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[False] = False
+
+
+ class AnnotationsStartEvent(LLMStateChangeEvent[AnnotationsChunk], frozen=True):
+     type: Literal[EventType.ANNOT_START] = EventType.ANNOT_START
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[True] = True
+
+
+ class AnnotationsEndEvent(LLMStateChangeEvent[AnnotationsChunk], frozen=True):
+     type: Literal[EventType.ANNOT_END] = EventType.ANNOT_END
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+     start: Literal[False] = False
+
+
+ class LLMStreamingErrorData(BaseModel):
+     error: Exception
+     model_name: str | None = None
+     model_id: str | None = None
+
+     model_config = ConfigDict(arbitrary_types_allowed=True)
+
+
+ class LLMStreamingErrorEvent(Event[LLMStreamingErrorData], frozen=True):
+     type: Literal[EventType.LLM_ERR] = EventType.LLM_ERR
+     source: Literal[EventSourceType.LLM] = EventSourceType.LLM
+
+
+ # Processor events
+
+
  class ProcStartEvent(Event[None], frozen=True):
      type: Literal[EventType.PROC_START] = EventType.PROC_START
      source: Literal[EventSourceType.PROC] = EventSourceType.PROC


- class ProcFinishEvent(Event[None], frozen=True):
-     type: Literal[EventType.PROC_FINISH] = EventType.PROC_FINISH
+ class ProcEndEvent(Event[None], frozen=True):
+     type: Literal[EventType.PROC_END] = EventType.PROC_END
      source: Literal[EventSourceType.PROC] = EventSourceType.PROC

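For orientation (illustrative, not part of the diff): a raw CompletionChunkEvent can be expanded into typed per-modality chunk events, and from_chunk_event derives the paired start/end events from a chunk event of the matching type. A minimal sketch that omits the bookkeeping a real stream handler would need (e.g. emitting each start event only once), assuming `raw_event` is a CompletionChunkEvent[CompletionChunk]:

    expanded: list[CompletionChunkEvent[Any]] = []
    for specialized in raw_event.split_into_specialized():
        if isinstance(specialized, ThinkingChunkEvent):
            # Derive the matching state-change event for this chunk type.
            expanded.append(ThinkingStartEvent.from_chunk_event(specialized))
        expanded.append(specialized)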
@@ -144,16 +351,6 @@ class ProcPacketOutputEvent(Event[Packet[Any]], frozen=True):
      ] = EventSourceType.PROC


- class WorkflowResultEvent(ProcPacketOutputEvent, frozen=True):
-     type: Literal[EventType.WORKFLOW_RES] = EventType.WORKFLOW_RES
-     source: Literal[EventSourceType.WORKFLOW] = EventSourceType.WORKFLOW
-
-
- class RunResultEvent(ProcPacketOutputEvent, frozen=True):
-     type: Literal[EventType.RUN_RES] = EventType.RUN_RES
-     source: Literal[EventSourceType.RUN] = EventSourceType.RUN
-
-
  class ProcStreamingErrorData(BaseModel):
      error: Exception
      call_id: str | None = None
@@ -164,3 +361,16 @@ class ProcStreamingErrorData(BaseModel):
  class ProcStreamingErrorEvent(Event[ProcStreamingErrorData], frozen=True):
      type: Literal[EventType.PROC_ERR] = EventType.PROC_ERR
      source: Literal[EventSourceType.PROC] = EventSourceType.PROC
+
+
+ # Workflow and run events
+
+
+ class WorkflowResultEvent(ProcPacketOutputEvent, frozen=True):
+     type: Literal[EventType.WORKFLOW_RES] = EventType.WORKFLOW_RES
+     source: Literal[EventSourceType.WORKFLOW] = EventSourceType.WORKFLOW
+
+
+ class RunResultEvent(ProcPacketOutputEvent, frozen=True):
+     type: Literal[EventType.RUN_RES] = EventType.RUN_RES
+     source: Literal[EventSourceType.RUN] = EventSourceType.RUN