grasp_agents 0.5.6__py3-none-any.whl → 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,16 +1,13 @@
 import asyncio
 import logging
 from collections.abc import AsyncIterator, Sequence
-from typing import Any, ClassVar, Generic, cast
-
+from typing import Any, ClassVar, Generic, cast

 from ..errors import PacketRoutingError
 from ..memory import MemT
 from ..packet import Packet
 from ..run_context import CtxT, RunContext
-from ..typing.events import (
-    Event, ProcPacketOutputEvent, ProcPayloadOutputEvent
-)
+from ..typing.events import Event, ProcPacketOutputEvent, ProcPayloadOutputEvent
 from ..typing.io import InT, OutT
 from ..utils import stream_concurrent
 from .base_processor import BaseProcessor, with_retry, with_retry_stream
@@ -18,7 +15,9 @@ from .base_processor import BaseProcessor, with_retry, with_retry_stream
 logger = logging.getLogger(__name__)


-class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]):
+class ParallelProcessor(
+    BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT, MemT, CtxT]
+):
     _generic_arg_to_instance_attr_map: ClassVar[dict[int, str]] = {
         0: "_in_type",
         1: "_out_type",
@@ -33,7 +32,7 @@ class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT,
         call_id: str,
         ctx: RunContext[CtxT] | None = None,
     ) -> OutT:
-        return cast(OutT, in_args)
+        return cast("OutT", in_args)

     async def _process_stream(
         self,
@@ -44,7 +43,7 @@ class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT,
         call_id: str,
         ctx: RunContext[CtxT] | None = None,
     ) -> AsyncIterator[Event[Any]]:
-        output = cast(OutT, in_args)
+        output = cast("OutT", in_args)
         yield ProcPayloadOutputEvent(data=output, proc_name=self.name, call_id=call_id)

     def _validate_parallel_recipients(
@@ -59,7 +58,7 @@ class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT,
                 message="Parallel runs must return the same recipients "
                 f"[proc_name={self.name}; call_id={call_id}]",
             )
-
+
     @with_retry
     async def _run_single(
         self,
@@ -86,7 +85,6 @@ class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT,

         return Packet(payloads=[val_output], sender=self.name, recipients=recipients)

-
     async def _run_parallel(
         self, in_args: list[InT], call_id: str, ctx: RunContext[CtxT] | None = None
     ) -> Packet[OutT]:
@@ -125,8 +123,10 @@ class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT,
         )

         if val_in_args and len(val_in_args) > 1:
-            return await self._run_parallel(in_args=val_in_args, call_id=call_id, ctx=ctx)
-
+            return await self._run_parallel(
+                in_args=val_in_args, call_id=call_id, ctx=ctx
+            )
+
         return await self._run_single(
             chat_inputs=chat_inputs,
             in_args=val_in_args[0] if val_in_args else None,
@@ -231,7 +231,9 @@ class ParallelProcessor(BaseProcessor[InT, OutT, MemT, CtxT], Generic[InT, OutT,
         )

         if val_in_args and len(val_in_args) > 1:
-            stream = self._run_parallel_stream(in_args=val_in_args, call_id=call_id, ctx=ctx)
+            stream = self._run_parallel_stream(
+                in_args=val_in_args, call_id=call_id, ctx=ctx
+            )
         else:
             stream = self._run_single_stream(
                 chat_inputs=chat_inputs,
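Both `cast(OutT, ...)` call sites above now quote the target type. Since `typing.cast` simply returns its second argument at runtime, the quoted form behaves identically while keeping the type expression as a string for the type checker instead of evaluating it eagerly. A minimal sketch of the same pattern with illustrative names (not from grasp_agents):

```python
from typing import Generic, TypeVar, cast

OutT = TypeVar("OutT")


class EchoProcessor(Generic[OutT]):
    """Illustrative stand-in for a processor whose output type is a TypeVar."""

    def process(self, in_args: object) -> OutT:
        # cast() just returns `in_args` unchanged; quoting "OutT" means the
        # target type is only interpreted by the type checker, not evaluated
        # here at runtime.
        return cast("OutT", in_args)


print(EchoProcessor[int]().process(42))  # prints 42, typed as int by the checker
```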
grasp_agents/runner.py CHANGED
@@ -6,7 +6,7 @@ from typing import Any, Generic
 from .errors import RunnerError
 from .packet import Packet, StartPacket
 from .packet_pool import END_PROC_NAME, PacketPool
-from .processors.processor import Processor
+from .processors.base_processor import BaseProcessor
 from .run_context import CtxT, RunContext
 from .typing.events import Event, ProcPacketOutputEvent, RunResultEvent
 from .typing.io import OutT
@@ -17,8 +17,8 @@ logger = logging.getLogger(__name__)
 class Runner(Generic[OutT, CtxT]):
     def __init__(
         self,
-        entry_proc: Processor[Any, Any, Any, CtxT],
-        procs: Sequence[Processor[Any, Any, Any, CtxT]],
+        entry_proc: BaseProcessor[Any, Any, Any, CtxT],
+        procs: Sequence[BaseProcessor[Any, Any, Any, CtxT]],
         ctx: RunContext[CtxT] | None = None,
     ) -> None:
         if entry_proc not in procs:
@@ -34,7 +34,6 @@ class Runner(Generic[OutT, CtxT]):
         self._entry_proc = entry_proc
         self._procs = procs
         self._ctx = ctx or RunContext[CtxT]()
-        self._packet_pool: PacketPool[CtxT] = PacketPool()

     @property
     def ctx(self) -> RunContext[CtxT]:
@@ -49,9 +48,10 @@ class Runner(Generic[OutT, CtxT]):

     async def _packet_handler(
         self,
-        proc: Processor[Any, Any, Any, CtxT],
-        pool: PacketPool[CtxT],
         packet: Packet[Any],
+        *,
+        proc: BaseProcessor[Any, Any, Any, CtxT],
+        pool: PacketPool,
         ctx: RunContext[CtxT],
         **run_kwargs: Any,
     ) -> None:
@@ -72,9 +72,10 @@ class Runner(Generic[OutT, CtxT]):

     async def _packet_handler_stream(
         self,
-        proc: Processor[Any, Any, Any, CtxT],
-        pool: PacketPool[CtxT],
         packet: Packet[Any],
+        *,
+        proc: BaseProcessor[Any, Any, Any, CtxT],
+        pool: PacketPool,
         ctx: RunContext[CtxT],
         **run_kwargs: Any,
     ) -> None:
@@ -99,18 +100,18 @@ class Runner(Generic[OutT, CtxT]):

         await pool.post(out_packet)

-    async def run(
-        self,
-        chat_input: Any = "start",
-        **run_args: Any,
-    ) -> Packet[OutT]:
-        async with PacketPool[CtxT]() as pool:
+    async def run(self, chat_input: Any = "start", **run_args: Any) -> Packet[OutT]:
+        async with PacketPool() as pool:
             for proc in self._procs:
                 pool.register_packet_handler(
                     proc_name=proc.name,
-                    handler=partial(self._packet_handler, proc, pool),
-                    ctx=self._ctx,
-                    **run_args,
+                    handler=partial(
+                        self._packet_handler,
+                        proc=proc,
+                        pool=pool,
+                        ctx=self._ctx,
+                        **run_args,
+                    ),
                 )
             await pool.post(
                 StartPacket[Any](
@@ -120,17 +121,19 @@ class Runner(Generic[OutT, CtxT]):
             return await pool.final_result()

     async def run_stream(
-        self,
-        chat_input: Any = "start",
-        **run_args: Any,
+        self, chat_input: Any = "start", **run_args: Any
     ) -> AsyncIterator[Event[Any]]:
-        async with PacketPool[CtxT]() as pool:
+        async with PacketPool() as pool:
             for proc in self._procs:
                 pool.register_packet_handler(
                     proc_name=proc.name,
-                    handler=partial(self._packet_handler_stream, proc, pool),
-                    ctx=self._ctx,
-                    **run_args,
+                    handler=partial(
+                        self._packet_handler_stream,
+                        proc=proc,
+                        pool=pool,
+                        ctx=self._ctx,
+                        **run_args,
+                    ),
                 )
             await pool.post(
                 StartPacket[Any](
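In both `run` and `run_stream`, the per-run context and extra run arguments are now baked into the handler via `functools.partial` with keyword arguments, so the pool only needs to supply the packet itself positionally. A minimal sketch of that wiring under simplified assumptions (FakePool and the handler below are illustrative stand-ins, not the actual grasp_agents classes):

```python
import asyncio
from collections.abc import Awaitable, Callable
from functools import partial
from typing import Any


class FakePool:
    """Illustrative stand-in: invokes each registered handler as `await handler(packet)`."""

    def __init__(self) -> None:
        self._handlers: dict[str, Callable[[Any], Awaitable[None]]] = {}

    def register_packet_handler(
        self, proc_name: str, handler: Callable[[Any], Awaitable[None]]
    ) -> None:
        self._handlers[proc_name] = handler

    async def post(self, proc_name: str, packet: Any) -> None:
        await self._handlers[proc_name](packet)


async def packet_handler(packet: Any, *, proc: str, ctx: dict[str, Any]) -> None:
    # Only `packet` is positional; proc/ctx arrive pre-bound via partial below,
    # mirroring the keyword-only parameters added to Runner._packet_handler.
    print(f"{proc} handled {packet!r} with ctx={ctx}")


async def main() -> None:
    pool = FakePool()
    pool.register_packet_handler(
        proc_name="echo",
        handler=partial(packet_handler, proc="echo", ctx={"run": 1}),
    )
    await pool.post("echo", {"payload": "start"})


asyncio.run(main())
```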
@@ -15,7 +15,7 @@ from openai.types.chat.chat_completion_chunk import (
 from openai.types.chat.chat_completion_token_logprob import (
     ChatCompletionTokenLogprob as OpenAITokenLogprob,
 )
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, ValidationError, field_validator

 from ..errors import CombineCompletionChunksError
 from .completion import Completion, CompletionChoice, FinishReason, Usage
@@ -38,13 +38,77 @@ class CompletionChunkDeltaToolCall(BaseModel):
 class CompletionChunkChoiceDelta(BaseModel):
     content: str | None = None
     refusal: str | None = None
-    role: Role | None
-    tool_calls: list[CompletionChunkDeltaToolCall] | None
+    role: Role | None = None
+    tool_calls: list[CompletionChunkDeltaToolCall] | None = None
     reasoning_content: str | None = None
     thinking_blocks: list[ThinkingBlock | RedactedThinkingBlock] | None = None
     annotations: list[LiteLLMAnnotation] | None = None
     provider_specific_fields: dict[str, Any] | None = None

+    @property
+    def thinking_delta(self) -> "CompletionChunkChoiceDelta | None":
+        return (
+            CompletionChunkChoiceDelta(
+                reasoning_content=self.reasoning_content,
+                thinking_blocks=self.thinking_blocks,
+                role=self.role,
+                provider_specific_fields=self.provider_specific_fields,
+            )
+            if self.reasoning_content or self.thinking_blocks
+            else None
+        )
+
+    @property
+    def tool_call_deltas(self) -> "list[CompletionChunkChoiceDelta] | None":
+        return (
+            [
+                CompletionChunkChoiceDelta(
+                    tool_calls=[tool_call],
+                    role=self.role,
+                    provider_specific_fields=self.provider_specific_fields,
+                )
+                for tool_call in self.tool_calls
+            ]
+            if self.tool_calls
+            else None
+        )
+
+    @property
+    def response_delta(self) -> "CompletionChunkChoiceDelta | None":
+        return (
+            CompletionChunkChoiceDelta(
+                content=self.content,
+                role=self.role,
+                provider_specific_fields=self.provider_specific_fields,
+            )
+            if self.content
+            else None
+        )
+
+    @property
+    def annotations_delta(self) -> "CompletionChunkChoiceDelta | None":
+        return (
+            CompletionChunkChoiceDelta(
+                annotations=self.annotations,
+                role=self.role,
+                provider_specific_fields=self.provider_specific_fields,
+            )
+            if self.annotations
+            else None
+        )
+
+    @property
+    def refusal_delta(self) -> "CompletionChunkChoiceDelta | None":
+        return (
+            CompletionChunkChoiceDelta(
+                refusal=self.refusal,
+                role=self.role,
+                provider_specific_fields=self.provider_specific_fields,
+            )
+            if self.refusal
+            else None
+        )
+


 class CompletionChunkChoice(BaseModel):
@@ -66,6 +130,241 @@ class CompletionChunk(BaseModel):
     response_ms: float | None = None
     hidden_params: dict[str, Any] | None = None

+    def split_into_specialized(
+        self,
+    ) -> "list[CompletionChunk]":
+        if len(self.choices) != 1:
+            raise ValidationError(
+                "CompletionChunk must have exactly one choice for specialization."
+            )
+        delta = self.choices[0].delta
+
+        specialized_chunks: list[CompletionChunk] = []
+
+        thinking_delta = delta.thinking_delta
+        tool_call_deltas = delta.tool_call_deltas
+        response_delta = delta.response_delta
+        annotations_delta = delta.annotations_delta
+        refusal_delta = delta.refusal_delta
+
+        if thinking_delta is not None:
+            new_choice = self.choices[0].model_copy(update={"delta": thinking_delta})
+            new_chunk = self.model_copy(update={"choices": [new_choice]})
+            specialized_chunks.append(
+                ThinkingChunk.model_validate(new_chunk.model_dump())
+            )
+
+        if tool_call_deltas:
+            for delta_tool_call in tool_call_deltas:
+                new_choice = self.choices[0].model_copy(
+                    update={"delta": delta_tool_call}
+                )
+                new_chunk = self.model_copy(update={"choices": [new_choice]})
+                specialized_chunks.append(
+                    ToolCallChunk.model_validate(new_chunk.model_dump())
+                )
+
+        if response_delta is not None:
+            new_choice = self.choices[0].model_copy(update={"delta": response_delta})
+            new_chunk = self.model_copy(update={"choices": [new_choice]})
+            specialized_chunks.append(
+                ResponseChunk.model_validate(new_chunk.model_dump())
+            )
+
+        if annotations_delta is not None:
+            new_choice = self.choices[0].model_copy(update={"delta": annotations_delta})
+            new_chunk = self.model_copy(update={"choices": [new_choice]})
+            specialized_chunks.append(
+                AnnotationsChunk.model_validate(new_chunk.model_dump())
+            )
+
+        if refusal_delta is not None:
+            new_choice = self.choices[0].model_copy(update={"delta": refusal_delta})
+            new_chunk = self.model_copy(update={"choices": [new_choice]})
+            specialized_chunks.append(
+                RefusalChunk.model_validate(new_chunk.model_dump())
+            )
+
+        return specialized_chunks
+
+
+class ResponseChunk(CompletionChunk):
+    @field_validator("choices")
+    @classmethod
+    def validate_response_chunk(
+        cls, choices: list[CompletionChunkChoice]
+    ) -> list[CompletionChunkChoice]:
+        if len(choices) != 1:
+            raise ValidationError("ResponseChunk must have exactly one choice.")
+
+        delta = choices[0].delta
+
+        if not delta.content:
+            raise ValidationError("ResponseChunk must have content in deltas.")
+
+        if (
+            delta.reasoning_content is not None
+            or delta.thinking_blocks is not None
+            or delta.tool_calls is not None
+            or delta.refusal is not None
+            or delta.annotations is not None
+        ):
+            raise ValidationError(
+                "ResponseChunk should not have reasoning content, thinking blocks, "
+                "tool calls, refusal, or annotations in deltas."
+            )
+
+        return choices
+
+    @property
+    def response(self) -> str:
+        assert self.choices[0].delta.content
+        return self.choices[0].delta.content
+
+
+class ThinkingChunk(CompletionChunk):
+    @field_validator("choices")
+    @classmethod
+    def validate_thinking_chunk(
+        cls, choices: list[CompletionChunkChoice]
+    ) -> list[CompletionChunkChoice]:
+        if len(choices) != 1:
+            raise ValidationError("ThinkingChunk must have exactly one choice.")
+
+        delta = choices[0].delta
+
+        if not (delta.thinking_blocks or delta.reasoning_content):
+            raise ValidationError(
+                "ThinkingChunk must have reasoning content or "
+                "at least one thinking block."
+            )
+        if (
+            delta.content is not None
+            or delta.tool_calls is not None
+            or delta.refusal is not None
+            or delta.annotations is not None
+        ):
+            raise ValidationError(
+                "ThinkingChunk should not have content, tool calls, "
+                "refusal, or annotations in deltas."
+            )
+
+        return choices
+
+    @property
+    def thinking(self) -> str | list[ThinkingBlock | RedactedThinkingBlock]:
+        delta = self.choices[0].delta
+        if delta.reasoning_content:
+            return delta.reasoning_content
+        if delta.thinking_blocks:
+            return delta.thinking_blocks
+        raise ValueError("ThinkingChunk has no reasoning_content or thinking_blocks")
+
+
+class ToolCallChunk(CompletionChunk):
+    @field_validator("choices")
+    @classmethod
+    def validate_tool_call_chunk(
+        cls, choices: list[CompletionChunkChoice]
+    ) -> list[CompletionChunkChoice]:
+        if len(choices) != 1:
+            raise ValidationError("ToolCallChunk must have exactly one choice.")
+
+        delta = choices[0].delta
+
+        if not delta.tool_calls:
+            raise ValidationError("ToolCallChunk must have tool calls in deltas.")
+        if len(delta.tool_calls) != 1:
+            raise ValidationError(
+                "ToolCallChunk must have exactly one tool call in deltas."
+            )
+
+        if (
+            delta.reasoning_content is not None
+            or delta.thinking_blocks is not None
+            or delta.content is not None
+            or delta.refusal is not None
+            or delta.annotations is not None
+        ):
+            raise ValidationError(
+                "ToolCallChunk should not have reasoning content, thinking blocks, "
+                "content, refusal, or annotations in deltas."
+            )
+
+        return choices
+
+    @property
+    def tool_call(self) -> CompletionChunkDeltaToolCall:
+        assert self.choices[0].delta.tool_calls is not None
+        return self.choices[0].delta.tool_calls[0]
+
+
+class AnnotationsChunk(CompletionChunk):
+    @field_validator("choices")
+    @classmethod
+    def validate_annotations_chunk(
+        cls, choices: list[CompletionChunkChoice]
+    ) -> list[CompletionChunkChoice]:
+        if len(choices) != 1:
+            raise ValidationError("AnnotationsChunk must have exactly one choice.")
+
+        delta = choices[0].delta
+
+        if not delta.annotations:
+            raise ValidationError("AnnotationsChunk must have annotations in deltas.")
+
+        if (
+            delta.reasoning_content is not None
+            or delta.thinking_blocks is not None
+            or delta.content is not None
+            or delta.tool_calls is not None
+            or delta.refusal is not None
+        ):
+            raise ValidationError(
+                "AnnotationsChunk should not have reasoning content, thinking blocks, "
+                "content, tool calls, or refusal in deltas."
+            )
+
+        return choices
+
+    @property
+    def annotations(self) -> list[LiteLLMAnnotation]:
+        assert self.choices[0].delta.annotations is not None
+        return self.choices[0].delta.annotations
+
+
+class RefusalChunk(CompletionChunk):
+    @field_validator("choices")
+    @classmethod
+    def validate_refusal_chunk(
+        cls, choices: list[CompletionChunkChoice]
+    ) -> list[CompletionChunkChoice]:
+        if len(choices) != 1:
+            raise ValidationError("RefusalChunk must have exactly one choice.")
+
+        delta = choices[0].delta
+
+        if not delta.refusal:
+            raise ValidationError("RefusalChunk must have refusal in deltas.")
+
+        if (
+            delta.reasoning_content is not None
+            or delta.thinking_blocks is not None
+            or delta.content is not None
+            or delta.tool_calls is not None
+            or delta.annotations is not None
+        ):
+            raise ValidationError(
+                "RefusalChunk should not have reasoning content, thinking blocks, "
+                "content, tool calls, or annotations in deltas."
+            )
+
+        return choices
+
+    @property
+    def refusal(self) -> str | None:
+        return self.choices[0].delta.refusal
+

 def combine_completion_chunks(chunks: list[CompletionChunk]) -> Completion:
     if not chunks:
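Taken together, `split_into_specialized` plus the validated subclasses let a consumer fan a raw streaming chunk out into typed pieces and dispatch on them. A hypothetical consumer loop under assumed imports; only the class and property names come from the diff above:

```python
from collections.abc import Iterable


def dump_chunks(stream: Iterable["CompletionChunk"]) -> None:
    # Hypothetical helper; assumes the chunk classes from the diffed module
    # above are imported into this namespace.
    for chunk in stream:
        for specialized in chunk.split_into_specialized():
            if isinstance(specialized, ThinkingChunk):
                print("thinking:", specialized.thinking)
            elif isinstance(specialized, ToolCallChunk):
                print("tool call:", specialized.tool_call)
            elif isinstance(specialized, ResponseChunk):
                print("response:", specialized.response)
            elif isinstance(specialized, AnnotationsChunk):
                print("annotations:", specialized.annotations)
            elif isinstance(specialized, RefusalChunk):
                print("refusal:", specialized.refusal)
```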