grasp_agents 0.5.8__tar.gz → 0.5.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/PKG-INFO +12 -13
  2. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/README.md +11 -12
  3. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/pyproject.toml +1 -1
  4. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/litellm/lite_llm.py +22 -1
  5. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/llm.py +17 -29
  6. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/openai_llm.py +11 -3
  7. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/.gitignore +0 -0
  8. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/LICENSE.md +0 -0
  9. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/__init__.py +0 -0
  10. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/cloud_llm.py +0 -0
  11. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/costs_dict.yaml +0 -0
  12. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/errors.py +0 -0
  13. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/generics_utils.py +0 -0
  14. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/grasp_logging.py +0 -0
  15. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/http_client.py +0 -0
  16. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/litellm/__init__.py +0 -0
  17. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/litellm/completion_chunk_converters.py +0 -0
  18. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/litellm/completion_converters.py +0 -0
  19. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/litellm/converters.py +0 -0
  20. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/litellm/message_converters.py +0 -0
  21. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/llm_agent.py +0 -0
  22. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/llm_agent_memory.py +0 -0
  23. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/llm_policy_executor.py +0 -0
  24. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/memory.py +0 -0
  25. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/__init__.py +0 -0
  26. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/completion_chunk_converters.py +0 -0
  27. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/completion_converters.py +0 -0
  28. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/content_converters.py +0 -0
  29. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/converters.py +0 -0
  30. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/message_converters.py +0 -0
  31. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/openai/tool_converters.py +0 -0
  32. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/packet.py +0 -0
  33. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/packet_pool.py +0 -0
  34. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/printer.py +0 -0
  35. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/processors/base_processor.py +0 -0
  36. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/processors/parallel_processor.py +0 -0
  37. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/processors/processor.py +0 -0
  38. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/prompt_builder.py +0 -0
  39. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/rate_limiting/__init__.py +0 -0
  40. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/rate_limiting/rate_limiter_chunked.py +0 -0
  41. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/rate_limiting/types.py +0 -0
  42. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/rate_limiting/utils.py +0 -0
  43. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/run_context.py +0 -0
  44. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/runner.py +0 -0
  45. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/__init__.py +0 -0
  46. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/completion.py +0 -0
  47. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/completion_chunk.py +0 -0
  48. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/content.py +0 -0
  49. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/converters.py +0 -0
  50. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/events.py +0 -0
  51. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/io.py +0 -0
  52. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/message.py +0 -0
  53. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/typing/tool.py +0 -0
  54. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/usage_tracker.py +0 -0
  55. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/utils.py +0 -0
  56. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/workflow/__init__.py +0 -0
  57. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/workflow/looped_workflow.py +0 -0
  58. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/workflow/sequential_workflow.py +0 -0
  59. {grasp_agents-0.5.8 → grasp_agents-0.5.9}/src/grasp_agents/workflow/workflow_processor.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: grasp_agents
3
- Version: 0.5.8
3
+ Version: 0.5.9
4
4
  Summary: Grasp Agents Library
5
5
  License-File: LICENSE.md
6
6
  Requires-Python: <4,>=3.11.4
@@ -37,31 +37,30 @@ Description-Content-Type: text/markdown
37
37
 
38
38
  ## Features
39
39
 
40
- - Clean formulation of agents as generic entities over:
41
- - I/O schemas
42
- - Memory
43
- - Shared context
40
+ - Clean formulation of agents as generic entities over I/O schemas and shared context.
44
41
  - Transparent implementation of common agentic patterns:
45
- - Single-agent loops with an optional "ReAct mode" to enforce reasoning between the tool calls
42
+ - Single-agent loops
46
43
  - Workflows (static communication topology), including loops
47
44
  - Agents-as-tools for task delegation
48
45
  - Freeform A2A communication via the in-process actor model
49
- - Parallel processing with flexible retries and rate limiting
50
- - Simple logging and usage/cost tracking
46
+ - Built-in parallel processing with flexible retries and rate limiting.
47
+ - Support for all popular API providers via LiteLLM.
48
+ - Granular event streaming with separate events for standard outputs, thinking, and tool calls.
49
+ - Callbacks via decorators or subclassing for straightforward customisation of agentic loops and context management.
51
50
 
52
51
  ## Project Structure
53
52
 
54
- - `processor.py`, `comm_processor.py`, `llm_agent.py`: Core processor and agent class implementations.
55
- - `packet.py`, `packet_pool.py`: Communication management.
53
+ - `processors/`, `llm_agent.py`: Core processor and agent class implementations.
54
+ - `packet.py`, `packet_pool.py`, `runner.py`: Communication management.
56
55
  - `llm_policy_executor.py`: LLM actions and tool call loops.
57
56
  - `prompt_builder.py`: Tools for constructing prompts.
58
57
  - `workflow/`: Modules for defining and managing static agent workflows.
59
58
  - `llm.py`, `cloud_llm.py`: LLM integration and base LLM functionalities.
60
59
  - `openai/`: Modules specific to OpenAI API integration.
61
- - `memory.py`, `llm_agent_memory.py`: Memory management.
60
+ - `litellm/`: Modules specific to LiteLLM integration.
61
+ - `memory.py`, `llm_agent_memory.py`: Basic agent memory management.
62
62
  - `run_context.py`: Shared context management for agent runs.
63
63
  - `usage_tracker.py`: Tracking of API usage and costs.
64
- - `costs_dict.yaml`: Dictionary for cost tracking (update if needed).
65
64
  - `rate_limiting/`: Basic rate limiting tools.
66
65
 
67
66
  ## Quickstart & Installation Variants (UV Package manager)
@@ -190,7 +189,7 @@ teacher = LLMAgent[None, Problem, None](
190
189
  )
191
190
 
192
191
  async def main():
193
- ctx = RunContext[None](print_messages=True)
192
+ ctx = RunContext[None](log_messages=True)
194
193
  out = await teacher.run("start", ctx=ctx)
195
194
  print(out.payloads[0])
196
195
  print(ctx.usage_tracker.total_usage)
@@ -20,31 +20,30 @@
20
20
 
21
21
  ## Features
22
22
 
23
- - Clean formulation of agents as generic entities over:
24
- - I/O schemas
25
- - Memory
26
- - Shared context
23
+ - Clean formulation of agents as generic entities over I/O schemas and shared context.
27
24
  - Transparent implementation of common agentic patterns:
28
- - Single-agent loops with an optional "ReAct mode" to enforce reasoning between the tool calls
25
+ - Single-agent loops
29
26
  - Workflows (static communication topology), including loops
30
27
  - Agents-as-tools for task delegation
31
28
  - Freeform A2A communication via the in-process actor model
32
- - Parallel processing with flexible retries and rate limiting
33
- - Simple logging and usage/cost tracking
29
+ - Built-in parallel processing with flexible retries and rate limiting.
30
+ - Support for all popular API providers via LiteLLM.
31
+ - Granular event streaming with separate events for standard outputs, thinking, and tool calls.
32
+ - Callbacks via decorators or subclassing for straightforward customisation of agentic loops and context management.
34
33
 
35
34
  ## Project Structure
36
35
 
37
- - `processor.py`, `comm_processor.py`, `llm_agent.py`: Core processor and agent class implementations.
38
- - `packet.py`, `packet_pool.py`: Communication management.
36
+ - `processors/`, `llm_agent.py`: Core processor and agent class implementations.
37
+ - `packet.py`, `packet_pool.py`, `runner.py`: Communication management.
39
38
  - `llm_policy_executor.py`: LLM actions and tool call loops.
40
39
  - `prompt_builder.py`: Tools for constructing prompts.
41
40
  - `workflow/`: Modules for defining and managing static agent workflows.
42
41
  - `llm.py`, `cloud_llm.py`: LLM integration and base LLM functionalities.
43
42
  - `openai/`: Modules specific to OpenAI API integration.
44
- - `memory.py`, `llm_agent_memory.py`: Memory management.
43
+ - `litellm/`: Modules specific to LiteLLM integration.
44
+ - `memory.py`, `llm_agent_memory.py`: Basic agent memory management.
45
45
  - `run_context.py`: Shared context management for agent runs.
46
46
  - `usage_tracker.py`: Tracking of API usage and costs.
47
- - `costs_dict.yaml`: Dictionary for cost tracking (update if needed).
48
47
  - `rate_limiting/`: Basic rate limiting tools.
49
48
 
50
49
  ## Quickstart & Installation Variants (UV Package manager)
@@ -173,7 +172,7 @@ teacher = LLMAgent[None, Problem, None](
173
172
  )
174
173
 
175
174
  async def main():
176
- ctx = RunContext[None](print_messages=True)
175
+ ctx = RunContext[None](log_messages=True)
177
176
  out = await teacher.run("start", ctx=ctx)
178
177
  print(out.payloads[0])
179
178
  print(ctx.usage_tracker.total_usage)
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "grasp_agents"
3
- version = "0.5.8"
3
+ version = "0.5.9"
4
4
  description = "Grasp Agents Library"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.11.4,<4"
@@ -1,5 +1,7 @@
1
1
  import logging
2
+ from collections import defaultdict
2
3
  from collections.abc import AsyncIterator, Mapping
4
+ from copy import deepcopy
3
5
  from typing import Any, cast
4
6
 
5
7
  import litellm
@@ -90,10 +92,19 @@ class LiteLLM(CloudLLM[LiteLLMSettings, LiteLLMConverters]):
90
92
  "was specified. Please provide a valid API provider or use a different "
91
93
  "model."
92
94
  )
95
+
96
+ if llm_settings is not None:
97
+ stream_options = llm_settings.get("stream_options") or {}
98
+ stream_options["include_usage"] = True
99
+ _llm_settings = deepcopy(llm_settings)
100
+ _llm_settings["stream_options"] = stream_options
101
+ else:
102
+ _llm_settings = LiteLLMSettings(stream_options={"include_usage": True})
103
+
93
104
  super().__init__(
94
105
  model_name=model_name,
95
106
  model_id=model_id,
96
- llm_settings=llm_settings,
107
+ llm_settings=_llm_settings,
97
108
  converters=LiteLLMConverters(),
98
109
  tools=tools,
99
110
  response_schema=response_schema,
@@ -192,7 +203,17 @@ class LiteLLM(CloudLLM[LiteLLMSettings, LiteLLMConverters]):
192
203
  )
193
204
  stream = cast("CustomStreamWrapper", stream)
194
205
 
206
+ tc_indices: dict[int, set[int]] = defaultdict(set)
207
+
195
208
  async for completion_chunk in stream:
209
+ # Fix tool call indices to be unique within each choice
210
+ for n, choice in enumerate(completion_chunk.choices):
211
+ for tc in choice.delta.tool_calls or []:
212
+ # Tool call ID is not None only when it is a new tool call
213
+ if tc.id and tc.index in tc_indices[n]:
214
+ tc.index = max(tc_indices[n]) + 1
215
+ tc_indices[n].add(tc.index)
216
+
196
217
  yield completion_chunk
197
218
 
198
219
  def combine_completion_chunks(
@@ -25,7 +25,7 @@ from .typing.events import (
25
25
  AnnotationsEndEvent,
26
26
  AnnotationsStartEvent,
27
27
  CompletionChunkEvent,
28
- CompletionEndEvent,
28
+ # CompletionEndEvent,
29
29
  CompletionEvent,
30
30
  CompletionStartEvent,
31
31
  LLMStateChangeEvent,
@@ -196,7 +196,9 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
196
196
  annotations_op_evt: AnnotationsChunkEvent | None = None
197
197
  tool_calls_op_evt: ToolCallChunkEvent | None = None
198
198
 
199
- def _close_open_events() -> list[LLMStateChangeEvent[Any]]:
199
+ def _close_open_events(
200
+ _event: CompletionChunkEvent[CompletionChunk] | None = None,
201
+ ) -> list[LLMStateChangeEvent[Any]]:
200
202
  nonlocal \
201
203
  chunk_op_evt, \
202
204
  thinking_op_evt, \
@@ -206,26 +208,21 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
206
208
 
207
209
  events: list[LLMStateChangeEvent[Any]] = []
208
210
 
209
- if tool_calls_op_evt:
211
+ if not isinstance(_event, ThinkingChunkEvent) and thinking_op_evt:
212
+ events.append(ThinkingEndEvent.from_chunk_event(thinking_op_evt))
213
+ thinking_op_evt = None
214
+
215
+ if not isinstance(_event, ToolCallChunkEvent) and tool_calls_op_evt:
210
216
  events.append(ToolCallEndEvent.from_chunk_event(tool_calls_op_evt))
217
+ tool_calls_op_evt = None
211
218
 
212
- if response_op_evt:
219
+ if not isinstance(_event, ResponseChunkEvent) and response_op_evt:
213
220
  events.append(ResponseEndEvent.from_chunk_event(response_op_evt))
221
+ response_op_evt = None
214
222
 
215
- if thinking_op_evt:
216
- events.append(ThinkingEndEvent.from_chunk_event(thinking_op_evt))
217
-
218
- if annotations_op_evt:
223
+ if not isinstance(_event, AnnotationsChunkEvent) and annotations_op_evt:
219
224
  events.append(AnnotationsEndEvent.from_chunk_event(annotations_op_evt))
220
-
221
- if chunk_op_evt:
222
- events.append(CompletionEndEvent.from_chunk_event(chunk_op_evt))
223
-
224
- chunk_op_evt = None
225
- thinking_op_evt = None
226
- tool_calls_op_evt = None
227
- response_op_evt = None
228
- annotations_op_evt = None
225
+ annotations_op_evt = None
229
226
 
230
227
  return events
231
228
 
@@ -252,14 +249,14 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
252
249
  sub_events = event.split_into_specialized()
253
250
 
254
251
  for sub_event in sub_events:
252
+ for close_event in _close_open_events(sub_event):
253
+ yield close_event
254
+
255
255
  if isinstance(sub_event, ThinkingChunkEvent):
256
256
  if not thinking_op_evt:
257
257
  thinking_op_evt = sub_event
258
258
  yield ThinkingStartEvent.from_chunk_event(sub_event)
259
259
  yield sub_event
260
- elif thinking_op_evt:
261
- yield ThinkingEndEvent.from_chunk_event(thinking_op_evt)
262
- thinking_op_evt = None
263
260
 
264
261
  if isinstance(sub_event, ToolCallChunkEvent):
265
262
  tc = sub_event.data.tool_call
@@ -273,27 +270,18 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
273
270
  tool_calls_op_evt = sub_event
274
271
  yield ToolCallStartEvent.from_chunk_event(sub_event)
275
272
  yield sub_event
276
- elif tool_calls_op_evt:
277
- yield ToolCallEndEvent.from_chunk_event(tool_calls_op_evt)
278
- tool_calls_op_evt = None
279
273
 
280
274
  if isinstance(sub_event, ResponseChunkEvent):
281
275
  if not response_op_evt:
282
276
  response_op_evt = sub_event
283
277
  yield ResponseStartEvent.from_chunk_event(sub_event)
284
278
  yield sub_event
285
- elif response_op_evt:
286
- yield ResponseEndEvent.from_chunk_event(response_op_evt)
287
- response_op_evt = None
288
279
 
289
280
  if isinstance(sub_event, AnnotationsChunkEvent):
290
281
  if not annotations_op_evt:
291
282
  annotations_op_evt = sub_event
292
283
  yield AnnotationsStartEvent.from_chunk_event(sub_event)
293
284
  yield sub_event
294
- elif annotations_op_evt:
295
- yield AnnotationsEndEvent.from_chunk_event(annotations_op_evt)
296
- annotations_op_evt = None
297
285
 
298
286
  prev_completion_id = chunk.id
299
287
 
@@ -127,8 +127,8 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
127
127
  provider_name, provider_model_name = model_name_parts
128
128
  if provider_name not in compat_providers_map:
129
129
  raise ValueError(
130
- f"OpenAI compatible API provider '{provider_name}' "
131
- "is not supported. Supported providers are: "
130
+ f"API provider '{provider_name}' is not a supported OpenAI "
131
+ f"compatible provider. Supported providers are: "
132
132
  f"{', '.join(compat_providers_map.keys())}"
133
133
  )
134
134
  api_provider = compat_providers_map[provider_name]
@@ -138,10 +138,18 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
138
138
  "you must provide an 'api_provider' argument."
139
139
  )
140
140
 
141
+ if llm_settings is not None:
142
+ stream_options = llm_settings.get("stream_options") or {}
143
+ stream_options["include_usage"] = True
144
+ _llm_settings = deepcopy(llm_settings)
145
+ _llm_settings["stream_options"] = stream_options
146
+ else:
147
+ _llm_settings = OpenAILLMSettings(stream_options={"include_usage": True})
148
+
141
149
  super().__init__(
142
150
  model_name=provider_model_name,
143
151
  model_id=model_id,
144
- llm_settings=llm_settings,
152
+ llm_settings=_llm_settings,
145
153
  converters=OpenAIConverters(),
146
154
  tools=tools,
147
155
  response_schema=response_schema,
File without changes
File without changes