grasp_agents 0.5.8__py3-none-any.whl → 0.5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grasp_agents/litellm/lite_llm.py +22 -1
- grasp_agents/llm.py +17 -29
- grasp_agents/openai/openai_llm.py +11 -3
- {grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/METADATA +12 -13
- {grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/RECORD +7 -7
- {grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/WHEEL +0 -0
- {grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/licenses/LICENSE.md +0 -0
grasp_agents/litellm/lite_llm.py
CHANGED

@@ -1,5 +1,7 @@
 import logging
+from collections import defaultdict
 from collections.abc import AsyncIterator, Mapping
+from copy import deepcopy
 from typing import Any, cast

 import litellm
@@ -90,10 +92,19 @@ class LiteLLM(CloudLLM[LiteLLMSettings, LiteLLMConverters]):
             "was specified. Please provide a valid API provider or use a different "
             "model."
         )
+
+        if llm_settings is not None:
+            stream_options = llm_settings.get("stream_options") or {}
+            stream_options["include_usage"] = True
+            _llm_settings = deepcopy(llm_settings)
+            _llm_settings["stream_options"] = stream_options
+        else:
+            _llm_settings = LiteLLMSettings(stream_options={"include_usage": True})
+
         super().__init__(
             model_name=model_name,
             model_id=model_id,
-            llm_settings=llm_settings,
+            llm_settings=_llm_settings,
             converters=LiteLLMConverters(),
             tools=tools,
             response_schema=response_schema,
@@ -192,7 +203,17 @@ class LiteLLM(CloudLLM[LiteLLMSettings, LiteLLMConverters]):
         )
         stream = cast("CustomStreamWrapper", stream)

+        tc_indices: dict[int, set[int]] = defaultdict(set)
+
         async for completion_chunk in stream:
+            # Fix tool call indices to be unique within each choice
+            for n, choice in enumerate(completion_chunk.choices):
+                for tc in choice.delta.tool_calls or []:
+                    # Tool call ID is not None only when it is a new tool call
+                    if tc.id and tc.index in tc_indices[n]:
+                        tc.index = max(tc_indices[n]) + 1
+                    tc_indices[n].add(tc.index)
+
             yield completion_chunk

     def combine_completion_chunks(
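The second streaming hunk works around LiteLLM chunks that occasionally reuse a tool-call `index` within the same choice. Below is a rough, self-contained sketch of the same normalization; `ToolCallDelta`, `ChoiceDelta`, and `ChunkStub` are illustrative stand-ins for the real LiteLLM chunk types, not part of either library.

```python
from collections import defaultdict
from collections.abc import Iterable, Iterator
from dataclasses import dataclass, field


@dataclass
class ToolCallDelta:
    index: int
    id: str | None = None  # set only on the first delta of a new tool call


@dataclass
class ChoiceDelta:
    tool_calls: list[ToolCallDelta] = field(default_factory=list)


@dataclass
class ChunkStub:
    choices: list[ChoiceDelta] = field(default_factory=list)


def fix_tool_call_indices(chunks: Iterable[ChunkStub]) -> Iterator[ChunkStub]:
    """Yield chunks with tool-call indices made unique within each choice."""
    seen: dict[int, set[int]] = defaultdict(set)  # choice position -> indices used
    for chunk in chunks:
        for n, choice in enumerate(chunk.choices):
            for tc in choice.tool_calls:
                # A non-None id marks a new tool call; if its index collides
                # with one already used in this choice, move it past the max.
                if tc.id and tc.index in seen[n]:
                    tc.index = max(seen[n]) + 1
                seen[n].add(tc.index)
        yield chunk
```

Only deltas carrying an `id` (the first chunk of a call) are re-indexed; subsequent argument deltas pass through unchanged, mirroring the comments in the diff.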
grasp_agents/llm.py
CHANGED

@@ -25,7 +25,7 @@ from .typing.events import (
     AnnotationsEndEvent,
     AnnotationsStartEvent,
     CompletionChunkEvent,
-    CompletionEndEvent,
+    # CompletionEndEvent,
     CompletionEvent,
     CompletionStartEvent,
     LLMStateChangeEvent,
@@ -196,7 +196,9 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
         annotations_op_evt: AnnotationsChunkEvent | None = None
         tool_calls_op_evt: ToolCallChunkEvent | None = None

-        def _close_open_events() -> list[LLMStateChangeEvent[Any]]:
+        def _close_open_events(
+            _event: CompletionChunkEvent[CompletionChunk] | None = None,
+        ) -> list[LLMStateChangeEvent[Any]]:
             nonlocal \
                 chunk_op_evt, \
                 thinking_op_evt, \
@@ -206,26 +208,21 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):

             events: list[LLMStateChangeEvent[Any]] = []

-            if tool_calls_op_evt:
+            if not isinstance(_event, ThinkingChunkEvent) and thinking_op_evt:
+                events.append(ThinkingEndEvent.from_chunk_event(thinking_op_evt))
+                thinking_op_evt = None
+
+            if not isinstance(_event, ToolCallChunkEvent) and tool_calls_op_evt:
                 events.append(ToolCallEndEvent.from_chunk_event(tool_calls_op_evt))
+                tool_calls_op_evt = None

-            if response_op_evt:
+            if not isinstance(_event, ResponseChunkEvent) and response_op_evt:
                 events.append(ResponseEndEvent.from_chunk_event(response_op_evt))
+                response_op_evt = None

-            if thinking_op_evt:
-                events.append(ThinkingEndEvent.from_chunk_event(thinking_op_evt))
-
-            if annotations_op_evt:
+            if not isinstance(_event, AnnotationsChunkEvent) and annotations_op_evt:
                 events.append(AnnotationsEndEvent.from_chunk_event(annotations_op_evt))
-
-            if chunk_op_evt:
-                events.append(CompletionEndEvent.from_chunk_event(chunk_op_evt))
-
-            chunk_op_evt = None
-            thinking_op_evt = None
-            tool_calls_op_evt = None
-            response_op_evt = None
-            annotations_op_evt = None
+                annotations_op_evt = None

             return events

@@ -252,14 +249,14 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
                 sub_events = event.split_into_specialized()

                 for sub_event in sub_events:
+                    for close_event in _close_open_events(sub_event):
+                        yield close_event
+
                     if isinstance(sub_event, ThinkingChunkEvent):
                         if not thinking_op_evt:
                             thinking_op_evt = sub_event
                             yield ThinkingStartEvent.from_chunk_event(sub_event)
                         yield sub_event
-                    elif thinking_op_evt:
-                        yield ThinkingEndEvent.from_chunk_event(thinking_op_evt)
-                        thinking_op_evt = None

                     if isinstance(sub_event, ToolCallChunkEvent):
                         tc = sub_event.data.tool_call
@@ -273,27 +270,18 @@ class LLM(ABC, Generic[SettingsT_co, ConvertT_co]):
                             tool_calls_op_evt = sub_event
                             yield ToolCallStartEvent.from_chunk_event(sub_event)
                         yield sub_event
-                    elif tool_calls_op_evt:
-                        yield ToolCallEndEvent.from_chunk_event(tool_calls_op_evt)
-                        tool_calls_op_evt = None

                     if isinstance(sub_event, ResponseChunkEvent):
                         if not response_op_evt:
                             response_op_evt = sub_event
                             yield ResponseStartEvent.from_chunk_event(sub_event)
                         yield sub_event
-                    elif response_op_evt:
-                        yield ResponseEndEvent.from_chunk_event(response_op_evt)
-                        response_op_evt = None

                     if isinstance(sub_event, AnnotationsChunkEvent):
                         if not annotations_op_evt:
                             annotations_op_evt = sub_event
                             yield AnnotationsStartEvent.from_chunk_event(sub_event)
                         yield sub_event
-                    elif annotations_op_evt:
-                        yield AnnotationsEndEvent.from_chunk_event(annotations_op_evt)
-                        annotations_op_evt = None

                 prev_completion_id = chunk.id
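In `llm.py`, every sub-event is now routed through `_close_open_events(sub_event)`, which closes only the open streams whose kind differs from the incoming event, so end events are emitted exactly when the stream switches from one kind of chunk to another. Below is a minimal standalone sketch of that bracketing pattern, using simplified stand-in classes and string markers instead of the real `*StartEvent`/`*EndEvent` types.

```python
from collections.abc import Iterable, Iterator


class ChunkEvent: ...
class ThinkingChunk(ChunkEvent): ...
class ToolCallChunk(ChunkEvent): ...


def bracket_events(sub_events: Iterable[ChunkEvent]) -> Iterator[str]:
    """Emit start/end markers around runs of same-typed chunk events."""
    open_thinking: ThinkingChunk | None = None
    open_tool_call: ToolCallChunk | None = None

    def close_open(event: ChunkEvent | None = None) -> list[str]:
        nonlocal open_thinking, open_tool_call
        closed: list[str] = []
        # Close a stream only if the incoming event is of a different kind.
        if not isinstance(event, ThinkingChunk) and open_thinking:
            closed.append("thinking_end")
            open_thinking = None
        if not isinstance(event, ToolCallChunk) and open_tool_call:
            closed.append("tool_call_end")
            open_tool_call = None
        return closed

    for sub_event in sub_events:
        yield from close_open(sub_event)
        if isinstance(sub_event, ThinkingChunk):
            if not open_thinking:
                open_thinking = sub_event
                yield "thinking_start"
            yield "thinking_chunk"
        elif isinstance(sub_event, ToolCallChunk):
            if not open_tool_call:
                open_tool_call = sub_event
                yield "tool_call_start"
            yield "tool_call_chunk"

    # Flush anything still open once the source is exhausted.
    yield from close_open()


# [Thinking, Thinking, ToolCall] -> thinking_start, 2x thinking_chunk,
# thinking_end, tool_call_start, tool_call_chunk, tool_call_end
print(list(bracket_events([ThinkingChunk(), ThinkingChunk(), ToolCallChunk()])))
```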
grasp_agents/openai/openai_llm.py
CHANGED

@@ -127,8 +127,8 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
             provider_name, provider_model_name = model_name_parts
             if provider_name not in compat_providers_map:
                 raise ValueError(
-                    f"
-                    "
+                    f"API provider '{provider_name}' is not a supported OpenAI "
+                    f"compatible provider. Supported providers are: "
                     f"{', '.join(compat_providers_map.keys())}"
                 )
             api_provider = compat_providers_map[provider_name]
@@ -138,10 +138,18 @@ class OpenAILLM(CloudLLM[OpenAILLMSettings, OpenAIConverters]):
                 "you must provide an 'api_provider' argument."
             )

+        if llm_settings is not None:
+            stream_options = llm_settings.get("stream_options") or {}
+            stream_options["include_usage"] = True
+            _llm_settings = deepcopy(llm_settings)
+            _llm_settings["stream_options"] = stream_options
+        else:
+            _llm_settings = OpenAILLMSettings(stream_options={"include_usage": True})
+
         super().__init__(
             model_name=provider_model_name,
             model_id=model_id,
-            llm_settings=llm_settings,
+            llm_settings=_llm_settings,
             converters=OpenAIConverters(),
             tools=tools,
             response_schema=response_schema,
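Both constructors above now force usage reporting into the streaming options while copying any caller-supplied settings instead of mutating them. Here is a small illustrative version of that merge, using a plain `dict` in place of the `LiteLLMSettings` / `OpenAILLMSettings` typed dicts.

```python
from copy import deepcopy
from typing import Any


def with_forced_usage(llm_settings: dict[str, Any] | None) -> dict[str, Any]:
    """Return settings whose stream_options always request usage reporting."""
    if llm_settings is not None:
        # Merge into copies so the caller's settings are left untouched.
        stream_options = dict(llm_settings.get("stream_options") or {})
        stream_options["include_usage"] = True
        merged = deepcopy(llm_settings)
        merged["stream_options"] = stream_options
        return merged
    return {"stream_options": {"include_usage": True}}


# Caller-provided options are preserved; include_usage is layered on top.
assert with_forced_usage({"temperature": 0.2}) == {
    "temperature": 0.2,
    "stream_options": {"include_usage": True},
}
assert with_forced_usage(None) == {"stream_options": {"include_usage": True}}
```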
{grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: grasp_agents
-Version: 0.5.8
+Version: 0.5.9
 Summary: Grasp Agents Library
 License-File: LICENSE.md
 Requires-Python: <4,>=3.11.4
@@ -37,31 +37,30 @@ Description-Content-Type: text/markdown

 ## Features

-- Clean formulation of agents as generic entities over
-  - I/O schemas
-  - Memory
-  - Shared context
+- Clean formulation of agents as generic entities over I/O schemas and shared context.
 - Transparent implementation of common agentic patterns:
-  - Single-agent loops
+  - Single-agent loops
   - Workflows (static communication topology), including loops
   - Agents-as-tools for task delegation
   - Freeform A2A communication via the in-process actor model
--
--
+- Built-in parallel processing with flexible retries and rate limiting.
+- Support for all popular API providers via LiteLLM.
+- Granular event streaming with separate events for standard outputs, thinking, and tool calls.
+- Callbacks via decorators or subclassing for straightforward customisation of agentic loops and context management.

 ## Project Structure

-- `
-- `packet.py`, `packet_pool.py`: Communication management.
+- `processors/`, `llm_agent.py`: Core processor and agent class implementations.
+- `packet.py`, `packet_pool.py`, `runner.py`: Communication management.
 - `llm_policy_executor.py`: LLM actions and tool call loops.
 - `prompt_builder.py`: Tools for constructing prompts.
 - `workflow/`: Modules for defining and managing static agent workflows.
 - `llm.py`, `cloud_llm.py`: LLM integration and base LLM functionalities.
 - `openai/`: Modules specific to OpenAI API integration.
-- `
+- `litellm/`: Modules specific to LiteLLM integration.
+- `memory.py`, `llm_agent_memory.py`: Basic agent memory management.
 - `run_context.py`: Shared context management for agent runs.
 - `usage_tracker.py`: Tracking of API usage and costs.
-- `costs_dict.yaml`: Dictionary for cost tracking (update if needed).
 - `rate_limiting/`: Basic rate limiting tools.

 ## Quickstart & Installation Variants (UV Package manager)
@@ -190,7 +189,7 @@ teacher = LLMAgent[None, Problem, None](
 )

 async def main():
-    ctx = RunContext[None](
+    ctx = RunContext[None](log_messages=True)
     out = await teacher.run("start", ctx=ctx)
     print(out.payloads[0])
     print(ctx.usage_tracker.total_usage)
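The quickstart fragment in the METADATA hunk above defines an async `main()`; for completeness, it would typically be driven by a standard asyncio entry point. The surrounding imports and agent definition from the full README are assumed here, since they are not part of this diff.

```python
import asyncio

if __name__ == "__main__":
    # Run the README quickstart's async main() shown in the hunk above.
    asyncio.run(main())
```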
{grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/RECORD
CHANGED

@@ -5,7 +5,7 @@ grasp_agents/errors.py,sha256=K-22TCM1Klhsej47Rg5eTqnGiGPaXgKOpdOZZ7cPipw,4633
 grasp_agents/generics_utils.py,sha256=5Pw3I9dlnKC2VGqYKC4ZZUO3Z_vTNT-NPFovNfPkl6I,6542
 grasp_agents/grasp_logging.py,sha256=H1GYhXdQvVkmauFDZ-KDwvVmPQHZUUm9sRqX_ObK2xI,1111
 grasp_agents/http_client.py,sha256=Es8NXGDkp4Nem7g24-jW0KFGA9Hp_o2Cv3cOvjup-iU,859
-grasp_agents/llm.py,sha256=
+grasp_agents/llm.py,sha256=HjZss_1xiwFN8MCaw341c75rmv6dYIYMv6cx6EuT3fM,11713
 grasp_agents/llm_agent.py,sha256=hX3T2Y5qiTt5CrsahNo5t08HFCyBWEiurzYnFykJN9Y,13513
 grasp_agents/llm_agent_memory.py,sha256=gQwH3g4Ib3ciW2jrBiW13ttwax_pcPobH5RhXRmbc0E,1842
 grasp_agents/llm_policy_executor.py,sha256=bP-O1itfaG-dr2PC9_zLCNMI862cPRqW7vsj9tRCz-I,17062
@@ -22,7 +22,7 @@ grasp_agents/litellm/__init__.py,sha256=wD8RZBYokFDfbS9Cs7nO_zKb3w7RIVwEGj7g2D5C
 grasp_agents/litellm/completion_chunk_converters.py,sha256=J5PPxzoTBqkvKQnCoBxQxJo7Q8Xfl9cbv2GRZox8Cjo,2689
 grasp_agents/litellm/completion_converters.py,sha256=JQ7XvQwwc-biFqVMcRO61SL5VGs_SkUvAhUz1QD7EmU,2516
 grasp_agents/litellm/converters.py,sha256=3u648xjrphr9zPp12PO8fU13G4nI6_e9714Xcvh6SHc,4721
-grasp_agents/litellm/lite_llm.py,sha256=
+grasp_agents/litellm/lite_llm.py,sha256=wZpVEuwtLsX1_UDJqoY1FweJIkIHJAzf1KOn-IFfDfI,9026
 grasp_agents/litellm/message_converters.py,sha256=PsGLIJEcAeEoluHIh-utEufJ_9WeMYzXkwnR-8jyULQ,2037
 grasp_agents/openai/__init__.py,sha256=xaRnblUskiLvypIhMe4NRp9dxCG-gNR7dPiugUbPbhE,4717
 grasp_agents/openai/completion_chunk_converters.py,sha256=3MnMskdlp7ycsggc1ok1XpCHaP4Us2rLYaxImPLw1eI,2573
@@ -30,7 +30,7 @@ grasp_agents/openai/completion_converters.py,sha256=UlDeQSl0AEFUS-QI5e8rrjfmXZoj
 grasp_agents/openai/content_converters.py,sha256=sMsZhoatuL_8t0IdVaGWIVZLB4nyi1ajD61GewQmeY4,2503
 grasp_agents/openai/converters.py,sha256=CXHF2GehEHLEzjL45HywZ_1qaB3N29-lbac5oBDnLGA,4634
 grasp_agents/openai/message_converters.py,sha256=fhSN81uK51EGbLyM2-f0MvPX_UBrMy7SF3JQPo-dkXg,4686
-grasp_agents/openai/openai_llm.py,sha256=
+grasp_agents/openai/openai_llm.py,sha256=3EvfRFT_qGzLr7vy3AVby3uOwzxX15eY86ygJ7ouNn4,10940
 grasp_agents/openai/tool_converters.py,sha256=IotZvpe3xMQcBfcjUTfAsn4LtZljj3zkU9bfpcoiqPw,1177
 grasp_agents/processors/base_processor.py,sha256=j2_QY6HUjckdxfsf7yAF0xRDp_V-DNDb7hIRMRKUyWw,10685
 grasp_agents/processors/parallel_processor.py,sha256=4NH2gfGgUheZWQGKn3NEMp0uQ0kOeJRZ3Ja0a7qmqpg,7863
@@ -52,7 +52,7 @@ grasp_agents/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 grasp_agents/workflow/looped_workflow.py,sha256=WHp9O3Za2sBVfY_BLOdvPvtY20XsjZQaWSO2-oAFvOY,6806
 grasp_agents/workflow/sequential_workflow.py,sha256=e3BIWzy_2novmEWNwIteyMbrzvl1-evHrTBE3r3SpU8,3648
 grasp_agents/workflow/workflow_processor.py,sha256=yrxqAGfznmdkbP5zScKKJguxATfU4ObmA6BDR7YCBNU,3549
-grasp_agents-0.5.
-grasp_agents-0.5.
-grasp_agents-0.5.
-grasp_agents-0.5.
+grasp_agents-0.5.9.dist-info/METADATA,sha256=RrgcN5SiOYUc6LNz5hvyn0pGpgIj-yVdPbL96kRzcjg,6997
+grasp_agents-0.5.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+grasp_agents-0.5.9.dist-info/licenses/LICENSE.md,sha256=-nNNdWqGB8gJ2O-peFQ2Irshv5tW5pHKyTcYkwvH7CE,1201
+grasp_agents-0.5.9.dist-info/RECORD,,

{grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/WHEEL
File without changes

{grasp_agents-0.5.8.dist-info → grasp_agents-0.5.9.dist-info}/licenses/LICENSE.md
File without changes