grasp_agents-0.4.7-py3-none-any.whl → grasp_agents-0.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- grasp_agents/cloud_llm.py +191 -224
- grasp_agents/comm_processor.py +101 -100
- grasp_agents/errors.py +69 -9
- grasp_agents/litellm/__init__.py +106 -0
- grasp_agents/litellm/completion_chunk_converters.py +68 -0
- grasp_agents/litellm/completion_converters.py +72 -0
- grasp_agents/litellm/converters.py +138 -0
- grasp_agents/litellm/lite_llm.py +210 -0
- grasp_agents/litellm/message_converters.py +66 -0
- grasp_agents/llm.py +84 -49
- grasp_agents/llm_agent.py +136 -120
- grasp_agents/llm_agent_memory.py +3 -3
- grasp_agents/llm_policy_executor.py +167 -174
- grasp_agents/memory.py +4 -0
- grasp_agents/openai/__init__.py +24 -9
- grasp_agents/openai/completion_chunk_converters.py +6 -6
- grasp_agents/openai/completion_converters.py +12 -14
- grasp_agents/openai/content_converters.py +1 -3
- grasp_agents/openai/converters.py +6 -8
- grasp_agents/openai/message_converters.py +21 -3
- grasp_agents/openai/openai_llm.py +155 -103
- grasp_agents/openai/tool_converters.py +4 -6
- grasp_agents/packet.py +5 -2
- grasp_agents/packet_pool.py +14 -13
- grasp_agents/printer.py +234 -72
- grasp_agents/processor.py +228 -88
- grasp_agents/prompt_builder.py +2 -2
- grasp_agents/run_context.py +11 -20
- grasp_agents/runner.py +42 -0
- grasp_agents/typing/completion.py +16 -9
- grasp_agents/typing/completion_chunk.py +51 -22
- grasp_agents/typing/events.py +95 -19
- grasp_agents/typing/message.py +25 -1
- grasp_agents/typing/tool.py +2 -0
- grasp_agents/usage_tracker.py +31 -37
- grasp_agents/utils.py +95 -84
- grasp_agents/workflow/looped_workflow.py +60 -11
- grasp_agents/workflow/sequential_workflow.py +43 -11
- grasp_agents/workflow/workflow_processor.py +25 -24
- {grasp_agents-0.4.7.dist-info → grasp_agents-0.5.0.dist-info}/METADATA +7 -6
- grasp_agents-0.5.0.dist-info/RECORD +57 -0
- grasp_agents-0.4.7.dist-info/RECORD +0 -50
- {grasp_agents-0.4.7.dist-info → grasp_agents-0.5.0.dist-info}/WHEEL +0 -0
- {grasp_agents-0.4.7.dist-info → grasp_agents-0.5.0.dist-info}/licenses/LICENSE.md +0 -0
grasp_agents/openai/__init__.py
CHANGED
@@ -3,8 +3,11 @@
 from openai._streaming import (
     AsyncStream as OpenAIAsyncStream,  # type: ignore[import] # noqa: PLC2701
 )
-from openai.types import CompletionUsage as
+from openai.types import CompletionUsage as OpenAIUsage
 from openai.types.chat.chat_completion import ChatCompletion as OpenAICompletion
+from openai.types.chat.chat_completion import (
+    Choice as OpenAIChoice,
+)
 from openai.types.chat.chat_completion import (
     ChoiceLogprobs as OpenAIChoiceLogprobs,
 )
@@ -14,21 +17,18 @@ from openai.types.chat.chat_completion_assistant_message_param import (
 from openai.types.chat.chat_completion_chunk import (
     ChatCompletionChunk as OpenAICompletionChunk,
 )
-from openai.types.chat.chat_completion_chunk import (
-    Choice as CompletionChunkChoice,
-)
 from openai.types.chat.chat_completion_chunk import (
     Choice as OpenAIChunkChoice,
 )
-from openai.types.chat.chat_completion_chunk import (
-    ChoiceDelta as CompletionChunkChoiceDelta,
-)
 from openai.types.chat.chat_completion_chunk import (
     ChoiceDelta as OpenAIChunkChoiceDelta,
 )
 from openai.types.chat.chat_completion_chunk import (
     ChoiceDeltaToolCall as OpenAIChunkChoiceDeltaToolCall,
 )
+from openai.types.chat.chat_completion_chunk import (
+    ChoiceLogprobs as OpenAIChunkChoiceLogprobs,
+)
 from openai.types.chat.chat_completion_content_part_image_param import (
     ChatCompletionContentPartImageParam as OpenAIContentPartImageParam,
 )
@@ -48,7 +48,13 @@ from openai.types.chat.chat_completion_function_message_param import (
     ChatCompletionFunctionMessageParam as OpenAIFunctionMessageParam,
 )
 from openai.types.chat.chat_completion_message import (
-
+    Annotation as OpenAIAnnotation,
+)
+from openai.types.chat.chat_completion_message import (
+    AnnotationURLCitation as OpenAIAnnotationURLCitation,
+)
+from openai.types.chat.chat_completion_message import (
+    ChatCompletionMessage as OpenAICompletionMessage,
 )
 from openai.types.chat.chat_completion_message_param import (
     ChatCompletionMessageParam as OpenAIMessageParam,
@@ -86,15 +92,24 @@ from openai.types.chat.chat_completion_tool_param import (
 from openai.types.chat.chat_completion_user_message_param import (
     ChatCompletionUserMessageParam as OpenAIUserMessageParam,
 )
+from openai.types.chat.completion_create_params import (
+    WebSearchOptions as OpenAIWebSearchOptions,
+)
 from openai.types.chat.parsed_chat_completion import (
     ParsedChatCompletion as OpenAIParsedCompletion,
 )
 from openai.types.chat.parsed_chat_completion import (
-    ParsedChatCompletionMessage as
+    ParsedChatCompletionMessage as OpenAIParsedCompletionMessage,
 )
 from openai.types.chat.parsed_chat_completion import (
     ParsedChoice as OpenAIParsedChoice,
 )
+from openai.types.shared_params import (
+    ResponseFormatJSONObject as OpenAIResponseFormatJSONObject,
+)
+from openai.types.shared_params import (
+    ResponseFormatText as OpenAIResponseFormatText,
+)
 from openai.types.shared_params.function_definition import (
     FunctionDefinition as OpenAIFunctionDefinition,
 )
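In practical terms, 0.5.0 widens the set of OpenAI SDK types that grasp_agents.openai re-exports under OpenAI*-prefixed aliases (usage, message, annotation, response-format, and web-search types), so the converter modules below can import them from the package-local namespace instead of the SDK's module layout. A minimal, illustrative sketch of consuming one of the new aliases (the helper function is not part of the package):

from grasp_agents.openai import OpenAIUsage  # alias for openai.types.CompletionUsage


def describe_usage(usage: OpenAIUsage | None) -> str:
    # Purely illustrative helper: OpenAIUsage carries the SDK's token counters.
    if usage is None:
        return "no usage reported"
    return f"{usage.prompt_tokens} prompt + {usage.completion_tokens} completion tokens"


print(describe_usage(OpenAIUsage(prompt_tokens=12, completion_tokens=34, total_tokens=46)))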
grasp_agents/openai/completion_chunk_converters.py
CHANGED
@@ -1,3 +1,5 @@
+from typing import cast
+
 from ..errors import CompletionError
 from ..typing.completion_chunk import (
     CompletionChunk,
@@ -5,7 +7,8 @@ from ..typing.completion_chunk import (
     CompletionChunkChoiceDelta,
     CompletionChunkDeltaToolCall,
 )
-from . import
+from ..typing.message import Role
+from . import OpenAICompletionChunk
 from .completion_converters import from_api_completion_usage


@@ -21,7 +24,6 @@ def from_api_completion_chunk(
     choices: list[CompletionChunkChoice] = []

     for api_choice in api_completion_chunk.choices:
-        index = api_choice.index
         finish_reason = api_choice.finish_reason

         if api_choice.delta is None:  # type: ignore
@@ -38,7 +40,7 @@ def from_api_completion_chunk(
         delta = CompletionChunkChoiceDelta(
             content=api_choice.delta.content,
             refusal=api_choice.delta.refusal,
-            role=api_choice.delta.role,
+            role=cast("Role", api_choice.delta.role),
             tool_calls=[
                 CompletionChunkDeltaToolCall(
                     id=tool_call.id,
@@ -52,11 +54,9 @@ def from_api_completion_chunk(
         )

         choice = CompletionChunkChoice(
-            index=index,
+            index=api_choice.index,
             delta=delta,
             finish_reason=finish_reason,
-            # OpenAI logprobs have identical, but separately defined, types for
-            # completion chunks and completions
             logprobs=api_choice.logprobs,
         )

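For context on the cast("Role", ...) added above: typing.cast performs no runtime conversion or validation; it only tells the type checker to treat the SDK's loosely typed delta role as the package's Role type. A standalone sketch of the pattern (the Literal shape of Role is an assumption for illustration, not taken from the package):

from typing import Literal, cast

Role = Literal["system", "user", "assistant", "tool"]  # assumed shape, for illustration only

api_role: str | None = "assistant"  # what an OpenAI chunk delta exposes
role = cast("Role", api_role)       # static narrowing only: no runtime check or conversion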
grasp_agents/openai/completion_converters.py
CHANGED
@@ -1,9 +1,10 @@
+from ..errors import CompletionError
 from ..typing.completion import Completion, CompletionChoice, Usage
-from . import OpenAICompletion,
+from . import OpenAICompletion, OpenAIUsage
 from .message_converters import from_api_assistant_message


-def from_api_completion_usage(api_usage:
+def from_api_completion_usage(api_usage: OpenAIUsage) -> Usage:
     reasoning_tokens = None
     cached_tokens = None

@@ -27,42 +28,39 @@ def from_api_completion(
     api_completion: OpenAICompletion, name: str | None = None
 ) -> Completion:
     choices: list[CompletionChoice] = []
-    usage: Usage | None = None

     if api_completion.choices is None:  # type: ignore
         # Some providers return None for the choices when there is an error
         # TODO: add custom error types
-        raise
+        raise CompletionError(
             f"Completion API error: {getattr(api_completion, 'error', None)}"
         )
     for api_choice in api_completion.choices:
-        index = api_choice.index
         finish_reason = api_choice.finish_reason

         # Some providers return None for the message when finish_reason is other than "stop"
         if api_choice.message is None:  # type: ignore
-            raise
+            raise CompletionError(
                 f"API returned None for message with finish_reason: {finish_reason}"
             )

-        # usage = from_api_completion_usage(api_usage) if api_usage else None
-
-        usage = (
-            from_api_completion_usage(api_completion.usage)
-            if api_completion.usage
-            else None
-        )
         message = from_api_assistant_message(api_choice.message, name=name)

         choices.append(
             CompletionChoice(
-                index=index,
+                index=api_choice.index,
                 message=message,
                 finish_reason=finish_reason,
                 logprobs=api_choice.logprobs,
             )
         )

+    usage = (
+        from_api_completion_usage(api_completion.usage)
+        if api_completion.usage
+        else None
+    )
+
     return Completion(
         id=api_completion.id,
         created=api_completion.created,
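The main behavioural change above is that usage is now derived once per completion, after the choice loop, rather than being recomputed inside it, and the error paths now raise the package's CompletionError. For reference, a hedged sketch of the usage fields this conversion deals with, based on the OpenAI SDK's CompletionUsage schema (the helper and its return shape are illustrative, not the package's Usage model):

from openai.types import CompletionUsage as OpenAIUsage


def summarize_usage(api_usage: OpenAIUsage) -> dict[str, int | None]:
    # Reasoning and cached token counts live in optional *_tokens_details sub-objects.
    completion_details = api_usage.completion_tokens_details
    prompt_details = api_usage.prompt_tokens_details
    return {
        "input_tokens": api_usage.prompt_tokens,
        "output_tokens": api_usage.completion_tokens,
        "reasoning_tokens": completion_details.reasoning_tokens if completion_details else None,
        "cached_tokens": prompt_details.cached_tokens if prompt_details else None,
    }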
grasp_agents/openai/content_converters.py
CHANGED
@@ -56,9 +56,7 @@ def from_api_content(
     return Content(parts=content_parts)


-def to_api_content(
-    content: Content,
-) -> Iterable[OpenAIContentPartParam]:
+def to_api_content(content: Content) -> Iterable[OpenAIContentPartParam]:
     api_content: list[OpenAIContentPartParam] = []
     for content_part in content.parts:
         api_content_part: OpenAIContentPartParam
grasp_agents/openai/converters.py
CHANGED
@@ -13,13 +13,13 @@ from . import (
     OpenAIAssistantMessageParam,
     OpenAICompletion,
     OpenAICompletionChunk,
-
+    OpenAICompletionMessage,
     OpenAIContentPartParam,
-    OpenAIMessage,
     OpenAISystemMessageParam,
     OpenAIToolChoiceOptionParam,
     OpenAIToolMessageParam,
     OpenAIToolParam,
+    OpenAIUsage,
     OpenAIUserMessageParam,
 )
 from .completion_chunk_converters import from_api_completion_chunk
@@ -74,12 +74,12 @@ class OpenAIConverters(Converters):
         return to_api_assistant_message(assistant_message, **kwargs)

     @staticmethod
-    def from_completion_usage(raw_usage:
+    def from_completion_usage(raw_usage: OpenAIUsage, **kwargs: Any) -> Usage:
         return from_api_completion_usage(raw_usage, **kwargs)

     @staticmethod
     def from_assistant_message(
-        raw_message:
+        raw_message: OpenAICompletionMessage, name: str | None = None, **kwargs: Any
     ) -> AssistantMessage:
         return from_api_assistant_message(raw_message, name=name, **kwargs)

@@ -96,10 +96,8 @@ class OpenAIConverters(Converters):
         return from_api_tool_message(raw_message, name=name, **kwargs)

     @staticmethod
-    def to_tool(
-        tool
-    ) -> OpenAIToolParam:
-        return to_api_tool(tool, strict=strict, **kwargs)
+    def to_tool(tool: BaseTool[BaseModel, Any, Any], **kwargs: Any) -> OpenAIToolParam:
+        return to_api_tool(tool, **kwargs)

     @staticmethod
     def to_tool_choice(
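The typed signatures above also make the static converter API easier to call directly; note that to_tool no longer declares a strict parameter (the old body referenced an undefined strict variable) and simply forwards extra options via **kwargs. A small usage sketch, assuming the module path grasp_agents.openai.converters from the file list above:

from openai.types import CompletionUsage as OpenAIUsage

from grasp_agents.openai.converters import OpenAIConverters

api_usage = OpenAIUsage(prompt_tokens=12, completion_tokens=34, total_tokens=46)
usage = OpenAIConverters.from_completion_usage(api_usage)  # -> grasp_agents Usage model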
grasp_agents/openai/message_converters.py
CHANGED
@@ -1,5 +1,10 @@
 from typing import TypeAlias

+from litellm.types.llms.openai import (
+    ChatCompletionAnnotation,
+    ChatCompletionAnnotationURLCitation,
+)
+
 from ..typing.content import Content
 from ..typing.message import (
     AssistantMessage,
@@ -10,9 +15,9 @@ from ..typing.message import (
 from ..typing.tool import ToolCall
 from . import (
     OpenAIAssistantMessageParam,
+    OpenAICompletionMessage,
     OpenAIDeveloperMessageParam,
     OpenAIFunctionMessageParam,
-    OpenAIMessage,
     OpenAISystemMessageParam,
     OpenAIToolCallFunction,
     OpenAIToolCallParam,
@@ -55,7 +60,7 @@ def to_api_user_message(message: UserMessage) -> OpenAIUserMessageParam:


 def from_api_assistant_message(
-    api_message:
+    api_message: OpenAICompletionMessage, name: str | None = None
 ) -> AssistantMessage:
     tool_calls = None
     if api_message.tool_calls is not None:
@@ -68,10 +73,23 @@ def from_api_assistant_message(
             for tool_call in api_message.tool_calls
         ]

+    annotations = None
+    if api_message.annotations is not None:
+        annotations = [
+            ChatCompletionAnnotation(
+                type="url_citation",
+                url_citation=ChatCompletionAnnotationURLCitation(
+                    **api_annotation.url_citation.model_dump()
+                ),
+            )
+            for api_annotation in api_message.annotations
+        ]
+
     return AssistantMessage(
         content=api_message.content,
         tool_calls=tool_calls,
         refusal=api_message.refusal,
+        annotations=annotations,
         name=name,
     )

@@ -102,7 +120,7 @@ def to_api_assistant_message(
     if message.refusal is not None:
         api_message["refusal"] = message.refusal

-    # TODO: hack
+    # TODO: avoid this hack
     if message.content is None:
         # Some API providers return None in the generated content without errors,
         # even though None in the input content is not accepted.
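The annotation handling added above re-packs OpenAI URL-citation annotations into litellm's annotation types by dumping the pydantic url_citation model into the corresponding TypedDict. A self-contained sketch of that conversion step in isolation (the sample citation values are made up):

from litellm.types.llms.openai import (
    ChatCompletionAnnotation,
    ChatCompletionAnnotationURLCitation,
)
from openai.types.chat.chat_completion_message import (
    Annotation as OpenAIAnnotation,
    AnnotationURLCitation as OpenAIAnnotationURLCitation,
)

# A URL-citation annotation as returned by the OpenAI SDK (sample values).
api_annotation = OpenAIAnnotation(
    type="url_citation",
    url_citation=OpenAIAnnotationURLCitation(
        start_index=0, end_index=12, title="Example page", url="https://example.com"
    ),
)

# Re-pack it into litellm's annotation shape, mirroring the new converter code.
annotation = ChatCompletionAnnotation(
    type="url_citation",
    url_citation=ChatCompletionAnnotationURLCitation(
        **api_annotation.url_citation.model_dump()
    ),
)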