hammad-python 0.0.29__py3-none-any.whl → 0.0.31__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- ham/__init__.py +10 -0
- {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
- hammad_python-0.0.31.dist-info/RECORD +6 -0
- hammad/__init__.py +0 -84
- hammad/_internal.py +0 -256
- hammad/_main.py +0 -226
- hammad/cache/__init__.py +0 -40
- hammad/cache/base_cache.py +0 -181
- hammad/cache/cache.py +0 -169
- hammad/cache/decorators.py +0 -261
- hammad/cache/file_cache.py +0 -80
- hammad/cache/ttl_cache.py +0 -74
- hammad/cli/__init__.py +0 -33
- hammad/cli/animations.py +0 -573
- hammad/cli/plugins.py +0 -867
- hammad/cli/styles/__init__.py +0 -55
- hammad/cli/styles/settings.py +0 -139
- hammad/cli/styles/types.py +0 -358
- hammad/cli/styles/utils.py +0 -634
- hammad/data/__init__.py +0 -90
- hammad/data/collections/__init__.py +0 -49
- hammad/data/collections/collection.py +0 -326
- hammad/data/collections/indexes/__init__.py +0 -37
- hammad/data/collections/indexes/qdrant/__init__.py +0 -1
- hammad/data/collections/indexes/qdrant/index.py +0 -723
- hammad/data/collections/indexes/qdrant/settings.py +0 -94
- hammad/data/collections/indexes/qdrant/utils.py +0 -210
- hammad/data/collections/indexes/tantivy/__init__.py +0 -1
- hammad/data/collections/indexes/tantivy/index.py +0 -426
- hammad/data/collections/indexes/tantivy/settings.py +0 -40
- hammad/data/collections/indexes/tantivy/utils.py +0 -176
- hammad/data/configurations/__init__.py +0 -35
- hammad/data/configurations/configuration.py +0 -564
- hammad/data/models/__init__.py +0 -50
- hammad/data/models/extensions/__init__.py +0 -4
- hammad/data/models/extensions/pydantic/__init__.py +0 -42
- hammad/data/models/extensions/pydantic/converters.py +0 -759
- hammad/data/models/fields.py +0 -546
- hammad/data/models/model.py +0 -1078
- hammad/data/models/utils.py +0 -280
- hammad/data/sql/__init__.py +0 -24
- hammad/data/sql/database.py +0 -576
- hammad/data/sql/types.py +0 -127
- hammad/data/types/__init__.py +0 -75
- hammad/data/types/file.py +0 -431
- hammad/data/types/multimodal/__init__.py +0 -36
- hammad/data/types/multimodal/audio.py +0 -200
- hammad/data/types/multimodal/image.py +0 -182
- hammad/data/types/text.py +0 -1308
- hammad/formatting/__init__.py +0 -33
- hammad/formatting/json/__init__.py +0 -27
- hammad/formatting/json/converters.py +0 -158
- hammad/formatting/text/__init__.py +0 -63
- hammad/formatting/text/converters.py +0 -723
- hammad/formatting/text/markdown.py +0 -131
- hammad/formatting/yaml/__init__.py +0 -26
- hammad/formatting/yaml/converters.py +0 -5
- hammad/genai/__init__.py +0 -217
- hammad/genai/a2a/__init__.py +0 -32
- hammad/genai/a2a/workers.py +0 -552
- hammad/genai/agents/__init__.py +0 -59
- hammad/genai/agents/agent.py +0 -1973
- hammad/genai/agents/run.py +0 -1024
- hammad/genai/agents/types/__init__.py +0 -42
- hammad/genai/agents/types/agent_context.py +0 -13
- hammad/genai/agents/types/agent_event.py +0 -128
- hammad/genai/agents/types/agent_hooks.py +0 -220
- hammad/genai/agents/types/agent_messages.py +0 -31
- hammad/genai/agents/types/agent_response.py +0 -125
- hammad/genai/agents/types/agent_stream.py +0 -327
- hammad/genai/graphs/__init__.py +0 -125
- hammad/genai/graphs/_utils.py +0 -190
- hammad/genai/graphs/base.py +0 -1828
- hammad/genai/graphs/plugins.py +0 -316
- hammad/genai/graphs/types.py +0 -638
- hammad/genai/models/__init__.py +0 -1
- hammad/genai/models/embeddings/__init__.py +0 -43
- hammad/genai/models/embeddings/model.py +0 -226
- hammad/genai/models/embeddings/run.py +0 -163
- hammad/genai/models/embeddings/types/__init__.py +0 -37
- hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
- hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
- hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
- hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
- hammad/genai/models/language/__init__.py +0 -57
- hammad/genai/models/language/model.py +0 -1098
- hammad/genai/models/language/run.py +0 -878
- hammad/genai/models/language/types/__init__.py +0 -40
- hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
- hammad/genai/models/language/types/language_model_messages.py +0 -28
- hammad/genai/models/language/types/language_model_name.py +0 -239
- hammad/genai/models/language/types/language_model_request.py +0 -127
- hammad/genai/models/language/types/language_model_response.py +0 -217
- hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
- hammad/genai/models/language/types/language_model_settings.py +0 -89
- hammad/genai/models/language/types/language_model_stream.py +0 -600
- hammad/genai/models/language/utils/__init__.py +0 -28
- hammad/genai/models/language/utils/requests.py +0 -421
- hammad/genai/models/language/utils/structured_outputs.py +0 -135
- hammad/genai/models/model_provider.py +0 -4
- hammad/genai/models/multimodal.py +0 -47
- hammad/genai/models/reranking.py +0 -26
- hammad/genai/types/__init__.py +0 -1
- hammad/genai/types/base.py +0 -215
- hammad/genai/types/history.py +0 -290
- hammad/genai/types/tools.py +0 -507
- hammad/logging/__init__.py +0 -35
- hammad/logging/decorators.py +0 -834
- hammad/logging/logger.py +0 -1018
- hammad/mcp/__init__.py +0 -53
- hammad/mcp/client/__init__.py +0 -35
- hammad/mcp/client/client.py +0 -624
- hammad/mcp/client/client_service.py +0 -400
- hammad/mcp/client/settings.py +0 -178
- hammad/mcp/servers/__init__.py +0 -26
- hammad/mcp/servers/launcher.py +0 -1161
- hammad/runtime/__init__.py +0 -32
- hammad/runtime/decorators.py +0 -142
- hammad/runtime/run.py +0 -299
- hammad/service/__init__.py +0 -49
- hammad/service/create.py +0 -527
- hammad/service/decorators.py +0 -283
- hammad/types.py +0 -288
- hammad/typing/__init__.py +0 -435
- hammad/web/__init__.py +0 -43
- hammad/web/http/__init__.py +0 -1
- hammad/web/http/client.py +0 -944
- hammad/web/models.py +0 -275
- hammad/web/openapi/__init__.py +0 -1
- hammad/web/openapi/client.py +0 -740
- hammad/web/search/__init__.py +0 -1
- hammad/web/search/client.py +0 -1023
- hammad/web/utils.py +0 -472
- hammad_python-0.0.29.dist-info/RECORD +0 -135
- {hammad → ham}/py.typed +0 -0
- {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
hammad/genai/types/base.py
DELETED
@@ -1,215 +0,0 @@
-"""hammad.genai.types.base"""
-
-from abc import ABC, abstractmethod
-from typing import (
-    Any,
-    AsyncIterator,
-    Callable,
-    Dict,
-    Iterator,
-    Generic,
-    ParamSpec,
-    TypeVar,
-    TYPE_CHECKING,
-    Union,
-)
-
-from pydantic import BaseModel, ConfigDict
-
-if TYPE_CHECKING:
-    from openai.types.chat import (
-        ChatCompletionMessageParam,
-        ChatCompletionContentPartParam,
-        ChatCompletionMessageParam,
-    )
-
-
-__all__ = [
-    "T",
-    "P",
-    "R",
-    "BaseGenAIModelEvent",
-    "BaseGenAIModelStream",
-    "BaseTool",
-    "BaseGenAIModelResponse",
-    "BaseGenAIModelSettings",
-    "BaseGenAIModel",
-]
-
-
-T = TypeVar("T")
-P = ParamSpec("P")
-R = TypeVar("R")
-
-
-class BaseGenAIModelEvent(BaseModel, Generic[T]):
-    """Base class for all events that a Generative AI model can
-    emit / return.
-
-    This is a base class used only for type hinting and incorporates
-    no fields.
-    """
-
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-    )
-
-    def to_message(self) -> "ChatCompletionMessageParam":
-        """Converts the event into a message dictionary that is compatible
-        with any message interface within the `hammad.genai` module."""
-        raise NotImplementedError(
-            f"to_message() is not implemented for {self.__class__.__name__}"
-        )
-
-    def to_content_part(self) -> "ChatCompletionContentPartParam":
-        """Converts the event into a content part dictionary that can be added
-        within chat messages."""
-        raise NotImplementedError(
-            f"to_content_part() is not implemented for {self.__class__.__name__}"
-        )
-
-
-class BaseGenAIModelStream(BaseGenAIModelEvent[T]):
-    """Base class for all streams from Generative AI models within the
-    `hammad.genai` module.
-
-    This class manages both sync and async streaming.
-    """
-
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-    )
-
-    type: str
-    """The type of the model, can be `language_model`, `embedding_model`,
-    `image_model`..."""
-
-    model: str
-    """The model that was used to generate the stream."""
-
-    stream: Union[Iterator[T], AsyncIterator[T]] | None = None
-    """The streamed content generated by the model."""
-
-    def __iter__(self) -> Iterator[T]:
-        raise NotImplementedError(
-            f"__iter__() is not implemented for {self.__class__.__name__}"
-        )
-
-    def __aiter__(self) -> AsyncIterator[T]:
-        raise NotImplementedError(
-            f"__aiter__() is not implemented for {self.__class__.__name__}"
-        )
-
-
-class BaseTool(BaseModel, Generic[P, R]):
-    """Base class for tools. All generative AI models within the
-    `hammad.genai` module can be converted into tools usable by
-    agents and language models.
-    """
-
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-    )
-
-    name: str
-    """The name of the tool."""
-
-    description: str
-    """Description of what the tool does."""
-
-    function: Callable[P, R]
-    """The Python function to execute."""
-
-    parameters_json_schema: Dict[str, Any]
-    """JSON schema for the tool's parameters."""
-
-    takes_context: bool = False
-    """Whether the function expects a context as first parameter."""
-
-    strict: bool = True
-    """Whether to enforce strict JSON schema validation."""
-
-
-class BaseGenAIModelResponse(BaseGenAIModelEvent[T]):
-    """Base class for all responses from Generative AI models within the
-    `hammad.genai` module.
-    """
-
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-    )
-
-    type: str
-    """The type of the model, can be `language_model`, `embedding_model`,
-    `image_model`..."""
-
-    model: str
-    """The model that was used to generate the response."""
-
-    output: T
-    """The final response or output generated by the model. This can be
-    anything from chat messages, embeddings, ..."""
-
-
-class BaseGenAIModelSettings(BaseModel):
-    """Represents the defaults & base for additional settings that
-    can be applied to any model within the `hammad.genai` module.
-    """
-
-
-class BaseGenAIModel(BaseModel, ABC):
-    """Base class for all Generative AI models available within the
-    `hammad.genai` module.
-
-    NOTE:
-        All models within this library use `litellm` directly as the
-        client, if you dont have a need for any of the opinionation
-        given by this package, I would recommend just using
-        `litellm` directly.
-    """
-
-    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
-
-    model: str | None = None
-    """The model to use. This is always in the `litellm` format:
-
-    `<provider>/<model>`
-
-    `openai/gpt-4o-mini`
-    `openai/text-embedding-3-small`
-    """
-
-    base_url: str | None = None
-    """A custom base URL to use for the model.
-    """
-
-    api_key: str | None = None
-    """The API key to use for the model.
-    """
-
-    settings: BaseGenAIModelSettings | None = None
-    """The default (additional) settings to use when generating outputs
-    with this model."""
-
-    @abstractmethod
-    def run(
-        self,
-        *args,
-        **kwargs,
-    ) -> Any: ...
-
-    @abstractmethod
-    async def async_run(
-        self,
-        *args,
-        **kwargs,
-    ) -> Any: ...
-
-    def as_tool(
-        self,
-        *args,
-        **kwargs,
-    ) -> BaseTool[P, R]:
-        raise NotImplementedError(
-            f"as_tool() is not implemented for {self.__class__.__name__}"
-        )
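For orientation, a minimal sketch of how these removed base classes were meant to be subclassed, assuming the definitions above are in scope. The `EchoModel` class, its `example/echo` model name, and its echo behavior are invented for illustration and were never part of hammad-python:

```python
from typing import Any


class EchoModel(BaseGenAIModel):
    """Hypothetical concrete model: returns its input unchanged."""

    model: str | None = "example/echo"  # invented <provider>/<model> id

    def run(self, *args, **kwargs) -> Any:
        # A real subclass would route this call through litellm,
        # per the NOTE in the BaseGenAIModel docstring.
        return kwargs.get("input")

    async def async_run(self, *args, **kwargs) -> Any:
        return self.run(*args, **kwargs)


print(EchoModel().run(input="hello"))  # -> hello
```

The package's own concrete models (for example the deleted hammad/genai/models/language/model.py) followed this shape, implementing `run()`/`async_run()` over litellm.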
hammad/genai/types/history.py
DELETED
@@ -1,290 +0,0 @@
-"""hammad.genai.types.history"""
-
-from typing import List, Union, overload, TYPE_CHECKING, Any, Dict
-from typing_extensions import Literal
-import json
-
-if TYPE_CHECKING:
-    try:
-        from openai.types.chat import ChatCompletionMessageParam
-    except ImportError:
-        ChatCompletionMessageParam = Any
-
-from ..models.language.types import LanguageModelResponse, LanguageModelStream
-from ..agents.types.agent_response import AgentResponse
-from ..agents.types.agent_stream import AgentStream
-
-__all__ = ["History"]
-
-
-class History:
-    """A conversation history manager that handles messages and responses.
-
-    This class provides a clean interface for managing conversation history,
-    including adding messages, responses, and rendering the complete history
-    with optional tool call formatting.
-    """
-
-    def __init__(self):
-        """Initialize an empty conversation history."""
-        self.messages: List["ChatCompletionMessageParam"] = []
-
-    @overload
-    def add(
-        self,
-        content: str,
-        *,
-        role: Literal["user", "assistant", "system", "tool"] = "user",
-    ) -> None:
-        """Add a simple text message to the history.
-
-        Args:
-            content: The message content
-            role: The role of the message sender
-        """
-        ...
-
-    @overload
-    def add(self, content: List["ChatCompletionMessageParam"]) -> None:
-        """Add a list of messages to the history.
-
-        Args:
-            content: List of ChatCompletionMessageParam messages
-        """
-        ...
-
-    def add(
-        self,
-        content: Union[str, List["ChatCompletionMessageParam"]],
-        *,
-        role: Literal["user", "assistant", "system", "tool"] = "user",
-    ) -> None:
-        """Add content to the conversation history.
-
-        Args:
-            content: Either a string message or a list of messages
-            role: The role for string messages (ignored for message lists)
-        """
-        if isinstance(content, str):
-            self.messages.append({"role": role, "content": content})
-        elif isinstance(content, list):
-            self.messages.extend(content)
-        else:
-            raise TypeError(
-                f"Expected str or List[ChatCompletionMessageParam], got {type(content)}"
-            )
-
-    def add_message(self, message: "ChatCompletionMessageParam") -> None:
-        """Add a single message to the history.
-
-        Args:
-            message: A ChatCompletionMessageParam to add
-        """
-        self.messages.append(message)
-
-    @overload
-    def add_response(
-        self, response: LanguageModelResponse, *, format_tool_calls: bool = False
-    ) -> None:
-        """Add a LanguageModelResponse to the history.
-
-        Args:
-            response: The language model response to add
-            format_tool_calls: Whether to format tool calls in the message
-        """
-        ...
-
-    @overload
-    def add_response(
-        self, response: LanguageModelStream, *, format_tool_calls: bool = False
-    ) -> None:
-        """Add a Stream response to the history after collecting it.
-
-        Args:
-            response: The stream to collect and add
-            format_tool_calls: Whether to format tool calls in the message
-        """
-        ...
-
-    @overload
-    def add_response(
-        self, response: AgentResponse, *, format_tool_calls: bool = False
-    ) -> None:
-        """Add an AgentResponse to the history.
-
-        Args:
-            response: The agent response to add
-            format_tool_calls: Whether to format tool calls in the message
-        """
-        ...
-
-    @overload
-    def add_response(
-        self, response: AgentStream, *, format_tool_calls: bool = False
-    ) -> None:
-        """Add an AgentStream to the history after collecting it.
-
-        Args:
-            response: The agent stream to collect and add
-            format_tool_calls: Whether to format tool calls in the message
-        """
-        ...
-
-    def add_response(
-        self,
-        response: Union[
-            LanguageModelResponse, LanguageModelStream, AgentResponse, AgentStream
-        ],
-        *,
-        format_tool_calls: bool = False,
-    ) -> None:
-        """Add a language model or agent response to the history.
-
-        Args:
-            response: The response or stream to add
-            format_tool_calls: Whether to format tool calls in the message content
-        """
-        if isinstance(response, (LanguageModelResponse, AgentResponse)):
-            # Direct response - convert to message
-            message = response.to_message(format_tool_calls=format_tool_calls)
-            self.messages.append(message)
-        elif isinstance(response, (LanguageModelStream, AgentStream)):
-            raise RuntimeError(
-                "Cannot add uncollected streams to history. "
-                "Please collect the stream first using stream.collect() or stream.to_response(), "
-                "then add the resulting response to history."
-            )
-        else:
-            raise TypeError(
-                f"Expected LanguageModelResponse, AgentResponse, Stream, or AgentStream, got {type(response)}"
-            )
-
-    def _summarize_content(self, content: str, max_length: int = 100) -> str:
-        """Summarize content by truncating with ellipsis if too long.
-
-        Args:
-            content: The content to summarize
-            max_length: Maximum length before truncation
-
-        Returns:
-            Summarized content
-        """
-        if len(content) <= max_length:
-            return content
-        return content[: max_length - 3] + "..."
-
-    def _format_and_merge_tool_calls(
-        self,
-        messages: List["ChatCompletionMessageParam"],
-        summarize_tool_calls: bool = True,
-    ) -> List["ChatCompletionMessageParam"]:
-        """Format tool calls and merge tool responses into assistant messages.
-
-        Args:
-            messages: List of messages to process
-            summarize_tool_calls: Whether to summarize tool call content
-
-        Returns:
-            Formatted messages with tool calls and responses merged
-        """
-        # Create a mapping of tool_call_id to tool response content
-        tool_responses: Dict[str, str] = {}
-        tool_message_indices: List[int] = []
-
-        for i, message in enumerate(messages):
-            if message.get("role") == "tool":
-                tool_call_id = message.get("tool_call_id")
-                if tool_call_id:
-                    tool_responses[tool_call_id] = message.get("content", "")
-                    tool_message_indices.append(i)
-
-        # Process messages and format tool calls
-        formatted_messages = []
-        indices_to_skip = set(tool_message_indices)
-
-        for i, message in enumerate(messages):
-            if i in indices_to_skip:
-                continue
-
-            if message.get("role") == "assistant" and message.get("tool_calls"):
-                # Create a copy of the message
-                formatted_message = dict(message)
-
-                # Format tool calls and merge responses
-                content_parts = []
-                if message.get("content"):
-                    content_parts.append(message["content"])
-
-                for tool_call in message["tool_calls"]:
-                    tool_id = tool_call.get("id")
-                    tool_name = tool_call["function"]["name"]
-                    tool_args = tool_call["function"]["arguments"]
-
-                    # Format arguments nicely
-                    try:
-                        args_dict = (
-                            json.loads(tool_args)
-                            if isinstance(tool_args, str)
-                            else tool_args
-                        )
-                        args_str = json.dumps(args_dict, indent=2)
-                    except:
-                        args_str = str(tool_args)
-
-                    # Create the tool call section
-                    tool_section = f"I called the function `{tool_name}` with arguments:\n{args_str}"
-
-                    # Add tool response if available
-                    if tool_id and tool_id in tool_responses:
-                        response_content = tool_responses[tool_id]
-                        if summarize_tool_calls and len(response_content) > 100:
-                            response_content = self._summarize_content(response_content)
-                        tool_section += f"\n\nResponse: {response_content}"
-
-                    content_parts.append(tool_section)
-
-                formatted_message["content"] = "\n\n".join(content_parts)
-                # Remove tool_calls from the formatted message
-                formatted_message.pop("tool_calls", None)
-
-                formatted_messages.append(formatted_message)
-            else:
-                formatted_messages.append(message)
-
-        return formatted_messages
-
-    def render(
-        self, *, format_tool_calls: bool = False, summarize_tool_calls: bool = True
-    ) -> List["ChatCompletionMessageParam"]:
-        """Render the conversation history as a list of messages.
-
-        Args:
-            format_tool_calls: Whether to format tool calls in assistant messages
-                for better readability and merge tool responses
-            summarize_tool_calls: Whether to summarize tool call responses when
-                format_tool_calls is True (defaults to True)
-
-        Returns:
-            List of ChatCompletionMessageParam messages
-        """
-        if format_tool_calls:
-            return self._format_and_merge_tool_calls(
-                self.messages, summarize_tool_calls
-            )
-        return self.messages.copy()
-
-    def clear(self) -> None:
-        """Clear all messages from the history."""
-        self.messages.clear()
-
-    def __len__(self) -> int:
-        """Return the number of messages in the history."""
-        return len(self.messages)
-
-    def __bool__(self) -> bool:
-        """Return True if there are messages in the history."""
-        return bool(self.messages)
-
-    def __repr__(self) -> str:
-        """Return a string representation of the history."""
-        return f"History(messages={len(self.messages)})"
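A short usage sketch for the removed `History` class, assuming the class above is importable; the message contents are invented for illustration:

```python
# Hypothetical usage of the removed History class.
history = History()
history.add("You are a helpful assistant.", role="system")
history.add("What is 2 + 2?")  # role defaults to "user"
history.add_message({"role": "assistant", "content": "4"})

assert len(history) == 3
messages = history.render()  # returns a copy of the message list
# render(format_tool_calls=True) would instead fold assistant tool calls,
# with their merged (and optionally truncated) tool responses, into
# plain readable text content.
```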