hammad-python 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/__init__.py +178 -0
- hammad/_internal.py +237 -0
- hammad/cache/__init__.py +40 -0
- hammad/cache/base_cache.py +181 -0
- hammad/cache/cache.py +169 -0
- hammad/cache/decorators.py +261 -0
- hammad/cache/file_cache.py +80 -0
- hammad/cache/ttl_cache.py +74 -0
- hammad/cli/__init__.py +35 -0
- hammad/cli/_runner.py +265 -0
- hammad/cli/animations.py +573 -0
- hammad/cli/plugins.py +836 -0
- hammad/cli/styles/__init__.py +55 -0
- hammad/cli/styles/settings.py +139 -0
- hammad/cli/styles/types.py +358 -0
- hammad/cli/styles/utils.py +626 -0
- hammad/data/__init__.py +83 -0
- hammad/data/collections/__init__.py +44 -0
- hammad/data/collections/collection.py +274 -0
- hammad/data/collections/indexes/__init__.py +37 -0
- hammad/data/collections/indexes/qdrant/__init__.py +1 -0
- hammad/data/collections/indexes/qdrant/index.py +735 -0
- hammad/data/collections/indexes/qdrant/settings.py +94 -0
- hammad/data/collections/indexes/qdrant/utils.py +220 -0
- hammad/data/collections/indexes/tantivy/__init__.py +1 -0
- hammad/data/collections/indexes/tantivy/index.py +428 -0
- hammad/data/collections/indexes/tantivy/settings.py +51 -0
- hammad/data/collections/indexes/tantivy/utils.py +200 -0
- hammad/data/configurations/__init__.py +35 -0
- hammad/data/configurations/configuration.py +564 -0
- hammad/data/models/__init__.py +55 -0
- hammad/data/models/extensions/__init__.py +4 -0
- hammad/data/models/extensions/pydantic/__init__.py +42 -0
- hammad/data/models/extensions/pydantic/converters.py +759 -0
- hammad/data/models/fields.py +546 -0
- hammad/data/models/model.py +1078 -0
- hammad/data/models/utils.py +280 -0
- hammad/data/sql/__init__.py +23 -0
- hammad/data/sql/database.py +578 -0
- hammad/data/sql/types.py +141 -0
- hammad/data/types/__init__.py +39 -0
- hammad/data/types/file.py +358 -0
- hammad/data/types/multimodal/__init__.py +24 -0
- hammad/data/types/multimodal/audio.py +96 -0
- hammad/data/types/multimodal/image.py +80 -0
- hammad/data/types/text.py +1066 -0
- hammad/formatting/__init__.py +20 -0
- hammad/formatting/json/__init__.py +27 -0
- hammad/formatting/json/converters.py +158 -0
- hammad/formatting/text/__init__.py +63 -0
- hammad/formatting/text/converters.py +723 -0
- hammad/formatting/text/markdown.py +131 -0
- hammad/formatting/yaml/__init__.py +26 -0
- hammad/formatting/yaml/converters.py +5 -0
- hammad/genai/__init__.py +78 -0
- hammad/genai/agents/__init__.py +1 -0
- hammad/genai/agents/types/__init__.py +35 -0
- hammad/genai/agents/types/history.py +277 -0
- hammad/genai/agents/types/tool.py +490 -0
- hammad/genai/embedding_models/__init__.py +41 -0
- hammad/genai/embedding_models/embedding_model.py +193 -0
- hammad/genai/embedding_models/embedding_model_name.py +77 -0
- hammad/genai/embedding_models/embedding_model_request.py +65 -0
- hammad/genai/embedding_models/embedding_model_response.py +69 -0
- hammad/genai/embedding_models/run.py +161 -0
- hammad/genai/language_models/__init__.py +35 -0
- hammad/genai/language_models/_streaming.py +622 -0
- hammad/genai/language_models/_types.py +276 -0
- hammad/genai/language_models/_utils/__init__.py +31 -0
- hammad/genai/language_models/_utils/_completions.py +131 -0
- hammad/genai/language_models/_utils/_messages.py +89 -0
- hammad/genai/language_models/_utils/_requests.py +202 -0
- hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
- hammad/genai/language_models/language_model.py +734 -0
- hammad/genai/language_models/language_model_request.py +135 -0
- hammad/genai/language_models/language_model_response.py +219 -0
- hammad/genai/language_models/language_model_response_chunk.py +53 -0
- hammad/genai/language_models/run.py +530 -0
- hammad/genai/multimodal_models.py +48 -0
- hammad/genai/rerank_models.py +26 -0
- hammad/logging/__init__.py +35 -0
- hammad/logging/decorators.py +834 -0
- hammad/logging/logger.py +954 -0
- hammad/mcp/__init__.py +50 -0
- hammad/mcp/client/__init__.py +36 -0
- hammad/mcp/client/client.py +624 -0
- hammad/mcp/client/client_service.py +400 -0
- hammad/mcp/client/settings.py +178 -0
- hammad/mcp/servers/__init__.py +25 -0
- hammad/mcp/servers/launcher.py +1161 -0
- hammad/runtime/__init__.py +32 -0
- hammad/runtime/decorators.py +142 -0
- hammad/runtime/run.py +299 -0
- hammad/service/__init__.py +49 -0
- hammad/service/create.py +527 -0
- hammad/service/decorators.py +285 -0
- hammad/typing/__init__.py +435 -0
- hammad/web/__init__.py +43 -0
- hammad/web/http/__init__.py +1 -0
- hammad/web/http/client.py +944 -0
- hammad/web/models.py +277 -0
- hammad/web/openapi/__init__.py +1 -0
- hammad/web/openapi/client.py +740 -0
- hammad/web/search/__init__.py +1 -0
- hammad/web/search/client.py +1035 -0
- hammad/web/utils.py +472 -0
- {hammad_python-0.0.15.dist-info → hammad_python-0.0.17.dist-info}/METADATA +8 -1
- hammad_python-0.0.17.dist-info/RECORD +110 -0
- hammad_python-0.0.15.dist-info/RECORD +0 -4
- {hammad_python-0.0.15.dist-info → hammad_python-0.0.17.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.15.dist-info → hammad_python-0.0.17.dist-info}/licenses/LICENSE +0 -0
"""hammad.formatting.text.markdown"""

from typing import (
    List,
    Literal,
    Optional,
)

# Public helpers exported by this module (all pure string formatters).
__all__ = (
    "markdown_bold",
    "markdown_italic",
    "markdown_code",
    "markdown_code_block",
    "markdown_heading",
    "markdown_link",
    "markdown_list_item",
    "markdown_table_row",
    "markdown_blockquote",
    "markdown_horizontal_rule",
    "markdown_table",
)
def markdown_bold(text: str) -> str:
    """Wrap *text* in double asterisks so it renders bold."""
    return "**" + text + "**"
|
27
|
+
|
28
|
+
|
29
|
+
def markdown_italic(text: str) -> str:
    """Wrap *text* in single asterisks so it renders italic."""
    return "*" + text + "*"
|
32
|
+
|
33
|
+
|
34
|
+
def markdown_code(text: str) -> str:
    """Wrap *text* in backticks so it renders as inline code."""
    return "`" + text + "`"
|
37
|
+
|
38
|
+
|
39
|
+
def markdown_code_block(text: str, language: str = "") -> str:
    """Render *text* as a fenced code block, optionally tagged with *language*."""
    fence = "```"
    return "\n".join((fence + language, text, fence))
|
42
|
+
|
43
|
+
|
44
|
+
def markdown_heading(text: str, level: int = 1) -> str:
    """Render *text* as an ATX heading; out-of-range levels fall back to 1."""
    hashes = "#" * (level if 1 <= level <= 6 else 1)
    return f"{hashes} {text}"
|
49
|
+
|
50
|
+
|
51
|
+
def markdown_link(text: str, url: str) -> str:
    """Render an inline Markdown link labelled *text* pointing at *url*."""
    return "[" + text + "](" + url + ")"
|
54
|
+
|
55
|
+
|
56
|
+
def markdown_list_item(
    text: str, level: int = 0, ordered: bool = False, index: int = 1
) -> str:
    """Render *text* as a (possibly nested) Markdown list item.

    Ordered items use ``index.`` as the bullet; unordered items use ``-``.
    Each nesting *level* adds two spaces of indentation.
    """
    bullet = f"{index}." if ordered else "-"
    return "  " * level + bullet + " " + text
|
63
|
+
|
64
|
+
|
65
|
+
def markdown_table_row(cells: List[str], is_header: bool = False) -> str:
    """Render one table row; header rows get a ``---`` separator line appended."""
    rendered = "| " + " | ".join(cells) + " |"
    if not is_header:
        return rendered
    divider = "|" + "|".join(" --- " for _ in cells) + "|"
    return f"{rendered}\n{divider}"
|
72
|
+
|
73
|
+
|
74
|
+
def markdown_blockquote(text: str, level: int = 1) -> str:
    """Prefix every line of *text* with *level* ``>`` characters."""
    quoted = []
    for line in text.split("\n"):
        quoted.append(">" * level + " " + line)
    return "\n".join(quoted)
|
79
|
+
|
80
|
+
|
81
|
+
def markdown_horizontal_rule() -> str:
    """Return the Markdown thematic-break (horizontal rule) marker."""
    return "---"
|
84
|
+
|
85
|
+
|
86
|
+
def markdown_table(
    headers: List[str],
    rows: List[List[str]],
    alignment: Optional[List[Literal["left", "center", "right"]]] = None,
) -> str:
    """
    Create a Markdown table from headers and rows.

    Args:
        headers: List of header strings
        rows: List of row data (each row is a list of strings)
        alignment: Optional list of alignments for each column

    Returns:
        Formatted Markdown table (empty string when headers is empty)
    """
    if not headers:
        return ""

    # Map an alignment keyword to its separator cell; "left" (or any
    # missing/unknown value) falls through to the plain " --- " form.
    divider_for = {"center": " :---: ", "right": " ---: "}

    lines = ["| " + " | ".join(headers) + " |"]

    dividers = []
    for col in range(len(headers)):
        align = alignment[col] if alignment and col < len(alignment) else None
        dividers.append(divider_for.get(align, " --- "))
    lines.append("|" + "|".join(dividers) + "|")

    width = len(headers)
    for row in rows:
        # Pad short rows and truncate long ones so every row matches the
        # header width.
        cells = (row + [""] * (width - len(row)))[:width]
        lines.append("| " + " | ".join(cells) + " |")

    return "\n".join(lines)
|
"""hammad.formatting.yaml

Simply extends the `msgspec.yaml` submodule."""

from typing import TYPE_CHECKING
from ..._internal import create_getattr_importer

if TYPE_CHECKING:
    # Imported only for static type checkers; at runtime these names are
    # resolved lazily via the module-level __getattr__ installed below.
    from .converters import (
        encode_yaml,
        decode_yaml,
    )


__all__ = (
    "encode_yaml",
    "decode_yaml",
)


# Lazily import the names listed in __all__ on first attribute access.
__getattr__ = create_getattr_importer(__all__)


def __dir__() -> list[str]:
    """Get the attributes of the yaml module."""
    return list(__all__)
"""hammad.genai"""

from typing import TYPE_CHECKING
from .._internal import create_getattr_importer

if TYPE_CHECKING:
    # These imports exist for static type checkers only; at runtime the same
    # names are loaded lazily through the module-level __getattr__ below.
    from .embedding_models import (
        EmbeddingModel,
        EmbeddingModelRequest,
        EmbeddingModelResponse,
        run_embedding_model,
        async_run_embedding_model
    )
    from .language_models import (
        LanguageModel,
        LanguageModelRequest,
        LanguageModelResponse,
        run_language_model,
        async_run_language_model,
    )
    from .rerank_models import (
        run_rerank_model,
        async_run_rerank_model,
    )
    from .multimodal_models import (
        run_image_generation_model,
        async_run_image_generation_model,
        run_image_edit_model,
        async_run_image_edit_model,
        run_image_variation_model,
        async_run_image_variation_model,

        run_tts_model,
        async_run_tts_model,
        run_transcription_model,
        async_run_transcription_model,
    )


__all__ = (
    # hammad.genai.embedding_models
    "EmbeddingModel",
    "EmbeddingModelRequest",
    "EmbeddingModelResponse",
    "run_embedding_model",
    "async_run_embedding_model",

    # hammad.genai.language_models
    "LanguageModel",
    "LanguageModelRequest",
    "LanguageModelResponse",
    "run_language_model",
    "async_run_language_model",

    # hammad.genai.rerank_models
    "run_rerank_model",
    "async_run_rerank_model",

    # hammad.genai.multimodal_models
    "run_image_generation_model",
    "async_run_image_generation_model",
    "run_image_edit_model",
    "async_run_image_edit_model",
    "run_image_variation_model",
    "async_run_image_variation_model",
    "run_tts_model",
    "async_run_tts_model",
    "run_transcription_model",
    "async_run_transcription_model",
)


# Lazily import the names listed in __all__ on first attribute access.
__getattr__ = create_getattr_importer(__all__)


def __dir__() -> list[str]:
    """Get the attributes of the genai module."""
    return list(__all__)
@@ -0,0 +1 @@
|
|
1
|
+
"""hammad.genai.agents"""
|
"""hammad.genai.types

Contains functional types usable with various components within
the `hammad.genai` module."""

from typing import TYPE_CHECKING
from ...._internal import create_getattr_importer


if TYPE_CHECKING:
    # Static-analysis-only imports; at runtime these names are resolved
    # lazily via the module-level __getattr__ installed below.
    from .history import (
        History,
    )
    from .tool import (
        Tool,
        ToolResponseMessage,
        function_tool,
    )


__all__ = (
    # hammad.genai.types.history
    "History",
    # hammad.genai.types.tool
    "Tool",
    "function_tool",
    "ToolResponseMessage",
)


# Lazily import the names listed in __all__ on first attribute access.
__getattr__ = create_getattr_importer(__all__)
def __dir__() -> list[str]:
    """Get the attributes of the types module.

    Returns:
        The public names from ``__all__`` as a list. ``__all__`` is a tuple,
        so it is converted here to honor the ``list[str]`` annotation and to
        match the sibling package ``__dir__`` implementations.
    """
    return list(__all__)
"""hammad.genai.types.history"""

from typing import List, Union, overload, TYPE_CHECKING, Any, Dict
from typing_extensions import Literal
import json

if TYPE_CHECKING:
    # ChatCompletionMessageParam is needed for annotations only; fall back to
    # Any when the optional `openai` dependency is not installed.
    try:
        from openai.types.chat import ChatCompletionMessageParam
    except ImportError:
        ChatCompletionMessageParam = Any

from ...language_models import LanguageModelResponse
from ...language_models._streaming import Stream, AsyncStream

__all__ = ["History"]
class History:
    """A conversation history manager that handles messages and responses.

    This class provides a clean interface for managing conversation history,
    including adding messages, responses, and rendering the complete history
    with optional tool call formatting.
    """

    def __init__(self):
        """Initialize an empty conversation history."""
        # Messages are stored in OpenAI chat-completions dict format.
        self.messages: List["ChatCompletionMessageParam"] = []

    @overload
    def add(
        self,
        content: str,
        *,
        role: Literal["user", "assistant", "system", "tool"] = "user",
    ) -> None:
        """Add a simple text message to the history.

        Args:
            content: The message content
            role: The role of the message sender
        """
        ...

    @overload
    def add(self, content: List["ChatCompletionMessageParam"]) -> None:
        """Add a list of messages to the history.

        Args:
            content: List of ChatCompletionMessageParam messages
        """
        ...

    def add(
        self,
        content: Union[str, List["ChatCompletionMessageParam"]],
        *,
        role: Literal["user", "assistant", "system", "tool"] = "user",
    ) -> None:
        """Add content to the conversation history.

        Args:
            content: Either a string message or a list of messages
            role: The role for string messages (ignored for message lists)

        Raises:
            TypeError: If content is neither a string nor a list.
        """
        if isinstance(content, str):
            self.messages.append({"role": role, "content": content})
        elif isinstance(content, list):
            self.messages.extend(content)
        else:
            raise TypeError(
                f"Expected str or List[ChatCompletionMessageParam], got {type(content)}"
            )

    def add_message(self, message: "ChatCompletionMessageParam") -> None:
        """Add a single message to the history.

        Args:
            message: A ChatCompletionMessageParam to add
        """
        self.messages.append(message)

    # NOTE: the response annotations below are forward references (strings) so
    # the annotations stay lazy; runtime behavior is unchanged.
    @overload
    def add_response(
        self,
        response: "LanguageModelResponse",
        *,
        format_tool_calls: bool = False,
    ) -> None:
        """Add a LanguageModelResponse to the history.

        Args:
            response: The language model response to add
            format_tool_calls: Whether to format tool calls in the message
        """
        ...

    @overload
    def add_response(
        self,
        response: "Stream",
        *,
        format_tool_calls: bool = False,
    ) -> None:
        """Add a Stream response to the history after collecting it.

        Args:
            response: The stream to collect and add
            format_tool_calls: Whether to format tool calls in the message
        """
        ...

    @overload
    def add_response(
        self,
        response: "AsyncStream",
        *,
        format_tool_calls: bool = False,
    ) -> None:
        """Add an AsyncStream response to the history after collecting it.

        Args:
            response: The async stream to collect and add
            format_tool_calls: Whether to format tool calls in the message
        """
        ...

    def add_response(
        self,
        response: "Union[LanguageModelResponse, Stream, AsyncStream]",
        *,
        format_tool_calls: bool = False,
    ) -> None:
        """Add a language model response to the history.

        Args:
            response: The response, stream, or async stream to add
            format_tool_calls: Whether to format tool calls in the message content

        Raises:
            RuntimeError: If an uncollected stream is passed.
            TypeError: If the response type is unsupported.
        """
        if isinstance(response, LanguageModelResponse):
            # Direct response - convert to message
            message = response.to_message(format_tool_calls=format_tool_calls)
            self.messages.append(message)
        elif isinstance(response, (Stream, AsyncStream)):
            raise RuntimeError(
                "Cannot add uncollected streams to history. "
                "Please collect the stream first using stream.collect() or stream.to_response(), "
                "then add the resulting LanguageModelResponse to history."
            )
        else:
            raise TypeError(
                f"Expected LanguageModelResponse, Stream, or AsyncStream, got {type(response)}"
            )

    def _summarize_content(self, content: str, max_length: int = 100) -> str:
        """Summarize content by truncating with ellipsis if too long.

        Args:
            content: The content to summarize
            max_length: Maximum length before truncation

        Returns:
            Summarized content (unchanged when already within max_length)
        """
        if len(content) <= max_length:
            return content
        # Reserve 3 characters for the ellipsis so the total stays max_length.
        return content[: max_length - 3] + "..."

    def _format_and_merge_tool_calls(
        self,
        messages: List["ChatCompletionMessageParam"],
        summarize_tool_calls: bool = True,
    ) -> List["ChatCompletionMessageParam"]:
        """Format tool calls and merge tool responses into assistant messages.

        Args:
            messages: List of messages to process
            summarize_tool_calls: Whether to summarize tool call content

        Returns:
            Formatted messages with tool calls and responses merged
        """
        # Map tool_call_id -> tool response content, and remember which
        # messages were tool responses so they can be dropped after merging.
        tool_responses: Dict[str, str] = {}
        tool_message_indices: List[int] = []

        for i, message in enumerate(messages):
            if message.get("role") == "tool":
                tool_call_id = message.get("tool_call_id")
                if tool_call_id:
                    # Coerce a None content value to "" so the later length
                    # check and string concatenation are safe.
                    tool_responses[tool_call_id] = message.get("content") or ""
                    tool_message_indices.append(i)

        # Process messages and format tool calls
        formatted_messages = []
        indices_to_skip = set(tool_message_indices)

        for i, message in enumerate(messages):
            if i in indices_to_skip:
                continue

            if message.get("role") == "assistant" and message.get("tool_calls"):
                # Work on a copy; never mutate the caller's message dicts.
                formatted_message = dict(message)

                # Format tool calls and merge responses
                content_parts = []
                if message.get("content"):
                    content_parts.append(message["content"])

                for tool_call in message["tool_calls"]:
                    tool_id = tool_call.get("id")
                    tool_name = tool_call["function"]["name"]
                    tool_args = tool_call["function"]["arguments"]

                    # Pretty-print the arguments; fall back to str() when the
                    # payload is not valid JSON / not JSON-serializable.
                    try:
                        args_dict = (
                            json.loads(tool_args)
                            if isinstance(tool_args, str)
                            else tool_args
                        )
                        args_str = json.dumps(args_dict, indent=2)
                    except (TypeError, ValueError):
                        args_str = str(tool_args)

                    # Create the tool call section
                    tool_section = (
                        f"I called the function `{tool_name}` with arguments:\n{args_str}"
                    )

                    # Add tool response if available
                    if tool_id and tool_id in tool_responses:
                        response_content = tool_responses[tool_id]
                        if summarize_tool_calls:
                            # _summarize_content is a no-op for short content,
                            # so no separate length check is needed here.
                            response_content = self._summarize_content(response_content)
                        tool_section += f"\n\nResponse: {response_content}"

                    content_parts.append(tool_section)

                formatted_message["content"] = "\n\n".join(content_parts)
                # The tool calls are now inlined as text; drop the raw field.
                formatted_message.pop("tool_calls", None)

                formatted_messages.append(formatted_message)
            else:
                formatted_messages.append(message)

        return formatted_messages

    def render(
        self,
        *,
        format_tool_calls: bool = False,
        summarize_tool_calls: bool = True,
    ) -> List["ChatCompletionMessageParam"]:
        """Render the conversation history as a list of messages.

        Args:
            format_tool_calls: Whether to format tool calls in assistant messages
                for better readability and merge tool responses
            summarize_tool_calls: Whether to summarize tool call responses when
                format_tool_calls is True (defaults to True)

        Returns:
            List of ChatCompletionMessageParam messages
        """
        if format_tool_calls:
            return self._format_and_merge_tool_calls(self.messages, summarize_tool_calls)
        return self.messages.copy()

    def clear(self) -> None:
        """Clear all messages from the history."""
        self.messages.clear()

    def __len__(self) -> int:
        """Return the number of messages in the history."""
        return len(self.messages)

    def __bool__(self) -> bool:
        """Return True if there are messages in the history."""
        return bool(self.messages)

    def __repr__(self) -> str:
        """Return a string representation of the history."""
        return f"History(messages={len(self.messages)})"