hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. hammad/__init__.py +177 -0
  2. hammad/{performance/imports.py → _internal.py} +7 -1
  3. hammad/cache/__init__.py +1 -1
  4. hammad/cli/__init__.py +3 -1
  5. hammad/cli/_runner.py +265 -0
  6. hammad/cli/animations.py +1 -1
  7. hammad/cli/plugins.py +133 -78
  8. hammad/cli/styles/__init__.py +1 -1
  9. hammad/cli/styles/utils.py +149 -3
  10. hammad/data/__init__.py +56 -29
  11. hammad/data/collections/__init__.py +27 -17
  12. hammad/data/collections/collection.py +205 -383
  13. hammad/data/collections/indexes/__init__.py +37 -0
  14. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  15. hammad/data/collections/indexes/qdrant/index.py +735 -0
  16. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  17. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  18. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  19. hammad/data/collections/indexes/tantivy/index.py +428 -0
  20. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  21. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  22. hammad/data/configurations/__init__.py +2 -2
  23. hammad/data/configurations/configuration.py +2 -2
  24. hammad/data/models/__init__.py +20 -9
  25. hammad/data/models/extensions/__init__.py +4 -0
  26. hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
  27. hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
  28. hammad/data/models/{base/fields.py → fields.py} +1 -1
  29. hammad/data/models/{base/model.py → model.py} +1 -1
  30. hammad/data/models/{base/utils.py → utils.py} +1 -1
  31. hammad/data/sql/__init__.py +23 -0
  32. hammad/data/sql/database.py +578 -0
  33. hammad/data/sql/types.py +141 -0
  34. hammad/data/types/__init__.py +1 -3
  35. hammad/data/types/file.py +3 -3
  36. hammad/data/types/multimodal/__init__.py +2 -2
  37. hammad/data/types/multimodal/audio.py +2 -2
  38. hammad/data/types/multimodal/image.py +2 -2
  39. hammad/formatting/__init__.py +9 -27
  40. hammad/formatting/json/__init__.py +8 -2
  41. hammad/formatting/json/converters.py +7 -1
  42. hammad/formatting/text/__init__.py +1 -1
  43. hammad/formatting/yaml/__init__.py +1 -1
  44. hammad/genai/__init__.py +78 -0
  45. hammad/genai/agents/__init__.py +1 -0
  46. hammad/genai/agents/types/__init__.py +35 -0
  47. hammad/genai/agents/types/history.py +277 -0
  48. hammad/genai/agents/types/tool.py +490 -0
  49. hammad/genai/embedding_models/__init__.py +41 -0
  50. hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
  51. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  52. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  53. hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
  54. hammad/genai/embedding_models/run.py +161 -0
  55. hammad/genai/language_models/__init__.py +35 -0
  56. hammad/genai/language_models/_streaming.py +622 -0
  57. hammad/genai/language_models/_types.py +276 -0
  58. hammad/genai/language_models/_utils/__init__.py +31 -0
  59. hammad/genai/language_models/_utils/_completions.py +131 -0
  60. hammad/genai/language_models/_utils/_messages.py +89 -0
  61. hammad/genai/language_models/_utils/_requests.py +202 -0
  62. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  63. hammad/genai/language_models/language_model.py +734 -0
  64. hammad/genai/language_models/language_model_request.py +135 -0
  65. hammad/genai/language_models/language_model_response.py +219 -0
  66. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  67. hammad/genai/language_models/run.py +530 -0
  68. hammad/genai/multimodal_models.py +48 -0
  69. hammad/genai/rerank_models.py +26 -0
  70. hammad/logging/__init__.py +1 -1
  71. hammad/logging/decorators.py +1 -1
  72. hammad/logging/logger.py +2 -2
  73. hammad/mcp/__init__.py +1 -1
  74. hammad/mcp/client/__init__.py +35 -0
  75. hammad/mcp/client/client.py +105 -4
  76. hammad/mcp/client/client_service.py +10 -3
  77. hammad/mcp/servers/__init__.py +24 -0
  78. hammad/{performance/runtime → runtime}/__init__.py +2 -2
  79. hammad/{performance/runtime → runtime}/decorators.py +1 -1
  80. hammad/{performance/runtime → runtime}/run.py +1 -1
  81. hammad/service/__init__.py +1 -1
  82. hammad/service/create.py +3 -8
  83. hammad/service/decorators.py +8 -8
  84. hammad/typing/__init__.py +28 -0
  85. hammad/web/__init__.py +3 -3
  86. hammad/web/http/client.py +1 -1
  87. hammad/web/models.py +53 -21
  88. hammad/web/search/client.py +99 -52
  89. hammad/web/utils.py +13 -13
  90. hammad_python-0.0.16.dist-info/METADATA +191 -0
  91. hammad_python-0.0.16.dist-info/RECORD +110 -0
  92. hammad/ai/__init__.py +0 -1
  93. hammad/ai/_utils.py +0 -142
  94. hammad/ai/completions/__init__.py +0 -45
  95. hammad/ai/completions/client.py +0 -684
  96. hammad/ai/completions/create.py +0 -710
  97. hammad/ai/completions/settings.py +0 -100
  98. hammad/ai/completions/types.py +0 -792
  99. hammad/ai/completions/utils.py +0 -486
  100. hammad/ai/embeddings/__init__.py +0 -35
  101. hammad/ai/embeddings/client/__init__.py +0 -1
  102. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  103. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  104. hammad/ai/embeddings/create.py +0 -159
  105. hammad/data/collections/base_collection.py +0 -58
  106. hammad/data/collections/searchable_collection.py +0 -556
  107. hammad/data/collections/vector_collection.py +0 -596
  108. hammad/data/databases/__init__.py +0 -21
  109. hammad/data/databases/database.py +0 -902
  110. hammad/data/models/base/__init__.py +0 -35
  111. hammad/data/models/pydantic/models/__init__.py +0 -28
  112. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  113. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  114. hammad/data/models/pydantic/models/fast_model.py +0 -318
  115. hammad/data/models/pydantic/models/function_model.py +0 -176
  116. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  117. hammad/performance/__init__.py +0 -36
  118. hammad/py.typed +0 -0
  119. hammad_python-0.0.14.dist-info/METADATA +0 -70
  120. hammad_python-0.0.14.dist-info/RECORD +0 -99
  121. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  122. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
hammad/data/types/__init__.py CHANGED
@@ -5,7 +5,7 @@ by users as bases as well as for type hints. These objects define simple
 interfaces for various types of common objects."""
 
 from typing import TYPE_CHECKING
-from ...performance.imports import create_getattr_importer
+from ..._internal import create_getattr_importer
 
 
 if TYPE_CHECKING:
@@ -24,10 +24,8 @@ __all__ = (
     # hammad.data.types.text
     "BaseText",
     "Text",
-
     # hammad.data.types.file
     "File",
-
     # hammad.data.types.multimodal
     "Audio",
     "Image",
hammad/data/types/file.py CHANGED
@@ -1,4 +1,4 @@
-"""hammad.types.file"""
+"""hammad.data.types.file"""
 
 from pathlib import Path
 import httpx
@@ -6,8 +6,8 @@ from typing import Any, Self
 import mimetypes
 from urllib.parse import urlparse
 
-from ..models.base.model import Model
-from ..models.base.fields import field
+from ..models.model import Model
+from ..models.fields import field
 
 __all__ = ("File", "FileSource")
 
hammad/data/types/multimodal/__init__.py CHANGED
@@ -1,10 +1,10 @@
-"""hammad.multimodal
+"""hammad.types.multimodal
 
 Contains types and model like objects for working with various
 types of multimodal data."""
 
 from typing import TYPE_CHECKING
-from ....performance.imports import create_getattr_importer
+from ...._internal import create_getattr_importer
 
 if TYPE_CHECKING:
     from .image import Image
hammad/data/types/multimodal/audio.py CHANGED
@@ -1,10 +1,10 @@
-"""hammad.data.types.files.audio"""
+"""hammad.data.types.multimodal.audio"""
 
 import httpx
 from typing import Self
 
 from ...types.file import File, FileSource
-from ...models.base.fields import field
+from ...models.fields import field
 
 __all__ = ("Audio",)
 
hammad/data/types/multimodal/image.py CHANGED
@@ -1,10 +1,10 @@
-"""hammad.data.types.files.image"""
+"""hammad.data.types.multimodal.image"""
 
 import httpx
 from typing import Self
 
 from ...types.file import File, FileSource
-from ...models.base.fields import field
+from ...models.fields import field
 
 __all__ = ("Image",)
 
hammad/formatting/__init__.py CHANGED
@@ -1,38 +1,20 @@
-"""hammad.formatting
-
-Contains resources for working with various data structures and formats
-such as JSON, YAML, and text / markdown formatting."""
+"""hammad.formatting"""
 
 from typing import TYPE_CHECKING
-from ..performance.imports import create_getattr_importer
-
+from .._internal import create_getattr_importer
 
 if TYPE_CHECKING:
-    from .json import (
-        convert_to_json_schema,
-        encode_json,
-        decode_json,
-    )
-    from .yaml import (
-        encode_yaml,
-        decode_yaml,
-    )
-
+    from . import json
+    from . import text
+    from . import yaml
 
 __all__ = (
-    # hammad.formatting.json
-    "convert_to_json_schema",
-    "encode_json",
-    "decode_json",
-    # hammad.formatting.yaml
-    "encode_yaml",
-    "decode_yaml",
+    "json",
+    "text",
+    "yaml",
 )
 
-
 __getattr__ = create_getattr_importer(__all__)
 
-
 def __dir__() -> list[str]:
-    """Get the attributes of the formatting module."""
-    return list(__all__)
+    return list(__all__)
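Editorial note on the recurring `__init__.py` change above: each package swaps `from ..performance.imports import create_getattr_importer` for `from .._internal import create_getattr_importer` and assigns the result to the module-level `__getattr__`, i.e. the PEP 562 lazy-export hook. The sketch below illustrates the pattern with a hypothetical `__init__.py`; the real factory lives in `hammad/_internal.py`, which is not shown in this diff, so its exact behavior is assumed.

    # Hypothetical __init__.py sketch of the PEP 562 lazy-export pattern;
    # the real create_getattr_importer in hammad._internal may differ.
    from typing import TYPE_CHECKING
    import importlib

    if TYPE_CHECKING:
        from . import json, text, yaml  # visible to type checkers only

    __all__ = ("json", "text", "yaml")

    def __getattr__(name: str):
        # Import an exported submodule lazily, on first attribute access.
        if name in __all__:
            return importlib.import_module(f".{name}", __name__)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    def __dir__() -> list[str]:
        return list(__all__)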
hammad/formatting/json/__init__.py CHANGED
@@ -1,16 +1,22 @@
 """hammad.formatting.json"""
 
 from typing import TYPE_CHECKING
-from ...performance.imports import create_getattr_importer
+from ..._internal import create_getattr_importer
 
 if TYPE_CHECKING:
     from .converters import (
         convert_to_json_schema,
+        convert_to_json,
         encode_json,
         decode_json,
     )
 
-__all__ = ("convert_to_json_schema", "encode_json", "decode_json")
+__all__ = (
+    "convert_to_json_schema",
+    "convert_to_json",
+    "encode_json",
+    "decode_json",
+)
 
 
 __getattr__ = create_getattr_importer(__all__)
hammad/formatting/json/converters.py CHANGED
@@ -1,4 +1,4 @@
-"""hammad.formatting.json.utils
+"""hammad.formatting.json.converters
 
 Contains various utility functions used when working with JSON data."""
 
@@ -150,3 +150,9 @@ def convert_to_json_schema(t: Any) -> dict:
 
     # Default to string representation of type
     return {"type": get_type_description(t)}
+
+
+def convert_to_json(
+    target : Any,
+) -> str:
+    return encode_json(target).decode()
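The new `convert_to_json` helper is a thin wrapper that encodes a value with `encode_json` and decodes the resulting bytes to `str`. A usage sketch, assuming `encode_json`/`decode_json` are the msgspec-backed helpers exported from this module and that `decode_json` accepts text input:

    from hammad.formatting.json import convert_to_json, decode_json

    payload = {"model": "gpt-4o-mini", "temperature": 0.7}
    as_text = convert_to_json(payload)      # JSON text such as '{"model":"gpt-4o-mini","temperature":0.7}'
    assert decode_json(as_text) == payload  # assumes decode_json accepts str input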
hammad/formatting/text/__init__.py CHANGED
@@ -3,7 +3,7 @@
 Contains resources for working with text / markdown formatting."""
 
 from typing import TYPE_CHECKING
-from ...performance.imports import create_getattr_importer
+from ..._internal import create_getattr_importer
 
 if TYPE_CHECKING:
     from .converters import (
hammad/formatting/yaml/__init__.py CHANGED
@@ -3,7 +3,7 @@
 Simply extends the `msgspec.yaml` submodule."""
 
 from typing import TYPE_CHECKING
-from ...performance.imports import create_getattr_importer
+from ..._internal import create_getattr_importer
 
 if TYPE_CHECKING:
     from .converters import (
hammad/genai/__init__.py ADDED
@@ -0,0 +1,78 @@
+"""hammad.genai"""
+
+from typing import TYPE_CHECKING
+from .._internal import create_getattr_importer
+
+if TYPE_CHECKING:
+    from .embedding_models import (
+        EmbeddingModel,
+        EmbeddingModelRequest,
+        EmbeddingModelResponse,
+        run_embedding_model,
+        async_run_embedding_model
+    )
+    from .language_models import (
+        LanguageModel,
+        LanguageModelRequest,
+        LanguageModelResponse,
+        run_language_model,
+        async_run_language_model,
+    )
+    from .rerank_models import (
+        run_rerank_model,
+        async_run_rerank_model,
+    )
+    from .multimodal_models import (
+        run_image_generation_model,
+        async_run_image_generation_model,
+        run_image_edit_model,
+        async_run_image_edit_model,
+        run_image_variation_model,
+        async_run_image_variation_model,
+
+        run_tts_model,
+        async_run_tts_model,
+        run_transcription_model,
+        async_run_transcription_model,
+    )
+
+
+__all__ = (
+    # hammad.genai.embedding_models
+    "EmbeddingModel",
+    "EmbeddingModelRequest",
+    "EmbeddingModelResponse",
+    "run_embedding_model",
+    "async_run_embedding_model",
+
+    # hammad.genai.language_models
+    "LanguageModel",
+    "LanguageModelRequest",
+    "LanguageModelResponse",
+    "run_language_model",
+    "async_run_language_model",
+
+    # hammad.genai.rerank_models
+    "run_rerank_model",
+    "async_run_rerank_model",
+
+    # hammad.genai.multimodal_models
+    "run_image_generation_model",
+    "async_run_image_generation_model",
+    "run_image_edit_model",
+    "async_run_image_edit_model",
+    "run_image_variation_model",
+    "async_run_image_variation_model",
+    "run_tts_model",
+    "async_run_tts_model",
+    "run_transcription_model",
+    "async_run_transcription_model",
+)
+
+
+__getattr__ = create_getattr_importer(__all__)
+
+
+def __dir__() -> list[str]:
+    """Get the attributes of the genai module."""
+    return list(__all__)
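The new `hammad.genai` package exposes its whole surface through the same lazy hook, so importing the package should stay cheap until a symbol is actually touched. A hedged consumption example using only names from the `__all__` tuple above (whether heavy dependencies are truly deferred depends on the unshown `create_getattr_importer` implementation):

    import hammad.genai as genai

    # Nothing model-related is imported yet; attribute access triggers __getattr__.
    LanguageModel = genai.LanguageModel        # resolved from hammad.genai.language_models
    run_embedding = genai.run_embedding_model  # resolved from hammad.genai.embedding_models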
hammad/genai/agents/__init__.py ADDED
@@ -0,0 +1 @@
+"""hammad.genai.agents"""
hammad/genai/agents/types/__init__.py ADDED
@@ -0,0 +1,35 @@
+"""hammad.genai.types
+
+Contains functional types usable with various components within
+the `hammad.genai` module."""
+
+from typing import TYPE_CHECKING
+from ...._internal import create_getattr_importer
+
+
+if TYPE_CHECKING:
+    from .history import (
+        History,
+    )
+    from .tool import (
+        Tool,
+        ToolResponseMessage,
+        function_tool,
+    )
+
+
+__all__ = (
+    # hammad.genai.types.history
+    "History",
+    # hammad.genai.types.tool
+    "Tool",
+    "function_tool",
+    "ToolResponseMessage",
+)
+
+
+__getattr__ = create_getattr_importer(__all__)
+
+
+def __dir__() -> list[str]:
+    return __all__
hammad/genai/agents/types/history.py ADDED
@@ -0,0 +1,277 @@
+"""hammad.genai.types.history"""
+
+from typing import List, Union, overload, TYPE_CHECKING, Any, Dict
+from typing_extensions import Literal
+import json
+
+if TYPE_CHECKING:
+    try:
+        from openai.types.chat import ChatCompletionMessageParam
+    except ImportError:
+        ChatCompletionMessageParam = Any
+
+from ...language_models import LanguageModelResponse
+from ...language_models._streaming import Stream, AsyncStream
+
+__all__ = ["History"]
+
+
+class History:
+    """A conversation history manager that handles messages and responses.
+
+    This class provides a clean interface for managing conversation history,
+    including adding messages, responses, and rendering the complete history
+    with optional tool call formatting.
+    """
+
+    def __init__(self):
+        """Initialize an empty conversation history."""
+        self.messages: List["ChatCompletionMessageParam"] = []
+
+    @overload
+    def add(self, content: str, *, role: Literal["user", "assistant", "system", "tool"] = "user") -> None:
+        """Add a simple text message to the history.
+
+        Args:
+            content: The message content
+            role: The role of the message sender
+        """
+        ...
+
+    @overload
+    def add(self, content: List["ChatCompletionMessageParam"]) -> None:
+        """Add a list of messages to the history.
+
+        Args:
+            content: List of ChatCompletionMessageParam messages
+        """
+        ...
+
+    def add(
+        self,
+        content: Union[str, List["ChatCompletionMessageParam"]],
+        *,
+        role: Literal["user", "assistant", "system", "tool"] = "user"
+    ) -> None:
+        """Add content to the conversation history.
+
+        Args:
+            content: Either a string message or a list of messages
+            role: The role for string messages (ignored for message lists)
+        """
+        if isinstance(content, str):
+            self.messages.append({
+                "role": role,
+                "content": content
+            })
+        elif isinstance(content, list):
+            self.messages.extend(content)
+        else:
+            raise TypeError(f"Expected str or List[ChatCompletionMessageParam], got {type(content)}")
+
+    def add_message(self, message: "ChatCompletionMessageParam") -> None:
+        """Add a single message to the history.
+
+        Args:
+            message: A ChatCompletionMessageParam to add
+        """
+        self.messages.append(message)
+
+    @overload
+    def add_response(
+        self,
+        response: LanguageModelResponse,
+        *,
+        format_tool_calls: bool = False
+    ) -> None:
+        """Add a LanguageModelResponse to the history.
+
+        Args:
+            response: The language model response to add
+            format_tool_calls: Whether to format tool calls in the message
+        """
+        ...
+
+    @overload
+    def add_response(
+        self,
+        response: Stream,
+        *,
+        format_tool_calls: bool = False
+    ) -> None:
+        """Add a Stream response to the history after collecting it.
+
+        Args:
+            response: The stream to collect and add
+            format_tool_calls: Whether to format tool calls in the message
+        """
+        ...
+
+    @overload
+    def add_response(
+        self,
+        response: AsyncStream,
+        *,
+        format_tool_calls: bool = False
+    ) -> None:
+        """Add an AsyncStream response to the history after collecting it.
+
+        Args:
+            response: The async stream to collect and add
+            format_tool_calls: Whether to format tool calls in the message
+        """
+        ...
+
+    def add_response(
+        self,
+        response: Union[LanguageModelResponse, Stream, AsyncStream],
+        *,
+        format_tool_calls: bool = False
+    ) -> None:
+        """Add a language model response to the history.
+
+        Args:
+            response: The response, stream, or async stream to add
+            format_tool_calls: Whether to format tool calls in the message content
+        """
+        if isinstance(response, LanguageModelResponse):
+            # Direct response - convert to message
+            message = response.to_message(format_tool_calls=format_tool_calls)
+            self.messages.append(message)
+        elif isinstance(response, (Stream, AsyncStream)):
+            raise RuntimeError(
+                "Cannot add uncollected streams to history. "
+                "Please collect the stream first using stream.collect() or stream.to_response(), "
+                "then add the resulting LanguageModelResponse to history."
+            )
+        else:
+            raise TypeError(
+                f"Expected LanguageModelResponse, Stream, or AsyncStream, got {type(response)}"
+            )
+
+    def _summarize_content(self, content: str, max_length: int = 100) -> str:
+        """Summarize content by truncating with ellipsis if too long.
+
+        Args:
+            content: The content to summarize
+            max_length: Maximum length before truncation
+
+        Returns:
+            Summarized content
+        """
+        if len(content) <= max_length:
+            return content
+        return content[:max_length - 3] + "..."
+
+    def _format_and_merge_tool_calls(
+        self,
+        messages: List["ChatCompletionMessageParam"],
+        summarize_tool_calls: bool = True
+    ) -> List["ChatCompletionMessageParam"]:
+        """Format tool calls and merge tool responses into assistant messages.
+
+        Args:
+            messages: List of messages to process
+            summarize_tool_calls: Whether to summarize tool call content
+
+        Returns:
+            Formatted messages with tool calls and responses merged
+        """
+        # Create a mapping of tool_call_id to tool response content
+        tool_responses: Dict[str, str] = {}
+        tool_message_indices: List[int] = []
+
+        for i, message in enumerate(messages):
+            if message.get("role") == "tool":
+                tool_call_id = message.get("tool_call_id")
+                if tool_call_id:
+                    tool_responses[tool_call_id] = message.get("content", "")
+                    tool_message_indices.append(i)
+
+        # Process messages and format tool calls
+        formatted_messages = []
+        indices_to_skip = set(tool_message_indices)
+
+        for i, message in enumerate(messages):
+            if i in indices_to_skip:
+                continue
+
+            if message.get("role") == "assistant" and message.get("tool_calls"):
+                # Create a copy of the message
+                formatted_message = dict(message)
+
+                # Format tool calls and merge responses
+                content_parts = []
+                if message.get("content"):
+                    content_parts.append(message["content"])
+
+                for tool_call in message["tool_calls"]:
+                    tool_id = tool_call.get("id")
+                    tool_name = tool_call["function"]["name"]
+                    tool_args = tool_call["function"]["arguments"]
+
+                    # Format arguments nicely
+                    try:
+                        args_dict = json.loads(tool_args) if isinstance(tool_args, str) else tool_args
+                        args_str = json.dumps(args_dict, indent=2)
+                    except:
+                        args_str = str(tool_args)
+
+                    # Create the tool call section
+                    tool_section = f"I called the function `{tool_name}` with arguments:\n{args_str}"
+
+                    # Add tool response if available
+                    if tool_id and tool_id in tool_responses:
+                        response_content = tool_responses[tool_id]
+                        if summarize_tool_calls and len(response_content) > 100:
+                            response_content = self._summarize_content(response_content)
+                        tool_section += f"\n\nResponse: {response_content}"
+
+                    content_parts.append(tool_section)
+
+                formatted_message["content"] = "\n\n".join(content_parts)
+                # Remove tool_calls from the formatted message
+                formatted_message.pop("tool_calls", None)
+
+                formatted_messages.append(formatted_message)
+            else:
+                formatted_messages.append(message)
+
+        return formatted_messages
+
+    def render(
+        self,
+        *,
+        format_tool_calls: bool = False,
+        summarize_tool_calls: bool = True
+    ) -> List["ChatCompletionMessageParam"]:
+        """Render the conversation history as a list of messages.
+
+        Args:
+            format_tool_calls: Whether to format tool calls in assistant messages
+                for better readability and merge tool responses
+            summarize_tool_calls: Whether to summarize tool call responses when
+                format_tool_calls is True (defaults to True)
+
+        Returns:
+            List of ChatCompletionMessageParam messages
+        """
+        if format_tool_calls:
+            return self._format_and_merge_tool_calls(self.messages, summarize_tool_calls)
+        return self.messages.copy()
+
+    def clear(self) -> None:
+        """Clear all messages from the history."""
+        self.messages.clear()
+
+    def __len__(self) -> int:
+        """Return the number of messages in the history."""
+        return len(self.messages)
+
+    def __bool__(self) -> bool:
+        """Return True if there are messages in the history."""
+        return bool(self.messages)
+
+    def __repr__(self) -> str:
+        """Return a string representation of the history."""
+        return f"History(messages={len(self.messages)})"