hammad-python 0.0.19__py3-none-any.whl → 0.0.20__py3-none-any.whl

This diff compares the contents of publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (83)
  1. hammad/__init__.py +7 -137
  2. hammad/_internal.py +1 -0
  3. hammad/cli/_runner.py +8 -8
  4. hammad/cli/plugins.py +55 -26
  5. hammad/cli/styles/utils.py +16 -8
  6. hammad/data/__init__.py +1 -5
  7. hammad/data/collections/__init__.py +2 -3
  8. hammad/data/collections/collection.py +41 -22
  9. hammad/data/collections/indexes/__init__.py +1 -1
  10. hammad/data/collections/indexes/qdrant/__init__.py +1 -1
  11. hammad/data/collections/indexes/qdrant/index.py +106 -118
  12. hammad/data/collections/indexes/qdrant/settings.py +14 -14
  13. hammad/data/collections/indexes/qdrant/utils.py +28 -38
  14. hammad/data/collections/indexes/tantivy/__init__.py +1 -1
  15. hammad/data/collections/indexes/tantivy/index.py +57 -59
  16. hammad/data/collections/indexes/tantivy/settings.py +8 -19
  17. hammad/data/collections/indexes/tantivy/utils.py +28 -52
  18. hammad/data/models/__init__.py +2 -7
  19. hammad/data/sql/__init__.py +1 -1
  20. hammad/data/sql/database.py +71 -73
  21. hammad/data/sql/types.py +37 -51
  22. hammad/formatting/__init__.py +2 -1
  23. hammad/formatting/json/converters.py +2 -2
  24. hammad/genai/__init__.py +96 -36
  25. hammad/genai/agents/__init__.py +47 -1
  26. hammad/genai/agents/agent.py +1022 -0
  27. hammad/genai/agents/run.py +615 -0
  28. hammad/genai/agents/types/__init__.py +29 -22
  29. hammad/genai/agents/types/agent_context.py +13 -0
  30. hammad/genai/agents/types/agent_event.py +128 -0
  31. hammad/genai/agents/types/agent_hooks.py +220 -0
  32. hammad/genai/agents/types/agent_messages.py +31 -0
  33. hammad/genai/agents/types/agent_response.py +90 -0
  34. hammad/genai/agents/types/agent_stream.py +242 -0
  35. hammad/genai/models/__init__.py +1 -0
  36. hammad/genai/models/embeddings/__init__.py +39 -0
  37. hammad/genai/{embedding_models/embedding_model.py → models/embeddings/model.py} +45 -41
  38. hammad/genai/{embedding_models → models/embeddings}/run.py +10 -8
  39. hammad/genai/models/embeddings/types/__init__.py +37 -0
  40. hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_name.py +2 -4
  41. hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_response.py +11 -4
  42. hammad/genai/{embedding_models/embedding_model_request.py → models/embeddings/types/embedding_model_run_params.py} +4 -3
  43. hammad/genai/models/embeddings/types/embedding_model_settings.py +47 -0
  44. hammad/genai/models/language/__init__.py +48 -0
  45. hammad/genai/{language_models/language_model.py → models/language/model.py} +481 -204
  46. hammad/genai/{language_models → models/language}/run.py +80 -57
  47. hammad/genai/models/language/types/__init__.py +40 -0
  48. hammad/genai/models/language/types/language_model_instructor_mode.py +47 -0
  49. hammad/genai/models/language/types/language_model_messages.py +28 -0
  50. hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py} +3 -40
  51. hammad/genai/{language_models → models/language/types}/language_model_request.py +17 -25
  52. hammad/genai/{language_models → models/language/types}/language_model_response.py +61 -68
  53. hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py +8 -5
  54. hammad/genai/models/language/types/language_model_settings.py +89 -0
  55. hammad/genai/{language_models/_streaming.py → models/language/types/language_model_stream.py} +221 -243
  56. hammad/genai/{language_models/_utils → models/language/utils}/__init__.py +8 -11
  57. hammad/genai/models/language/utils/requests.py +421 -0
  58. hammad/genai/{language_models/_utils/_structured_outputs.py → models/language/utils/structured_outputs.py} +31 -20
  59. hammad/genai/models/model_provider.py +4 -0
  60. hammad/genai/{multimodal_models.py → models/multimodal.py} +4 -5
  61. hammad/genai/models/reranking.py +26 -0
  62. hammad/genai/types/__init__.py +1 -0
  63. hammad/genai/types/base.py +215 -0
  64. hammad/genai/{agents/types → types}/history.py +101 -88
  65. hammad/genai/{agents/types/tool.py → types/tools.py} +156 -141
  66. hammad/logging/logger.py +1 -1
  67. hammad/mcp/client/__init__.py +2 -3
  68. hammad/mcp/client/client.py +10 -10
  69. hammad/mcp/servers/__init__.py +2 -1
  70. hammad/service/decorators.py +1 -3
  71. hammad/web/models.py +1 -3
  72. hammad/web/search/client.py +10 -22
  73. {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/METADATA +10 -2
  74. hammad_python-0.0.20.dist-info/RECORD +127 -0
  75. hammad/genai/embedding_models/__init__.py +0 -41
  76. hammad/genai/language_models/__init__.py +0 -35
  77. hammad/genai/language_models/_utils/_completions.py +0 -131
  78. hammad/genai/language_models/_utils/_messages.py +0 -89
  79. hammad/genai/language_models/_utils/_requests.py +0 -202
  80. hammad/genai/rerank_models.py +0 -26
  81. hammad_python-0.0.19.dist-info/RECORD +0 -111
  82. {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/WHEEL +0 -0
  83. {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/licenses/LICENSE +0 -0
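The headline change in 0.0.20 is a restructuring of `hammad.genai`: the flat `language_models` and `embedding_models` modules move under a unified `models` package, `rerank_models.py` becomes `models/reranking.py`, and a full agent layer (`agents/agent.py`, `agents/run.py`, plus event/hook/stream types) is added. As a hedged sketch of the import-path migration the renames above imply (only the module paths are confirmed by this diff; the imported class names are assumed from the file names):

# 0.0.19 layout (modules deleted in this release)
from hammad.genai.language_models import LanguageModel      # name assumed
from hammad.genai.embedding_models import EmbeddingModel    # name assumed

# 0.0.20 layout
from hammad.genai.models.language import LanguageModel      # name assumed
from hammad.genai.models.embeddings import EmbeddingModel   # name assumed
from hammad.genai.types.base import BaseGenAIModel          # path shown in the hunk below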
hammad/genai/types/base.py (new file)
@@ -0,0 +1,215 @@
+"""hammad.genai.types.base"""
+
+from abc import ABC, abstractmethod
+from typing import (
+    Any,
+    AsyncIterator,
+    Callable,
+    Dict,
+    Iterator,
+    Generic,
+    ParamSpec,
+    TypeVar,
+    TYPE_CHECKING,
+    Union,
+)
+
+from pydantic import BaseModel, ConfigDict
+
+if TYPE_CHECKING:
+    from openai.types.chat import (
+        ChatCompletionMessageParam,
+        ChatCompletionContentPartParam,
+    )
+
+
+__all__ = [
+    "T",
+    "P",
+    "R",
+    "BaseGenAIModelEvent",
+    "BaseGenAIModelStream",
+    "BaseTool",
+    "BaseGenAIModelResponse",
+    "BaseGenAIModelSettings",
+    "BaseGenAIModel",
+]
+
+
+T = TypeVar("T")
+P = ParamSpec("P")
+R = TypeVar("R")
+
+
+class BaseGenAIModelEvent(BaseModel, Generic[T]):
+    """Base class for all events that a Generative AI model can
+    emit / return.
+
+    This is a base class used only for type hinting and incorporates
+    no fields.
+    """
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    def to_message(self) -> "ChatCompletionMessageParam":
+        """Converts the event into a message dictionary that is compatible
+        with any message interface within the `hammad.genai` module."""
+        raise NotImplementedError(
+            f"to_message() is not implemented for {self.__class__.__name__}"
+        )
+
+    def to_content_part(self) -> "ChatCompletionContentPartParam":
+        """Converts the event into a content part dictionary that can be added
+        within chat messages."""
+        raise NotImplementedError(
+            f"to_content_part() is not implemented for {self.__class__.__name__}"
+        )
+
+
+class BaseGenAIModelStream(BaseGenAIModelEvent[T]):
+    """Base class for all streams from Generative AI models within the
+    `hammad.genai` module.
+
+    This class manages both sync and async streaming.
+    """
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    type: str
+    """The type of the model, can be `language_model`, `embedding_model`,
+    `image_model`..."""
+
+    model: str
+    """The model that was used to generate the stream."""
+
+    stream: Union[Iterator[T], AsyncIterator[T]] | None = None
+    """The streamed content generated by the model."""
+
+    def __iter__(self) -> Iterator[T]:
+        raise NotImplementedError(
+            f"__iter__() is not implemented for {self.__class__.__name__}"
+        )
+
+    def __aiter__(self) -> AsyncIterator[T]:
+        raise NotImplementedError(
+            f"__aiter__() is not implemented for {self.__class__.__name__}"
+        )
+
+
+class BaseTool(BaseModel, Generic[P, R]):
+    """Base class for tools. All generative AI models within the
+    `hammad.genai` module can be converted into tools usable by
+    agents and language models.
+    """
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    name: str
+    """The name of the tool."""
+
+    description: str
+    """Description of what the tool does."""
+
+    function: Callable[P, R]
+    """The Python function to execute."""
+
+    parameters_json_schema: Dict[str, Any]
+    """JSON schema for the tool's parameters."""
+
+    takes_context: bool = False
+    """Whether the function expects a context as first parameter."""
+
+    strict: bool = True
+    """Whether to enforce strict JSON schema validation."""
+
+
+class BaseGenAIModelResponse(BaseGenAIModelEvent[T]):
+    """Base class for all responses from Generative AI models within the
+    `hammad.genai` module.
+    """
+
+    model_config = ConfigDict(
+        arbitrary_types_allowed=True,
+    )
+
+    type: str
+    """The type of the model, can be `language_model`, `embedding_model`,
+    `image_model`..."""
+
+    model: str
+    """The model that was used to generate the response."""
+
+    output: T
+    """The final response or output generated by the model. This can be
+    anything from chat messages, embeddings, ..."""
+
+
+class BaseGenAIModelSettings(BaseModel):
+    """Represents the defaults & base for additional settings that
+    can be applied to any model within the `hammad.genai` module.
+    """
+
+
+class BaseGenAIModel(BaseModel, ABC):
+    """Base class for all Generative AI models available within the
+    `hammad.genai` module.
+
+    NOTE:
+        All models within this library use `litellm` directly as the
+        client; if you don't have a need for any of the opinionation
+        given by this package, I would recommend just using
+        `litellm` directly.
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
+
+    model: str | None = None
+    """The model to use. This is always in the `litellm` format:
+
+    `<provider>/<model>`
+
+    `openai/gpt-4o-mini`
+    `openai/text-embedding-3-small`
+    """
+
+    base_url: str | None = None
+    """A custom base URL to use for the model.
+    """
+
+    api_key: str | None = None
+    """The API key to use for the model.
+    """
+
+    settings: BaseGenAIModelSettings | None = None
+    """The default (additional) settings to use when generating outputs
+    with this model."""
+
+    @abstractmethod
+    def run(
+        self,
+        *args,
+        **kwargs,
+    ) -> Any: ...
+
+    @abstractmethod
+    async def async_run(
+        self,
+        *args,
+        **kwargs,
+    ) -> Any: ...
+
+    def as_tool(
+        self,
+        *args,
+        **kwargs,
+    ) -> BaseTool[P, R]:
+        raise NotImplementedError(
+            f"as_tool() is not implemented for {self.__class__.__name__}"
+        )
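The abstract `run`/`async_run` pair plus the `NotImplementedError` defaults make `BaseGenAIModel` a thin contract over a pydantic model. A minimal sketch of what a concrete subclass must provide, assuming only the definitions in the hunk above; `EchoModel` and its behavior are hypothetical, for illustration only:

from typing import Any
from hammad.genai.types.base import BaseGenAIModel  # module path from the docstring above

class EchoModel(BaseGenAIModel):
    # Inherited pydantic fields: model, base_url, api_key, settings.
    def run(self, *args, **kwargs) -> Any:
        # Hypothetical behavior: echo the first positional argument back.
        return args[0] if args else None

    async def async_run(self, *args, **kwargs) -> Any:
        return self.run(*args, **kwargs)

m = EchoModel(model="openai/gpt-4o-mini")  # litellm-style "<provider>/<model>" string
print(m.run("hello"))                      # -> "hello"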
hammad/genai/{agents/types → types}/history.py
@@ -10,268 +10,281 @@ if TYPE_CHECKING:
     except ImportError:
         ChatCompletionMessageParam = Any
 
-from ...language_models import LanguageModelResponse
-from ...language_models._streaming import Stream, AsyncStream
+from ..models.language.types import LanguageModelResponse, LanguageModelStream
+from ..agents.types.agent_response import AgentResponse
+from ..agents.types.agent_stream import AgentStream
 
 __all__ = ["History"]
 
 
 class History:
     """A conversation history manager that handles messages and responses.
-
+
     This class provides a clean interface for managing conversation history,
     including adding messages, responses, and rendering the complete history
     with optional tool call formatting.
     """
-
+
     def __init__(self):
         """Initialize an empty conversation history."""
         self.messages: List["ChatCompletionMessageParam"] = []
-
+
     @overload
-    def add(self, content: str, *, role: Literal["user", "assistant", "system", "tool"] = "user") -> None:
+    def add(
+        self,
+        content: str,
+        *,
+        role: Literal["user", "assistant", "system", "tool"] = "user",
+    ) -> None:
         """Add a simple text message to the history.
-
+
         Args:
            content: The message content
            role: The role of the message sender
        """
        ...
-
+
     @overload
     def add(self, content: List["ChatCompletionMessageParam"]) -> None:
         """Add a list of messages to the history.
-
+
         Args:
            content: List of ChatCompletionMessageParam messages
        """
        ...
-
+
     def add(
-        self,
-        content: Union[str, List["ChatCompletionMessageParam"]],
-        *,
-        role: Literal["user", "assistant", "system", "tool"] = "user"
+        self,
+        content: Union[str, List["ChatCompletionMessageParam"]],
+        *,
+        role: Literal["user", "assistant", "system", "tool"] = "user",
     ) -> None:
         """Add content to the conversation history.
-
+
         Args:
             content: Either a string message or a list of messages
             role: The role for string messages (ignored for message lists)
         """
         if isinstance(content, str):
-            self.messages.append({
-                "role": role,
-                "content": content
-            })
+            self.messages.append({"role": role, "content": content})
         elif isinstance(content, list):
             self.messages.extend(content)
         else:
-            raise TypeError(f"Expected str or List[ChatCompletionMessageParam], got {type(content)}")
-
+            raise TypeError(
+                f"Expected str or List[ChatCompletionMessageParam], got {type(content)}"
+            )
+
     def add_message(self, message: "ChatCompletionMessageParam") -> None:
         """Add a single message to the history.
-
+
         Args:
             message: A ChatCompletionMessageParam to add
         """
         self.messages.append(message)
-
+
     @overload
     def add_response(
-        self,
-        response: LanguageModelResponse,
-        *,
-        format_tool_calls: bool = False
+        self, response: LanguageModelResponse, *, format_tool_calls: bool = False
     ) -> None:
         """Add a LanguageModelResponse to the history.
-
+
         Args:
             response: The language model response to add
             format_tool_calls: Whether to format tool calls in the message
         """
         ...
-
+
     @overload
     def add_response(
-        self,
-        response: Stream,
-        *,
-        format_tool_calls: bool = False
+        self, response: LanguageModelStream, *, format_tool_calls: bool = False
     ) -> None:
         """Add a Stream response to the history after collecting it.
-
+
         Args:
             response: The stream to collect and add
             format_tool_calls: Whether to format tool calls in the message
         """
         ...
-
+
     @overload
     def add_response(
-        self,
-        response: AsyncStream,
-        *,
-        format_tool_calls: bool = False
+        self, response: AgentResponse, *, format_tool_calls: bool = False
     ) -> None:
-        """Add an AsyncStream response to the history after collecting it.
-
+        """Add an AgentResponse to the history.
+
+        Args:
+            response: The agent response to add
+            format_tool_calls: Whether to format tool calls in the message
+        """
+        ...
+
+    @overload
+    def add_response(
+        self, response: AgentStream, *, format_tool_calls: bool = False
+    ) -> None:
+        """Add an AgentStream to the history after collecting it.
+
         Args:
-            response: The async stream to collect and add
+            response: The agent stream to collect and add
             format_tool_calls: Whether to format tool calls in the message
         """
         ...
-
+
     def add_response(
-        self,
-        response: Union[LanguageModelResponse, Stream, AsyncStream],
+        self,
+        response: Union[
+            LanguageModelResponse, LanguageModelStream, AgentResponse, AgentStream
+        ],
         *,
-        format_tool_calls: bool = False
+        format_tool_calls: bool = False,
     ) -> None:
-        """Add a language model response to the history.
-
+        """Add a language model or agent response to the history.
+
         Args:
-            response: The response, stream, or async stream to add
+            response: The response or stream to add
             format_tool_calls: Whether to format tool calls in the message content
         """
-        if isinstance(response, LanguageModelResponse):
+        if isinstance(response, (LanguageModelResponse, AgentResponse)):
             # Direct response - convert to message
             message = response.to_message(format_tool_calls=format_tool_calls)
             self.messages.append(message)
-        elif isinstance(response, (Stream, AsyncStream)):
+        elif isinstance(response, (LanguageModelStream, AgentStream)):
             raise RuntimeError(
                 "Cannot add uncollected streams to history. "
                 "Please collect the stream first using stream.collect() or stream.to_response(), "
-                "then add the resulting LanguageModelResponse to history."
+                "then add the resulting response to history."
             )
         else:
             raise TypeError(
-                f"Expected LanguageModelResponse, Stream, or AsyncStream, got {type(response)}"
+                f"Expected LanguageModelResponse, AgentResponse, Stream, or AgentStream, got {type(response)}"
             )
-
+
     def _summarize_content(self, content: str, max_length: int = 100) -> str:
         """Summarize content by truncating with ellipsis if too long.
-
+
         Args:
             content: The content to summarize
             max_length: Maximum length before truncation
-
+
         Returns:
             Summarized content
         """
         if len(content) <= max_length:
             return content
-        return content[:max_length - 3] + "..."
-
+        return content[: max_length - 3] + "..."
+
     def _format_and_merge_tool_calls(
-        self,
+        self,
         messages: List["ChatCompletionMessageParam"],
-        summarize_tool_calls: bool = True
+        summarize_tool_calls: bool = True,
     ) -> List["ChatCompletionMessageParam"]:
         """Format tool calls and merge tool responses into assistant messages.
-
+
         Args:
             messages: List of messages to process
             summarize_tool_calls: Whether to summarize tool call content
-
+
         Returns:
             Formatted messages with tool calls and responses merged
         """
         # Create a mapping of tool_call_id to tool response content
         tool_responses: Dict[str, str] = {}
         tool_message_indices: List[int] = []
-
+
         for i, message in enumerate(messages):
             if message.get("role") == "tool":
                 tool_call_id = message.get("tool_call_id")
                 if tool_call_id:
                     tool_responses[tool_call_id] = message.get("content", "")
                     tool_message_indices.append(i)
-
+
         # Process messages and format tool calls
         formatted_messages = []
         indices_to_skip = set(tool_message_indices)
-
+
         for i, message in enumerate(messages):
             if i in indices_to_skip:
                 continue
-
+
             if message.get("role") == "assistant" and message.get("tool_calls"):
                 # Create a copy of the message
                 formatted_message = dict(message)
-
+
                 # Format tool calls and merge responses
                 content_parts = []
                 if message.get("content"):
                     content_parts.append(message["content"])
-
+
                 for tool_call in message["tool_calls"]:
                     tool_id = tool_call.get("id")
                     tool_name = tool_call["function"]["name"]
                     tool_args = tool_call["function"]["arguments"]
-
+
                     # Format arguments nicely
                     try:
-                        args_dict = json.loads(tool_args) if isinstance(tool_args, str) else tool_args
+                        args_dict = (
+                            json.loads(tool_args)
+                            if isinstance(tool_args, str)
+                            else tool_args
+                        )
                         args_str = json.dumps(args_dict, indent=2)
                     except:
                         args_str = str(tool_args)
-
+
                     # Create the tool call section
                     tool_section = f"I called the function `{tool_name}` with arguments:\n{args_str}"
-
+
                     # Add tool response if available
                     if tool_id and tool_id in tool_responses:
                         response_content = tool_responses[tool_id]
                         if summarize_tool_calls and len(response_content) > 100:
                             response_content = self._summarize_content(response_content)
                         tool_section += f"\n\nResponse: {response_content}"
-
+
                     content_parts.append(tool_section)
-
+
                 formatted_message["content"] = "\n\n".join(content_parts)
                 # Remove tool_calls from the formatted message
                 formatted_message.pop("tool_calls", None)
-
+
                 formatted_messages.append(formatted_message)
             else:
                 formatted_messages.append(message)
-
+
         return formatted_messages
-
+
     def render(
-        self,
-        *,
-        format_tool_calls: bool = False,
-        summarize_tool_calls: bool = True
+        self, *, format_tool_calls: bool = False, summarize_tool_calls: bool = True
     ) -> List["ChatCompletionMessageParam"]:
         """Render the conversation history as a list of messages.
-
+
         Args:
             format_tool_calls: Whether to format tool calls in assistant messages
                 for better readability and merge tool responses
             summarize_tool_calls: Whether to summarize tool call responses when
                 format_tool_calls is True (defaults to True)
-
+
         Returns:
             List of ChatCompletionMessageParam messages
         """
         if format_tool_calls:
-            return self._format_and_merge_tool_calls(self.messages, summarize_tool_calls)
+            return self._format_and_merge_tool_calls(
+                self.messages, summarize_tool_calls
+            )
         return self.messages.copy()
-
+
     def clear(self) -> None:
         """Clear all messages from the history."""
         self.messages.clear()
-
+
     def __len__(self) -> int:
         """Return the number of messages in the history."""
         return len(self.messages)
-
+
     def __bool__(self) -> bool:
         """Return True if there are messages in the history."""
         return bool(self.messages)
-
+
     def __repr__(self) -> str:
         """Return a string representation of the history."""
         return f"History(messages={len(self.messages)})"