hammad-python 0.0.18__py3-none-any.whl → 0.0.20__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- hammad/__init__.py +7 -137
- hammad/_internal.py +1 -0
- hammad/cli/_runner.py +8 -8
- hammad/cli/plugins.py +55 -26
- hammad/cli/styles/utils.py +16 -8
- hammad/data/__init__.py +1 -5
- hammad/data/collections/__init__.py +2 -3
- hammad/data/collections/collection.py +41 -22
- hammad/data/collections/indexes/__init__.py +1 -1
- hammad/data/collections/indexes/qdrant/__init__.py +1 -1
- hammad/data/collections/indexes/qdrant/index.py +106 -118
- hammad/data/collections/indexes/qdrant/settings.py +14 -14
- hammad/data/collections/indexes/qdrant/utils.py +28 -38
- hammad/data/collections/indexes/tantivy/__init__.py +1 -1
- hammad/data/collections/indexes/tantivy/index.py +57 -59
- hammad/data/collections/indexes/tantivy/settings.py +8 -19
- hammad/data/collections/indexes/tantivy/utils.py +28 -52
- hammad/data/models/__init__.py +2 -7
- hammad/data/sql/__init__.py +1 -1
- hammad/data/sql/database.py +71 -73
- hammad/data/sql/types.py +37 -51
- hammad/formatting/__init__.py +2 -1
- hammad/formatting/json/converters.py +2 -2
- hammad/genai/__init__.py +96 -36
- hammad/genai/agents/__init__.py +47 -1
- hammad/genai/agents/agent.py +1022 -0
- hammad/genai/agents/run.py +615 -0
- hammad/genai/agents/types/__init__.py +29 -22
- hammad/genai/agents/types/agent_context.py +13 -0
- hammad/genai/agents/types/agent_event.py +128 -0
- hammad/genai/agents/types/agent_hooks.py +220 -0
- hammad/genai/agents/types/agent_messages.py +31 -0
- hammad/genai/agents/types/agent_response.py +90 -0
- hammad/genai/agents/types/agent_stream.py +242 -0
- hammad/genai/models/__init__.py +1 -0
- hammad/genai/models/embeddings/__init__.py +39 -0
- hammad/genai/{embedding_models/embedding_model.py → models/embeddings/model.py} +45 -41
- hammad/genai/{embedding_models → models/embeddings}/run.py +10 -8
- hammad/genai/models/embeddings/types/__init__.py +37 -0
- hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_name.py +2 -4
- hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_response.py +11 -4
- hammad/genai/{embedding_models/embedding_model_request.py → models/embeddings/types/embedding_model_run_params.py} +4 -3
- hammad/genai/models/embeddings/types/embedding_model_settings.py +47 -0
- hammad/genai/models/language/__init__.py +48 -0
- hammad/genai/{language_models/language_model.py → models/language/model.py} +481 -204
- hammad/genai/{language_models → models/language}/run.py +80 -57
- hammad/genai/models/language/types/__init__.py +40 -0
- hammad/genai/models/language/types/language_model_instructor_mode.py +47 -0
- hammad/genai/models/language/types/language_model_messages.py +28 -0
- hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py} +3 -40
- hammad/genai/{language_models → models/language/types}/language_model_request.py +17 -25
- hammad/genai/{language_models → models/language/types}/language_model_response.py +61 -68
- hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py +8 -5
- hammad/genai/models/language/types/language_model_settings.py +89 -0
- hammad/genai/{language_models/_streaming.py → models/language/types/language_model_stream.py} +221 -243
- hammad/genai/{language_models/_utils → models/language/utils}/__init__.py +8 -11
- hammad/genai/models/language/utils/requests.py +421 -0
- hammad/genai/{language_models/_utils/_structured_outputs.py → models/language/utils/structured_outputs.py} +31 -20
- hammad/genai/models/model_provider.py +4 -0
- hammad/genai/{multimodal_models.py → models/multimodal.py} +4 -5
- hammad/genai/models/reranking.py +26 -0
- hammad/genai/types/__init__.py +1 -0
- hammad/genai/types/base.py +215 -0
- hammad/genai/{agents/types → types}/history.py +101 -88
- hammad/genai/{agents/types/tool.py → types/tools.py} +156 -141
- hammad/logging/logger.py +2 -1
- hammad/mcp/client/__init__.py +2 -3
- hammad/mcp/client/client.py +10 -10
- hammad/mcp/servers/__init__.py +2 -1
- hammad/service/decorators.py +1 -3
- hammad/web/models.py +1 -3
- hammad/web/search/client.py +10 -22
- {hammad_python-0.0.18.dist-info → hammad_python-0.0.20.dist-info}/METADATA +10 -2
- hammad_python-0.0.20.dist-info/RECORD +127 -0
- hammad/genai/embedding_models/__init__.py +0 -41
- hammad/genai/language_models/__init__.py +0 -35
- hammad/genai/language_models/_utils/_completions.py +0 -131
- hammad/genai/language_models/_utils/_messages.py +0 -89
- hammad/genai/language_models/_utils/_requests.py +0 -202
- hammad/genai/rerank_models.py +0 -26
- hammad_python-0.0.18.dist-info/RECORD +0 -111
- {hammad_python-0.0.18.dist-info → hammad_python-0.0.20.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.18.dist-info → hammad_python-0.0.20.dist-info}/licenses/LICENSE +0 -0
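
The headline change in 0.0.20 is a restructure of `hammad.genai`: the flat `embedding_models` and `language_models` packages move under a unified `models` namespace (`models/embeddings`, `models/language`), and a full agent runtime is added (`agents/agent.py`, `agents/run.py`, plus event, hook, message, and stream types). A minimal sketch of how imports shift under the new layout; the module paths come from the rename list above, while the imported class names are assumptions for illustration only:

```python
# Import paths implied by the renames above (0.0.18 -> 0.0.20).
# Module paths are taken from the file list; the imported names
# (LanguageModel, EmbeddingModel) are assumptions, not confirmed by this diff.

# 0.0.18 layout:
#   from hammad.genai.language_models.language_model import LanguageModel
#   from hammad.genai.embedding_models.embedding_model import EmbeddingModel

# 0.0.20 layout:
from hammad.genai.models.language.model import LanguageModel
from hammad.genai.models.embeddings.model import EmbeddingModel
```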
hammad/genai/{language_models → models/language/types}/language_model_response.py

Deleted lines whose text the original diff viewer did not preserve appear in the hunks below as bare `-` markers.

```diff
@@ -1,32 +1,38 @@
 """hammad.genai.language_models.language_model_response"""
 
-from
-
-
-
-
-
-
-
-
-
-
-
-
+from __future__ import annotations
+
+from typing import (
+    List,
+    TypeVar,
+    Optional,
+    Any,
+    Dict,
+    Union,
+    Literal,
+)
+
+from ...model_provider import litellm
+from ....types.base import BaseGenAIModelResponse
+from openai.types.chat import (
+    ChatCompletionContentPartParam,
+    ChatCompletionMessageParam,
+    ChatCompletionMessageToolCall,
+)
 
 __all__ = [
     "LanguageModelResponse",
 ]
 
+
 T = TypeVar("T")
 
 
-class LanguageModelResponse(BaseModel, Generic[T]):
+class LanguageModelResponse(BaseGenAIModelResponse[T]):
     """A response generated by a language model. This response is unified
     to represent both standard completions as well as structured outputs."""
 
-
+    type: Literal["language_model"] = "language_model"
 
     model: str
     """The model that generated this response."""
@@ -47,7 +53,7 @@ class LanguageModelResponse(BaseModel, Generic[T]):
     """The actual response content of the completion. This is the string that
     was generated by the model."""
 
-    tool_calls: Optional[List["
+    tool_calls: Optional[List["ChatCompletionMessageToolCall"]] = None
     """The tool calls that were made by the model. This is a list of tool calls
     that were made by the model."""
 
@@ -56,75 +62,64 @@ class LanguageModelResponse(BaseModel, Generic[T]):
     was generated by the model when it refused to generate the completion."""
 
     def get_content(
-        self,
-        choice: int = 0
+        self, choice: int = 0
     ) -> Union[str, List["ChatCompletionContentPartParam"], None]:
         """The 'raw' message content generated by the language model, this
         can be either a string, a list of content parts, or `None`."""
 
         if not self.completion or not self.completion.choices:
             return None
-
+
         if choice >= len(self.completion.choices):
             return None
-
+
         return self.completion.choices[choice].message.content
 
     def get_tool_calls(
-        self,
-
-        name: Optional[str] = None,
-        id: Optional[str] = None,
-        choice: int = 0
-    ) -> Optional[List["litellm.ChatCompletionMessageToolCall"]]:
+        self, *, name: Optional[str] = None, id: Optional[str] = None, choice: int = 0
+    ) -> Optional[List["ChatCompletionMessageToolCall"]]:
         """The tool calls generated by the language model, this is a list of
         `ChatCompletionMessageToolCall` objects. Optionally can be filtered
         by name or ID to return specific tool calls.
-
+
         NOTE: Only one of `name` or `id` can be provided."""
         if not self.completion or not self.completion.choices:
             return None
-
+
         if choice >= len(self.completion.choices):
             return None
-
+
         tool_calls = self.completion.choices[choice].message.tool_calls
         if not tool_calls:
             return None
-
+
         # Filter by name or id if provided
         if name is not None and id is not None:
             raise ValueError("Only one of 'name' or 'id' can be provided, not both")
-
+
         if name is not None:
             return [call for call in tool_calls if call.function.name == name]
-
+
         if id is not None:
             return [call for call in tool_calls if call.id == id]
-
+
         return tool_calls
 
-    def has_tool_calls(
-        self,
-        name: Optional[str] = None,
-        choice: int = 0
-    ) -> bool:
+    def has_tool_calls(self, name: Optional[str] = None, choice: int = 0) -> bool:
        """Checks if the response has tool calls, optionally filtered by name.
        If `name` is provided, it will check if the tool call with that name
        exists in the response."""
         if not self.completion or not self.completion.choices:
             return False
-
+
         tool_calls = self.get_tool_calls(name=name, choice=choice)
         return tool_calls is not None and len(tool_calls) > 0
 
     def get_tool_call_parameters(
-        self,
-        tool: Optional[str] = None,
-        choice: int = 0
+        self, tool: Optional[str] = None, choice: int = 0
     ) -> Optional[Dict[str, Any]]:
-        """Returns the generated parameters for a tool call within a response.
-        If the response has multiple tool calls, and no tool is specified,
+        """Returns the generated parameters for a tool call within a response.
+        If the response has multiple tool calls, and no tool is specified,
         an error will be raised.
 
         Args:
@@ -148,62 +143,60 @@ class LanguageModelResponse(BaseModel, Generic[T]):
         for tool_call in tool_calls:
             if tool_call.function.name == tool:
                 import json
+
                 return json.loads(tool_call.function.arguments)
         return None
 
     def to_message(
-        self,
-        format_tool_calls: bool = False,
-        choice: int = 0
+        self, format_tool_calls: bool = False, choice: int = 0
     ) -> "ChatCompletionMessageParam":
         """Converts the LanguageModelResponse to a Chat Completions
         message object.
-
+
         If the `format_tool_calls` parameter is True, the tool calls
         will be cleanly formatted and added to the message content
         with something similar to:
-
+
         'I called the function `get_weather` with the following arguments:
         {arguments}'
         """
         if not self.completion or not self.completion.choices:
-            return {
-                "role": "assistant",
-                "content": ""
-            }
-
+            return {"role": "assistant", "content": ""}
+
         if choice >= len(self.completion.choices):
-            return {
-                "role": "assistant",
-                "content": ""
-            }
-
+            return {"role": "assistant", "content": ""}
+
         choice_message = self.completion.choices[choice].message
-
+
         # Base message structure
         message: "ChatCompletionMessageParam" = {
             "role": "assistant",
-            "content": choice_message.content or ""
+            "content": choice_message.content or "",
         }
-
+
         # Add tool calls if they exist and format_tool_calls is False
         if choice_message.tool_calls and not format_tool_calls:
             message["tool_calls"] = choice_message.tool_calls
-
+
         # Format tool calls into content if format_tool_calls is True
         elif choice_message.tool_calls and format_tool_calls:
             content_parts = []
             if choice_message.content:
                 content_parts.append(choice_message.content)
-
+
             for tool_call in choice_message.tool_calls:
                 formatted_call = f"I called the function `{tool_call.function.name}` with the following arguments:\n{tool_call.function.arguments}"
                 content_parts.append(formatted_call)
-
+
             message["content"] = "\n\n".join(content_parts)
-
+
         return message
 
+    def to_content_part(self) -> "ChatCompletionContentPartParam":
+        """Converts the LanguageModelResponse to a Chat Completions
+        content part object."""
+        return {"type": "text", "text": self.content}
+
     def __str__(self) -> str:
         """Pretty prints the response object."""
         output = "LanguageModelResponse:"
@@ -216,4 +209,4 @@ class LanguageModelResponse(BaseModel, Generic[T]):
         output += f"\n\n>>> Model: {self.model}"
         output += f"\n>>> Tool Calls: {len(self.tool_calls) if self.tool_calls else 0}"
 
-        return output
+        return output
```
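
Taken together, this file keeps the same method set but reflows signatures onto single lines, makes the `get_tool_calls` filters keyword-only, rebases the class on `BaseGenAIModelResponse[T]`, and adds `to_content_part()`. A short usage sketch against the 0.0.20 surface; the `response` object here is hypothetical and would normally come from a language model call:

```python
# Hypothetical `response: LanguageModelResponse` obtained from a model call.

# `name`/`id` filters are now keyword-only (note the `*` in the new signature):
weather_calls = response.get_tool_calls(name="get_weather")

if response.has_tool_calls("get_weather"):
    # Parsed JSON arguments of the named tool call.
    args = response.get_tool_call_parameters(tool="get_weather")

# New in 0.0.20: reuse the response as a chat content part.
part = response.to_content_part()  # {"type": "text", "text": response.content}

# Unchanged behavior: collapse the response into a single assistant message,
# optionally inlining tool calls as readable text.
message = response.to_message(format_tool_calls=True)
```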
hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py

```diff
@@ -1,23 +1,26 @@
 """hammad.genai.language_models.language_model_response_chunk"""
 
-from typing import TypeVar,
-
+from typing import TypeVar, Optional, Any, Literal
+
+from ....types.base import BaseGenAIModelEvent
 
 __all__ = [
     "LanguageModelResponseChunk",
 ]
 
+
 T = TypeVar("T")
 
 
-class LanguageModelResponseChunk(BaseModel, Generic[T]):
+class LanguageModelResponseChunk(BaseGenAIModelEvent[T]):
     """Represents a chunk of data from a language model response stream.
 
     This class unifies chunks from both LiteLLM and Instructor streaming,
     providing a consistent interface for processing streaming responses.
     """
 
-
+    type: Literal["language_model"] = "language_model"
+    """The type of the event, always `language_model`."""
 
     content: Optional[str] = None
     """The content delta for this chunk."""
@@ -50,4 +53,4 @@ class LanguageModelResponseChunk(BaseModel, Generic[T]):
         elif self.finish_reason:
             return f"LanguageModelResponseChunk(finish_reason={self.finish_reason})"
         else:
-            return "LanguageModelResponseChunk(empty)"
+            return "LanguageModelResponseChunk(empty)"
```
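
The chunk type now derives from `BaseGenAIModelEvent[T]` and carries a `type` discriminator alongside the existing `content` delta. A minimal consumption sketch, assuming `stream` yields `LanguageModelResponseChunk` objects (the stream itself lives in `language_model_stream.py`, not shown in this hunk):

```python
# Assumes `stream` is an iterable of LanguageModelResponseChunk objects.
for chunk in stream:
    # New discriminator field inherited through the event hierarchy.
    assert chunk.type == "language_model"
    if chunk.content:  # content delta; may be None (e.g. a pure finish chunk)
        print(chunk.content, end="", flush=True)
    if chunk.finish_reason:  # surfaced in __str__ above
        break
```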
hammad/genai/models/language/types/language_model_settings.py (new file)

```diff
@@ -0,0 +1,89 @@
+"""hammad.genai.language_models.language_model_settings"""
+
+from typing import (
+    Any,
+    Dict,
+    List,
+    Union,
+    Type,
+    TypeVar,
+    TYPE_CHECKING,
+    Callable,
+    Optional,
+)
+import sys
+from pydantic import BaseModel, Field
+
+if sys.version_info >= (3, 12):
+    from typing import TypedDict, Required, NotRequired
+else:
+    from typing_extensions import TypedDict, Required, NotRequired
+
+if TYPE_CHECKING:
+    pass
+
+from .language_model_name import LanguageModelName
+from .language_model_instructor_mode import LanguageModelInstructorMode
+from ....types.base import BaseGenAIModelSettings
+
+__all__ = [
+    "LanguageModelSettings",
+    "LanguageModelProviderSettings",
+]
+
+
+T = TypeVar("T")
+
+
+class LanguageModelSettings(BaseGenAIModelSettings):
+    """Complete settings for language model requests."""
+
+    # Structured output settings
+    type: Optional[Type[T]] = None
+    instructor_mode: Optional[LanguageModelInstructorMode] = None
+    response_field_name: Optional[str] = None
+    response_field_instruction: Optional[str] = None
+    max_retries: Optional[int] = None
+    strict: Optional[bool] = None
+    validation_context: Optional[Dict[str, Any]] = None
+    context: Optional[Dict[str, Any]] = None
+
+    # Tool settings
+    tools: Optional[List[Any]] = None
+    tool_choice: Optional[Union[str, Dict[str, Any]]] = None
+    parallel_tool_calls: Optional[bool] = None
+    functions: Optional[List[Any]] = None
+    function_call: Optional[str] = None
+
+    # Streaming settings
+    stream: Optional[bool] = None
+    stream_options: Optional[Dict[str, Any]] = None
+
+    # Hook settings
+    completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None
+    completion_response_hooks: Optional[List[Callable[..., None]]] = None
+    completion_error_hooks: Optional[List[Callable[..., None]]] = None
+    completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None
+    parse_error_hooks: Optional[List[Callable[..., None]]] = None
+
+    # Extended settings
+    timeout: Optional[Union[float, str]] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stop: Optional[str] = None
+    max_completion_tokens: Optional[int] = None
+    max_tokens: Optional[int] = None
+    modalities: Optional[List[Any]] = None
+    prediction: Optional[Any] = None
+    audio: Optional[Any] = None
+    presence_penalty: Optional[float] = None
+    frequency_penalty: Optional[float] = None
+    logit_bias: Optional[Dict[str, float]] = None
+    user: Optional[str] = None
+    reasoning_effort: Optional[str] = None
+    seed: Optional[int] = None
+    logprobs: Optional[bool] = None
+    top_logprobs: Optional[int] = None
+    thinking: Optional[Dict[str, Any]] = None
+    web_search_options: Optional[Dict[str, Any]] = None
```