hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122) hide show
  1. hammad/__init__.py +177 -0
  2. hammad/{performance/imports.py → _internal.py} +7 -1
  3. hammad/cache/__init__.py +1 -1
  4. hammad/cli/__init__.py +3 -1
  5. hammad/cli/_runner.py +265 -0
  6. hammad/cli/animations.py +1 -1
  7. hammad/cli/plugins.py +133 -78
  8. hammad/cli/styles/__init__.py +1 -1
  9. hammad/cli/styles/utils.py +149 -3
  10. hammad/data/__init__.py +56 -29
  11. hammad/data/collections/__init__.py +27 -17
  12. hammad/data/collections/collection.py +205 -383
  13. hammad/data/collections/indexes/__init__.py +37 -0
  14. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  15. hammad/data/collections/indexes/qdrant/index.py +735 -0
  16. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  17. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  18. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  19. hammad/data/collections/indexes/tantivy/index.py +428 -0
  20. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  21. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  22. hammad/data/configurations/__init__.py +2 -2
  23. hammad/data/configurations/configuration.py +2 -2
  24. hammad/data/models/__init__.py +20 -9
  25. hammad/data/models/extensions/__init__.py +4 -0
  26. hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
  27. hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
  28. hammad/data/models/{base/fields.py → fields.py} +1 -1
  29. hammad/data/models/{base/model.py → model.py} +1 -1
  30. hammad/data/models/{base/utils.py → utils.py} +1 -1
  31. hammad/data/sql/__init__.py +23 -0
  32. hammad/data/sql/database.py +578 -0
  33. hammad/data/sql/types.py +141 -0
  34. hammad/data/types/__init__.py +1 -3
  35. hammad/data/types/file.py +3 -3
  36. hammad/data/types/multimodal/__init__.py +2 -2
  37. hammad/data/types/multimodal/audio.py +2 -2
  38. hammad/data/types/multimodal/image.py +2 -2
  39. hammad/formatting/__init__.py +9 -27
  40. hammad/formatting/json/__init__.py +8 -2
  41. hammad/formatting/json/converters.py +7 -1
  42. hammad/formatting/text/__init__.py +1 -1
  43. hammad/formatting/yaml/__init__.py +1 -1
  44. hammad/genai/__init__.py +78 -0
  45. hammad/genai/agents/__init__.py +1 -0
  46. hammad/genai/agents/types/__init__.py +35 -0
  47. hammad/genai/agents/types/history.py +277 -0
  48. hammad/genai/agents/types/tool.py +490 -0
  49. hammad/genai/embedding_models/__init__.py +41 -0
  50. hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
  51. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  52. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  53. hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
  54. hammad/genai/embedding_models/run.py +161 -0
  55. hammad/genai/language_models/__init__.py +35 -0
  56. hammad/genai/language_models/_streaming.py +622 -0
  57. hammad/genai/language_models/_types.py +276 -0
  58. hammad/genai/language_models/_utils/__init__.py +31 -0
  59. hammad/genai/language_models/_utils/_completions.py +131 -0
  60. hammad/genai/language_models/_utils/_messages.py +89 -0
  61. hammad/genai/language_models/_utils/_requests.py +202 -0
  62. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  63. hammad/genai/language_models/language_model.py +734 -0
  64. hammad/genai/language_models/language_model_request.py +135 -0
  65. hammad/genai/language_models/language_model_response.py +219 -0
  66. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  67. hammad/genai/language_models/run.py +530 -0
  68. hammad/genai/multimodal_models.py +48 -0
  69. hammad/genai/rerank_models.py +26 -0
  70. hammad/logging/__init__.py +1 -1
  71. hammad/logging/decorators.py +1 -1
  72. hammad/logging/logger.py +2 -2
  73. hammad/mcp/__init__.py +1 -1
  74. hammad/mcp/client/__init__.py +35 -0
  75. hammad/mcp/client/client.py +105 -4
  76. hammad/mcp/client/client_service.py +10 -3
  77. hammad/mcp/servers/__init__.py +24 -0
  78. hammad/{performance/runtime → runtime}/__init__.py +2 -2
  79. hammad/{performance/runtime → runtime}/decorators.py +1 -1
  80. hammad/{performance/runtime → runtime}/run.py +1 -1
  81. hammad/service/__init__.py +1 -1
  82. hammad/service/create.py +3 -8
  83. hammad/service/decorators.py +8 -8
  84. hammad/typing/__init__.py +28 -0
  85. hammad/web/__init__.py +3 -3
  86. hammad/web/http/client.py +1 -1
  87. hammad/web/models.py +53 -21
  88. hammad/web/search/client.py +99 -52
  89. hammad/web/utils.py +13 -13
  90. hammad_python-0.0.16.dist-info/METADATA +191 -0
  91. hammad_python-0.0.16.dist-info/RECORD +110 -0
  92. hammad/ai/__init__.py +0 -1
  93. hammad/ai/_utils.py +0 -142
  94. hammad/ai/completions/__init__.py +0 -45
  95. hammad/ai/completions/client.py +0 -684
  96. hammad/ai/completions/create.py +0 -710
  97. hammad/ai/completions/settings.py +0 -100
  98. hammad/ai/completions/types.py +0 -792
  99. hammad/ai/completions/utils.py +0 -486
  100. hammad/ai/embeddings/__init__.py +0 -35
  101. hammad/ai/embeddings/client/__init__.py +0 -1
  102. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  103. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  104. hammad/ai/embeddings/create.py +0 -159
  105. hammad/data/collections/base_collection.py +0 -58
  106. hammad/data/collections/searchable_collection.py +0 -556
  107. hammad/data/collections/vector_collection.py +0 -596
  108. hammad/data/databases/__init__.py +0 -21
  109. hammad/data/databases/database.py +0 -902
  110. hammad/data/models/base/__init__.py +0 -35
  111. hammad/data/models/pydantic/models/__init__.py +0 -28
  112. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  113. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  114. hammad/data/models/pydantic/models/fast_model.py +0 -318
  115. hammad/data/models/pydantic/models/function_model.py +0 -176
  116. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  117. hammad/performance/__init__.py +0 -36
  118. hammad/py.typed +0 -0
  119. hammad_python-0.0.14.dist-info/METADATA +0 -70
  120. hammad_python-0.0.14.dist-info/RECORD +0 -99
  121. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  122. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,135 @@
1
+ """hammad.genai.language_models.language_model_request"""
2
+
3
+ from typing import (
4
+ Any,
5
+ Dict,
6
+ List,
7
+ Union,
8
+ Type,
9
+ TypeVar,
10
+ TYPE_CHECKING,
11
+ TypeAlias,
12
+ Callable,
13
+ )
14
+ import sys
15
+
16
+ if sys.version_info >= (3, 12):
17
+ from typing import TypedDict, Required, NotRequired
18
+ else:
19
+ from typing_extensions import TypedDict, Required, NotRequired
20
+
21
+ if TYPE_CHECKING:
22
+ from httpx import Timeout
23
+ try:
24
+ from openai.types.chat import (
25
+ ChatCompletionMessageParam,
26
+ ChatCompletionModality,
27
+ ChatCompletionPredictionContentParam,
28
+ ChatCompletionAudioParam,
29
+ )
30
+ except ImportError:
31
+ ChatCompletionMessageParam = Any
32
+ ChatCompletionModality = Any
33
+ ChatCompletionPredictionContentParam = Any
34
+ ChatCompletionAudioParam = Any
35
+
36
+ from ._types import LanguageModelName, LanguageModelInstructorMode
37
+
38
+ __all__ = [
39
+ "LanguageModelMessagesParam",
40
+ "LanguageModelRequest",
41
+ ]
42
+
43
+ T = TypeVar("T")
44
+
45
+ LanguageModelMessagesParam : TypeAlias = Union[
46
+ str,
47
+ "ChatCompletionMessageParam",
48
+ "List[ChatCompletionMessageParam]",
49
+ Any,
50
+ ]
51
+ """Type alias for the input parameters of a language model request."""
52
+
53
+
54
class LanguageModelRequestProviderSettings(TypedDict, total=False):
    """Provider-specific settings for language model requests."""

    # Required: the model identifier (a LanguageModelName from ._types).
    model: Required[LanguageModelName]
    # Override of the provider's API endpoint URL.
    base_url: NotRequired[str]
    # Credential forwarded to the provider client.
    api_key: NotRequired[str]
    # API version string — presumably for Azure-style endpoints; confirm
    # against the consuming client.
    api_version: NotRequired[str]
    # Provider organization/account identifier.
    organization: NotRequired[str]
    # Deployment name — presumably Azure deployments; confirm.
    deployment_id: NotRequired[str]
    # List of model/router configurations passed through to the client.
    model_list: NotRequired[List[Any]]
    # Additional HTTP headers sent with each request.
    extra_headers: NotRequired[Dict[str, str]]
64
+
65
+
66
class LanguageModelRequestStructuredOutputSettings(TypedDict, total=False):
    """Settings for structured output generation."""

    # Required: the target type the model output should be parsed into.
    type: Required[Type[T]]
    # Parsing mode (LanguageModelInstructorMode from ._types).
    instructor_mode: NotRequired[LanguageModelInstructorMode]
    # Name of a wrapper field that holds the response value.
    response_field_name: NotRequired[str]
    # Instruction text describing the wrapper response field.
    response_field_instruction: NotRequired[str]
    # Maximum retries when output validation fails.
    max_retries: NotRequired[int]
    # Whether to enforce strict schema validation.
    strict: NotRequired[bool]
    # Extra context for validation — presumably instructor's
    # validation_context; confirm against the consumer.
    validation_context: NotRequired[Dict[str, Any]]
    # Additional context for generation/parsing.
    context: NotRequired[Dict[str, Any]]
76
+
77
+
78
class LanguageModelRequestToolsSettings(TypedDict, total=False):
    """Settings for tool usage in language model requests."""

    # Tool definitions made available to the model.
    tools: NotRequired[List[Any]]
    # Tool selection: a mode string or an explicit tool spec dict —
    # presumably OpenAI-style ("auto"/"none"/{"type": ...}); confirm.
    tool_choice: NotRequired[Union[str, Dict[str, Any]]]
    # Whether the model may emit multiple tool calls in one turn.
    parallel_tool_calls: NotRequired[bool]
    # Legacy function-calling definitions (older OpenAI API surface).
    functions: NotRequired[List[Any]]
    # Legacy function selector paired with `functions`.
    function_call: NotRequired[str]
85
+
86
+
87
class LanguageModelRequestStreamingSettings(TypedDict, total=False):
    """Settings for streaming responses."""

    # Required: whether the response is streamed as incremental chunks.
    stream: Required[bool]
    # Extra streaming options forwarded to the provider.
    stream_options: NotRequired[Dict[str, Any]]
91
+
92
+
93
class LanguageModelRequestHooksSettings(TypedDict, total=False):
    """Settings for instructor hooks.

    NOTE(review): hook timing below is inferred from the field names and
    instructor's hooks API — confirm against the consuming code.
    """

    # Presumably invoked with the kwargs before each completion request.
    completion_kwargs_hooks: NotRequired[List[Callable[..., None]]]
    # Presumably invoked with each raw completion response.
    completion_response_hooks: NotRequired[List[Callable[..., None]]]
    # Presumably invoked when a completion attempt raises an error.
    completion_error_hooks: NotRequired[List[Callable[..., None]]]
    # Presumably invoked on the final retry attempt.
    completion_last_attempt_hooks: NotRequired[List[Callable[..., None]]]
    # Presumably invoked when parsing the structured output fails.
    parse_error_hooks: NotRequired[List[Callable[..., None]]]
100
+
101
+
102
+ class LanguageModelRequestExtendedSettings(TypedDict, total=False):
103
+ """Extended settings for language model requests."""
104
+ timeout: NotRequired[Union[float, str, "Timeout"]]
105
+ temperature: NotRequired[float]
106
+ top_p: NotRequired[float]
107
+ n: NotRequired[int]
108
+ stop: NotRequired[str]
109
+ max_completion_tokens: NotRequired[int]
110
+ max_tokens: NotRequired[int]
111
+ modalities: NotRequired[List["ChatCompletionModality"]]
112
+ prediction: NotRequired["ChatCompletionPredictionContentParam"]
113
+ audio: NotRequired["ChatCompletionAudioParam"]
114
+ presence_penalty: NotRequired[float]
115
+ frequency_penalty: NotRequired[float]
116
+ logit_bias: NotRequired[Dict[str, float]]
117
+ user: NotRequired[str]
118
+ reasoning_effort: NotRequired[str]
119
+ seed: NotRequired[int]
120
+ logprobs: NotRequired[bool]
121
+ top_logprobs: NotRequired[int]
122
+ thinking: NotRequired[Dict[str, Any]]
123
+ web_search_options: NotRequired[Dict[str, Any]]
124
+
125
+
126
class LanguageModelRequest(
    LanguageModelRequestProviderSettings,
    LanguageModelRequestStructuredOutputSettings,
    LanguageModelRequestToolsSettings,
    LanguageModelRequestStreamingSettings,
    LanguageModelRequestHooksSettings,
    LanguageModelRequestExtendedSettings,
):
    """Complete settings for language model requests.

    Aggregates every settings group (provider, structured output, tools,
    streaming, hooks, extended) into a single TypedDict. All keys are
    optional except those marked ``Required`` in the parent groups
    (``model``, ``type``, ``stream``).
    """

    pass
@@ -0,0 +1,219 @@
1
+ """hammad.genai.language_models.language_model_response"""
2
+
3
+ from typing import List, TypeVar, Generic, TYPE_CHECKING, Optional, Any, Dict, Union
4
+ from pydantic import BaseModel, ConfigDict
5
+
6
+ if TYPE_CHECKING:
7
+ try:
8
+ import litellm
9
+ from openai.types.chat import (
10
+ ChatCompletionContentPartParam,
11
+ ChatCompletionMessageParam,
12
+ )
13
+ except ImportError:
14
+ ChatCompletionContentPartParam = Any
15
+ ChatCompletionMessageParam = Any
16
+
17
+
18
+ __all__ = [
19
+ "LanguageModelResponse",
20
+ ]
21
+
22
+ T = TypeVar("T")
23
+
24
+
25
class LanguageModelResponse(BaseModel, Generic[T]):
    """A response generated by a language model. This response is unified
    to represent both standard completions as well as structured outputs."""

    # Allow non-pydantic types (litellm response objects) as field values.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    model: str
    """The model that generated this response."""

    output: T
    """The 'final' or primary response content from the language model, this is
    in the type requested by the user.

    NOTE:
    In many cases with tool calling, message content is not present, in these cases
    this field will **NOT** represent tool calls, and will be returned as `None`."""

    completion: "litellm.ModelResponse"
    """The raw Chat Completion (`litellm.ModelResponse`) object returned by the
    language model."""

    content: Optional[str] = None
    """The actual response content of the completion. This is the string that
    was generated by the model."""

    tool_calls: Optional[List["litellm.ChatCompletionMessageToolCall"]] = None
    """The tool calls that were made by the model. This is a list of tool calls
    that were made by the model."""

    refusal: Optional[str] = None
    """The refusal message generated by the model. This is the string that
    was generated by the model when it refused to generate the completion."""

    def _get_choice_message(self, choice: int) -> Optional[Any]:
        """Return the message object of the given choice index, or `None`
        when the completion is missing or the index is out of range."""
        if not self.completion or not self.completion.choices:
            return None
        if choice >= len(self.completion.choices):
            return None
        return self.completion.choices[choice].message

    def get_content(
        self,
        choice: int = 0
    ) -> Union[str, List["ChatCompletionContentPartParam"], None]:
        """The 'raw' message content generated by the language model, this
        can be either a string, a list of content parts, or `None`.

        Args:
            choice: The choice index to read content from (default 0).
        """
        message = self._get_choice_message(choice)
        return message.content if message is not None else None

    def get_tool_calls(
        self,
        *,
        name: Optional[str] = None,
        id: Optional[str] = None,
        choice: int = 0
    ) -> Optional[List["litellm.ChatCompletionMessageToolCall"]]:
        """The tool calls generated by the language model, this is a list of
        `ChatCompletionMessageToolCall` objects. Optionally can be filtered
        by name or ID to return specific tool calls.

        NOTE: Only one of `name` or `id` can be provided.

        Raises:
            ValueError: If both `name` and `id` are provided.
        """
        # Validate the filter arguments up front so misuse is surfaced even
        # when the response carries no tool calls.
        if name is not None and id is not None:
            raise ValueError("Only one of 'name' or 'id' can be provided, not both")

        message = self._get_choice_message(choice)
        if message is None:
            return None

        tool_calls = message.tool_calls
        if not tool_calls:
            return None

        if name is not None:
            return [call for call in tool_calls if call.function.name == name]
        if id is not None:
            return [call for call in tool_calls if call.id == id]
        return tool_calls

    def has_tool_calls(
        self,
        name: Optional[str] = None,
        choice: int = 0
    ) -> bool:
        """Checks if the response has tool calls, optionally filtered by name.
        If `name` is provided, it will check if the tool call with that name
        exists in the response."""
        # get_tool_calls already guards against a missing completion.
        tool_calls = self.get_tool_calls(name=name, choice=choice)
        return bool(tool_calls)

    def get_tool_call_parameters(
        self,
        tool: Optional[str] = None,
        choice: int = 0
    ) -> Optional[Dict[str, Any]]:
        """Returns the generated parameters for a tool call within a response.
        If the response has multiple tool calls, and no tool is specified,
        an error will be raised.

        Args:
            tool: The name of the tool to get the parameters for.
            choice: The choice index to get tool calls from.

        Returns:
            Dict[str, Any]: The generated parameters for the tool call.

        Raises:
            ValueError: If multiple tool calls exist and `tool` is not given.
        """
        # Local import keeps module import-time light; hoisted out of the
        # loop (the original re-imported per iteration).
        import json

        tool_calls = self.get_tool_calls(choice=choice)
        if not tool_calls:
            return None

        if tool is None:
            if len(tool_calls) > 1:
                raise ValueError(
                    "Multiple tool calls found in response, and no tool specified."
                )
            tool = tool_calls[0].function.name

        for tool_call in tool_calls:
            if tool_call.function.name == tool:
                return json.loads(tool_call.function.arguments)
        return None

    def to_message(
        self,
        format_tool_calls: bool = False,
        choice: int = 0
    ) -> "ChatCompletionMessageParam":
        """Converts the LanguageModelResponse to a Chat Completions
        message object.

        If the `format_tool_calls` parameter is True, the tool calls
        will be cleanly formatted and added to the message content
        with something similar to:

        'I called the function `get_weather` with the following arguments:
        {arguments}'
        """
        choice_message = self._get_choice_message(choice)
        if choice_message is None:
            # No usable completion/choice: return an empty assistant message.
            return {
                "role": "assistant",
                "content": ""
            }

        # Base message structure
        message: "ChatCompletionMessageParam" = {
            "role": "assistant",
            "content": choice_message.content or ""
        }

        if choice_message.tool_calls:
            if format_tool_calls:
                # Fold each tool call into the message content as prose.
                content_parts = []
                if choice_message.content:
                    content_parts.append(choice_message.content)
                for tool_call in choice_message.tool_calls:
                    formatted_call = f"I called the function `{tool_call.function.name}` with the following arguments:\n{tool_call.function.arguments}"
                    content_parts.append(formatted_call)
                message["content"] = "\n\n".join(content_parts)
            else:
                # Pass the tool calls through unchanged.
                message["tool_calls"] = choice_message.tool_calls

        return message

    def __str__(self) -> str:
        """Pretty prints the response object."""
        output = "LanguageModelResponse:"

        if self.output or self.content:
            output += f"\n{self.output if self.output else self.content}"
        else:
            output += f"\n{self.completion}"

        output += f"\n\n>>> Model: {self.model}"
        output += f"\n>>> Tool Calls: {len(self.tool_calls) if self.tool_calls else 0}"

        return output
@@ -0,0 +1,53 @@
1
+ """hammad.genai.language_models.language_model_response_chunk"""
2
+
3
+ from typing import TypeVar, Generic, Optional, Any
4
+ from pydantic import BaseModel, ConfigDict
5
+
6
+ __all__ = [
7
+ "LanguageModelResponseChunk",
8
+ ]
9
+
10
+ T = TypeVar("T")
11
+
12
+
13
class LanguageModelResponseChunk(BaseModel, Generic[T]):
    """A single chunk from a language model response stream.

    Unifies chunks produced by LiteLLM and Instructor streaming behind one
    consistent interface for stream consumers.
    """

    # Permit provider chunk objects that pydantic cannot validate natively.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    content: Optional[str] = None
    """The content delta for this chunk."""

    output: Optional[T] = None
    """The structured output for this chunk (from instructor)."""

    model: Optional[str] = None
    """The model that generated this chunk."""

    finish_reason: Optional[str] = None
    """The reason the stream finished (if applicable)."""

    chunk: Optional[Any] = None
    """The original chunk object from the provider."""

    is_final: bool = False
    """Whether this is the final chunk in the stream."""

    def __bool__(self) -> bool:
        """A chunk is truthy when it carries content, structured output,
        or a finish reason."""
        return any((self.content, self.output, self.finish_reason))

    def __str__(self) -> str:
        """Short, field-prioritized representation of the chunk."""
        if self.output:
            return f"LanguageModelResponseChunk(output={self.output})"
        if self.content:
            return f"LanguageModelResponseChunk(content={repr(self.content)})"
        if self.finish_reason:
            return f"LanguageModelResponseChunk(finish_reason={self.finish_reason})"
        return "LanguageModelResponseChunk(empty)"