hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. hammad/__init__.py +177 -0
  2. hammad/{performance/imports.py → _internal.py} +7 -1
  3. hammad/cache/__init__.py +1 -1
  4. hammad/cli/__init__.py +3 -1
  5. hammad/cli/_runner.py +265 -0
  6. hammad/cli/animations.py +1 -1
  7. hammad/cli/plugins.py +133 -78
  8. hammad/cli/styles/__init__.py +1 -1
  9. hammad/cli/styles/utils.py +149 -3
  10. hammad/data/__init__.py +56 -29
  11. hammad/data/collections/__init__.py +27 -17
  12. hammad/data/collections/collection.py +205 -383
  13. hammad/data/collections/indexes/__init__.py +37 -0
  14. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  15. hammad/data/collections/indexes/qdrant/index.py +735 -0
  16. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  17. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  18. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  19. hammad/data/collections/indexes/tantivy/index.py +428 -0
  20. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  21. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  22. hammad/data/configurations/__init__.py +2 -2
  23. hammad/data/configurations/configuration.py +2 -2
  24. hammad/data/models/__init__.py +20 -9
  25. hammad/data/models/extensions/__init__.py +4 -0
  26. hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
  27. hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
  28. hammad/data/models/{base/fields.py → fields.py} +1 -1
  29. hammad/data/models/{base/model.py → model.py} +1 -1
  30. hammad/data/models/{base/utils.py → utils.py} +1 -1
  31. hammad/data/sql/__init__.py +23 -0
  32. hammad/data/sql/database.py +578 -0
  33. hammad/data/sql/types.py +141 -0
  34. hammad/data/types/__init__.py +1 -3
  35. hammad/data/types/file.py +3 -3
  36. hammad/data/types/multimodal/__init__.py +2 -2
  37. hammad/data/types/multimodal/audio.py +2 -2
  38. hammad/data/types/multimodal/image.py +2 -2
  39. hammad/formatting/__init__.py +9 -27
  40. hammad/formatting/json/__init__.py +8 -2
  41. hammad/formatting/json/converters.py +7 -1
  42. hammad/formatting/text/__init__.py +1 -1
  43. hammad/formatting/yaml/__init__.py +1 -1
  44. hammad/genai/__init__.py +78 -0
  45. hammad/genai/agents/__init__.py +1 -0
  46. hammad/genai/agents/types/__init__.py +35 -0
  47. hammad/genai/agents/types/history.py +277 -0
  48. hammad/genai/agents/types/tool.py +490 -0
  49. hammad/genai/embedding_models/__init__.py +41 -0
  50. hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
  51. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  52. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  53. hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
  54. hammad/genai/embedding_models/run.py +161 -0
  55. hammad/genai/language_models/__init__.py +35 -0
  56. hammad/genai/language_models/_streaming.py +622 -0
  57. hammad/genai/language_models/_types.py +276 -0
  58. hammad/genai/language_models/_utils/__init__.py +31 -0
  59. hammad/genai/language_models/_utils/_completions.py +131 -0
  60. hammad/genai/language_models/_utils/_messages.py +89 -0
  61. hammad/genai/language_models/_utils/_requests.py +202 -0
  62. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  63. hammad/genai/language_models/language_model.py +734 -0
  64. hammad/genai/language_models/language_model_request.py +135 -0
  65. hammad/genai/language_models/language_model_response.py +219 -0
  66. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  67. hammad/genai/language_models/run.py +530 -0
  68. hammad/genai/multimodal_models.py +48 -0
  69. hammad/genai/rerank_models.py +26 -0
  70. hammad/logging/__init__.py +1 -1
  71. hammad/logging/decorators.py +1 -1
  72. hammad/logging/logger.py +2 -2
  73. hammad/mcp/__init__.py +1 -1
  74. hammad/mcp/client/__init__.py +35 -0
  75. hammad/mcp/client/client.py +105 -4
  76. hammad/mcp/client/client_service.py +10 -3
  77. hammad/mcp/servers/__init__.py +24 -0
  78. hammad/{performance/runtime → runtime}/__init__.py +2 -2
  79. hammad/{performance/runtime → runtime}/decorators.py +1 -1
  80. hammad/{performance/runtime → runtime}/run.py +1 -1
  81. hammad/service/__init__.py +1 -1
  82. hammad/service/create.py +3 -8
  83. hammad/service/decorators.py +8 -8
  84. hammad/typing/__init__.py +28 -0
  85. hammad/web/__init__.py +3 -3
  86. hammad/web/http/client.py +1 -1
  87. hammad/web/models.py +53 -21
  88. hammad/web/search/client.py +99 -52
  89. hammad/web/utils.py +13 -13
  90. hammad_python-0.0.16.dist-info/METADATA +191 -0
  91. hammad_python-0.0.16.dist-info/RECORD +110 -0
  92. hammad/ai/__init__.py +0 -1
  93. hammad/ai/_utils.py +0 -142
  94. hammad/ai/completions/__init__.py +0 -45
  95. hammad/ai/completions/client.py +0 -684
  96. hammad/ai/completions/create.py +0 -710
  97. hammad/ai/completions/settings.py +0 -100
  98. hammad/ai/completions/types.py +0 -792
  99. hammad/ai/completions/utils.py +0 -486
  100. hammad/ai/embeddings/__init__.py +0 -35
  101. hammad/ai/embeddings/client/__init__.py +0 -1
  102. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  103. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  104. hammad/ai/embeddings/create.py +0 -159
  105. hammad/data/collections/base_collection.py +0 -58
  106. hammad/data/collections/searchable_collection.py +0 -556
  107. hammad/data/collections/vector_collection.py +0 -596
  108. hammad/data/databases/__init__.py +0 -21
  109. hammad/data/databases/database.py +0 -902
  110. hammad/data/models/base/__init__.py +0 -35
  111. hammad/data/models/pydantic/models/__init__.py +0 -28
  112. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  113. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  114. hammad/data/models/pydantic/models/fast_model.py +0 -318
  115. hammad/data/models/pydantic/models/function_model.py +0 -176
  116. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  117. hammad/performance/__init__.py +0 -36
  118. hammad/py.typed +0 -0
  119. hammad_python-0.0.14.dist-info/METADATA +0 -70
  120. hammad_python-0.0.14.dist-info/RECORD +0 -99
  121. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  122. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
hammad/genai/language_models/run.py ADDED
@@ -0,0 +1,530 @@
+ """hammad.genai.language_models.run
+
+ Standalone functions for running language models with full parameter typing.
+ """
+
+ from typing import Any, List, TypeVar, Union, Optional, Type, overload, Dict, TYPE_CHECKING, Callable
+ from typing_extensions import Literal
+
+ if TYPE_CHECKING:
+     from httpx import Timeout
+
+ try:
+     from openai.types.chat import (
+         ChatCompletionMessageParam,
+         ChatCompletionModality,
+         ChatCompletionPredictionContentParam,
+         ChatCompletionAudioParam,
+     )
+ except ImportError:
+     ChatCompletionMessageParam = Any
+     ChatCompletionModality = Any
+     ChatCompletionPredictionContentParam = Any
+     ChatCompletionAudioParam = Any
+
+ from ._types import LanguageModelName, LanguageModelInstructorMode
+ from .language_model import LanguageModel
+ from .language_model_request import LanguageModelMessagesParam
+ from .language_model_response import LanguageModelResponse
+ from ._streaming import Stream, AsyncStream
+
+ __all__ = [
+     "run_language_model",
+     "async_run_language_model",
+ ]
+
+ T = TypeVar("T")
+
+
+ # Overloads for run_language_model - String output, non-streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[str]: ...
+
+
+ # Overloads for run_language_model - String output, streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> Stream[str]: ...
+
+
+ # Overloads for run_language_model - Structured output, non-streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[T]: ...
+
+
+ # Overloads for run_language_model - Structured output, streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> Stream[T]: ...
+
+
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     **kwargs: Any,
+ ) -> Union[LanguageModelResponse[Any], Stream[Any]]:
+     """Run a language model request with full parameter support.
+
+     Args:
+         messages: The input messages/content for the request
+         instructions: Optional system instructions to prepend
+         **kwargs: All request parameters from LanguageModelRequest
+
+     Returns:
+         LanguageModelResponse or Stream depending on parameters
+     """
+     # Extract model parameter or use default
+     model = kwargs.pop("model", "openai/gpt-4o-mini")
+
+     # Create language model instance
+     language_model = LanguageModel(model=model)
+
+     # Forward to the instance method
+     return language_model.run(messages, instructions, **kwargs)
+
+
+ # Async overloads for async_run_language_model - String output, non-streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[str]: ...
+
+
+ # Async overloads for async_run_language_model - String output, streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> AsyncStream[str]: ...
+
+
+ # Async overloads for async_run_language_model - Structured output, non-streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[T]: ...
+
+
+ # Async overloads for async_run_language_model - Structured output, streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> AsyncStream[T]: ...
+
+
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     **kwargs: Any,
+ ) -> Union[LanguageModelResponse[Any], AsyncStream[Any]]:
+     """Run an async language model request with full parameter support.
+
+     Args:
+         messages: The input messages/content for the request
+         instructions: Optional system instructions to prepend
+         **kwargs: All request parameters from LanguageModelRequest
+
+     Returns:
+         LanguageModelResponse or AsyncStream depending on parameters
+     """
+     # Extract model parameter or use default
+     model = kwargs.pop("model", "openai/gpt-4o-mini")
+
+     # Create language model instance
+     language_model = LanguageModel(model=model)
+
+     # Forward to the instance method
+     return await language_model.async_run(messages, instructions, **kwargs)
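Taken together, the overloads above give run_language_model four call shapes: plain-text versus structured output (selected by the type parameter) crossed with streaming versus non-streaming. A minimal usage sketch, assuming hammad-python 0.0.16 is installed and provider credentials (e.g. OPENAI_API_KEY) are set; the Answer model is an illustrative assumption, not part of the package:

    from pydantic import BaseModel
    from hammad.genai.language_models.run import run_language_model

    # 1. String output, non-streaming -> LanguageModelResponse[str]
    response = run_language_model("What is 2 + 2?", model="openai/gpt-4o-mini")

    # 2. String output, streaming -> Stream[str]
    stream = run_language_model("Tell me a joke.", stream=True)

    # 3. Structured output, non-streaming -> LanguageModelResponse[Answer]
    class Answer(BaseModel):  # hypothetical schema, for illustration only
        value: int

    structured = run_language_model(
        "What is 2 + 2?",
        type=Answer,                  # selects the structured-output overloads
        instructor_mode="tool_call",  # the default shown in the signatures above
    )

Because the implementation simply pops model and forwards the rest to LanguageModel.run, any keyword accepted by the overloads passes through unchanged.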
hammad/genai/multimodal_models.py ADDED
@@ -0,0 +1,48 @@
+ """hammad.genai.multimodal_models"""
+
+ # simple litellm refs
+ # thanks litellm :)
+
+ from typing import TYPE_CHECKING
+ from .._internal import create_getattr_importer
+
+
+ if TYPE_CHECKING:
+     from litellm import (
+         # images / image editing
+         image_generation as run_image_generation_model,
+         aimage_generation as async_run_image_generation_model,
+         image_edit as run_image_edit_model,
+         aimage_edit as async_run_image_edit_model,
+         image_variation as run_image_variation_model,
+         aimage_variation as async_run_image_variation_model,
+
+         # audio / speech
+         speech as run_tts_model,
+         aspeech as async_run_tts_model,
+         transcription as run_transcription_model,
+         atranscription as async_run_transcription_model,
+     )
+
+
+ __all__ = (
+     # images / image editing
+     "run_image_generation_model",
+     "async_run_image_generation_model",
+     "run_image_edit_model",
+     "async_run_image_edit_model",
+     "run_image_variation_model",
+     "async_run_image_variation_model",
+
+     # audio / speech
+     "run_tts_model",
+     "async_run_tts_model",
+     "run_transcription_model",
+     "async_run_transcription_model",
+ )
+
+
+ __getattr__ = create_getattr_importer(__all__)
+
+ def __dir__() -> list[str]:
+     return list(__all__)
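These are thin lazy re-exports: litellm is only imported when one of the names is first accessed. A hedged usage sketch (the prompt, model, and voice values are illustrative; the underlying calls are litellm's public image_generation and speech helpers):

    from hammad.genai.multimodal_models import (
        run_image_generation_model,
        run_tts_model,
    )

    # The first attribute access above triggers the real litellm import.
    images = run_image_generation_model(prompt="a lighthouse at dusk", model="dall-e-3")
    audio = run_tts_model(model="tts-1", voice="alloy", input="Hello there.")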
hammad/genai/rerank_models.py ADDED
@@ -0,0 +1,26 @@
+ """hammad.genai.rerank_models"""
+
+ # yay litellm
+
+ from typing import TYPE_CHECKING
+ from .._internal import create_getattr_importer
+
+
+ if TYPE_CHECKING:
+     from litellm import (
+         rerank as run_rerank_model,
+         arerank as async_run_rerank_model,
+     )
+
+
+ __all__ = (
+     "run_rerank_model",
+     "async_run_rerank_model",
+ )
+
+
+ __getattr__ = create_getattr_importer(__all__)
+
+
+ def __dir__() -> list[str]:
+     return list(__all__)
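Both of these modules delegate to create_getattr_importer from hammad/_internal.py, whose body is not shown in this diff. A plausible hand-rolled equivalent of the pattern it presumably implements (PEP 562 module-level __getattr__; the mapping table and error message here are assumptions, not the package's actual code):

    # Sketch of a lazy-export module that defers the heavy litellm import.
    import importlib
    from typing import Any

    _LAZY = {
        "run_rerank_model": ("litellm", "rerank"),
        "async_run_rerank_model": ("litellm", "arerank"),
    }

    def __getattr__(name: str) -> Any:
        # Called by Python only when `name` is not found normally (PEP 562).
        if name in _LAZY:
            module_name, attr = _LAZY[name]
            return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

The payoff is that importing hammad.genai stays cheap even though litellm is heavy, while type checkers still see the real symbols through the TYPE_CHECKING block.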
hammad/logging/__init__.py CHANGED
@@ -1,7 +1,7 @@
  """hammad.logging"""

  from typing import TYPE_CHECKING
- from ..performance.imports import create_getattr_importer
+ from .._internal import create_getattr_importer

  if TYPE_CHECKING:
      from .logger import Logger, create_logger, create_logger_level, LoggerLevelName
hammad/logging/decorators.py CHANGED
@@ -1,4 +1,4 @@
- """hammad.logging.tracers"""
+ """hammad.logging.decorators"""

  from functools import wraps
  from typing import (
hammad/logging/logger.py CHANGED
@@ -293,7 +293,7 @@ class Logger:
      def __init__(
          self,
          name: Optional[str] = None,
-         level: Optional[Union[str, int]] = None,
+         level: Optional[Union[LoggerLevelName, int]] = None,
          rich: bool = True,
          display_all: bool = False,
          level_styles: Optional[Dict[str, LoggerLevelSettings]] = None,
@@ -899,7 +899,7 @@ def create_logger_level(

  def create_logger(
      name: Optional[str] = None,
-     level: Optional[Union[str, int]] = None,
+     level: Optional[Union[LoggerLevelName, int]] = None,
      rich: bool = True,
      display_all: bool = False,
      levels: Optional[Dict[LoggerLevelName, LoggerLevelSettings]] = None,
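Narrowing level from str to the LoggerLevelName Literal lets type checkers validate level names at call sites. A small sketch (assuming "debug" is a member of LoggerLevelName, which this diff does not show):

    from hammad.logging import create_logger

    logger = create_logger(name="app", level="debug")      # accepted
    # create_logger(name="app", level="verbose")           # flagged by mypy/pyright
    #                                                      # if not in the Literal

Runtime behavior is unchanged; only the static annotation narrowed.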
hammad/mcp/__init__.py CHANGED
@@ -3,7 +3,7 @@ hammad.mcp
  """

  from typing import TYPE_CHECKING
- from ..performance.imports import create_getattr_importer
+ from .._internal import create_getattr_importer

  if TYPE_CHECKING:
      from .client.client import (
hammad/mcp/client/__init__.py CHANGED
@@ -1 +1,36 @@
  """hammad.mcp.client"""
+
+ from typing import TYPE_CHECKING
+ from ..._internal import create_getattr_importer
+
+ if TYPE_CHECKING:
+     from .client import (
+         MCPClient,
+         MCPClientService,
+     )
+     from .settings import (
+         MCPClientSettings,
+         MCPClientSseSettings,
+         MCPClientStreamableHttpSettings,
+         MCPClientStdioSettings
+     )
+
+ __all__ = (
+     # hammad.mcp.client
+     "MCPClient",
+
+     # hammad.mcp.client.client_service
+     "MCPClientService",
+
+     # hammad.mcp.client.settings
+     "MCPClientSettings",
+     "MCPClientSseSettings",
+     "MCPClientStreamableHttpSettings",
+     "MCPClientStdioSettings",
+ )
+
+ __getattr__ = create_getattr_importer(__all__)
+
+ def __dir__() -> list[str]:
+     """Get the attributes of the client module."""
+     return list(__all__)
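As in the genai modules, the names above resolve lazily through the __getattr__ hook, and __dir__ keeps introspection and tab completion consistent with __all__. This much follows directly from the code shown (MCPClient's constructor arguments are not in this diff, so none are assumed):

    import hammad.mcp.client as mcp_client

    print(dir(mcp_client))            # exactly the names listed in __all__
    MCPClient = mcp_client.MCPClient  # first access performs the real import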