hammad-python 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. hammad/__init__.py +178 -0
  2. hammad/_internal.py +237 -0
  3. hammad/cache/__init__.py +40 -0
  4. hammad/cache/base_cache.py +181 -0
  5. hammad/cache/cache.py +169 -0
  6. hammad/cache/decorators.py +261 -0
  7. hammad/cache/file_cache.py +80 -0
  8. hammad/cache/ttl_cache.py +74 -0
  9. hammad/cli/__init__.py +35 -0
  10. hammad/cli/_runner.py +265 -0
  11. hammad/cli/animations.py +573 -0
  12. hammad/cli/plugins.py +836 -0
  13. hammad/cli/styles/__init__.py +55 -0
  14. hammad/cli/styles/settings.py +139 -0
  15. hammad/cli/styles/types.py +358 -0
  16. hammad/cli/styles/utils.py +626 -0
  17. hammad/data/__init__.py +83 -0
  18. hammad/data/collections/__init__.py +44 -0
  19. hammad/data/collections/collection.py +274 -0
  20. hammad/data/collections/indexes/__init__.py +37 -0
  21. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  22. hammad/data/collections/indexes/qdrant/index.py +735 -0
  23. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  24. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  25. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  26. hammad/data/collections/indexes/tantivy/index.py +428 -0
  27. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  28. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  29. hammad/data/configurations/__init__.py +35 -0
  30. hammad/data/configurations/configuration.py +564 -0
  31. hammad/data/models/__init__.py +55 -0
  32. hammad/data/models/extensions/__init__.py +4 -0
  33. hammad/data/models/extensions/pydantic/__init__.py +42 -0
  34. hammad/data/models/extensions/pydantic/converters.py +759 -0
  35. hammad/data/models/fields.py +546 -0
  36. hammad/data/models/model.py +1078 -0
  37. hammad/data/models/utils.py +280 -0
  38. hammad/data/sql/__init__.py +23 -0
  39. hammad/data/sql/database.py +578 -0
  40. hammad/data/sql/types.py +141 -0
  41. hammad/data/types/__init__.py +39 -0
  42. hammad/data/types/file.py +358 -0
  43. hammad/data/types/multimodal/__init__.py +24 -0
  44. hammad/data/types/multimodal/audio.py +96 -0
  45. hammad/data/types/multimodal/image.py +80 -0
  46. hammad/data/types/text.py +1066 -0
  47. hammad/formatting/__init__.py +20 -0
  48. hammad/formatting/json/__init__.py +27 -0
  49. hammad/formatting/json/converters.py +158 -0
  50. hammad/formatting/text/__init__.py +63 -0
  51. hammad/formatting/text/converters.py +723 -0
  52. hammad/formatting/text/markdown.py +131 -0
  53. hammad/formatting/yaml/__init__.py +26 -0
  54. hammad/formatting/yaml/converters.py +5 -0
  55. hammad/genai/__init__.py +78 -0
  56. hammad/genai/agents/__init__.py +1 -0
  57. hammad/genai/agents/types/__init__.py +35 -0
  58. hammad/genai/agents/types/history.py +277 -0
  59. hammad/genai/agents/types/tool.py +490 -0
  60. hammad/genai/embedding_models/__init__.py +41 -0
  61. hammad/genai/embedding_models/embedding_model.py +193 -0
  62. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  63. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  64. hammad/genai/embedding_models/embedding_model_response.py +69 -0
  65. hammad/genai/embedding_models/run.py +161 -0
  66. hammad/genai/language_models/__init__.py +35 -0
  67. hammad/genai/language_models/_streaming.py +622 -0
  68. hammad/genai/language_models/_types.py +276 -0
  69. hammad/genai/language_models/_utils/__init__.py +31 -0
  70. hammad/genai/language_models/_utils/_completions.py +131 -0
  71. hammad/genai/language_models/_utils/_messages.py +89 -0
  72. hammad/genai/language_models/_utils/_requests.py +202 -0
  73. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  74. hammad/genai/language_models/language_model.py +734 -0
  75. hammad/genai/language_models/language_model_request.py +135 -0
  76. hammad/genai/language_models/language_model_response.py +219 -0
  77. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  78. hammad/genai/language_models/run.py +530 -0
  79. hammad/genai/multimodal_models.py +48 -0
  80. hammad/genai/rerank_models.py +26 -0
  81. hammad/logging/__init__.py +35 -0
  82. hammad/logging/decorators.py +834 -0
  83. hammad/logging/logger.py +954 -0
  84. hammad/mcp/__init__.py +50 -0
  85. hammad/mcp/client/__init__.py +36 -0
  86. hammad/mcp/client/client.py +624 -0
  87. hammad/mcp/client/client_service.py +400 -0
  88. hammad/mcp/client/settings.py +178 -0
  89. hammad/mcp/servers/__init__.py +25 -0
  90. hammad/mcp/servers/launcher.py +1161 -0
  91. hammad/runtime/__init__.py +32 -0
  92. hammad/runtime/decorators.py +142 -0
  93. hammad/runtime/run.py +299 -0
  94. hammad/service/__init__.py +49 -0
  95. hammad/service/create.py +527 -0
  96. hammad/service/decorators.py +285 -0
  97. hammad/typing/__init__.py +435 -0
  98. hammad/web/__init__.py +43 -0
  99. hammad/web/http/__init__.py +1 -0
  100. hammad/web/http/client.py +944 -0
  101. hammad/web/models.py +277 -0
  102. hammad/web/openapi/__init__.py +1 -0
  103. hammad/web/openapi/client.py +740 -0
  104. hammad/web/search/__init__.py +1 -0
  105. hammad/web/search/client.py +1035 -0
  106. hammad/web/utils.py +472 -0
  107. {hammad_python-0.0.15.dist-info → hammad_python-0.0.16.dist-info}/METADATA +8 -1
  108. hammad_python-0.0.16.dist-info/RECORD +110 -0
  109. hammad_python-0.0.15.dist-info/RECORD +0 -4
  110. {hammad_python-0.0.15.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  111. {hammad_python-0.0.15.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,530 @@
+ """hammad.genai.language_models.run
+
+ Standalone functions for running language models with full parameter typing.
+ """
+
+ from typing import Any, List, TypeVar, Union, Optional, Type, overload, Dict, TYPE_CHECKING, Callable
+ from typing_extensions import Literal
+
+ if TYPE_CHECKING:
+     from httpx import Timeout
+
+ try:
+     from openai.types.chat import (
+         ChatCompletionMessageParam,
+         ChatCompletionModality,
+         ChatCompletionPredictionContentParam,
+         ChatCompletionAudioParam,
+     )
+ except ImportError:
+     ChatCompletionMessageParam = Any
+     ChatCompletionModality = Any
+     ChatCompletionPredictionContentParam = Any
+     ChatCompletionAudioParam = Any
+
+ from ._types import LanguageModelName, LanguageModelInstructorMode
+ from .language_model import LanguageModel
+ from .language_model_request import LanguageModelMessagesParam
+ from .language_model_response import LanguageModelResponse
+ from ._streaming import Stream, AsyncStream
+
+ __all__ = [
+     "run_language_model",
+     "async_run_language_model",
+ ]
+
+ T = TypeVar("T")
+
+
+ # Overloads for run_language_model - String output, non-streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[str]: ...
+
+
+ # Overloads for run_language_model - String output, streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> Stream[str]: ...
+
+
+ # Overloads for run_language_model - Structured output, non-streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[T]: ...
+
+
+ # Overloads for run_language_model - Structured output, streaming
+ @overload
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> Stream[T]: ...
+
+
+ def run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     **kwargs: Any,
+ ) -> Union[LanguageModelResponse[Any], Stream[Any]]:
+     """Run a language model request with full parameter support.
+
+     Args:
+         messages: The input messages/content for the request
+         instructions: Optional system instructions to prepend
+         **kwargs: All request parameters from LanguageModelRequest
+
+     Returns:
+         LanguageModelResponse or Stream depending on parameters
+     """
+     # Extract model parameter or use default
+     model = kwargs.pop("model", "openai/gpt-4o-mini")
+
+     # Create language model instance
+     language_model = LanguageModel(model=model)
+
+     # Forward to the instance method
+     return language_model.run(messages, instructions, **kwargs)
+
+
+ # Async overloads for async_run_language_model - String output, non-streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[str]: ...
+
+
+ # Async overloads for async_run_language_model - String output, streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> AsyncStream[str]: ...
+
+
+ # Async overloads for async_run_language_model - Structured output, non-streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[False] = False,
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> LanguageModelResponse[T]: ...
+
+
+ # Async overloads for async_run_language_model - Structured output, streaming
+ @overload
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     *,
+     # Provider settings
+     model: LanguageModelName = "openai/gpt-4o-mini",
+     base_url: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     organization: Optional[str] = None,
+     deployment_id: Optional[str] = None,
+     model_list: Optional[List[Any]] = None,
+     extra_headers: Optional[Dict[str, str]] = None,
+     # Structured output settings
+     type: Type[T],
+     instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",
+     response_field_name: Optional[str] = None,
+     response_field_instruction: Optional[str] = None,
+     response_model_name: Optional[str] = None,
+     max_retries: Optional[int] = None,
+     strict: Optional[bool] = None,
+     validation_context: Optional[Dict[str, Any]] = None,
+     context: Optional[Dict[str, Any]] = None,
+     completion_kwargs_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_response_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_error_hooks: Optional[List[Callable[..., None]]] = None,
+     completion_last_attempt_hooks: Optional[List[Callable[..., None]]] = None,
+     parse_error_hooks: Optional[List[Callable[..., None]]] = None,
+     # Streaming settings
+     stream: Literal[True],
+     stream_options: Optional[Dict[str, Any]] = None,
+     # Extended settings
+     timeout: Optional[Union[float, str, "Timeout"]] = None,
+     temperature: Optional[float] = None,
+     top_p: Optional[float] = None,
+     n: Optional[int] = None,
+     stop: Optional[str] = None,
+     max_completion_tokens: Optional[int] = None,
+     max_tokens: Optional[int] = None,
+     modalities: Optional[List["ChatCompletionModality"]] = None,
+     prediction: Optional["ChatCompletionPredictionContentParam"] = None,
+     audio: Optional["ChatCompletionAudioParam"] = None,
+     presence_penalty: Optional[float] = None,
+     frequency_penalty: Optional[float] = None,
+     logit_bias: Optional[Dict[str, float]] = None,
+     user: Optional[str] = None,
+     reasoning_effort: Optional[str] = None,
+     seed: Optional[int] = None,
+     logprobs: Optional[bool] = None,
+     top_logprobs: Optional[int] = None,
+     thinking: Optional[Dict[str, Any]] = None,
+     web_search_options: Optional[Dict[str, Any]] = None,
+     # Tools settings
+     tools: Optional[List[Any]] = None,
+     tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     parallel_tool_calls: Optional[bool] = None,
+     functions: Optional[List[Any]] = None,
+     function_call: Optional[str] = None,
+ ) -> AsyncStream[T]: ...
+
+
+ async def async_run_language_model(
+     messages: LanguageModelMessagesParam,
+     instructions: Optional[str] = None,
+     **kwargs: Any,
+ ) -> Union[LanguageModelResponse[Any], AsyncStream[Any]]:
+     """Run an async language model request with full parameter support.
+
+     Args:
+         messages: The input messages/content for the request
+         instructions: Optional system instructions to prepend
+         **kwargs: All request parameters from LanguageModelRequest
+
+     Returns:
+         LanguageModelResponse or AsyncStream depending on parameters
+     """
+     # Extract model parameter or use default
+     model = kwargs.pop("model", "openai/gpt-4o-mini")
+
+     # Create language model instance
+     language_model = LanguageModel(model=model)
+
+     # Forward to the instance method
+     return await language_model.async_run(messages, instructions, **kwargs)
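Editor's note: the overload groups above exist only to give type checkers precise return types; at runtime both functions accept **kwargs and delegate to LanguageModel.run / LanguageModel.async_run. A minimal usage sketch (illustrative, not part of the diff; it assumes provider credentials such as OPENAI_API_KEY are configured, and that LanguageModelMessagesParam accepts OpenAI-style message dicts, which the name suggests but this hunk does not show):

    from pydantic import BaseModel
    from hammad.genai.language_models.run import run_language_model

    # Text output: no `type=`, stream=False -> LanguageModelResponse[str]
    reply = run_language_model(
        [{"role": "user", "content": "Name three prime numbers."}],
        instructions="Answer tersely.",
        temperature=0.0,
    )

    # Structured output: passing `type=` selects the LanguageModelResponse[T] overload
    class Primes(BaseModel):
        values: list[int]

    structured = run_language_model(
        [{"role": "user", "content": "Name three prime numbers."}],
        type=Primes,
        instructor_mode="tool_call",
    )

    # Streaming: stream=True -> Stream[str] (assumed iterable; Stream is
    # defined in _streaming.py, which this hunk does not show)
    for chunk in run_language_model(
        [{"role": "user", "content": "Count to five."}],
        stream=True,
    ):
        print(chunk)

async_run_language_model mirrors this surface exactly, returning AsyncStream instead of Stream when stream=True.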
@@ -0,0 +1,48 @@
+ """hammad.genai.multimodal_models"""
+
+ # simple litellm refs
+ # thanks litellm :)
+
+ from typing import TYPE_CHECKING
+ from .._internal import create_getattr_importer
+
+
+ if TYPE_CHECKING:
+     from litellm import (
+         # images / image editing
+         image_generation as run_image_generation_model,
+         aimage_generation as async_run_image_generation_model,
+         image_edit as run_image_edit_model,
+         aimage_edit as async_run_image_edit_model,
+         image_variation as run_image_variation_model,
+         aimage_variation as async_run_image_variation_model,
+
+         # audio / speech
+         speech as run_tts_model,
+         aspeech as async_run_tts_model,
+         transcription as run_transcription_model,
+         atranscription as async_run_transcription_model,
+     )
+
+
+ __all__ = (
+     # images / image editing
+     "run_image_generation_model",
+     "async_run_image_generation_model",
+     "run_image_edit_model",
+     "async_run_image_edit_model",
+     "run_image_variation_model",
+     "async_run_image_variation_model",
+
+     # audio / speech
+     "run_tts_model",
+     "async_run_tts_model",
+     "run_transcription_model",
+     "async_run_transcription_model",
+ )
+
+
+ __getattr__ = create_getattr_importer(__all__)
+
+ def __dir__() -> list[str]:
+     return list(__all__)
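Editor's note: because the litellm imports are guarded by TYPE_CHECKING, importing this module is cheap; litellm itself is only loaded when one of the aliases is first accessed through the generated __getattr__. A hedged usage sketch (the aliases resolve to litellm's public image_generation and transcription functions, whose keyword arguments below follow litellm's documented API; the model names and file path are placeholders):

    from hammad.genai.multimodal_models import (
        run_image_generation_model,   # litellm.image_generation, loaded on first access
        run_transcription_model,      # litellm.transcription
    )

    image = run_image_generation_model(prompt="a lighthouse at dusk", model="dall-e-3")

    with open("memo.wav", "rb") as audio_file:
        text = run_transcription_model(model="whisper-1", file=audio_file)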
@@ -0,0 +1,26 @@
+ """hammad.genai.rerank_models"""
+
+ # yay litellm
+
+ from typing import TYPE_CHECKING
+ from .._internal import create_getattr_importer
+
+
+ if TYPE_CHECKING:
+     from litellm import (
+         rerank as run_rerank_model,
+         arerank as async_run_rerank_model,
+     )
+
+
+ __all__ = (
+     "run_rerank_model",
+     "async_run_rerank_model",
+ )
+
+
+ __getattr__ = create_getattr_importer(__all__)
+
+
+ def __dir__() -> list[str]:
+     return list(__all__)
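Editor's note: the same lazy re-export pattern, applied to litellm's rerank entry points. An illustrative call (keyword arguments follow litellm's rerank API as documented upstream; the model string is a placeholder):

    from hammad.genai.rerank_models import run_rerank_model

    ranked = run_rerank_model(
        model="cohere/rerank-english-v3.0",
        query="What is the capital of France?",
        documents=["Paris is the capital of France.", "Berlin is in Germany."],
        top_n=1,
    )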
@@ -0,0 +1,35 @@
+ """hammad.logging"""
+
+ from typing import TYPE_CHECKING
+ from .._internal import create_getattr_importer
+
+ if TYPE_CHECKING:
+     from .logger import Logger, create_logger, create_logger_level, LoggerLevelName
+     from .decorators import (
+         trace_function,
+         trace_cls,
+         trace,
+         trace_http,
+         install_trace_http,
+     )
+
+
+ __all__ = (
+     "Logger",
+     "LoggerLevelName",
+     "create_logger",
+     "create_logger_level",
+     "trace_function",
+     "trace_cls",
+     "trace",
+     "trace_http",
+     "install_trace_http",
+ )
+
+
+ __getattr__ = create_getattr_importer(__all__)
+
+
+ def __dir__() -> list[str]:
+     """Get the attributes of the logging module."""
+     return list(__all__)
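Editor's note: this module, like the two genai modules above, follows the PEP 562 lazy-import structure: __all__ declares the public surface, the module-level __dir__ makes it discoverable, and the __getattr__ generated by create_getattr_importer (defined in hammad/_internal.py, not shown in these hunks) imports the real object on first attribute access. The observable behavior, grounded in this hunk alone:

    import hammad.logging

    dir(hammad.logging)      # == the names in __all__, via the module-level __dir__
    hammad.logging.Logger    # first access triggers __getattr__, which lazily
                             # imports the implementation from .logger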