hammad-python 0.0.19__py3-none-any.whl → 0.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. hammad/__init__.py +7 -137
  2. hammad/_internal.py +1 -0
  3. hammad/cli/_runner.py +8 -8
  4. hammad/cli/plugins.py +55 -26
  5. hammad/cli/styles/utils.py +16 -8
  6. hammad/data/__init__.py +1 -5
  7. hammad/data/collections/__init__.py +2 -3
  8. hammad/data/collections/collection.py +41 -22
  9. hammad/data/collections/indexes/__init__.py +1 -1
  10. hammad/data/collections/indexes/qdrant/__init__.py +1 -1
  11. hammad/data/collections/indexes/qdrant/index.py +106 -118
  12. hammad/data/collections/indexes/qdrant/settings.py +14 -14
  13. hammad/data/collections/indexes/qdrant/utils.py +28 -38
  14. hammad/data/collections/indexes/tantivy/__init__.py +1 -1
  15. hammad/data/collections/indexes/tantivy/index.py +57 -59
  16. hammad/data/collections/indexes/tantivy/settings.py +8 -19
  17. hammad/data/collections/indexes/tantivy/utils.py +28 -52
  18. hammad/data/models/__init__.py +2 -7
  19. hammad/data/sql/__init__.py +1 -1
  20. hammad/data/sql/database.py +71 -73
  21. hammad/data/sql/types.py +37 -51
  22. hammad/formatting/__init__.py +2 -1
  23. hammad/formatting/json/converters.py +2 -2
  24. hammad/genai/__init__.py +96 -36
  25. hammad/genai/agents/__init__.py +47 -1
  26. hammad/genai/agents/agent.py +1022 -0
  27. hammad/genai/agents/run.py +615 -0
  28. hammad/genai/agents/types/__init__.py +29 -22
  29. hammad/genai/agents/types/agent_context.py +13 -0
  30. hammad/genai/agents/types/agent_event.py +128 -0
  31. hammad/genai/agents/types/agent_hooks.py +220 -0
  32. hammad/genai/agents/types/agent_messages.py +31 -0
  33. hammad/genai/agents/types/agent_response.py +90 -0
  34. hammad/genai/agents/types/agent_stream.py +242 -0
  35. hammad/genai/models/__init__.py +1 -0
  36. hammad/genai/models/embeddings/__init__.py +39 -0
  37. hammad/genai/{embedding_models/embedding_model.py → models/embeddings/model.py} +45 -41
  38. hammad/genai/{embedding_models → models/embeddings}/run.py +10 -8
  39. hammad/genai/models/embeddings/types/__init__.py +37 -0
  40. hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_name.py +2 -4
  41. hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_response.py +11 -4
  42. hammad/genai/{embedding_models/embedding_model_request.py → models/embeddings/types/embedding_model_run_params.py} +4 -3
  43. hammad/genai/models/embeddings/types/embedding_model_settings.py +47 -0
  44. hammad/genai/models/language/__init__.py +48 -0
  45. hammad/genai/{language_models/language_model.py → models/language/model.py} +481 -204
  46. hammad/genai/{language_models → models/language}/run.py +80 -57
  47. hammad/genai/models/language/types/__init__.py +40 -0
  48. hammad/genai/models/language/types/language_model_instructor_mode.py +47 -0
  49. hammad/genai/models/language/types/language_model_messages.py +28 -0
  50. hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py} +3 -40
  51. hammad/genai/{language_models → models/language/types}/language_model_request.py +17 -25
  52. hammad/genai/{language_models → models/language/types}/language_model_response.py +61 -68
  53. hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py +8 -5
  54. hammad/genai/models/language/types/language_model_settings.py +89 -0
  55. hammad/genai/{language_models/_streaming.py → models/language/types/language_model_stream.py} +221 -243
  56. hammad/genai/{language_models/_utils → models/language/utils}/__init__.py +8 -11
  57. hammad/genai/models/language/utils/requests.py +421 -0
  58. hammad/genai/{language_models/_utils/_structured_outputs.py → models/language/utils/structured_outputs.py} +31 -20
  59. hammad/genai/models/model_provider.py +4 -0
  60. hammad/genai/{multimodal_models.py → models/multimodal.py} +4 -5
  61. hammad/genai/models/reranking.py +26 -0
  62. hammad/genai/types/__init__.py +1 -0
  63. hammad/genai/types/base.py +215 -0
  64. hammad/genai/{agents/types → types}/history.py +101 -88
  65. hammad/genai/{agents/types/tool.py → types/tools.py} +156 -141
  66. hammad/logging/logger.py +1 -1
  67. hammad/mcp/client/__init__.py +2 -3
  68. hammad/mcp/client/client.py +10 -10
  69. hammad/mcp/servers/__init__.py +2 -1
  70. hammad/service/decorators.py +1 -3
  71. hammad/web/models.py +1 -3
  72. hammad/web/search/client.py +10 -22
  73. {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/METADATA +10 -2
  74. hammad_python-0.0.20.dist-info/RECORD +127 -0
  75. hammad/genai/embedding_models/__init__.py +0 -41
  76. hammad/genai/language_models/__init__.py +0 -35
  77. hammad/genai/language_models/_utils/_completions.py +0 -131
  78. hammad/genai/language_models/_utils/_messages.py +0 -89
  79. hammad/genai/language_models/_utils/_requests.py +0 -202
  80. hammad/genai/rerank_models.py +0 -26
  81. hammad_python-0.0.19.dist-info/RECORD +0 -111
  82. {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/WHEEL +0 -0
  83. {hammad_python-0.0.19.dist-info → hammad_python-0.0.20.dist-info}/licenses/LICENSE +0 -0
hammad/genai/{language_models → models/language}/run.py

@@ -3,47 +3,57 @@
  Standalone functions for running language models with full parameter typing.
  """

- from typing import Any, List, TypeVar, Union, Optional, Type, overload, Dict, TYPE_CHECKING, Callable
+ from typing import (
+     Any,
+     List,
+     TypeVar,
+     Union,
+     Optional,
+     Type,
+     overload,
+     Dict,
+     TYPE_CHECKING,
+     Callable,
+ )
  from typing_extensions import Literal

  if TYPE_CHECKING:
      from httpx import Timeout

- try:
-     from openai.types.chat import (
-         ChatCompletionMessageParam,
-         ChatCompletionModality,
-         ChatCompletionPredictionContentParam,
-         ChatCompletionAudioParam,
-     )
- except ImportError:
-     ChatCompletionMessageParam = Any
-     ChatCompletionModality = Any
-     ChatCompletionPredictionContentParam = Any
-     ChatCompletionAudioParam = Any
-
- from ._types import LanguageModelName, LanguageModelInstructorMode
- from .language_model import LanguageModel
- from .language_model_request import LanguageModelMessagesParam
- from .language_model_response import LanguageModelResponse
- from ._streaming import Stream, AsyncStream
+     from openai.types.chat import (
+         ChatCompletionModality,
+         ChatCompletionPredictionContentParam,
+         ChatCompletionAudioParam,
+     )
+
+ from .types import (
+     LanguageModelMessages,
+     LanguageModelInstructorMode,
+     LanguageModelName,
+     LanguageModelResponse,
+     LanguageModelStream,
+ )
+
+ from .model import LanguageModel
+

  __all__ = [
      "run_language_model",
      "async_run_language_model",
  ]

+
  T = TypeVar("T")


  # Overloads for run_language_model - String output, non-streaming
  @overload
  def run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -51,6 +61,7 @@ def run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Streaming settings
      stream: Literal[False] = False,
      stream_options: Optional[Dict[str, Any]] = None,

@@ -81,17 +92,17 @@
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> LanguageModelResponse[str]: ...
+ ) -> "LanguageModelResponse[str]": ...


  # Overloads for run_language_model - String output, streaming
  @overload
  def run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -99,6 +110,7 @@ def run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Streaming settings
      stream: Literal[True],
      stream_options: Optional[Dict[str, Any]] = None,

@@ -129,17 +141,17 @@ def run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> Stream[str]: ...
+ ) -> "LanguageModelStream[str]": ...


  # Overloads for run_language_model - Structured output, non-streaming
  @overload
  def run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -147,6 +159,7 @@ def run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Structured output settings
      type: Type[T],
      instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",

@@ -192,17 +205,17 @@ def run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> LanguageModelResponse[T]: ...
+ ) -> "LanguageModelResponse[T]": ...


  # Overloads for run_language_model - Structured output, streaming
  @overload
  def run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -210,6 +223,7 @@ def run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Structured output settings
      type: Type[T],
      instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",

@@ -255,42 +269,45 @@ def run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> Stream[T]: ...
+ ) -> "LanguageModelStream[T]": ...


  def run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
+     mock_response: Optional[bool] = None,
      **kwargs: Any,
- ) -> Union[LanguageModelResponse[Any], Stream[Any]]:
+ ) -> Union["LanguageModelResponse[Any]", "LanguageModelStream[Any]"]:
      """Run a language model request with full parameter support.
-
+
      Args:
          messages: The input messages/content for the request
          instructions: Optional system instructions to prepend
          **kwargs: All request parameters from LanguageModelRequest
-
+
      Returns:
          LanguageModelResponse or Stream depending on parameters
      """
      # Extract model parameter or use default
      model = kwargs.pop("model", "openai/gpt-4o-mini")
-
+
      # Create language model instance
      language_model = LanguageModel(model=model)
-
+
      # Forward to the instance method
-     return language_model.run(messages, instructions, **kwargs)
+     return language_model.run(
+         messages, instructions, mock_response=mock_response, **kwargs
+     )


  # Async overloads for async_run_language_model - String output, non-streaming
  @overload
  async def async_run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -328,17 +345,17 @@ async def async_run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> LanguageModelResponse[str]: ...
+ ) -> "LanguageModelResponse[str]": ...


  # Async overloads for async_run_language_model - String output, streaming
  @overload
  async def async_run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -346,6 +363,7 @@ async def async_run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Streaming settings
      stream: Literal[True],
      stream_options: Optional[Dict[str, Any]] = None,

@@ -376,17 +394,17 @@ async def async_run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> AsyncStream[str]: ...
+ ) -> "LanguageModelStream[str]": ...


  # Async overloads for async_run_language_model - Structured output, non-streaming
  @overload
  async def async_run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -394,6 +412,7 @@ async def async_run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Structured output settings
      type: Type[T],
      instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",

@@ -439,17 +458,17 @@ async def async_run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> LanguageModelResponse[T]: ...
+ ) -> "LanguageModelResponse[T]": ...


  # Async overloads for async_run_language_model - Structured output, streaming
  @overload
  async def async_run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
      *,
      # Provider settings
-     model: LanguageModelName = "openai/gpt-4o-mini",
+     model: "LanguageModelName" = "openai/gpt-4o-mini",
      base_url: Optional[str] = None,
      api_key: Optional[str] = None,
      api_version: Optional[str] = None,

@@ -457,6 +476,7 @@ async def async_run_language_model(
      deployment_id: Optional[str] = None,
      model_list: Optional[List[Any]] = None,
      extra_headers: Optional[Dict[str, str]] = None,
+     mock_response: Optional[bool] = None,
      # Structured output settings
      type: Type[T],
      instructor_mode: Optional[LanguageModelInstructorMode] = "tool_call",

@@ -502,29 +522,32 @@ async def async_run_language_model(
      parallel_tool_calls: Optional[bool] = None,
      functions: Optional[List[Any]] = None,
      function_call: Optional[str] = None,
- ) -> AsyncStream[T]: ...
+ ) -> "LanguageModelStream[T]": ...


  async def async_run_language_model(
-     messages: LanguageModelMessagesParam,
+     messages: "LanguageModelMessages",
      instructions: Optional[str] = None,
+     mock_response: Optional[bool] = None,
      **kwargs: Any,
- ) -> Union[LanguageModelResponse[Any], AsyncStream[Any]]:
+ ) -> Union["LanguageModelResponse[Any]", "LanguageModelStream[Any]"]:
      """Run an async language model request with full parameter support.
-
+
      Args:
          messages: The input messages/content for the request
          instructions: Optional system instructions to prepend
          **kwargs: All request parameters from LanguageModelRequest
-
+
      Returns:
          LanguageModelResponse or AsyncStream depending on parameters
      """
      # Extract model parameter or use default
      model = kwargs.pop("model", "openai/gpt-4o-mini")
-
+
      # Create language model instance
      language_model = LanguageModel(model=model)
-
+
      # Forward to the instance method
-     return await language_model.async_run(messages, instructions, **kwargs)
+     return await language_model.async_run(
+         messages, instructions, mock_response=mock_response, **kwargs
+     )
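
Net effect for this file: both `run_language_model` and `async_run_language_model` now accept a `mock_response` argument and forward it explicitly to the underlying `LanguageModel` instance, and all type references point at the consolidated `.types` module. A minimal usage sketch, not part of the diff; the import path follows the new file location above, and whether `mock_response=True` fully short-circuits the provider call is an assumption:

    # Hypothetical call against the relocated runner (hammad-python 0.0.20).
    from hammad.genai.models.language.run import run_language_model

    response = run_language_model(
        "What is the capital of France?",
        instructions="Answer in one word.",
        model="openai/gpt-4o-mini",
        mock_response=True,  # new in this diff; assumed to avoid a real API call
    )
    print(response)  # a LanguageModelResponse[str]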
hammad/genai/models/language/types/__init__.py (new file)

@@ -0,0 +1,40 @@
+ """hammad.genai.models.language.types"""
+
+ from typing import TYPE_CHECKING
+ from ....._internal import create_getattr_importer
+
+ if TYPE_CHECKING:
+     from .language_model_instructor_mode import LanguageModelInstructorMode
+     from .language_model_messages import LanguageModelMessages
+     from .language_model_name import LanguageModelName
+     from .language_model_request import LanguageModelRequest
+     from .language_model_response import LanguageModelResponse
+     from .language_model_response_chunk import LanguageModelResponseChunk
+     from .language_model_settings import LanguageModelSettings
+     from .language_model_stream import LanguageModelStream
+
+ __all__ = [
+     # hammad.genai.models.language.types.language_model_instructor_mode
+     "LanguageModelInstructorMode",
+     # hammad.genai.models.language.types.language_model_messages
+     "LanguageModelMessages",
+     # hammad.genai.models.language.types.language_model_name
+     "LanguageModelName",
+     # hammad.genai.models.language.types.language_model_request
+     "LanguageModelRequest",
+     # hammad.genai.models.language.types.language_model_response
+     "LanguageModelResponse",
+     # hammad.genai.models.language.types.language_model_response_chunk
+     "LanguageModelResponseChunk",
+     # hammad.genai.models.language.types.language_model_settings
+     "LanguageModelSettings",
+     # hammad.genai.models.language.types.language_model_stream
+     "LanguageModelStream",
+ ]
+
+
+ __getattr__ = create_getattr_importer(__all__)
+
+
+ def __dir__() -> list[str]:
+     return __all__
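
This new `__init__.py` keeps imports lazy: names in `__all__` are only imported from their submodules on first attribute access, via the internal `create_getattr_importer` helper. A rough stand-in for the pattern (PEP 562 module `__getattr__`); the real helper lives in `hammad._internal` and its implementation may differ:

    # Sketch of an __all__-driven lazy importer; assumes the convention that
    # a CamelCase symbol lives in a snake_case submodule of this package.
    import re
    from importlib import import_module

    def __getattr__(name: str):
        if name in __all__:
            # LanguageModelName -> language_model_name
            submodule = re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
            return getattr(import_module(f".{submodule}", __name__), name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")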
hammad/genai/models/language/types/language_model_instructor_mode.py (new file)

@@ -0,0 +1,47 @@
+ """hammad.genai.models.language.types.language_model_instructor_mode"""
+
+ from typing import (
+     TypeAlias,
+     Literal,
+ )
+
+
+ __all__ = [
+     "LanguageModelInstructorMode",
+ ]
+
+
+ LanguageModelInstructorMode: TypeAlias = Literal[
+     "function_call",
+     "parallel_tool_call",
+     "tool_call",
+     "tools_strict",
+     "json_mode",
+     "json_o1",
+     "markdown_json_mode",
+     "json_schema_mode",
+     "anthropic_tools",
+     "anthropic_reasoning_tools",
+     "anthropic_json",
+     "mistral_tools",
+     "mistral_structured_outputs",
+     "vertexai_tools",
+     "vertexai_json",
+     "vertexai_parallel_tools",
+     "gemini_json",
+     "gemini_tools",
+     "genai_tools",
+     "genai_structured_outputs",
+     "cohere_tools",
+     "cohere_json_object",
+     "cerebras_tools",
+     "cerebras_json",
+     "fireworks_tools",
+     "fireworks_json",
+     "writer_tools",
+     "bedrock_tools",
+     "bedrock_json",
+     "perplexity_json",
+     "openrouter_structured_outputs",
+ ]
+ """Instructor prompt/parsing mode for structured outputs."""
hammad/genai/models/language/types/language_model_messages.py (new file)

@@ -0,0 +1,28 @@
+ """hammad.genai.models.language.types.language_model_messages"""
+
+ from typing import (
+     TypeAlias,
+     Union,
+     Any,
+     List,
+     TYPE_CHECKING,
+ )
+
+ if TYPE_CHECKING:
+     from openai.types.chat import (
+         ChatCompletionMessageParam,
+     )
+
+
+ __all__ = [
+     "LanguageModelMessages",
+ ]
+
+
+ LanguageModelMessages: TypeAlias = Union[
+     str,
+     "ChatCompletionMessageParam",
+     "List[ChatCompletionMessageParam]",
+     Any,
+ ]
+ """Type alias for the input parameters of a language model request."""
hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py}

@@ -1,4 +1,4 @@
- """hammad.genai.language_models._types"""
+ """hammad.genai.models.language.types.language_model_name"""

  from typing import (
      TypeAlias,

@@ -7,48 +7,11 @@ from typing import (


  __all__ = [
-     "LanguageModelInstructorMode",
      "LanguageModelName",
  ]


- LanguageModelInstructorMode : TypeAlias = Literal[
-     "function_call",
-     "parallel_tool_call",
-     "tool_call",
-     "tools_strict",
-     "json_mode",
-     "json_o1",
-     "markdown_json_mode",
-     "json_schema_mode",
-     "anthropic_tools",
-     "anthropic_reasoning_tools",
-     "anthropic_json",
-     "mistral_tools",
-     "mistral_structured_outputs",
-     "vertexai_tools",
-     "vertexai_json",
-     "vertexai_parallel_tools",
-     "gemini_json",
-     "gemini_tools",
-     "genai_tools",
-     "genai_structured_outputs",
-     "cohere_tools",
-     "cohere_json_object",
-     "cerebras_tools",
-     "cerebras_json",
-     "fireworks_tools",
-     "fireworks_json",
-     "writer_tools",
-     "bedrock_tools",
-     "bedrock_json",
-     "perplexity_json",
-     "openrouter_structured_outputs",
- ]
- """Instructor prompt/parsing mode for structured outputs."""
-
-
- LanguageModelName : TypeAlias = Literal[
+ LanguageModelName: TypeAlias = Literal[
      "anthropic/claude-3-7-sonnet-latest",
      "anthropic/claude-3-5-haiku-latest",
      "anthropic/claude-3-5-sonnet-latest",

@@ -273,4 +236,4 @@ LanguageModelName : TypeAlias = Literal[
      "xai/grok-3-latest",
  ]
  """Helper alias for various compatible models usable with litellm
- completions."""
+ completions."""
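
Because `LanguageModelName` is a `Literal` alias rather than plain `str`, static checkers can flag misspelled provider/model strings at annotation sites. A sketch (the first value appears in the alias; the second is a deliberate typo):

    model_ok: "LanguageModelName" = "anthropic/claude-3-5-sonnet-latest"
    model_bad: "LanguageModelName" = "anthropic/claude-5"  # rejected by mypy/pyright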
hammad/genai/{language_models → models/language/types}/language_model_request.py

@@ -8,7 +8,6 @@ from typing import (
      Type,
      TypeVar,
      TYPE_CHECKING,
-     TypeAlias,
      Callable,
  )
  import sys

@@ -20,39 +19,26 @@ else:

  if TYPE_CHECKING:
      from httpx import Timeout
-     try:
-         from openai.types.chat import (
-             ChatCompletionMessageParam,
-             ChatCompletionModality,
-             ChatCompletionPredictionContentParam,
-             ChatCompletionAudioParam,
-         )
-     except ImportError:
-         ChatCompletionMessageParam = Any
-         ChatCompletionModality = Any
-         ChatCompletionPredictionContentParam = Any
-         ChatCompletionAudioParam = Any
-
-     from ._types import LanguageModelName, LanguageModelInstructorMode
+     from openai.types.chat import (
+         ChatCompletionModality,
+         ChatCompletionPredictionContentParam,
+         ChatCompletionAudioParam,
+     )
+
+     from .language_model_name import LanguageModelName
+     from .language_model_instructor_mode import LanguageModelInstructorMode

  __all__ = [
-     "LanguageModelMessagesParam",
      "LanguageModelRequest",
  ]

- T = TypeVar("T")

- LanguageModelMessagesParam : TypeAlias = Union[
-     str,
-     "ChatCompletionMessageParam",
-     "List[ChatCompletionMessageParam]",
-     Any,
- ]
- """Type alias for the input parameters of a language model request."""
+ T = TypeVar("T")


  class LanguageModelRequestProviderSettings(TypedDict, total=False):
      """Provider-specific settings for language model requests."""
+
      model: Required[LanguageModelName]
      base_url: NotRequired[str]
      api_key: NotRequired[str]

@@ -65,6 +51,7 @@ class LanguageModelRequestProviderSettings(TypedDict, total=False):

  class LanguageModelRequestStructuredOutputSettings(TypedDict, total=False):
      """Settings for structured output generation."""
+
      type: Required[Type[T]]
      instructor_mode: NotRequired[LanguageModelInstructorMode]
      response_field_name: NotRequired[str]

@@ -77,6 +64,7 @@ class LanguageModelRequestStructuredOutputSettings(TypedDict, total=False):

  class LanguageModelRequestToolsSettings(TypedDict, total=False):
      """Settings for tool usage in language model requests."""
+
      tools: NotRequired[List[Any]]
      tool_choice: NotRequired[Union[str, Dict[str, Any]]]
      parallel_tool_calls: NotRequired[bool]

@@ -86,12 +74,14 @@

  class LanguageModelRequestStreamingSettings(TypedDict, total=False):
      """Settings for streaming responses."""
+
      stream: Required[bool]
      stream_options: NotRequired[Dict[str, Any]]


  class LanguageModelRequestHooksSettings(TypedDict, total=False):
      """Settings for instructor hooks."""
+
      completion_kwargs_hooks: NotRequired[List[Callable[..., None]]]
      completion_response_hooks: NotRequired[List[Callable[..., None]]]
      completion_error_hooks: NotRequired[List[Callable[..., None]]]

@@ -101,6 +91,7 @@

  class LanguageModelRequestExtendedSettings(TypedDict, total=False):
      """Extended settings for language model requests."""
+
      timeout: NotRequired[Union[float, str, "Timeout"]]
      temperature: NotRequired[float]
      top_p: NotRequired[float]

@@ -132,4 +123,5 @@ class LanguageModelRequest(
      LanguageModelRequestExtendedSettings,
  ):
      """Complete settings for language model requests."""
-     pass
+
+     pass
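
Since `LanguageModelRequest` inherits every `total=False` settings group above, a request is just a dict carrying whichever keys a call needs, with `Required[...]` fields enforced by type checkers. A sketch (the key choices are illustrative):

    request: "LanguageModelRequest" = {
        "model": "openai/gpt-4o-mini",  # Required provider setting
        "stream": False,                # Required streaming setting
        "temperature": 0.2,             # NotRequired extended setting
    }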