hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. hammad/__init__.py +177 -0
  2. hammad/{performance/imports.py → _internal.py} +7 -1
  3. hammad/cache/__init__.py +1 -1
  4. hammad/cli/__init__.py +3 -1
  5. hammad/cli/_runner.py +265 -0
  6. hammad/cli/animations.py +1 -1
  7. hammad/cli/plugins.py +133 -78
  8. hammad/cli/styles/__init__.py +1 -1
  9. hammad/cli/styles/utils.py +149 -3
  10. hammad/data/__init__.py +56 -29
  11. hammad/data/collections/__init__.py +27 -17
  12. hammad/data/collections/collection.py +205 -383
  13. hammad/data/collections/indexes/__init__.py +37 -0
  14. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  15. hammad/data/collections/indexes/qdrant/index.py +735 -0
  16. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  17. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  18. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  19. hammad/data/collections/indexes/tantivy/index.py +428 -0
  20. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  21. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  22. hammad/data/configurations/__init__.py +2 -2
  23. hammad/data/configurations/configuration.py +2 -2
  24. hammad/data/models/__init__.py +20 -9
  25. hammad/data/models/extensions/__init__.py +4 -0
  26. hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
  27. hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
  28. hammad/data/models/{base/fields.py → fields.py} +1 -1
  29. hammad/data/models/{base/model.py → model.py} +1 -1
  30. hammad/data/models/{base/utils.py → utils.py} +1 -1
  31. hammad/data/sql/__init__.py +23 -0
  32. hammad/data/sql/database.py +578 -0
  33. hammad/data/sql/types.py +141 -0
  34. hammad/data/types/__init__.py +1 -3
  35. hammad/data/types/file.py +3 -3
  36. hammad/data/types/multimodal/__init__.py +2 -2
  37. hammad/data/types/multimodal/audio.py +2 -2
  38. hammad/data/types/multimodal/image.py +2 -2
  39. hammad/formatting/__init__.py +9 -27
  40. hammad/formatting/json/__init__.py +8 -2
  41. hammad/formatting/json/converters.py +7 -1
  42. hammad/formatting/text/__init__.py +1 -1
  43. hammad/formatting/yaml/__init__.py +1 -1
  44. hammad/genai/__init__.py +78 -0
  45. hammad/genai/agents/__init__.py +1 -0
  46. hammad/genai/agents/types/__init__.py +35 -0
  47. hammad/genai/agents/types/history.py +277 -0
  48. hammad/genai/agents/types/tool.py +490 -0
  49. hammad/genai/embedding_models/__init__.py +41 -0
  50. hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
  51. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  52. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  53. hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
  54. hammad/genai/embedding_models/run.py +161 -0
  55. hammad/genai/language_models/__init__.py +35 -0
  56. hammad/genai/language_models/_streaming.py +622 -0
  57. hammad/genai/language_models/_types.py +276 -0
  58. hammad/genai/language_models/_utils/__init__.py +31 -0
  59. hammad/genai/language_models/_utils/_completions.py +131 -0
  60. hammad/genai/language_models/_utils/_messages.py +89 -0
  61. hammad/genai/language_models/_utils/_requests.py +202 -0
  62. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  63. hammad/genai/language_models/language_model.py +734 -0
  64. hammad/genai/language_models/language_model_request.py +135 -0
  65. hammad/genai/language_models/language_model_response.py +219 -0
  66. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  67. hammad/genai/language_models/run.py +530 -0
  68. hammad/genai/multimodal_models.py +48 -0
  69. hammad/genai/rerank_models.py +26 -0
  70. hammad/logging/__init__.py +1 -1
  71. hammad/logging/decorators.py +1 -1
  72. hammad/logging/logger.py +2 -2
  73. hammad/mcp/__init__.py +1 -1
  74. hammad/mcp/client/__init__.py +35 -0
  75. hammad/mcp/client/client.py +105 -4
  76. hammad/mcp/client/client_service.py +10 -3
  77. hammad/mcp/servers/__init__.py +24 -0
  78. hammad/{performance/runtime → runtime}/__init__.py +2 -2
  79. hammad/{performance/runtime → runtime}/decorators.py +1 -1
  80. hammad/{performance/runtime → runtime}/run.py +1 -1
  81. hammad/service/__init__.py +1 -1
  82. hammad/service/create.py +3 -8
  83. hammad/service/decorators.py +8 -8
  84. hammad/typing/__init__.py +28 -0
  85. hammad/web/__init__.py +3 -3
  86. hammad/web/http/client.py +1 -1
  87. hammad/web/models.py +53 -21
  88. hammad/web/search/client.py +99 -52
  89. hammad/web/utils.py +13 -13
  90. hammad_python-0.0.16.dist-info/METADATA +191 -0
  91. hammad_python-0.0.16.dist-info/RECORD +110 -0
  92. hammad/ai/__init__.py +0 -1
  93. hammad/ai/_utils.py +0 -142
  94. hammad/ai/completions/__init__.py +0 -45
  95. hammad/ai/completions/client.py +0 -684
  96. hammad/ai/completions/create.py +0 -710
  97. hammad/ai/completions/settings.py +0 -100
  98. hammad/ai/completions/types.py +0 -792
  99. hammad/ai/completions/utils.py +0 -486
  100. hammad/ai/embeddings/__init__.py +0 -35
  101. hammad/ai/embeddings/client/__init__.py +0 -1
  102. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  103. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  104. hammad/ai/embeddings/create.py +0 -159
  105. hammad/data/collections/base_collection.py +0 -58
  106. hammad/data/collections/searchable_collection.py +0 -556
  107. hammad/data/collections/vector_collection.py +0 -596
  108. hammad/data/databases/__init__.py +0 -21
  109. hammad/data/databases/database.py +0 -902
  110. hammad/data/models/base/__init__.py +0 -35
  111. hammad/data/models/pydantic/models/__init__.py +0 -28
  112. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  113. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  114. hammad/data/models/pydantic/models/fast_model.py +0 -318
  115. hammad/data/models/pydantic/models/function_model.py +0 -176
  116. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  117. hammad/performance/__init__.py +0 -36
  118. hammad/py.typed +0 -0
  119. hammad_python-0.0.14.dist-info/METADATA +0 -70
  120. hammad_python-0.0.14.dist-info/RECORD +0 -99
  121. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  122. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142

@@ -1,6 +1,8 @@
- """hammad.ai.embeddings.client.litellm_embeddings_client"""
+ """hammad.genai.embedding_models.embedding_model"""

- from typing import Any, List, Literal, Optional
+ import asyncio
+ from dataclasses import dataclass
+ from typing import Any, List, Literal, Optional, TYPE_CHECKING
  import sys

  if sys.version_info >= (3, 12):
@@ -8,110 +10,32 @@ if sys.version_info >= (3, 12):
  else:
      from typing_extensions import TypedDict

-
- from .base_embeddings_client import BaseEmbeddingsClient
- from ..types import (
+ if TYPE_CHECKING:
+     try:
+         from litellm import EmbeddingResponse as _LitellmEmbeddingResponse
+     except ImportError:
+         _LitellmEmbeddingResponse = Any
+
+ from ..language_models.language_model import _AIProvider
+ from .embedding_model_request import EmbeddingModelRequest
+ from .embedding_model_name import EmbeddingModelName
+ from .embedding_model_response import (
      Embedding,
      EmbeddingUsage,
-     EmbeddingResponse,
+     EmbeddingModelResponse,
  )
- from ....formatting.text.converters import convert_to_text
- from ..._utils import get_litellm
+ from ...formatting.text import convert_to_text
+

  __all__ = (
-     "LiteLlmEmbeddingsClient",
-     "LiteLlmEmbeddingModel",
-     "LiteLlmEmbeddingModelSettings",
+     "EmbeddingModel",
+     "EmbeddingModelError",
  )


- LiteLlmEmbeddingModel = Literal[
-     # OpenAI Embedding Models
-     "text-embedding-3-small",
-     "text-embedding-3-large",
-     "text-embedding-ada-002",
-     # OpenAI Compatible Embedding Models
-     "openai/text-embedding-3-small",
-     "openai/text-embedding-3-large",
-     "openai/text-embedding-ada-002",
-     # Bedrock Embedding Models
-     "amazon.titan-embed-text-v1",
-     "cohere.embed-english-v3",
-     "cohere.embed-multilingual-v3",
-     # Cohere Embedding Models
-     "embed-english-v3.0",
-     "embed-english-light-v3.0",
-     "embed-multilingual-v3.0",
-     "embed-multilingual-light-v3.0",
-     "embed-english-v2.0",
-     "embed-english-light-v2.0",
-     "embed-multilingual-v2.0",
-     # NVIDIA NIM Embedding Models
-     "nvidia_nim/NV-Embed-QA",
-     "nvidia_nim/nvidia/nv-embed-v1",
-     "nvidia_nim/nvidia/nv-embedqa-mistral-7b-v2",
-     "nvidia_nim/nvidia/nv-embedqa-e5-v5",
-     "nvidia_nim/nvidia/embed-qa-4",
-     "nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v1",
-     "nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v2",
-     "nvidia_nim/snowflake/arctic-embed-l",
-     "nvidia_nim/baai/bge-m3",
-     # HuggingFace Embedding Models
-     "huggingface/microsoft/codebert-base",
-     "huggingface/BAAI/bge-large-zh",
-     # Mistral AI Embedding Models
-     "mistral/mistral-embed",
-     # Gemini AI Embedding Models
-     "gemini/text-embedding-004",
-     # Vertex AI Embedding Models
-     "vertex_ai/textembedding-gecko",
-     "vertex_ai/textembedding-gecko-multilingual",
-     "vertex_ai/textembedding-gecko-multilingual@001",
-     "vertex_ai/textembedding-gecko@001",
-     "vertex_ai/textembedding-gecko@003",
-     "vertex_ai/text-embedding-preview-0409",
-     "vertex_ai/text-multilingual-embedding-preview-0409",
-     # Voyage AI Embedding Models
-     "voyage/voyage-01",
-     "voyage/voyage-lite-01",
-     "voyage/voyage-lite-01-instruct",
-     # Nebius AI Studio Embedding Models
-     "nebius/BAAI/bge-en-icl",
-     "nebius/BAAI/bge-multilingual-gemma2",
-     "nebius/intfloat/e5-mistral-7b-instruct",
-     # Ollama Embedding Models
-     "ollama/granite-embedding:30m",
-     "ollama/granite-embedding:278m",
-     "ollama/snowflake-arctic-embed2",
-     "ollama/bge-large",
-     "ollama/paraphrase-multilingual",
-     "ollama/bge-m3",
-     "ollama/snowflake-arctic-embed",
-     "ollama/mxbai-embed-large",
-     "ollama/all-minilm",
-     "ollama/nomic-embed-text",
- ]
- """Common embedding models supported by `litellm`."""
-
-
- class LiteLlmEmbeddingModelSettings(TypedDict):
-     """Valid settings for the `litellm` embedding models."""
-
-     model: LiteLlmEmbeddingModel | str
-     dimensions: Optional[int]
-     encoding_format: Optional[str]
-     timeout: Optional[int]
-     api_base: Optional[str]
-     api_version: Optional[str]
-     api_key: Optional[str]
-     api_type: Optional[str]
-     caching: bool
-     user: Optional[str]
-
-
- class LiteLlmEmbeddingError(Exception):
+ class EmbeddingModelError(Exception):
      """Exception raised when an error occurs while generating embeddings
-     using `litellm`."""
+     using an embedding model."""

      def __init__(self, message: str, response: Any):
          self.message = message
@@ -119,8 +43,8 @@ class LiteLlmEmbeddingError(Exception):
          super().__init__(self.message)


- def _parse_litellm_response_to_embedding_response(response: Any) -> EmbeddingResponse:
-     """Parse the response from `litellm` to an `EmbeddingResponse` object."""
+ def _parse_litellm_response_to_embedding_model_response(response: "_LitellmEmbeddingResponse") -> EmbeddingModelResponse:
+     """Parse the response from `litellm` to an `EmbeddingModelResponse` object."""
      try:
          embedding_data: List[Embedding] = []

@@ -132,27 +56,29 @@ def _parse_litellm_response_to_embedding_response(response: Any) -> EmbeddingRes
              prompt_tokens=response.usage.prompt_tokens,
              total_tokens=response.usage.total_tokens,
          )
-         return EmbeddingResponse(
+         return EmbeddingModelResponse(
              data=embedding_data,
              model=response.model,
              object="list",
              usage=usage,
          )
      except Exception as e:
-         raise LiteLlmEmbeddingError(
+         raise EmbeddingModelError(
              f"Failed to parse litellm response to embedding response: {e}",
              response,
          )


- class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
+ @dataclass
+ class EmbeddingModel:
      """Embeddings provider client that utilizes the `litellm` module
      when generating embeddings."""
-
-     @staticmethod
-     async def async_embed(
+
+     model: EmbeddingModelName = "openai/text-embedding-3-small"
+
+     async def async_run(
+         self,
          input: List[Any] | Any,
-         model: LiteLlmEmbeddingModel | str,
          dimensions: Optional[int] = None,
          encoding_format: Optional[str] = None,
          timeout=600,
@@ -163,13 +89,12 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
          caching: bool = False,
          user: Optional[str] = None,
          format: bool = False,
-     ) -> Embedding:
+     ) -> EmbeddingModelResponse:
          """Asynchronously generate embeddings for the given input using
          a valid `litellm` model.

          Args:
              input (List[Any] | Any) : The input text / content to generate embeddings for.
-             model (LiteLlmEmbeddingModel | str) : The model to use for generating embeddings.
              dimensions (Optional[int]) : The number of dimensions for the embedding.
              encoding_format (Optional[str]) : The format to return the embeddings in. (e.g. "float", "base64")
              timeout (int) : The timeout for the request.
@@ -182,7 +107,7 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
              format (bool) : Whether to format each non-string input as a markdown string.

          Returns:
-             Embedding : The embedding generated for the given input.
+             EmbeddingModelResponse : The embedding response generated for the given input.
          """
          if not isinstance(input, list):
              input = [input]
@@ -192,16 +117,16 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
                  try:
                      i = convert_to_text(i)
                  except Exception as e:
-                     raise LiteLlmEmbeddingError(
+                     raise EmbeddingModelError(
                          f"Failed to format input to text: {e}",
                          i,
                      )

-         async_embedding_fn = get_litellm().aembedding
+         async_embedding_fn = _AIProvider.get_litellm().aembedding

          try:
              response = await async_embedding_fn(
-                 model=model,
+                 model=self.model,
                  input=input,
                  dimensions=dimensions,
                  encoding_format=encoding_format,
@@ -214,14 +139,13 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
                  user=user,
              )
          except Exception as e:
-             raise e
+             raise EmbeddingModelError(f"Error in embedding model request: {e}", response=None) from e

-         return _parse_litellm_response_to_embedding_response(response)
+         return _parse_litellm_response_to_embedding_model_response(response)

-     @staticmethod
-     def embed(
+     def run(
+         self,
          input: List[Any] | Any,
-         model: LiteLlmEmbeddingModel | str,
          dimensions: Optional[int] = None,
          encoding_format: Optional[str] = None,
          timeout=600,
@@ -232,13 +156,12 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
          caching: bool = False,
          user: Optional[str] = None,
          format: bool = False,
-     ) -> Embedding:
+     ) -> EmbeddingModelResponse:
          """Generate embeddings for the given input using
          a valid `litellm` model.

          Args:
              input (List[Any] | Any) : The input text / content to generate embeddings for.
-             model (LiteLlmEmbeddingModel | str) : The model to use for generating embeddings.
              dimensions (Optional[int]) : The number of dimensions for the embedding.
              encoding_format (Optional[str]) : The format to return the embeddings in. (e.g. "float", "base64")
              timeout (int) : The timeout for the request.
@@ -251,26 +174,10 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
              format (bool) : Whether to format each non-string input as a markdown string.

          Returns:
-             Embedding : The embedding generated for the given input.
+             EmbeddingModelResponse : The embedding response generated for the given input.
          """
-         if not isinstance(input, list):
-             input = [input]
-
-         if format:
-             for i in input:
-                 try:
-                     i = convert_to_text(i)
-                 except Exception as e:
-                     raise LiteLlmEmbeddingError(
-                         f"Failed to format input to text: {e}",
-                         i,
-                     )
-
-         sync_embedding_fn = get_litellm().embedding
-
-         try:
-             response = sync_embedding_fn(
-                 model=model,
+         return asyncio.run(
+             self.async_run(
                  input=input,
                  dimensions=dimensions,
                  encoding_format=encoding_format,
@@ -281,8 +188,6 @@ class LiteLlmEmbeddingsClient(BaseEmbeddingsClient):
                  api_type=api_type,
                  caching=caching,
                  user=user,
+                 format=format,
              )
-         except Exception as e:
-             raise e
-
-         return _parse_litellm_response_to_embedding_response(response)
+         )
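
This rename also moves the model name from a per-call argument to a dataclass field, so one configured instance can be reused across calls. A minimal usage sketch of the new API, assuming `litellm` is installed and provider credentials are configured (the model name and inputs here are illustrative):

    from hammad.genai.embedding_models.embedding_model import EmbeddingModel

    # The model is now configured once on the instance rather than passed per call.
    model = EmbeddingModel(model="openai/text-embedding-3-small")

    # run() wraps async_run() via asyncio.run(), so it must be called from
    # synchronous code, not from inside an already-running event loop.
    response = model.run(
        input=["first document", "second document"],
        dimensions=256,  # optional, forwarded to litellm
        format=True,     # convert non-string inputs to text first
    )
    print(response.model, len(response.data))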
hammad/genai/embedding_models/embedding_model_name.py +77 -0

@@ -0,0 +1,77 @@
+ """hammad.genai.embedding_models.embedding_model_name"""
+
+ from typing import Literal
+
+
+ __all__ = (
+     "EmbeddingModelName",
+ )
+
+
+ EmbeddingModelName = Literal[
+     # OpenAI Embedding Models
+     "text-embedding-3-small",
+     "text-embedding-3-large",
+     "text-embedding-ada-002",
+     # OpenAI Compatible Embedding Models
+     "openai/text-embedding-3-small",
+     "openai/text-embedding-3-large",
+     "openai/text-embedding-ada-002",
+     # Bedrock Embedding Models
+     "amazon.titan-embed-text-v1",
+     "cohere.embed-english-v3",
+     "cohere.embed-multilingual-v3",
+     # Cohere Embedding Models
+     "embed-english-v3.0",
+     "embed-english-light-v3.0",
+     "embed-multilingual-v3.0",
+     "embed-multilingual-light-v3.0",
+     "embed-english-v2.0",
+     "embed-english-light-v2.0",
+     "embed-multilingual-v2.0",
+     # NVIDIA NIM Embedding Models
+     "nvidia_nim/NV-Embed-QA",
+     "nvidia_nim/nvidia/nv-embed-v1",
+     "nvidia_nim/nvidia/nv-embedqa-mistral-7b-v2",
+     "nvidia_nim/nvidia/nv-embedqa-e5-v5",
+     "nvidia_nim/nvidia/embed-qa-4",
+     "nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v1",
+     "nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v2",
+     "nvidia_nim/snowflake/arctic-embed-l",
+     "nvidia_nim/baai/bge-m3",
+     # HuggingFace Embedding Models
+     "huggingface/microsoft/codebert-base",
+     "huggingface/BAAI/bge-large-zh",
+     # Mistral AI Embedding Models
+     "mistral/mistral-embed",
+     # Gemini AI Embedding Models
+     "gemini/text-embedding-004",
+     # Vertex AI Embedding Models
+     "vertex_ai/textembedding-gecko",
+     "vertex_ai/textembedding-gecko-multilingual",
+     "vertex_ai/textembedding-gecko-multilingual@001",
+     "vertex_ai/textembedding-gecko@001",
+     "vertex_ai/textembedding-gecko@003",
+     "vertex_ai/text-embedding-preview-0409",
+     "vertex_ai/text-multilingual-embedding-preview-0409",
+     # Voyage AI Embedding Models
+     "voyage/voyage-01",
+     "voyage/voyage-lite-01",
+     "voyage/voyage-lite-01-instruct",
+     # Nebius AI Studio Embedding Models
+     "nebius/BAAI/bge-en-icl",
+     "nebius/BAAI/bge-multilingual-gemma2",
+     "nebius/intfloat/e5-mistral-7b-instruct",
+     # Ollama Embedding Models
+     "ollama/granite-embedding:30m",
+     "ollama/granite-embedding:278m",
+     "ollama/snowflake-arctic-embed2",
+     "ollama/bge-large",
+     "ollama/paraphrase-multilingual",
+     "ollama/bge-m3",
+     "ollama/snowflake-arctic-embed",
+     "ollama/mxbai-embed-large",
+     "ollama/all-minilm",
+     "ollama/nomic-embed-text",
+ ]
+ """Common embedding models supported by `litellm`."""
hammad/genai/embedding_models/embedding_model_request.py +65 -0

@@ -0,0 +1,65 @@
+ """hammad.genai.embedding_models.embedding_model_request"""
+
+ import sys
+ if sys.version_info >= (3, 12):
+     from typing import TypedDict, Required, NotRequired
+ else:
+     from typing_extensions import TypedDict, Required, NotRequired
+
+ from typing import (
+     Any,
+     Dict,
+     List,
+     Optional,
+     Type,
+     TypeVar,
+     Union,
+     Literal,
+ )
+
+ from .embedding_model_name import EmbeddingModelName
+
+ __all__ = [
+     "EmbeddingModelRequest",
+ ]
+
+
+ class EmbeddingModelRequest(TypedDict, total=False):
+     """A request to an embedding model."""
+
+     input: List[Any] | Any
+     """The input items to embed."""
+
+     model: EmbeddingModelName | str
+     """The embedding model to use."""
+
+     format: bool = False
+     """Whether to format each non-string input as a markdown string."""
+
+     # LiteLLM Settings
+     dimensions: Optional[int] = None
+     """The dimensions of the embedding."""
+
+     encoding_format: Optional[str] = None
+     """The encoding format of the embedding."""
+
+     timeout: Optional[int] = None
+     """The timeout for the embedding request."""
+
+     api_base: Optional[str] = None
+     """The API base for the embedding request."""
+
+     api_version: Optional[str] = None
+     """The API version for the embedding request."""
+
+     api_key: Optional[str] = None
+     """The API key for the embedding request."""
+
+     api_type: Optional[str] = None
+     """The API type for the embedding request."""
+
+     caching: bool = False
+     """Whether to cache the embedding request."""
+
+     user: Optional[str] = None
+     """The user for the embedding request."""
hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3

@@ -1,4 +1,4 @@
- """hammad.ai.embeddings.types"""
+ """hammad.genai.embedding_models.embedding_model_response"""

  from typing import List, Literal

@@ -41,7 +41,7 @@ class EmbeddingUsage(BaseModel):
      """The total number of tokens used by the request."""


- class EmbeddingResponse(BaseModel):
+ class EmbeddingModelResponse(BaseModel):
      data: List[Embedding]
      """The list of embeddings generated by the model."""

@@ -61,7 +61,7 @@ class EmbeddingResponse(BaseModel):

      def __str__(self) -> str:
          return (
-             "Embedding Response:\n"
+             "EmbeddingModelResponse:\n"
              f">>> Model: {self.model}\n"
              f">>> Dimensions: {self.dimensions}\n"
              f">>> Usage: {self.usage}\n"
hammad/genai/embedding_models/run.py +161 -0

@@ -0,0 +1,161 @@
+ """hammad.genai.embedding_models.run
+
+ Standalone functions for running embedding models with full parameter typing.
+ """
+
+ from typing import Any, List, Optional, overload, Union
+
+ from .embedding_model_name import EmbeddingModelName
+ from .embedding_model import EmbeddingModel
+ from .embedding_model_response import EmbeddingModelResponse
+
+ __all__ = [
+     "run_embedding_model",
+     "async_run_embedding_model",
+ ]
+
+
+ # Overloads for run_embedding_model
+ @overload
+ def run_embedding_model(
+     input: List[Any] | Any,
+     *,
+     # Provider settings
+     model: EmbeddingModelName = "openai/text-embedding-3-small",
+     api_base: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     api_type: Optional[str] = None,
+     # Extended settings
+     dimensions: Optional[int] = None,
+     encoding_format: Optional[str] = None,
+     timeout: int = 600,
+     caching: bool = False,
+     user: Optional[str] = None,
+     format: bool = False,
+ ) -> EmbeddingModelResponse: ...
+
+
+ def run_embedding_model(
+     input: List[Any] | Any,
+     *,
+     # Provider settings
+     model: EmbeddingModelName = "openai/text-embedding-3-small",
+     api_base: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     api_type: Optional[str] = None,
+     # Extended settings
+     dimensions: Optional[int] = None,
+     encoding_format: Optional[str] = None,
+     timeout: int = 600,
+     caching: bool = False,
+     user: Optional[str] = None,
+     format: bool = False,
+ ) -> EmbeddingModelResponse:
+     """Run an embedding model with the given input.
+
+     Args:
+         input: The input text/content to generate embeddings for
+         model: The embedding model to use
+         api_base: The base URL for the API
+         api_key: The API key to use for the request
+         api_version: The version of the API
+         api_type: The API type to use for the request
+         dimensions: The number of dimensions for the embedding
+         encoding_format: The format to return the embeddings in
+         timeout: The timeout for the request
+         caching: Whether to cache the request
+         user: The user to use for the request
+         format: Whether to format each non-string input as a markdown string
+
+     Returns:
+         EmbeddingModelResponse: The embedding response
+     """
+     embedding_model = EmbeddingModel(model=model)
+     return embedding_model.run(
+         input=input,
+         dimensions=dimensions,
+         encoding_format=encoding_format,
+         timeout=timeout,
+         api_base=api_base,
+         api_version=api_version,
+         api_key=api_key,
+         api_type=api_type,
+         caching=caching,
+         user=user,
+         format=format,
+     )
+
+
+ # Overloads for async_run_embedding_model
+ @overload
+ async def async_run_embedding_model(
+     input: List[Any] | Any,
+     *,
+     # Provider settings
+     model: EmbeddingModelName = "openai/text-embedding-3-small",
+     api_base: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     api_type: Optional[str] = None,
+     # Extended settings
+     dimensions: Optional[int] = None,
+     encoding_format: Optional[str] = None,
+     timeout: int = 600,
+     caching: bool = False,
+     user: Optional[str] = None,
+     format: bool = False,
+ ) -> EmbeddingModelResponse: ...
+
+
+ async def async_run_embedding_model(
+     input: List[Any] | Any,
+     *,
+     # Provider settings
+     model: EmbeddingModelName = "openai/text-embedding-3-small",
+     api_base: Optional[str] = None,
+     api_key: Optional[str] = None,
+     api_version: Optional[str] = None,
+     api_type: Optional[str] = None,
+     # Extended settings
+     dimensions: Optional[int] = None,
+     encoding_format: Optional[str] = None,
+     timeout: int = 600,
+     caching: bool = False,
+     user: Optional[str] = None,
+     format: bool = False,
+ ) -> EmbeddingModelResponse:
+     """Asynchronously run an embedding model with the given input.
+
+     Args:
+         input: The input text/content to generate embeddings for
+         model: The embedding model to use
+         api_base: The base URL for the API
+         api_key: The API key to use for the request
+         api_version: The version of the API
+         api_type: The API type to use for the request
+         dimensions: The number of dimensions for the embedding
+         encoding_format: The format to return the embeddings in
+         timeout: The timeout for the request
+         caching: Whether to cache the request
+         user: The user to use for the request
+         format: Whether to format each non-string input as a markdown string
+
+     Returns:
+         EmbeddingModelResponse: The embedding response
+     """
+     embedding_model = EmbeddingModel(model=model)
+     return await embedding_model.async_run(
+         input=input,
+         dimensions=dimensions,
+         encoding_format=encoding_format,
+         timeout=timeout,
+         api_base=api_base,
+         api_version=api_version,
+         api_key=api_key,
+         api_type=api_type,
+         caching=caching,
+         user=user,
+         format=format,
+     )
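
A short sketch of these standalone entry points (model names and inputs illustrative; requires `litellm` and credentials for the chosen provider):

    import asyncio

    from hammad.genai.embedding_models.run import (
        async_run_embedding_model,
        run_embedding_model,
    )

    # Blocking variant: builds an EmbeddingModel and delegates to .run().
    response = run_embedding_model(
        input="a single item is wrapped into a list internally",
        model="openai/text-embedding-3-small",
    )

    # Async variant for callers that already manage an event loop.
    async def embed_batch() -> None:
        batch_response = await async_run_embedding_model(
            input=["batch", "of", "texts"],
            model="openai/text-embedding-3-small",
        )
        print(batch_response.usage)

    asyncio.run(embed_batch())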
hammad/genai/language_models/__init__.py +35 -0

@@ -0,0 +1,35 @@
+ """hammad.genai.language_models"""
+
+ from typing import TYPE_CHECKING
+ from ..._internal import create_getattr_importer
+
+ if TYPE_CHECKING:
+     from .language_model import LanguageModel
+     from .run import run_language_model, async_run_language_model
+     from .language_model_request import LanguageModelMessagesParam
+     from .language_model_response import LanguageModelResponse
+     from .language_model_response_chunk import LanguageModelResponseChunk
+     from .language_model_request import LanguageModelRequest
+
+ __all__ = (
+     # hammad.genai.language_models.language_model
+     "LanguageModel",
+
+     # hammad.genai.language_models.run
+     "run_language_model",
+     "async_run_language_model",
+
+     # hammad.genai.language_models.language_model_request
+     "LanguageModelMessagesParam",
+     "LanguageModelRequest",
+
+     # hammad.genai.language_models.language_model_response
+     "LanguageModelResponse",
+     "LanguageModelResponseChunk",
+ )
+
+ __getattr__ = create_getattr_importer(__all__)
+
+ def __dir__() -> list[str]:
+     """Get the attributes of the language_models module."""
+     return list(__all__)
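
`create_getattr_importer` itself lives in `hammad/_internal.py` (renamed from `hammad/performance/imports.py` above) and is not shown in this diff. As a rough, hypothetical sketch of the PEP 562 pattern it appears to implement, a module-level `__getattr__` can defer the real imports until an exported name is first accessed; the helper below is illustrative only, not the library's actual implementation:

    # Hypothetical sketch only; the library's actual helper may differ.
    import importlib
    from typing import Any, Callable, Mapping


    def lazy_getattr_importer(
        package: str, locations: Mapping[str, str]
    ) -> Callable[[str], Any]:
        """Build a module-level __getattr__ that imports names on demand.

        `locations` maps an exported name to the submodule defining it,
        e.g. {"LanguageModel": ".language_model"}.
        """

        def __getattr__(name: str) -> Any:
            if name not in locations:
                raise AttributeError(f"module {package!r} has no attribute {name!r}")
            # Resolve the relative submodule against the package, then pull
            # the requested attribute from it.
            module = importlib.import_module(locations[name], package)
            return getattr(module, name)

        return __getattr__

With this pattern, importing `hammad.genai.language_models` stays cheap, and heavier dependencies are only pulled in when a name such as `LanguageModel` is actually used.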