hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (122)
  1. hammad/__init__.py +177 -0
  2. hammad/{performance/imports.py → _internal.py} +7 -1
  3. hammad/cache/__init__.py +1 -1
  4. hammad/cli/__init__.py +3 -1
  5. hammad/cli/_runner.py +265 -0
  6. hammad/cli/animations.py +1 -1
  7. hammad/cli/plugins.py +133 -78
  8. hammad/cli/styles/__init__.py +1 -1
  9. hammad/cli/styles/utils.py +149 -3
  10. hammad/data/__init__.py +56 -29
  11. hammad/data/collections/__init__.py +27 -17
  12. hammad/data/collections/collection.py +205 -383
  13. hammad/data/collections/indexes/__init__.py +37 -0
  14. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  15. hammad/data/collections/indexes/qdrant/index.py +735 -0
  16. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  17. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  18. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  19. hammad/data/collections/indexes/tantivy/index.py +428 -0
  20. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  21. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  22. hammad/data/configurations/__init__.py +2 -2
  23. hammad/data/configurations/configuration.py +2 -2
  24. hammad/data/models/__init__.py +20 -9
  25. hammad/data/models/extensions/__init__.py +4 -0
  26. hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
  27. hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
  28. hammad/data/models/{base/fields.py → fields.py} +1 -1
  29. hammad/data/models/{base/model.py → model.py} +1 -1
  30. hammad/data/models/{base/utils.py → utils.py} +1 -1
  31. hammad/data/sql/__init__.py +23 -0
  32. hammad/data/sql/database.py +578 -0
  33. hammad/data/sql/types.py +141 -0
  34. hammad/data/types/__init__.py +1 -3
  35. hammad/data/types/file.py +3 -3
  36. hammad/data/types/multimodal/__init__.py +2 -2
  37. hammad/data/types/multimodal/audio.py +2 -2
  38. hammad/data/types/multimodal/image.py +2 -2
  39. hammad/formatting/__init__.py +9 -27
  40. hammad/formatting/json/__init__.py +8 -2
  41. hammad/formatting/json/converters.py +7 -1
  42. hammad/formatting/text/__init__.py +1 -1
  43. hammad/formatting/yaml/__init__.py +1 -1
  44. hammad/genai/__init__.py +78 -0
  45. hammad/genai/agents/__init__.py +1 -0
  46. hammad/genai/agents/types/__init__.py +35 -0
  47. hammad/genai/agents/types/history.py +277 -0
  48. hammad/genai/agents/types/tool.py +490 -0
  49. hammad/genai/embedding_models/__init__.py +41 -0
  50. hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
  51. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  52. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  53. hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
  54. hammad/genai/embedding_models/run.py +161 -0
  55. hammad/genai/language_models/__init__.py +35 -0
  56. hammad/genai/language_models/_streaming.py +622 -0
  57. hammad/genai/language_models/_types.py +276 -0
  58. hammad/genai/language_models/_utils/__init__.py +31 -0
  59. hammad/genai/language_models/_utils/_completions.py +131 -0
  60. hammad/genai/language_models/_utils/_messages.py +89 -0
  61. hammad/genai/language_models/_utils/_requests.py +202 -0
  62. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  63. hammad/genai/language_models/language_model.py +734 -0
  64. hammad/genai/language_models/language_model_request.py +135 -0
  65. hammad/genai/language_models/language_model_response.py +219 -0
  66. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  67. hammad/genai/language_models/run.py +530 -0
  68. hammad/genai/multimodal_models.py +48 -0
  69. hammad/genai/rerank_models.py +26 -0
  70. hammad/logging/__init__.py +1 -1
  71. hammad/logging/decorators.py +1 -1
  72. hammad/logging/logger.py +2 -2
  73. hammad/mcp/__init__.py +1 -1
  74. hammad/mcp/client/__init__.py +35 -0
  75. hammad/mcp/client/client.py +105 -4
  76. hammad/mcp/client/client_service.py +10 -3
  77. hammad/mcp/servers/__init__.py +24 -0
  78. hammad/{performance/runtime → runtime}/__init__.py +2 -2
  79. hammad/{performance/runtime → runtime}/decorators.py +1 -1
  80. hammad/{performance/runtime → runtime}/run.py +1 -1
  81. hammad/service/__init__.py +1 -1
  82. hammad/service/create.py +3 -8
  83. hammad/service/decorators.py +8 -8
  84. hammad/typing/__init__.py +28 -0
  85. hammad/web/__init__.py +3 -3
  86. hammad/web/http/client.py +1 -1
  87. hammad/web/models.py +53 -21
  88. hammad/web/search/client.py +99 -52
  89. hammad/web/utils.py +13 -13
  90. hammad_python-0.0.16.dist-info/METADATA +191 -0
  91. hammad_python-0.0.16.dist-info/RECORD +110 -0
  92. hammad/ai/__init__.py +0 -1
  93. hammad/ai/_utils.py +0 -142
  94. hammad/ai/completions/__init__.py +0 -45
  95. hammad/ai/completions/client.py +0 -684
  96. hammad/ai/completions/create.py +0 -710
  97. hammad/ai/completions/settings.py +0 -100
  98. hammad/ai/completions/types.py +0 -792
  99. hammad/ai/completions/utils.py +0 -486
  100. hammad/ai/embeddings/__init__.py +0 -35
  101. hammad/ai/embeddings/client/__init__.py +0 -1
  102. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  103. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  104. hammad/ai/embeddings/create.py +0 -159
  105. hammad/data/collections/base_collection.py +0 -58
  106. hammad/data/collections/searchable_collection.py +0 -556
  107. hammad/data/collections/vector_collection.py +0 -596
  108. hammad/data/databases/__init__.py +0 -21
  109. hammad/data/databases/database.py +0 -902
  110. hammad/data/models/base/__init__.py +0 -35
  111. hammad/data/models/pydantic/models/__init__.py +0 -28
  112. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  113. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  114. hammad/data/models/pydantic/models/fast_model.py +0 -318
  115. hammad/data/models/pydantic/models/function_model.py +0 -176
  116. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  117. hammad/performance/__init__.py +0 -36
  118. hammad/py.typed +0 -0
  119. hammad_python-0.0.14.dist-info/METADATA +0 -70
  120. hammad_python-0.0.14.dist-info/RECORD +0 -99
  121. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  122. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
hammad/ai/completions/utils.py
@@ -1,486 +0,0 @@
- """hammad.ai.completions.utils"""
-
- import json
- from typing import (
-     Optional,
-     List,
-     Iterator,
-     AsyncIterator,
-     TypeVar,
-     Type,
-     Any,
- )
-
- try:
-     from pydantic import BaseModel
- except ImportError:
-     raise ImportError(
-         "Using completion stream parsing requires the `openai` and `instructor` packages."
-         "Please install with: pip install 'hammad-python[ai]'"
-     )
-
- from ...cache import cached
- from .types import (
-     CompletionsInputParam,
-     ChatCompletionMessageParam,
-     CompletionStream,
-     AsyncCompletionStream,
-     Completion,
- )
-
- T = TypeVar("T", bound=BaseModel)
-
- __all__ = (
-     "parse_completions_input",
-     "create_completion_stream",
-     "create_async_completion_stream",
-     "format_tool_calls",
-     "convert_response_to_completion",
-     "InstructorStreamWrapper",
-     "AsyncInstructorStreamWrapper",
- )
-
-
- @cached
- def parse_completions_input(
-     input: CompletionsInputParam,
-     instructions: Optional[str] = None,
- ) -> List[ChatCompletionMessageParam]:
-     """Parse various input formats into a list of ChatCompletionMessageParam.
-
-     This function handles:
-     - Plain strings (converted to user messages)
-     - Strings with message blocks like [system], [user], [assistant]
-     - Single ChatCompletionMessageParam objects
-     - Lists of ChatCompletionMessageParam objects
-     - Objects with model_dump() method
-
-     Args:
-         input: The input to parse
-         instructions: Optional system instructions to prepend
-
-     Returns:
-         List of ChatCompletionMessageParam objects
-     """
-     messages: List[ChatCompletionMessageParam] = []
-
-     # Handle string inputs
-     if isinstance(input, str):
-         # Check if string contains message blocks like [system], [user], [assistant]
-         import re
-
-         # Pattern to match only allowed message blocks (system, user, assistant)
-         pattern = (
-             r"\[(system|user|assistant)\]\s*(.*?)(?=\[(?:system|user|assistant)\]|$)"
-         )
-         matches = re.findall(pattern, input, re.DOTALL | re.IGNORECASE)
-
-         if matches:
-             # Validate that we only have allowed roles
-             allowed_roles = {"system", "user", "assistant"}
-             found_roles = {role.lower() for role, _ in matches}
-
-             if not found_roles.issubset(allowed_roles):
-                 invalid_roles = found_roles - allowed_roles
-                 raise ValueError(
-                     f"Invalid message roles found: {invalid_roles}. Only 'system', 'user', and 'assistant' are allowed."
-                 )
-
-             # Parse message blocks
-             system_contents = []
-
-             for role, content in matches:
-                 content = content.strip()
-                 if content:
-                     if role.lower() == "system":
-                         system_contents.append(content)
-                     else:
-                         messages.append({"role": role.lower(), "content": content})
-
-             # Combine system contents if any exist
-             if system_contents:
-                 combined_system = "\n\n".join(system_contents)
-                 if instructions:
-                     combined_system = f"{combined_system}\n\n{instructions}"
-                 messages.insert(0, {"role": "system", "content": combined_system})
-             elif instructions:
-                 messages.insert(0, {"role": "system", "content": instructions})
-         else:
-             # Plain string - create user message
-             if instructions:
-                 messages.append({"role": "system", "content": instructions})
-             messages.append({"role": "user", "content": input})
-
-     # Handle single message object
-     elif hasattr(input, "model_dump"):
-         message_dict = input.model_dump()
-         if instructions:
-             messages.append({"role": "system", "content": instructions})
-         messages.append(message_dict)
-
-     # Handle list of messages
-     elif isinstance(input, list):
-         system_contents = []
-         other_messages = []
-
-         for item in input:
-             if hasattr(item, "model_dump"):
-                 msg_dict = item.model_dump()
-             else:
-                 msg_dict = item
-
-             if msg_dict.get("role") == "system":
-                 system_contents.append(msg_dict.get("content", ""))
-             else:
-                 other_messages.append(msg_dict)
-
-         # Combine system messages and instructions
-         if system_contents or instructions:
-             combined_system_parts = []
-             if system_contents:
-                 combined_system_parts.extend(system_contents)
-             if instructions:
-                 combined_system_parts.append(instructions)
-
-             messages.append(
-                 {"role": "system", "content": "\n\n".join(combined_system_parts)}
-             )
-
-         messages.extend(other_messages)
-
-     # Handle single dictionary or other object
-     else:
-         if hasattr(input, "model_dump"):
-             message_dict = input.model_dump()
-         else:
-             message_dict = input
-
-         if instructions:
-             messages.append({"role": "system", "content": instructions})
-         messages.append(message_dict)
-
-     return messages
-
-
- def create_completion_stream(
-     stream: Iterator[Any], output_type: Type[T] = str, model: str | None = None
- ) -> CompletionStream[T]:
-     """Create a unified completion stream from a raw stream.
-
-     This function wraps raw streams from both LiteLLM and Instructor
-     into a unified CompletionStream interface. It automatically detects
-     the stream type based on the output_type parameter.
-
-     Args:
-         stream: The raw stream from LiteLLM or Instructor
-         output_type: The expected output type (str for LiteLLM, model class for Instructor)
-         model: The model name for metadata
-
-     Returns:
-         CompletionStream: Unified stream interface
-
-     Examples:
-         # For LiteLLM string completions
-         litellm_stream = litellm.completion(model="gpt-4", messages=messages, stream=True)
-         unified_stream = create_completion_stream(litellm_stream, str, "gpt-4")
-
-         # For Instructor structured outputs
-         instructor_stream = instructor_client.completion(response_model=User, messages=messages, stream=True)
-         unified_stream = create_completion_stream(instructor_stream, User, "gpt-4")
-     """
-     return CompletionStream(stream, output_type, model)
-
-
- def create_async_completion_stream(
-     stream: AsyncIterator[Any], output_type: Type[T] = str, model: str | None = None
- ) -> AsyncCompletionStream[T]:
-     """Create a unified async completion stream from a raw async stream.
-
-     This function wraps raw async streams from both LiteLLM and Instructor
-     into a unified AsyncCompletionStream interface. It automatically detects
-     the stream type based on the output_type parameter.
-
-     Args:
-         stream: The raw async stream from LiteLLM or Instructor
-         output_type: The expected output type (str for LiteLLM, model class for Instructor)
-         model: The model name for metadata
-
-     Returns:
-         AsyncCompletionStream: Unified async stream interface
-
-     Examples:
-         ```python
-         # For LiteLLM async string completions
-         litellm_stream = await litellm.acompletion(model="gpt-4", messages=messages, stream=True)
-         unified_stream = create_async_completion_stream(litellm_stream, str, "gpt-4")
-
-         # For Instructor async structured outputs
-         instructor_stream = await instructor_client.acompletion(response_model=User, messages=messages, stream=True)
-         unified_stream = create_async_completion_stream(instructor_stream, User, "gpt-4")
-         ```
-     """
-     return AsyncCompletionStream(stream, output_type, model)
-
-
- def format_tool_calls(
-     messages: List[ChatCompletionMessageParam],
- ) -> List[ChatCompletionMessageParam]:
-     """Format message thread by replacing tool call blocks with readable assistant messages.
-
-     This function processes a message thread and replaces sequences of:
-     assistant(with tool_calls) + tool + tool + ... with a single clean assistant message
-     that describes what tools were called and their results.
-
-     Args:
-         messages: List of messages in the conversation thread
-
-     Returns:
-         List[ChatCompletionMessageParam]: Cleaned message thread with tool calls formatted
-
-     Example:
-         ```python
-         messages = [
-             {"role": "user", "content": "What's the weather in NYC?"},
-             {"role": "assistant", "tool_calls": [...]},
-             {"role": "tool", "tool_call_id": "call_1", "content": "Sunny, 72°F"},
-             {"role": "user", "content": "Thanks!"}
-         ]
-
-         formatted = format_tool_calls(messages)
-         # Returns: [
-         #     {"role": "user", "content": "What's the weather in NYC?"},
-         #     {"role": "assistant", "content": "I called get_weather tool with parameters (city=NYC), and got result: Sunny, 72°F"},
-         #     {"role": "user", "content": "Thanks!"}
-         # ]
-         ```
-     """
-     if not messages:
-         return messages
-
-     formatted_messages = []
-     i = 0
-
-     while i < len(messages):
-         current_msg = messages[i]
-
-         # Check if this is an assistant message with tool calls
-         if current_msg.get("role") == "assistant" and current_msg.get("tool_calls"):
-             # Collect all following tool messages
-             tool_results = {}
-             j = i + 1
-
-             # Gather tool results that follow this assistant message
-             while j < len(messages) and messages[j].get("role") == "tool":
-                 tool_msg = messages[j]
-                 tool_call_id = tool_msg.get("tool_call_id")
-                 if tool_call_id:
-                     tool_results[tool_call_id] = tool_msg.get("content", "No result")
-                 j += 1
-
-             # Format the tool calls with their results
-             tool_calls = current_msg.get("tool_calls", [])
-             formatted_calls = []
-
-             for tool_call in tool_calls:
-                 tool_name = tool_call.function.name
-                 tool_args = tool_call.function.arguments
-                 tool_id = tool_call.id
-
-                 # Parse arguments for cleaner display
-                 try:
-                     args_dict = json.loads(tool_args) if tool_args else {}
-                     args_str = ", ".join([f"{k}={v}" for k, v in args_dict.items()])
-                 except json.JSONDecodeError:
-                     args_str = tool_args or "no parameters"
-
-                 # Get the result for this tool call
-                 result = tool_results.get(tool_id, "No result available")
-
-                 # Format the tool call description
-                 call_description = f"I called {tool_name} tool with parameters ({args_str}), and got result: {result}"
-                 formatted_calls.append(call_description)
-
-             # Create the formatted assistant message
-             if len(formatted_calls) == 1:
-                 content = formatted_calls[0]
-             elif len(formatted_calls) > 1:
-                 content = "I made the following tool calls:\n" + "\n".join(
-                     [f"- {call}" for call in formatted_calls]
-                 )
-             else:
-                 content = "I made tool calls but no results were available."
-
-             # Add the formatted message
-             formatted_messages.append({"role": "assistant", "content": content})
-
-             # Skip past all the tool messages we processed
-             i = j
-         else:
-             # Regular message, add as-is
-             formatted_messages.append(current_msg)
-             i += 1
-
-     return formatted_messages
-
-
- def convert_response_to_completion(response: Any) -> Completion[str]:
-     """Convert a LiteLLM ModelResponse to a Completion object.
-
-     This function converts LiteLLM's ModelResponse (which is based on OpenAI's
-     ChatCompletion format) into our unified Completion type for standard
-     string completions.
-
-     Args:
-         response: The ModelResponse from LiteLLM
-
-     Returns:
-         Completion[str]: Unified completion object with string output
-
-     Example:
-         ```python
-         # For LiteLLM completions
-         response = await litellm.acompletion(model="gpt-4", messages=messages)
-         completion = convert_response_to_completion(response)
-         ```
-     """
-     # Handle empty or invalid response
-     if not hasattr(response, "choices") or not response.choices:
-         return Completion(
-             output="",
-             model=getattr(response, "model", "unknown"),
-             content=None,
-             completion=response,
-         )
-
-     choice = response.choices[0]
-
-     # Extract message data
-     if hasattr(choice, "message"):
-         message = choice.message
-         content = getattr(message, "content", None)
-         tool_calls = getattr(message, "tool_calls", None)
-         refusal = getattr(message, "refusal", None)
-     else:
-         # Fallback for different response structures
-         content = None
-         tool_calls = None
-         refusal = None
-
-     return Completion(
-         output=content or "",
-         model=getattr(response, "model", "unknown"),
-         content=content,
-         tool_calls=tool_calls,
-         refusal=refusal,
-         completion=response,
-     )
-
-
- class InstructorStreamWrapper:
-     """Wrapper for instructor streaming that captures raw completion content using hooks."""
-
-     def __init__(self, client, response_model, params, output_type, model):
-         self.client = client
-         self.response_model = response_model
-         self.params = params
-         self.output_type = output_type
-         self.model = model
-         self._raw_content_chunks = []
-         self._raw_completion = None
-         self._tool_calls = None
-
-         # Set up hooks to capture raw content
-         self.client.on("completion:response", self._capture_completion)
-
-     def _capture_completion(self, completion):
-         """Capture the raw completion response."""
-         self._raw_completion = completion
-         if hasattr(completion, "choices") and completion.choices:
-             choice = completion.choices[0]
-             # Capture content chunks
-             if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
-                 content = choice.delta.content
-                 if content:
-                     self._raw_content_chunks.append(content)
-             # Capture tool calls from message (final chunk)
-             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls"):
-                 self._tool_calls = choice.message.tool_calls
-
-     def __iter__(self):
-         """Create the stream and yield wrapped chunks."""
-         stream = self.client.chat.completions.create_partial(
-             response_model=self.response_model, **self.params
-         )
-
-         for chunk in stream:
-             yield chunk
-
-         # Clean up hooks
-         self.client.off("completion:response", self._capture_completion)
-
-     def get_raw_content(self):
-         """Get the accumulated raw content."""
-         return "".join(self._raw_content_chunks)
-
-     def get_raw_completion(self):
-         """Get the raw completion object."""
-         return self._raw_completion
-
-     def get_tool_calls(self):
-         """Get the tool calls from the completion."""
-         return self._tool_calls
-
-     def get_tool_calls(self):
-         """Get the tool calls from the completion."""
-         return self._tool_calls
-
-
- class AsyncInstructorStreamWrapper:
-     """Async wrapper for instructor streaming that captures raw completion content using hooks."""
-
-     def __init__(self, client, response_model, params, output_type, model):
-         self.client = client
-         self.response_model = response_model
-         self.params = params
-         self.output_type = output_type
-         self.model = model
-         self._raw_content_chunks = []
-         self._raw_completion = None
-         self._tool_calls = None
-
-         # Set up hooks to capture raw content
-         self.client.on("completion:response", self._capture_completion)
-
-     def _capture_completion(self, completion):
-         """Capture the raw completion response."""
-         self._raw_completion = completion
-         if hasattr(completion, "choices") and completion.choices:
-             choice = completion.choices[0]
-             # Capture content chunks
-             if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
-                 content = choice.delta.content
-                 if content:
-                     self._raw_content_chunks.append(content)
-             # Capture tool calls from message (final chunk)
-             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls"):
-                 self._tool_calls = choice.message.tool_calls
-
-     async def __aiter__(self):
-         """Create the stream and yield wrapped chunks."""
-         stream = await self.client.chat.completions.create_partial(
-             response_model=self.response_model, **self.params
-         )
-
-         async for chunk in stream:
-             yield chunk
-
-         # Clean up hooks
-         self.client.off("completion:response", self._capture_completion)
-
-     def get_raw_content(self):
-         """Get the accumulated raw content."""
-         return "".join(self._raw_content_chunks)
-
-     def get_raw_completion(self):
-         """Get the raw completion object."""
-         return self._raw_completion
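For reference, the block syntax accepted by the removed `parse_completions_input` can be demonstrated standalone. The sketch below reuses the exact regex from the deleted code above; plain dicts stand in for `ChatCompletionMessageParam`, and the system-message merging logic is omitted for brevity:

```python
import re

# Regex copied verbatim from the removed parse_completions_input above.
pattern = r"\[(system|user|assistant)\]\s*(.*?)(?=\[(?:system|user|assistant)\]|$)"

text = """[system] You are terse.
[user] What's 2 + 2?
[assistant] 4."""

messages = [
    {"role": role.lower(), "content": content.strip()}
    for role, content in re.findall(pattern, text, re.DOTALL | re.IGNORECASE)
    if content.strip()
]
print(messages)
# [{'role': 'system', 'content': 'You are terse.'},
#  {'role': 'user', 'content': "What's 2 + 2?"},
#  {'role': 'assistant', 'content': '4.'}]
```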
hammad/ai/embeddings/__init__.py
@@ -1,35 +0,0 @@
- """hammad.ai.embeddings"""
-
- from typing import TYPE_CHECKING
- from ...performance.imports import create_getattr_importer
-
- if TYPE_CHECKING:
-     from .client.base_embeddings_client import BaseEmbeddingsClient
-     from .client.fastembed_text_embeddings_client import FastEmbedTextEmbeddingsClient
-     from .client.litellm_embeddings_client import LiteLlmEmbeddingsClient
-     from .types import Embedding, EmbeddingResponse, EmbeddingUsage
-     from .create import create_embeddings, async_create_embeddings
-
-
- __all__ = (
-     # hammad.ai.embeddings.client.base_embeddings_client
-     "BaseEmbeddingsClient",
-     # hammad.ai.embeddings.client.fastembed_text_embeddings_client
-     "FastEmbedTextEmbeddingsClient",
-     # hammad.ai.embeddings.client.litellm_embeddings_client
-     "LiteLlmEmbeddingsClient",
-     # hammad.ai.embeddings.types
-     "Embedding",
-     "EmbeddingResponse",
-     "EmbeddingUsage",
-     # hammad.ai.embeddings.create
-     "create_embeddings",
-     "async_create_embeddings",
- )
-
-
- __getattr__ = create_getattr_importer(__all__)
-
-
- def __dir__() -> list[str]:
-     return list(__all__)
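The module above lazy-loads its exports: names in `__all__` are imported only on first attribute access, via a module-level `__getattr__` (PEP 562) built by `create_getattr_importer`, the helper that the file list shows moving from `hammad/performance/imports.py` to `hammad/_internal.py` in 0.0.16. Its real implementation is not part of this diff, so the following is only a minimal sketch of the pattern; the mapping-based signature is an assumption:

```python
# Minimal PEP 562 sketch -- NOT the actual create_getattr_importer from
# hammad/_internal.py; the name-to-submodule mapping here is an assumption.
import importlib
from typing import Any, Callable, Mapping


def make_lazy_getattr(
    exports: Mapping[str, str], package: str
) -> Callable[[str], Any]:
    """Return a module-level __getattr__ that imports names on first access.

    `exports` maps an exported name to the relative submodule defining it,
    e.g. {"create_embeddings": ".create"}.
    """

    def __getattr__(name: str) -> Any:
        if name not in exports:
            raise AttributeError(f"module {package!r} has no attribute {name!r}")
        module = importlib.import_module(exports[name], package=package)
        return getattr(module, name)

    return __getattr__


# Usage inside a package __init__.py (hypothetical):
# __getattr__ = make_lazy_getattr({"create_embeddings": ".create"}, __name__)
```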
hammad/ai/embeddings/client/__init__.py
@@ -1 +0,0 @@
- """hammad.ai.embeddings.client"""
hammad/ai/embeddings/client/base_embeddings_client.py
@@ -1,26 +0,0 @@
- """hammad.ai.embeddings.client.base_embeddings_client"""
-
- from abc import ABC, abstractmethod
-
- from ..types import (
-     EmbeddingResponse,
- )
-
- __all__ = ("BaseEmbeddingsClient",)
-
-
- class BaseEmbeddingsClient(ABC):
-     """Base class for the various supported embeddings clients within
-     the `hammad.ai` extension."""
-
-     @staticmethod
-     @abstractmethod
-     def async_embed(input: list, model: str, **kwargs) -> EmbeddingResponse:
-         """"""
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def embed(input: list, model: str, **kwargs) -> EmbeddingResponse:
-         """"""
-         pass
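For context, the concrete clients deleted alongside this file (the FastEmbed and LiteLLM clients listed above) subclassed this ABC by implementing both static methods. A toy sketch of that contract, with plain lists standing in for `EmbeddingResponse` since its fields are not shown in this diff:

```python
# Toy subclass illustrating the removed ABC's contract. Plain lists stand in
# for EmbeddingResponse, whose definition is not part of this diff.
from abc import ABC, abstractmethod


class BaseEmbeddingsClient(ABC):
    """Mirror of the removed base class, with the response type relaxed."""

    @staticmethod
    @abstractmethod
    def embed(input: list, model: str, **kwargs) -> list: ...

    @staticmethod
    @abstractmethod
    def async_embed(input: list, model: str, **kwargs) -> list: ...


class LengthEmbeddingsClient(BaseEmbeddingsClient):
    """Embeds each item as a 1-dimensional vector of its string length."""

    @staticmethod
    def embed(input: list, model: str, **kwargs) -> list:
        return [[float(len(str(item)))] for item in input]

    @staticmethod
    async def async_embed(input: list, model: str, **kwargs) -> list:
        return LengthEmbeddingsClient.embed(input, model, **kwargs)


print(LengthEmbeddingsClient.embed(["hi", "hello"], model="length-v0"))
# [[2.0], [5.0]]
```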