hammad-python 0.0.14__py3-none-any.whl → 0.0.15__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (101)
  1. hammad_python-0.0.15.dist-info/METADATA +184 -0
  2. hammad_python-0.0.15.dist-info/RECORD +4 -0
  3. hammad/__init__.py +0 -1
  4. hammad/ai/__init__.py +0 -1
  5. hammad/ai/_utils.py +0 -142
  6. hammad/ai/completions/__init__.py +0 -45
  7. hammad/ai/completions/client.py +0 -684
  8. hammad/ai/completions/create.py +0 -710
  9. hammad/ai/completions/settings.py +0 -100
  10. hammad/ai/completions/types.py +0 -792
  11. hammad/ai/completions/utils.py +0 -486
  12. hammad/ai/embeddings/__init__.py +0 -35
  13. hammad/ai/embeddings/client/__init__.py +0 -1
  14. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  15. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  16. hammad/ai/embeddings/client/litellm_embeddings_client.py +0 -288
  17. hammad/ai/embeddings/create.py +0 -159
  18. hammad/ai/embeddings/types.py +0 -69
  19. hammad/cache/__init__.py +0 -40
  20. hammad/cache/base_cache.py +0 -181
  21. hammad/cache/cache.py +0 -169
  22. hammad/cache/decorators.py +0 -261
  23. hammad/cache/file_cache.py +0 -80
  24. hammad/cache/ttl_cache.py +0 -74
  25. hammad/cli/__init__.py +0 -33
  26. hammad/cli/animations.py +0 -573
  27. hammad/cli/plugins.py +0 -781
  28. hammad/cli/styles/__init__.py +0 -55
  29. hammad/cli/styles/settings.py +0 -139
  30. hammad/cli/styles/types.py +0 -358
  31. hammad/cli/styles/utils.py +0 -480
  32. hammad/data/__init__.py +0 -56
  33. hammad/data/collections/__init__.py +0 -34
  34. hammad/data/collections/base_collection.py +0 -58
  35. hammad/data/collections/collection.py +0 -452
  36. hammad/data/collections/searchable_collection.py +0 -556
  37. hammad/data/collections/vector_collection.py +0 -596
  38. hammad/data/configurations/__init__.py +0 -35
  39. hammad/data/configurations/configuration.py +0 -564
  40. hammad/data/databases/__init__.py +0 -21
  41. hammad/data/databases/database.py +0 -902
  42. hammad/data/models/__init__.py +0 -44
  43. hammad/data/models/base/__init__.py +0 -35
  44. hammad/data/models/base/fields.py +0 -546
  45. hammad/data/models/base/model.py +0 -1078
  46. hammad/data/models/base/utils.py +0 -280
  47. hammad/data/models/pydantic/__init__.py +0 -55
  48. hammad/data/models/pydantic/converters.py +0 -632
  49. hammad/data/models/pydantic/models/__init__.py +0 -28
  50. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  51. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  52. hammad/data/models/pydantic/models/fast_model.py +0 -318
  53. hammad/data/models/pydantic/models/function_model.py +0 -176
  54. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  55. hammad/data/types/__init__.py +0 -41
  56. hammad/data/types/file.py +0 -358
  57. hammad/data/types/multimodal/__init__.py +0 -24
  58. hammad/data/types/multimodal/audio.py +0 -96
  59. hammad/data/types/multimodal/image.py +0 -80
  60. hammad/data/types/text.py +0 -1066
  61. hammad/formatting/__init__.py +0 -38
  62. hammad/formatting/json/__init__.py +0 -21
  63. hammad/formatting/json/converters.py +0 -152
  64. hammad/formatting/text/__init__.py +0 -63
  65. hammad/formatting/text/converters.py +0 -723
  66. hammad/formatting/text/markdown.py +0 -131
  67. hammad/formatting/yaml/__init__.py +0 -26
  68. hammad/formatting/yaml/converters.py +0 -5
  69. hammad/logging/__init__.py +0 -35
  70. hammad/logging/decorators.py +0 -834
  71. hammad/logging/logger.py +0 -954
  72. hammad/mcp/__init__.py +0 -50
  73. hammad/mcp/client/__init__.py +0 -1
  74. hammad/mcp/client/client.py +0 -523
  75. hammad/mcp/client/client_service.py +0 -393
  76. hammad/mcp/client/settings.py +0 -178
  77. hammad/mcp/servers/__init__.py +0 -1
  78. hammad/mcp/servers/launcher.py +0 -1161
  79. hammad/performance/__init__.py +0 -36
  80. hammad/performance/imports.py +0 -231
  81. hammad/performance/runtime/__init__.py +0 -32
  82. hammad/performance/runtime/decorators.py +0 -142
  83. hammad/performance/runtime/run.py +0 -299
  84. hammad/py.typed +0 -0
  85. hammad/service/__init__.py +0 -49
  86. hammad/service/create.py +0 -532
  87. hammad/service/decorators.py +0 -285
  88. hammad/typing/__init__.py +0 -407
  89. hammad/web/__init__.py +0 -43
  90. hammad/web/http/__init__.py +0 -1
  91. hammad/web/http/client.py +0 -944
  92. hammad/web/models.py +0 -245
  93. hammad/web/openapi/__init__.py +0 -1
  94. hammad/web/openapi/client.py +0 -740
  95. hammad/web/search/__init__.py +0 -1
  96. hammad/web/search/client.py +0 -988
  97. hammad/web/utils.py +0 -472
  98. hammad_python-0.0.14.dist-info/METADATA +0 -70
  99. hammad_python-0.0.14.dist-info/RECORD +0 -99
  100. {hammad_python-0.0.14.dist-info → hammad_python-0.0.15.dist-info}/WHEEL +0 -0
  101. {hammad_python-0.0.14.dist-info → hammad_python-0.0.15.dist-info}/licenses/LICENSE +0 -0
hammad/ai/completions/utils.py
@@ -1,486 +0,0 @@
- """hammad.ai.completions.utils"""
-
- import json
- from typing import (
-     Optional,
-     List,
-     Iterator,
-     AsyncIterator,
-     TypeVar,
-     Type,
-     Any,
- )
-
- try:
-     from pydantic import BaseModel
- except ImportError:
-     raise ImportError(
-         "Using completion stream parsing requires the `openai` and `instructor` packages."
-         "Please install with: pip install 'hammad-python[ai]'"
-     )
-
- from ...cache import cached
- from .types import (
-     CompletionsInputParam,
-     ChatCompletionMessageParam,
-     CompletionStream,
-     AsyncCompletionStream,
-     Completion,
- )
-
- T = TypeVar("T", bound=BaseModel)
-
- __all__ = (
-     "parse_completions_input",
-     "create_completion_stream",
-     "create_async_completion_stream",
-     "format_tool_calls",
-     "convert_response_to_completion",
-     "InstructorStreamWrapper",
-     "AsyncInstructorStreamWrapper",
- )
-
-
- @cached
- def parse_completions_input(
-     input: CompletionsInputParam,
-     instructions: Optional[str] = None,
- ) -> List[ChatCompletionMessageParam]:
-     """Parse various input formats into a list of ChatCompletionMessageParam.
-
-     This function handles:
-     - Plain strings (converted to user messages)
-     - Strings with message blocks like [system], [user], [assistant]
-     - Single ChatCompletionMessageParam objects
-     - Lists of ChatCompletionMessageParam objects
-     - Objects with model_dump() method
-
-     Args:
-         input: The input to parse
-         instructions: Optional system instructions to prepend
-
-     Returns:
-         List of ChatCompletionMessageParam objects
-     """
-     messages: List[ChatCompletionMessageParam] = []
-
-     # Handle string inputs
-     if isinstance(input, str):
-         # Check if string contains message blocks like [system], [user], [assistant]
-         import re
-
-         # Pattern to match only allowed message blocks (system, user, assistant)
-         pattern = (
-             r"\[(system|user|assistant)\]\s*(.*?)(?=\[(?:system|user|assistant)\]|$)"
-         )
-         matches = re.findall(pattern, input, re.DOTALL | re.IGNORECASE)
-
-         if matches:
-             # Validate that we only have allowed roles
-             allowed_roles = {"system", "user", "assistant"}
-             found_roles = {role.lower() for role, _ in matches}
-
-             if not found_roles.issubset(allowed_roles):
-                 invalid_roles = found_roles - allowed_roles
-                 raise ValueError(
-                     f"Invalid message roles found: {invalid_roles}. Only 'system', 'user', and 'assistant' are allowed."
-                 )
-
-             # Parse message blocks
-             system_contents = []
-
-             for role, content in matches:
-                 content = content.strip()
-                 if content:
-                     if role.lower() == "system":
-                         system_contents.append(content)
-                     else:
-                         messages.append({"role": role.lower(), "content": content})
-
-             # Combine system contents if any exist
-             if system_contents:
-                 combined_system = "\n\n".join(system_contents)
-                 if instructions:
-                     combined_system = f"{combined_system}\n\n{instructions}"
-                 messages.insert(0, {"role": "system", "content": combined_system})
-             elif instructions:
-                 messages.insert(0, {"role": "system", "content": instructions})
-         else:
-             # Plain string - create user message
-             if instructions:
-                 messages.append({"role": "system", "content": instructions})
-             messages.append({"role": "user", "content": input})
-
-     # Handle single message object
-     elif hasattr(input, "model_dump"):
-         message_dict = input.model_dump()
-         if instructions:
-             messages.append({"role": "system", "content": instructions})
-         messages.append(message_dict)
-
-     # Handle list of messages
-     elif isinstance(input, list):
-         system_contents = []
-         other_messages = []
-
-         for item in input:
-             if hasattr(item, "model_dump"):
-                 msg_dict = item.model_dump()
-             else:
-                 msg_dict = item
-
-             if msg_dict.get("role") == "system":
-                 system_contents.append(msg_dict.get("content", ""))
-             else:
-                 other_messages.append(msg_dict)
-
-         # Combine system messages and instructions
-         if system_contents or instructions:
-             combined_system_parts = []
-             if system_contents:
-                 combined_system_parts.extend(system_contents)
-             if instructions:
-                 combined_system_parts.append(instructions)
-
-             messages.append(
-                 {"role": "system", "content": "\n\n".join(combined_system_parts)}
-             )
-
-         messages.extend(other_messages)
-
-     # Handle single dictionary or other object
-     else:
-         if hasattr(input, "model_dump"):
-             message_dict = input.model_dump()
-         else:
-             message_dict = input
-
-         if instructions:
-             messages.append({"role": "system", "content": instructions})
-         messages.append(message_dict)
-
-     return messages
-
-
- def create_completion_stream(
-     stream: Iterator[Any], output_type: Type[T] = str, model: str | None = None
- ) -> CompletionStream[T]:
-     """Create a unified completion stream from a raw stream.
-
-     This function wraps raw streams from both LiteLLM and Instructor
-     into a unified CompletionStream interface. It automatically detects
-     the stream type based on the output_type parameter.
-
-     Args:
-         stream: The raw stream from LiteLLM or Instructor
-         output_type: The expected output type (str for LiteLLM, model class for Instructor)
-         model: The model name for metadata
-
-     Returns:
-         CompletionStream: Unified stream interface
-
-     Examples:
-         # For LiteLLM string completions
-         litellm_stream = litellm.completion(model="gpt-4", messages=messages, stream=True)
-         unified_stream = create_completion_stream(litellm_stream, str, "gpt-4")
-
-         # For Instructor structured outputs
-         instructor_stream = instructor_client.completion(response_model=User, messages=messages, stream=True)
-         unified_stream = create_completion_stream(instructor_stream, User, "gpt-4")
-     """
-     return CompletionStream(stream, output_type, model)
-
-
- def create_async_completion_stream(
-     stream: AsyncIterator[Any], output_type: Type[T] = str, model: str | None = None
- ) -> AsyncCompletionStream[T]:
-     """Create a unified async completion stream from a raw async stream.
-
-     This function wraps raw async streams from both LiteLLM and Instructor
-     into a unified AsyncCompletionStream interface. It automatically detects
-     the stream type based on the output_type parameter.
-
-     Args:
-         stream: The raw async stream from LiteLLM or Instructor
-         output_type: The expected output type (str for LiteLLM, model class for Instructor)
-         model: The model name for metadata
-
-     Returns:
-         AsyncCompletionStream: Unified async stream interface
-
-     Examples:
-         ```python
-         # For LiteLLM async string completions
-         litellm_stream = await litellm.acompletion(model="gpt-4", messages=messages, stream=True)
-         unified_stream = create_async_completion_stream(litellm_stream, str, "gpt-4")
-
-         # For Instructor async structured outputs
-         instructor_stream = await instructor_client.acompletion(response_model=User, messages=messages, stream=True)
-         unified_stream = create_async_completion_stream(instructor_stream, User, "gpt-4")
-         ```
-     """
-     return AsyncCompletionStream(stream, output_type, model)
-
-
- def format_tool_calls(
-     messages: List[ChatCompletionMessageParam],
- ) -> List[ChatCompletionMessageParam]:
-     """Format message thread by replacing tool call blocks with readable assistant messages.
-
-     This function processes a message thread and replaces sequences of:
-     assistant(with tool_calls) + tool + tool + ... with a single clean assistant message
-     that describes what tools were called and their results.
-
-     Args:
-         messages: List of messages in the conversation thread
-
-     Returns:
-         List[ChatCompletionMessageParam]: Cleaned message thread with tool calls formatted
-
-     Example:
-         ```python
-         messages = [
-             {"role": "user", "content": "What's the weather in NYC?"},
-             {"role": "assistant", "tool_calls": [...]},
-             {"role": "tool", "tool_call_id": "call_1", "content": "Sunny, 72°F"},
-             {"role": "user", "content": "Thanks!"}
-         ]
-
-         formatted = format_tool_calls(messages)
-         # Returns: [
-         #     {"role": "user", "content": "What's the weather in NYC?"},
-         #     {"role": "assistant", "content": "I called get_weather tool with parameters (city=NYC), and got result: Sunny, 72°F"},
-         #     {"role": "user", "content": "Thanks!"}
-         # ]
-         ```
-     """
-     if not messages:
-         return messages
-
-     formatted_messages = []
-     i = 0
-
-     while i < len(messages):
-         current_msg = messages[i]
-
-         # Check if this is an assistant message with tool calls
-         if current_msg.get("role") == "assistant" and current_msg.get("tool_calls"):
-             # Collect all following tool messages
-             tool_results = {}
-             j = i + 1
-
-             # Gather tool results that follow this assistant message
-             while j < len(messages) and messages[j].get("role") == "tool":
-                 tool_msg = messages[j]
-                 tool_call_id = tool_msg.get("tool_call_id")
-                 if tool_call_id:
-                     tool_results[tool_call_id] = tool_msg.get("content", "No result")
-                 j += 1
-
-             # Format the tool calls with their results
-             tool_calls = current_msg.get("tool_calls", [])
-             formatted_calls = []
-
-             for tool_call in tool_calls:
-                 tool_name = tool_call.function.name
-                 tool_args = tool_call.function.arguments
-                 tool_id = tool_call.id
-
-                 # Parse arguments for cleaner display
-                 try:
-                     args_dict = json.loads(tool_args) if tool_args else {}
-                     args_str = ", ".join([f"{k}={v}" for k, v in args_dict.items()])
-                 except json.JSONDecodeError:
-                     args_str = tool_args or "no parameters"
-
-                 # Get the result for this tool call
-                 result = tool_results.get(tool_id, "No result available")
-
-                 # Format the tool call description
-                 call_description = f"I called {tool_name} tool with parameters ({args_str}), and got result: {result}"
-                 formatted_calls.append(call_description)
-
-             # Create the formatted assistant message
-             if len(formatted_calls) == 1:
-                 content = formatted_calls[0]
-             elif len(formatted_calls) > 1:
-                 content = "I made the following tool calls:\n" + "\n".join(
-                     [f"- {call}" for call in formatted_calls]
-                 )
-             else:
-                 content = "I made tool calls but no results were available."
-
-             # Add the formatted message
-             formatted_messages.append({"role": "assistant", "content": content})
-
-             # Skip past all the tool messages we processed
-             i = j
-         else:
-             # Regular message, add as-is
-             formatted_messages.append(current_msg)
-             i += 1
-
-     return formatted_messages
-
-
- def convert_response_to_completion(response: Any) -> Completion[str]:
-     """Convert a LiteLLM ModelResponse to a Completion object.
-
-     This function converts LiteLLM's ModelResponse (which is based on OpenAI's
-     ChatCompletion format) into our unified Completion type for standard
-     string completions.
-
-     Args:
-         response: The ModelResponse from LiteLLM
-
-     Returns:
-         Completion[str]: Unified completion object with string output
-
-     Example:
-         ```python
-         # For LiteLLM completions
-         response = await litellm.acompletion(model="gpt-4", messages=messages)
-         completion = convert_response_to_completion(response)
-         ```
-     """
-     # Handle empty or invalid response
-     if not hasattr(response, "choices") or not response.choices:
-         return Completion(
-             output="",
-             model=getattr(response, "model", "unknown"),
-             content=None,
-             completion=response,
-         )
-
-     choice = response.choices[0]
-
-     # Extract message data
-     if hasattr(choice, "message"):
-         message = choice.message
-         content = getattr(message, "content", None)
-         tool_calls = getattr(message, "tool_calls", None)
-         refusal = getattr(message, "refusal", None)
-     else:
-         # Fallback for different response structures
-         content = None
-         tool_calls = None
-         refusal = None
-
-     return Completion(
-         output=content or "",
-         model=getattr(response, "model", "unknown"),
-         content=content,
-         tool_calls=tool_calls,
-         refusal=refusal,
-         completion=response,
-     )
-
-
- class InstructorStreamWrapper:
-     """Wrapper for instructor streaming that captures raw completion content using hooks."""
-
-     def __init__(self, client, response_model, params, output_type, model):
-         self.client = client
-         self.response_model = response_model
-         self.params = params
-         self.output_type = output_type
-         self.model = model
-         self._raw_content_chunks = []
-         self._raw_completion = None
-         self._tool_calls = None
-
-         # Set up hooks to capture raw content
-         self.client.on("completion:response", self._capture_completion)
-
-     def _capture_completion(self, completion):
-         """Capture the raw completion response."""
-         self._raw_completion = completion
-         if hasattr(completion, "choices") and completion.choices:
-             choice = completion.choices[0]
-             # Capture content chunks
-             if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
-                 content = choice.delta.content
-                 if content:
-                     self._raw_content_chunks.append(content)
-             # Capture tool calls from message (final chunk)
-             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls"):
-                 self._tool_calls = choice.message.tool_calls
-
-     def __iter__(self):
-         """Create the stream and yield wrapped chunks."""
-         stream = self.client.chat.completions.create_partial(
-             response_model=self.response_model, **self.params
-         )
-
-         for chunk in stream:
-             yield chunk
-
-         # Clean up hooks
-         self.client.off("completion:response", self._capture_completion)
-
-     def get_raw_content(self):
-         """Get the accumulated raw content."""
-         return "".join(self._raw_content_chunks)
-
-     def get_raw_completion(self):
-         """Get the raw completion object."""
-         return self._raw_completion
-
-     def get_tool_calls(self):
-         """Get the tool calls from the completion."""
-         return self._tool_calls
-
-     def get_tool_calls(self):
-         """Get the tool calls from the completion."""
-         return self._tool_calls
-
-
- class AsyncInstructorStreamWrapper:
-     """Async wrapper for instructor streaming that captures raw completion content using hooks."""
-
-     def __init__(self, client, response_model, params, output_type, model):
-         self.client = client
-         self.response_model = response_model
-         self.params = params
-         self.output_type = output_type
-         self.model = model
-         self._raw_content_chunks = []
-         self._raw_completion = None
-         self._tool_calls = None
-
-         # Set up hooks to capture raw content
-         self.client.on("completion:response", self._capture_completion)
-
-     def _capture_completion(self, completion):
-         """Capture the raw completion response."""
-         self._raw_completion = completion
-         if hasattr(completion, "choices") and completion.choices:
-             choice = completion.choices[0]
-             # Capture content chunks
-             if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
-                 content = choice.delta.content
-                 if content:
-                     self._raw_content_chunks.append(content)
-             # Capture tool calls from message (final chunk)
-             if hasattr(choice, "message") and hasattr(choice.message, "tool_calls"):
-                 self._tool_calls = choice.message.tool_calls
-
-     async def __aiter__(self):
-         """Create the stream and yield wrapped chunks."""
-         stream = await self.client.chat.completions.create_partial(
-             response_model=self.response_model, **self.params
-         )
-
-         async for chunk in stream:
-             yield chunk
-
-         # Clean up hooks
-         self.client.off("completion:response", self._capture_completion)
-
-     def get_raw_content(self):
-         """Get the accumulated raw content."""
-         return "".join(self._raw_content_chunks)
-
-     def get_raw_completion(self):
-         """Get the raw completion object."""
-         return self._raw_completion
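
For reference, `parse_completions_input` above was the single entry point that normalized every accepted prompt format into an OpenAI-style message list. A minimal usage sketch, assuming `hammad-python==0.0.14` with the `[ai]` extra installed (the module no longer exists in 0.0.15):

```python
# Sketch against hammad-python==0.0.14; hammad.ai.completions.utils
# was removed in 0.0.15.
from hammad.ai.completions.utils import parse_completions_input

# Message-block strings are split on [system]/[user]/[assistant] tags,
# and `instructions` is merged into the combined system message.
messages = parse_completions_input(
    "[system] You are terse. [user] What's the weather in NYC?",
    instructions="Answer in one sentence.",
)
assert messages == [
    {"role": "system", "content": "You are terse.\n\nAnswer in one sentence."},
    {"role": "user", "content": "What's the weather in NYC?"},
]
```

Note that `format_tool_calls` reads `tool_call.function.name`, `tool_call.function.arguments`, and `tool_call.id` as attributes, so the assistant message's `tool_calls` entries must be SDK-style objects (as LiteLLM returns them), not plain dicts like those in the docstring's sketch.
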
hammad/ai/embeddings/__init__.py
@@ -1,35 +0,0 @@
- """hammad.ai.embeddings"""
-
- from typing import TYPE_CHECKING
- from ...performance.imports import create_getattr_importer
-
- if TYPE_CHECKING:
-     from .client.base_embeddings_client import BaseEmbeddingsClient
-     from .client.fastembed_text_embeddings_client import FastEmbedTextEmbeddingsClient
-     from .client.litellm_embeddings_client import LiteLlmEmbeddingsClient
-     from .types import Embedding, EmbeddingResponse, EmbeddingUsage
-     from .create import create_embeddings, async_create_embeddings
-
-
- __all__ = (
-     # hammad.ai.embeddings.client.base_embeddings_client
-     "BaseEmbeddingsClient",
-     # hammad.ai.embeddings.client.fastembed_text_embeddings_client
-     "FastEmbedTextEmbeddingsClient",
-     # hammad.ai.embeddings.client.litellm_embeddings_client
-     "LiteLlmEmbeddingsClient",
-     # hammad.ai.embeddings.types
-     "Embedding",
-     "EmbeddingResponse",
-     "EmbeddingUsage",
-     # hammad.ai.embeddings.create
-     "create_embeddings",
-     "async_create_embeddings",
- )
-
-
- __getattr__ = create_getattr_importer(__all__)
-
-
- def __dir__() -> list[str]:
-     return list(__all__)
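
The `__init__.py` above follows the package's lazy-export convention: submodule imports run only under `TYPE_CHECKING`, while `create_getattr_importer(__all__)` installs a module-level `__getattr__` (PEP 562) that resolves each exported name on first attribute access. A rough standalone approximation of that mechanism; the `_EXPORTS` map here is illustrative, and the real helper in `hammad/performance/imports.py` may resolve names differently:

```python
# Approximation of the lazy-export pattern above, meant to live in a
# package __init__.py; _EXPORTS and this __getattr__ are illustrative,
# not the actual create_getattr_importer.
from importlib import import_module
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Resolved eagerly only by type checkers, never at runtime.
    from .types import Embedding, EmbeddingResponse, EmbeddingUsage

# Map each exported name to the relative submodule that defines it.
_EXPORTS = {
    "Embedding": ".types",
    "EmbeddingResponse": ".types",
    "EmbeddingUsage": ".types",
}
__all__ = tuple(_EXPORTS)


def __getattr__(name: str):
    # PEP 562: called only when `name` is not found as a normal attribute.
    if name in _EXPORTS:
        value = getattr(import_module(_EXPORTS[name], __package__), name)
        globals()[name] = value  # cache so later lookups skip __getattr__
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

The payoff is that `import hammad.ai.embeddings` stays cheap: heavy dependencies like fastembed or litellm are only imported when the corresponding client is actually touched.
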
hammad/ai/embeddings/client/__init__.py
@@ -1 +0,0 @@
- """hammad.ai.embeddings.client"""
hammad/ai/embeddings/client/base_embeddings_client.py
@@ -1,26 +0,0 @@
- """hammad.ai.embeddings.client.base_embeddings_client"""
-
- from abc import ABC, abstractmethod
-
- from ..types import (
-     EmbeddingResponse,
- )
-
- __all__ = ("BaseEmbeddingsClient",)
-
-
- class BaseEmbeddingsClient(ABC):
-     """Base class for the various supported embeddings clients within
-     the `hammad.ai` extension."""
-
-     @staticmethod
-     @abstractmethod
-     def async_embed(input: list, model: str, **kwargs) -> EmbeddingResponse:
-         """"""
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def embed(input: list, model: str, **kwargs) -> EmbeddingResponse:
-         """"""
-         pass
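
`BaseEmbeddingsClient` pins the client contract to two abstract static methods, which is what the removed `FastEmbedTextEmbeddingsClient` and `LiteLlmEmbeddingsClient` implement. A hedged sketch of a conforming subclass; `EchoEmbeddingsClient` is hypothetical, and since `EmbeddingResponse`'s fields are not shown in this diff, the bodies are stubs that only demonstrate the required shape (assumes `hammad-python==0.0.14` with the `[ai]` extra):

```python
# Hypothetical subclass; requires hammad-python==0.0.14, since these
# modules were removed in 0.0.15.
from hammad.ai.embeddings.client.base_embeddings_client import (
    BaseEmbeddingsClient,
)
from hammad.ai.embeddings.types import EmbeddingResponse


class EchoEmbeddingsClient(BaseEmbeddingsClient):
    """Stub client showing the required static-method surface."""

    @staticmethod
    def async_embed(input: list, model: str, **kwargs) -> EmbeddingResponse:
        # Declared with a plain `def` in the ABC above; a real client
        # would perform the request asynchronously.
        raise NotImplementedError("illustrative stub only")

    @staticmethod
    def embed(input: list, model: str, **kwargs) -> EmbeddingResponse:
        raise NotImplementedError("illustrative stub only")


# Both overrides are required: omitting either one keeps the subclass
# abstract, and instantiating it would raise TypeError.
client = EchoEmbeddingsClient()
```
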