hammad-python 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff covers publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
- hammad/__init__.py +178 -0
- hammad/_internal.py +237 -0
- hammad/cache/__init__.py +40 -0
- hammad/cache/base_cache.py +181 -0
- hammad/cache/cache.py +169 -0
- hammad/cache/decorators.py +261 -0
- hammad/cache/file_cache.py +80 -0
- hammad/cache/ttl_cache.py +74 -0
- hammad/cli/__init__.py +35 -0
- hammad/cli/_runner.py +265 -0
- hammad/cli/animations.py +573 -0
- hammad/cli/plugins.py +836 -0
- hammad/cli/styles/__init__.py +55 -0
- hammad/cli/styles/settings.py +139 -0
- hammad/cli/styles/types.py +358 -0
- hammad/cli/styles/utils.py +626 -0
- hammad/data/__init__.py +83 -0
- hammad/data/collections/__init__.py +44 -0
- hammad/data/collections/collection.py +274 -0
- hammad/data/collections/indexes/__init__.py +37 -0
- hammad/data/collections/indexes/qdrant/__init__.py +1 -0
- hammad/data/collections/indexes/qdrant/index.py +735 -0
- hammad/data/collections/indexes/qdrant/settings.py +94 -0
- hammad/data/collections/indexes/qdrant/utils.py +220 -0
- hammad/data/collections/indexes/tantivy/__init__.py +1 -0
- hammad/data/collections/indexes/tantivy/index.py +428 -0
- hammad/data/collections/indexes/tantivy/settings.py +51 -0
- hammad/data/collections/indexes/tantivy/utils.py +200 -0
- hammad/data/configurations/__init__.py +35 -0
- hammad/data/configurations/configuration.py +564 -0
- hammad/data/models/__init__.py +55 -0
- hammad/data/models/extensions/__init__.py +4 -0
- hammad/data/models/extensions/pydantic/__init__.py +42 -0
- hammad/data/models/extensions/pydantic/converters.py +759 -0
- hammad/data/models/fields.py +546 -0
- hammad/data/models/model.py +1078 -0
- hammad/data/models/utils.py +280 -0
- hammad/data/sql/__init__.py +23 -0
- hammad/data/sql/database.py +578 -0
- hammad/data/sql/types.py +141 -0
- hammad/data/types/__init__.py +39 -0
- hammad/data/types/file.py +358 -0
- hammad/data/types/multimodal/__init__.py +24 -0
- hammad/data/types/multimodal/audio.py +96 -0
- hammad/data/types/multimodal/image.py +80 -0
- hammad/data/types/text.py +1066 -0
- hammad/formatting/__init__.py +20 -0
- hammad/formatting/json/__init__.py +27 -0
- hammad/formatting/json/converters.py +158 -0
- hammad/formatting/text/__init__.py +63 -0
- hammad/formatting/text/converters.py +723 -0
- hammad/formatting/text/markdown.py +131 -0
- hammad/formatting/yaml/__init__.py +26 -0
- hammad/formatting/yaml/converters.py +5 -0
- hammad/genai/__init__.py +78 -0
- hammad/genai/agents/__init__.py +1 -0
- hammad/genai/agents/types/__init__.py +35 -0
- hammad/genai/agents/types/history.py +277 -0
- hammad/genai/agents/types/tool.py +490 -0
- hammad/genai/embedding_models/__init__.py +41 -0
- hammad/genai/embedding_models/embedding_model.py +193 -0
- hammad/genai/embedding_models/embedding_model_name.py +77 -0
- hammad/genai/embedding_models/embedding_model_request.py +65 -0
- hammad/genai/embedding_models/embedding_model_response.py +69 -0
- hammad/genai/embedding_models/run.py +161 -0
- hammad/genai/language_models/__init__.py +35 -0
- hammad/genai/language_models/_streaming.py +622 -0
- hammad/genai/language_models/_types.py +276 -0
- hammad/genai/language_models/_utils/__init__.py +31 -0
- hammad/genai/language_models/_utils/_completions.py +131 -0
- hammad/genai/language_models/_utils/_messages.py +89 -0
- hammad/genai/language_models/_utils/_requests.py +202 -0
- hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
- hammad/genai/language_models/language_model.py +734 -0
- hammad/genai/language_models/language_model_request.py +135 -0
- hammad/genai/language_models/language_model_response.py +219 -0
- hammad/genai/language_models/language_model_response_chunk.py +53 -0
- hammad/genai/language_models/run.py +530 -0
- hammad/genai/multimodal_models.py +48 -0
- hammad/genai/rerank_models.py +26 -0
- hammad/logging/__init__.py +35 -0
- hammad/logging/decorators.py +834 -0
- hammad/logging/logger.py +954 -0
- hammad/mcp/__init__.py +50 -0
- hammad/mcp/client/__init__.py +36 -0
- hammad/mcp/client/client.py +624 -0
- hammad/mcp/client/client_service.py +400 -0
- hammad/mcp/client/settings.py +178 -0
- hammad/mcp/servers/__init__.py +25 -0
- hammad/mcp/servers/launcher.py +1161 -0
- hammad/runtime/__init__.py +32 -0
- hammad/runtime/decorators.py +142 -0
- hammad/runtime/run.py +299 -0
- hammad/service/__init__.py +49 -0
- hammad/service/create.py +527 -0
- hammad/service/decorators.py +285 -0
- hammad/typing/__init__.py +435 -0
- hammad/web/__init__.py +43 -0
- hammad/web/http/__init__.py +1 -0
- hammad/web/http/client.py +944 -0
- hammad/web/models.py +277 -0
- hammad/web/openapi/__init__.py +1 -0
- hammad/web/openapi/client.py +740 -0
- hammad/web/search/__init__.py +1 -0
- hammad/web/search/client.py +1035 -0
- hammad/web/utils.py +472 -0
- {hammad_python-0.0.15.dist-info → hammad_python-0.0.17.dist-info}/METADATA +8 -1
- hammad_python-0.0.17.dist-info/RECORD +110 -0
- hammad_python-0.0.15.dist-info/RECORD +0 -4
- {hammad_python-0.0.15.dist-info → hammad_python-0.0.17.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.15.dist-info → hammad_python-0.0.17.dist-info}/licenses/LICENSE +0 -0
hammad/genai/embedding_models/embedding_model.py
@@ -0,0 +1,193 @@
+"""hammad.genai.embedding_models.embedding_model"""
+
+import asyncio
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, TYPE_CHECKING
+import sys
+
+if sys.version_info >= (3, 12):
+    from typing import TypedDict
+else:
+    from typing_extensions import TypedDict
+
+if TYPE_CHECKING:
+    try:
+        from litellm import EmbeddingResponse as _LitellmEmbeddingResponse
+    except ImportError:
+        _LitellmEmbeddingResponse = Any
+
+from ..language_models.language_model import _AIProvider
+from .embedding_model_request import EmbeddingModelRequest
+from .embedding_model_name import EmbeddingModelName
+from .embedding_model_response import (
+    Embedding,
+    EmbeddingUsage,
+    EmbeddingModelResponse,
+)
+from ...formatting.text import convert_to_text
+
+
+__all__ = (
+    "EmbeddingModel",
+    "EmbeddingModelError",
+)
+
+
+class EmbeddingModelError(Exception):
+    """Exception raised when an error occurs while generating embeddings
+    using an embedding model."""
+
+    def __init__(self, message: str, response: Any):
+        self.message = message
+        self.response = response
+        super().__init__(self.message)
+
+
+def _parse_litellm_response_to_embedding_model_response(response: "_LitellmEmbeddingResponse") -> EmbeddingModelResponse:
+    """Parse the response from `litellm` to an `EmbeddingModelResponse` object."""
+    try:
+        embedding_data: List[Embedding] = []
+
+        for i, item in enumerate(response.data):
+            embedding_data.append(
+                Embedding(embedding=item["embedding"], index=i, object="embedding")
+            )
+        usage = EmbeddingUsage(
+            prompt_tokens=response.usage.prompt_tokens,
+            total_tokens=response.usage.total_tokens,
+        )
+        return EmbeddingModelResponse(
+            data=embedding_data,
+            model=response.model,
+            object="list",
+            usage=usage,
+        )
+    except Exception as e:
+        raise EmbeddingModelError(
+            f"Failed to parse litellm response to embedding response: {e}",
+            response,
+        )
+
+
+@dataclass
+class EmbeddingModel:
+    """Embeddings provider client that utilizes the `litellm` module
+    when generating embeddings."""
+
+    model: EmbeddingModelName = "openai/text-embedding-3-small"
+
+    async def async_run(
+        self,
+        input: List[Any] | Any,
+        dimensions: Optional[int] = None,
+        encoding_format: Optional[str] = None,
+        timeout=600,
+        api_base: Optional[str] = None,
+        api_version: Optional[str] = None,
+        api_key: Optional[str] = None,
+        api_type: Optional[str] = None,
+        caching: bool = False,
+        user: Optional[str] = None,
+        format: bool = False,
+    ) -> EmbeddingModelResponse:
+        """Asynchronously generate embeddings for the given input using
+        a valid `litellm` model.
+
+        Args:
+            input (List[Any] | Any) : The input text / content to generate embeddings for.
+            dimensions (Optional[int]) : The number of dimensions for the embedding.
+            encoding_format (Optional[str]) : The format to return the embeddings in. (e.g. "float", "base64")
+            timeout (int) : The timeout for the request.
+            api_base (Optional[str]) : The base URL for the API.
+            api_version (Optional[str]) : The version of the API.
+            api_key (Optional[str]) : The API key to use for the request.
+            api_type (Optional[str]) : The API type to use for the request.
+            caching (bool) : Whether to cache the request.
+            user (Optional[str]) : The user to use for the request.
+            format (bool) : Whether to format each non-string input as a markdown string.
+
+        Returns:
+            EmbeddingModelResponse : The embedding response generated for the given input.
+        """
+        if not isinstance(input, list):
+            input = [input]
+
+        if format:
+            for i in input:
+                try:
+                    i = convert_to_text(i)
+                except Exception as e:
+                    raise EmbeddingModelError(
+                        f"Failed to format input to text: {e}",
+                        i,
+                    )
+
+        async_embedding_fn = _AIProvider.get_litellm().aembedding
+
+        try:
+            response = await async_embedding_fn(
+                model=self.model,
+                input=input,
+                dimensions=dimensions,
+                encoding_format=encoding_format,
+                timeout=timeout,
+                api_base=api_base,
+                api_version=api_version,
+                api_key=api_key,
+                api_type=api_type,
+                caching=caching,
+                user=user,
+            )
+        except Exception as e:
+            raise EmbeddingModelError(f"Error in embedding model request: {e}", response=None) from e
+
+        return _parse_litellm_response_to_embedding_model_response(response)
+
+    def run(
+        self,
+        input: List[Any] | Any,
+        dimensions: Optional[int] = None,
+        encoding_format: Optional[str] = None,
+        timeout=600,
+        api_base: Optional[str] = None,
+        api_version: Optional[str] = None,
+        api_key: Optional[str] = None,
+        api_type: Optional[str] = None,
+        caching: bool = False,
+        user: Optional[str] = None,
+        format: bool = False,
+    ) -> EmbeddingModelResponse:
+        """Generate embeddings for the given input using
+        a valid `litellm` model.
+
+        Args:
+            input (List[Any] | Any) : The input text / content to generate embeddings for.
+            dimensions (Optional[int]) : The number of dimensions for the embedding.
+            encoding_format (Optional[str]) : The format to return the embeddings in. (e.g. "float", "base64")
+            timeout (int) : The timeout for the request.
+            api_base (Optional[str]) : The base URL for the API.
+            api_version (Optional[str]) : The version of the API.
+            api_key (Optional[str]) : The API key to use for the request.
+            api_type (Optional[str]) : The API type to use for the request.
+            caching (bool) : Whether to cache the request.
+            user (Optional[str]) : The user to use for the request.
+            format (bool) : Whether to format each non-string input as a markdown string.
+
+        Returns:
+            EmbeddingModelResponse : The embedding response generated for the given input.
+        """
+        return asyncio.run(
+            self.async_run(
+                input=input,
+                dimensions=dimensions,
+                encoding_format=encoding_format,
+                timeout=timeout,
+                api_base=api_base,
+                api_version=api_version,
+                api_key=api_key,
+                api_type=api_type,
+                caching=caching,
+                user=user,
+                format=format,
+            )
+        )
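The hunk above adds hammad/genai/embedding_models/embedding_model.py. Two things stand out when reading it: run() simply wraps async_run() in asyncio.run(), so it cannot be called from inside an already-running event loop, and the format=True branch rebinds the loop variable (i = convert_to_text(i)) without writing the result back into input, so the converted text is discarded as published. A minimal usage sketch, assuming the litellm dependency is installed and provider credentials (e.g. OPENAI_API_KEY) are set; the import path follows the module docstring:

    from hammad.genai.embedding_models.embedding_model import EmbeddingModel

    model = EmbeddingModel(model="openai/text-embedding-3-small")

    # run() drives async_run() via asyncio.run(), so call it from sync code only.
    response = model.run(
        input=["hello world", "goodbye world"],
        dimensions=256,
    )

    print(len(response.data))   # 2: one Embedding per input item
    print(response.dimensions)  # 256: length of the first embedding vector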
hammad/genai/embedding_models/embedding_model_name.py
@@ -0,0 +1,77 @@
+"""hammad.genai.embedding_models.embedding_model_name"""
+
+from typing import Literal
+
+
+__all__ = (
+    "EmbeddingModelName",
+)
+
+
+EmbeddingModelName = Literal[
+    # OpenAI Embedding Models
+    "text-embedding-3-small",
+    "text-embedding-3-large",
+    "text-embedding-ada-002",
+    # OpenAI Compatible Embedding Models
+    "openai/text-embedding-3-small",
+    "openai/text-embedding-3-large",
+    "openai/text-embedding-ada-002",
+    # Bedrock Embedding Models
+    "amazon.titan-embed-text-v1",
+    "cohere.embed-english-v3",
+    "cohere.embed-multilingual-v3",
+    # Cohere Embedding Models
+    "embed-english-v3.0",
+    "embed-english-light-v3.0",
+    "embed-multilingual-v3.0",
+    "embed-multilingual-light-v3.0",
+    "embed-english-v2.0",
+    "embed-english-light-v2.0",
+    "embed-multilingual-v2.0",
+    # NVIDIA NIM Embedding Models
+    "nvidia_nim/NV-Embed-QA",
+    "nvidia_nim/nvidia/nv-embed-v1",
+    "nvidia_nim/nvidia/nv-embedqa-mistral-7b-v2",
+    "nvidia_nim/nvidia/nv-embedqa-e5-v5",
+    "nvidia_nim/nvidia/embed-qa-4",
+    "nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v1",
+    "nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v2",
+    "nvidia_nim/snowflake/arctic-embed-l",
+    "nvidia_nim/baai/bge-m3",
+    # HuggingFace Embedding Models
+    "huggingface/microsoft/codebert-base",
+    "huggingface/BAAI/bge-large-zh",
+    # Mistral AI Embedding Models
+    "mistral/mistral-embed",
+    # Gemini AI Embedding Models
+    "gemini/text-embedding-004",
+    # Vertex AI Embedding Models
+    "vertex_ai/textembedding-gecko",
+    "vertex_ai/textembedding-gecko-multilingual",
+    "vertex_ai/textembedding-gecko-multilingual@001",
+    "vertex_ai/textembedding-gecko@001",
+    "vertex_ai/textembedding-gecko@003",
+    "vertex_ai/text-embedding-preview-0409",
+    "vertex_ai/text-multilingual-embedding-preview-0409",
+    # Voyage AI Embedding Models
+    "voyage/voyage-01",
+    "voyage/voyage-lite-01",
+    "voyage/voyage-lite-01-instruct",
+    # Nebius AI Studio Embedding Models
+    "nebius/BAAI/bge-en-icl",
+    "nebius/BAAI/bge-multilingual-gemma2",
+    "nebius/intfloat/e5-mistral-7b-instruct",
+    # Ollama Embedding Models
+    "ollama/granite-embedding:30m",
+    "ollama/granite-embedding:278m",
+    "ollama/snowflake-arctic-embed2",
+    "ollama/bge-large",
+    "ollama/paraphrase-multilingual",
+    "ollama/bge-m3",
+    "ollama/snowflake-arctic-embed",
+    "ollama/mxbai-embed-large",
+    "ollama/all-minilm",
+    "ollama/nomic-embed-text",
+]
+"""Common embedding models supported by `litellm`."""
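EmbeddingModelName is a plain Literal alias, so it only constrains arguments at type-check time; at runtime any string is accepted and forwarded to litellm. A small sketch of recovering the allowed names at runtime via typing.get_args (the helper function here is illustrative, not part of the package):

    from typing import get_args

    from hammad.genai.embedding_models.embedding_model_name import EmbeddingModelName

    # Literal members are introspectable at runtime.
    SUPPORTED_MODELS = set(get_args(EmbeddingModelName))

    def is_known_model(name: str) -> bool:
        """Hypothetical helper: check a name against the Literal's members."""
        return name in SUPPORTED_MODELS

    assert is_known_model("ollama/nomic-embed-text")
    assert not is_known_model("openai/not-a-real-model")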
hammad/genai/embedding_models/embedding_model_request.py
@@ -0,0 +1,65 @@
+"""hammad.genai.embedding_models.embedding_model_request"""
+
+import sys
+if sys.version_info >= (3, 12):
+    from typing import TypedDict, Required, NotRequired
+else:
+    from typing_extensions import TypedDict, Required, NotRequired
+
+from typing import (
+    Any,
+    Dict,
+    List,
+    Optional,
+    Type,
+    TypeVar,
+    Union,
+    Literal,
+)
+
+from .embedding_model_name import EmbeddingModelName
+
+__all__ = [
+    "EmbeddingModelRequest",
+]
+
+
+class EmbeddingModelRequest(TypedDict, total=False):
+    """A request to an embedding model."""
+
+    input: List[Any] | Any
+    """The input items to embed."""
+
+    model: EmbeddingModelName | str
+    """The embedding model to use."""
+
+    format: bool = False
+    """Whether to format each non-string input as a markdown string."""
+
+    # LiteLLM Settings
+    dimensions: Optional[int] = None
+    """The dimensions of the embedding."""
+
+    encoding_format: Optional[str] = None
+    """The encoding format of the embedding."""
+
+    timeout: Optional[int] = None
+    """The timeout for the embedding request."""
+
+    api_base: Optional[str] = None
+    """The API base for the embedding request."""
+
+    api_version: Optional[str] = None
+    """The API version for the embedding request."""
+
+    api_key: Optional[str] = None
+    """The API key for the embedding request."""
+
+    api_type: Optional[str] = None
+    """The API type for the embedding request."""
+
+    caching: bool = False
+    """Whether to cache the embedding request."""
+
+    user: Optional[str] = None
+    """The user for the embedding request."""
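EmbeddingModelRequest declares total=False, so every key is optional. Note that the = False / = None assignments in the TypedDict body are rejected by type checkers and carry no runtime defaults; with total=False the keys are simply optional. A sketch of constructing a request, whose keys line up with the run_embedding_model signature later in this diff:

    from hammad.genai.embedding_models.embedding_model_request import (
        EmbeddingModelRequest,
    )

    # total=False: include only the keys you need; there are no runtime defaults.
    request: EmbeddingModelRequest = {
        "input": ["some text to embed"],
        "model": "openai/text-embedding-3-small",
        "dimensions": 256,
        "caching": True,
    }

    # A TypedDict is an ordinary dict at runtime, so it can be splatted into a
    # call site with matching keyword parameters, e.g. run_embedding_model(**request).
    assert isinstance(request, dict)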
hammad/genai/embedding_models/embedding_model_response.py
@@ -0,0 +1,69 @@
+"""hammad.genai.embedding_models.embedding_model_response"""
+
+from typing import List, Literal
+
+from pydantic import BaseModel
+
+__all__ = (
+    "Embedding",
+    "EmbeddingUsage",
+    "EmbeddingResponse",
+)
+
+
+class Embedding(BaseModel):
+    embedding: List[float]
+    """The embedding vector, which is a list of floats.
+
+    The length of vector depends on the model as listed in the
+    [embedding guide](https://platform.openai.com/docs/guides/embeddings).
+    """
+
+    index: int
+    """The index of the embedding in the list of embeddings."""
+
+    object: Literal["embedding"]
+    """The object type, which is always "embedding"."""
+
+    @property
+    def dimensions(self) -> int:
+        """The dimensions of the embedding."""
+        return len(self.embedding)
+
+
+class EmbeddingUsage(BaseModel):
+    """Usage statistics for embedding requests."""
+
+    prompt_tokens: int
+    """The number of tokens used by the prompt."""
+
+    total_tokens: int
+    """The total number of tokens used by the request."""
+
+
+class EmbeddingModelResponse(BaseModel):
+    data: List[Embedding]
+    """The list of embeddings generated by the model."""
+
+    model: str
+    """The name of the model used to generate the embedding."""
+
+    object: Literal["list"]
+    """The object type, which is always "list"."""
+
+    usage: EmbeddingUsage
+    """The usage information for the request."""
+
+    @property
+    def dimensions(self) -> int:
+        """The dimensions of the embedding."""
+        return len(self.data[0].embedding)
+
+    def __str__(self) -> str:
+        return (
+            "EmbeddingModelResponse:\n"
+            f">>> Model: {self.model}\n"
+            f">>> Dimensions: {self.dimensions}\n"
+            f">>> Usage: {self.usage}\n"
+            f">>> Number of Generated Embeddings: {len(self.data)}\n"
+        )
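One discrepancy worth noting in this hunk: __all__ exports the name "EmbeddingResponse", but the class defined is EmbeddingModelResponse, so the export list does not match the module's contents as published. The models themselves are plain pydantic and usable without any network calls; a sketch follows (cosine similarity here is standard math, not a method these models provide):

    import math

    from hammad.genai.embedding_models.embedding_model_response import (
        Embedding,
        EmbeddingUsage,
        EmbeddingModelResponse,
    )

    a = Embedding(embedding=[1.0, 0.0], index=0, object="embedding")
    b = Embedding(embedding=[0.6, 0.8], index=1, object="embedding")

    def cosine(x: Embedding, y: Embedding) -> float:
        """Cosine similarity between two embedding vectors."""
        dot = sum(p * q for p, q in zip(x.embedding, y.embedding))
        return dot / (math.hypot(*x.embedding) * math.hypot(*y.embedding))

    print(cosine(a, b))  # 0.6

    response = EmbeddingModelResponse(
        data=[a, b],
        model="openai/text-embedding-3-small",
        object="list",
        usage=EmbeddingUsage(prompt_tokens=4, total_tokens=4),
    )
    print(response.dimensions)  # 2: length of the first embedding vector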
hammad/genai/embedding_models/run.py
@@ -0,0 +1,161 @@
+"""hammad.genai.embedding_models.run
+
+Standalone functions for running embedding models with full parameter typing.
+"""
+
+from typing import Any, List, Optional, overload, Union
+
+from .embedding_model_name import EmbeddingModelName
+from .embedding_model import EmbeddingModel
+from .embedding_model_response import EmbeddingModelResponse
+
+__all__ = [
+    "run_embedding_model",
+    "async_run_embedding_model",
+]
+
+
+# Overloads for run_embedding_model
+@overload
+def run_embedding_model(
+    input: List[Any] | Any,
+    *,
+    # Provider settings
+    model: EmbeddingModelName = "openai/text-embedding-3-small",
+    api_base: Optional[str] = None,
+    api_key: Optional[str] = None,
+    api_version: Optional[str] = None,
+    api_type: Optional[str] = None,
+    # Extended settings
+    dimensions: Optional[int] = None,
+    encoding_format: Optional[str] = None,
+    timeout: int = 600,
+    caching: bool = False,
+    user: Optional[str] = None,
+    format: bool = False,
+) -> EmbeddingModelResponse: ...
+
+
+def run_embedding_model(
+    input: List[Any] | Any,
+    *,
+    # Provider settings
+    model: EmbeddingModelName = "openai/text-embedding-3-small",
+    api_base: Optional[str] = None,
+    api_key: Optional[str] = None,
+    api_version: Optional[str] = None,
+    api_type: Optional[str] = None,
+    # Extended settings
+    dimensions: Optional[int] = None,
+    encoding_format: Optional[str] = None,
+    timeout: int = 600,
+    caching: bool = False,
+    user: Optional[str] = None,
+    format: bool = False,
+) -> EmbeddingModelResponse:
+    """Run an embedding model with the given input.
+
+    Args:
+        input: The input text/content to generate embeddings for
+        model: The embedding model to use
+        api_base: The base URL for the API
+        api_key: The API key to use for the request
+        api_version: The version of the API
+        api_type: The API type to use for the request
+        dimensions: The number of dimensions for the embedding
+        encoding_format: The format to return the embeddings in
+        timeout: The timeout for the request
+        caching: Whether to cache the request
+        user: The user to use for the request
+        format: Whether to format each non-string input as a markdown string
+
+    Returns:
+        EmbeddingModelResponse: The embedding response
+    """
+    embedding_model = EmbeddingModel(model=model)
+    return embedding_model.run(
+        input=input,
+        dimensions=dimensions,
+        encoding_format=encoding_format,
+        timeout=timeout,
+        api_base=api_base,
+        api_version=api_version,
+        api_key=api_key,
+        api_type=api_type,
+        caching=caching,
+        user=user,
+        format=format,
+    )
+
+
+# Overloads for async_run_embedding_model
+@overload
+async def async_run_embedding_model(
+    input: List[Any] | Any,
+    *,
+    # Provider settings
+    model: EmbeddingModelName = "openai/text-embedding-3-small",
+    api_base: Optional[str] = None,
+    api_key: Optional[str] = None,
+    api_version: Optional[str] = None,
+    api_type: Optional[str] = None,
+    # Extended settings
+    dimensions: Optional[int] = None,
+    encoding_format: Optional[str] = None,
+    timeout: int = 600,
+    caching: bool = False,
+    user: Optional[str] = None,
+    format: bool = False,
+) -> EmbeddingModelResponse: ...
+
+
+async def async_run_embedding_model(
+    input: List[Any] | Any,
+    *,
+    # Provider settings
+    model: EmbeddingModelName = "openai/text-embedding-3-small",
+    api_base: Optional[str] = None,
+    api_key: Optional[str] = None,
+    api_version: Optional[str] = None,
+    api_type: Optional[str] = None,
+    # Extended settings
+    dimensions: Optional[int] = None,
+    encoding_format: Optional[str] = None,
+    timeout: int = 600,
+    caching: bool = False,
+    user: Optional[str] = None,
+    format: bool = False,
+) -> EmbeddingModelResponse:
+    """Asynchronously run an embedding model with the given input.
+
+    Args:
+        input: The input text/content to generate embeddings for
+        model: The embedding model to use
+        api_base: The base URL for the API
+        api_key: The API key to use for the request
+        api_version: The version of the API
+        api_type: The API type to use for the request
+        dimensions: The number of dimensions for the embedding
+        encoding_format: The format to return the embeddings in
+        timeout: The timeout for the request
+        caching: Whether to cache the request
+        user: The user to use for the request
+        format: Whether to format each non-string input as a markdown string
+
+    Returns:
+        EmbeddingModelResponse: The embedding response
+    """
+    embedding_model = EmbeddingModel(model=model)
+    return await embedding_model.async_run(
+        input=input,
+        dimensions=dimensions,
+        encoding_format=encoding_format,
+        timeout=timeout,
+        api_base=api_base,
+        api_version=api_version,
+        api_key=api_key,
+        api_type=api_type,
+        caching=caching,
+        user=user,
+        format=format,
+    )
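The standalone runners construct a fresh EmbeddingModel per call and forward every parameter. (Each function also carries a single @overload that merely mirrors its own signature, which adds nothing over the implementation signature.) A usage sketch under the same assumptions as before (litellm installed, credentials configured):

    import asyncio

    from hammad.genai.embedding_models.run import (
        run_embedding_model,
        async_run_embedding_model,
    )

    # Synchronous path: EmbeddingModel.run() -> asyncio.run() under the hood.
    response = run_embedding_model(
        "hello world",
        model="openai/text-embedding-3-small",
    )
    print(response.usage.total_tokens)

    # Async path, for callers that already own an event loop.
    async def main() -> None:
        batch = await async_run_embedding_model(
            ["hello", "world"],
            model="openai/text-embedding-3-small",
        )
        print(batch.dimensions)

    asyncio.run(main())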
hammad/genai/language_models/__init__.py
@@ -0,0 +1,35 @@
+"""hammad.genai.language_models"""
+
+from typing import TYPE_CHECKING
+from ..._internal import create_getattr_importer
+
+if TYPE_CHECKING:
+    from .language_model import LanguageModel
+    from .run import run_language_model, async_run_language_model
+    from .language_model_request import LanguageModelMessagesParam
+    from .language_model_response import LanguageModelResponse
+    from .language_model_response_chunk import LanguageModelResponseChunk
+    from .language_model_request import LanguageModelRequest
+
+__all__ = (
+    # hammad.genai.language_models.language_model
+    "LanguageModel",
+
+    # hammad.genai.language_models.run
+    "run_language_model",
+    "async_run_language_model",
+
+    # hammad.genai.language_models.language_model_request
+    "LanguageModelMessagesParam",
+    "LanguageModelRequest",
+
+    # hammad.genai.language_models.language_model_response
+    "LanguageModelResponse",
+    "LanguageModelResponseChunk",
+)
+
+__getattr__ = create_getattr_importer(__all__)
+
+def __dir__() -> list[str]:
+    """Get the attributes of the language_models module."""
+    return list(__all__)
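The language_models package exposes its public names lazily: the imports run only under TYPE_CHECKING for type checkers, while create_getattr_importer (defined in hammad/_internal.py, also added in this diff but not shown here) installs a module-level __getattr__. A generic sketch of the PEP 562 pattern this implements; the name-to-submodule table below is illustrative, not the actual _internal implementation:

    import importlib
    from typing import Any

    # Hypothetical name -> submodule table for a package __init__.py.
    _LAZY = {
        "LanguageModel": ".language_model",
        "run_language_model": ".run",
    }

    def __getattr__(name: str) -> Any:
        """Import the owning submodule on first attribute access (PEP 562)."""
        if name in _LAZY:
            module = importlib.import_module(_LAZY[name], __package__)
            return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With this pattern, importing the package is cheap; the heavy submodules load only when one of the listed names is first accessed.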