hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- hammad/__init__.py +177 -0
- hammad/{performance/imports.py → _internal.py} +7 -1
- hammad/cache/__init__.py +1 -1
- hammad/cli/__init__.py +3 -1
- hammad/cli/_runner.py +265 -0
- hammad/cli/animations.py +1 -1
- hammad/cli/plugins.py +133 -78
- hammad/cli/styles/__init__.py +1 -1
- hammad/cli/styles/utils.py +149 -3
- hammad/data/__init__.py +56 -29
- hammad/data/collections/__init__.py +27 -17
- hammad/data/collections/collection.py +205 -383
- hammad/data/collections/indexes/__init__.py +37 -0
- hammad/data/collections/indexes/qdrant/__init__.py +1 -0
- hammad/data/collections/indexes/qdrant/index.py +735 -0
- hammad/data/collections/indexes/qdrant/settings.py +94 -0
- hammad/data/collections/indexes/qdrant/utils.py +220 -0
- hammad/data/collections/indexes/tantivy/__init__.py +1 -0
- hammad/data/collections/indexes/tantivy/index.py +428 -0
- hammad/data/collections/indexes/tantivy/settings.py +51 -0
- hammad/data/collections/indexes/tantivy/utils.py +200 -0
- hammad/data/configurations/__init__.py +2 -2
- hammad/data/configurations/configuration.py +2 -2
- hammad/data/models/__init__.py +20 -9
- hammad/data/models/extensions/__init__.py +4 -0
- hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
- hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
- hammad/data/models/{base/fields.py → fields.py} +1 -1
- hammad/data/models/{base/model.py → model.py} +1 -1
- hammad/data/models/{base/utils.py → utils.py} +1 -1
- hammad/data/sql/__init__.py +23 -0
- hammad/data/sql/database.py +578 -0
- hammad/data/sql/types.py +141 -0
- hammad/data/types/__init__.py +1 -3
- hammad/data/types/file.py +3 -3
- hammad/data/types/multimodal/__init__.py +2 -2
- hammad/data/types/multimodal/audio.py +2 -2
- hammad/data/types/multimodal/image.py +2 -2
- hammad/formatting/__init__.py +9 -27
- hammad/formatting/json/__init__.py +8 -2
- hammad/formatting/json/converters.py +7 -1
- hammad/formatting/text/__init__.py +1 -1
- hammad/formatting/yaml/__init__.py +1 -1
- hammad/genai/__init__.py +78 -0
- hammad/genai/agents/__init__.py +1 -0
- hammad/genai/agents/types/__init__.py +35 -0
- hammad/genai/agents/types/history.py +277 -0
- hammad/genai/agents/types/tool.py +490 -0
- hammad/genai/embedding_models/__init__.py +41 -0
- hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
- hammad/genai/embedding_models/embedding_model_name.py +77 -0
- hammad/genai/embedding_models/embedding_model_request.py +65 -0
- hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
- hammad/genai/embedding_models/run.py +161 -0
- hammad/genai/language_models/__init__.py +35 -0
- hammad/genai/language_models/_streaming.py +622 -0
- hammad/genai/language_models/_types.py +276 -0
- hammad/genai/language_models/_utils/__init__.py +31 -0
- hammad/genai/language_models/_utils/_completions.py +131 -0
- hammad/genai/language_models/_utils/_messages.py +89 -0
- hammad/genai/language_models/_utils/_requests.py +202 -0
- hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
- hammad/genai/language_models/language_model.py +734 -0
- hammad/genai/language_models/language_model_request.py +135 -0
- hammad/genai/language_models/language_model_response.py +219 -0
- hammad/genai/language_models/language_model_response_chunk.py +53 -0
- hammad/genai/language_models/run.py +530 -0
- hammad/genai/multimodal_models.py +48 -0
- hammad/genai/rerank_models.py +26 -0
- hammad/logging/__init__.py +1 -1
- hammad/logging/decorators.py +1 -1
- hammad/logging/logger.py +2 -2
- hammad/mcp/__init__.py +1 -1
- hammad/mcp/client/__init__.py +35 -0
- hammad/mcp/client/client.py +105 -4
- hammad/mcp/client/client_service.py +10 -3
- hammad/mcp/servers/__init__.py +24 -0
- hammad/{performance/runtime → runtime}/__init__.py +2 -2
- hammad/{performance/runtime → runtime}/decorators.py +1 -1
- hammad/{performance/runtime → runtime}/run.py +1 -1
- hammad/service/__init__.py +1 -1
- hammad/service/create.py +3 -8
- hammad/service/decorators.py +8 -8
- hammad/typing/__init__.py +28 -0
- hammad/web/__init__.py +3 -3
- hammad/web/http/client.py +1 -1
- hammad/web/models.py +53 -21
- hammad/web/search/client.py +99 -52
- hammad/web/utils.py +13 -13
- hammad_python-0.0.16.dist-info/METADATA +191 -0
- hammad_python-0.0.16.dist-info/RECORD +110 -0
- hammad/ai/__init__.py +0 -1
- hammad/ai/_utils.py +0 -142
- hammad/ai/completions/__init__.py +0 -45
- hammad/ai/completions/client.py +0 -684
- hammad/ai/completions/create.py +0 -710
- hammad/ai/completions/settings.py +0 -100
- hammad/ai/completions/types.py +0 -792
- hammad/ai/completions/utils.py +0 -486
- hammad/ai/embeddings/__init__.py +0 -35
- hammad/ai/embeddings/client/__init__.py +0 -1
- hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
- hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
- hammad/ai/embeddings/create.py +0 -159
- hammad/data/collections/base_collection.py +0 -58
- hammad/data/collections/searchable_collection.py +0 -556
- hammad/data/collections/vector_collection.py +0 -596
- hammad/data/databases/__init__.py +0 -21
- hammad/data/databases/database.py +0 -902
- hammad/data/models/base/__init__.py +0 -35
- hammad/data/models/pydantic/models/__init__.py +0 -28
- hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
- hammad/data/models/pydantic/models/cacheable_model.py +0 -79
- hammad/data/models/pydantic/models/fast_model.py +0 -318
- hammad/data/models/pydantic/models/function_model.py +0 -176
- hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
- hammad/performance/__init__.py +0 -36
- hammad/py.typed +0 -0
- hammad_python-0.0.14.dist-info/METADATA +0 -70
- hammad_python-0.0.14.dist-info/RECORD +0 -99
- {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
hammad/genai/language_models/_types.py
@@ -0,0 +1,276 @@
+"""hammad.genai.language_models._types"""
+
+from typing import (
+    TypeAlias,
+    Literal,
+)
+
+
+__all__ = [
+    "LanguageModelInstructorMode",
+    "LanguageModelName",
+]
+
+
+LanguageModelInstructorMode: TypeAlias = Literal[
+    "function_call",
+    "parallel_tool_call",
+    "tool_call",
+    "tools_strict",
+    "json_mode",
+    "json_o1",
+    "markdown_json_mode",
+    "json_schema_mode",
+    "anthropic_tools",
+    "anthropic_reasoning_tools",
+    "anthropic_json",
+    "mistral_tools",
+    "mistral_structured_outputs",
+    "vertexai_tools",
+    "vertexai_json",
+    "vertexai_parallel_tools",
+    "gemini_json",
+    "gemini_tools",
+    "genai_tools",
+    "genai_structured_outputs",
+    "cohere_tools",
+    "cohere_json_object",
+    "cerebras_tools",
+    "cerebras_json",
+    "fireworks_tools",
+    "fireworks_json",
+    "writer_tools",
+    "bedrock_tools",
+    "bedrock_json",
+    "perplexity_json",
+    "openrouter_structured_outputs",
+]
+"""Instructor prompt/parsing mode for structured outputs."""
+
+
+LanguageModelName: TypeAlias = Literal[
+    "anthropic/claude-3-7-sonnet-latest",
+    "anthropic/claude-3-5-haiku-latest",
+    "anthropic/claude-3-5-sonnet-latest",
+    "anthropic/claude-3-opus-latest",
+    "claude-3-7-sonnet-latest",
+    "claude-3-5-haiku-latest",
+    "bedrock/amazon.titan-tg1-large",
+    "bedrock/amazon.titan-text-lite-v1",
+    "bedrock/amazon.titan-text-express-v1",
+    "bedrock/us.amazon.nova-pro-v1:0",
+    "bedrock/us.amazon.nova-lite-v1:0",
+    "bedrock/us.amazon.nova-micro-v1:0",
+    "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
+    "bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
+    "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
+    "bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
+    "bedrock/anthropic.claude-instant-v1",
+    "bedrock/anthropic.claude-v2:1",
+    "bedrock/anthropic.claude-v2",
+    "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+    "bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
+    "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+    "bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
+    "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+    "bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
+    "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+    "bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
+    "bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
+    "bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+    "bedrock/cohere.command-text-v14",
+    "bedrock/cohere.command-r-v1:0",
+    "bedrock/cohere.command-r-plus-v1:0",
+    "bedrock/cohere.command-light-text-v14",
+    "bedrock/meta.llama3-8b-instruct-v1:0",
+    "bedrock/meta.llama3-70b-instruct-v1:0",
+    "bedrock/meta.llama3-1-8b-instruct-v1:0",
+    "bedrock/us.meta.llama3-1-8b-instruct-v1:0",
+    "bedrock/meta.llama3-1-70b-instruct-v1:0",
+    "bedrock/us.meta.llama3-1-70b-instruct-v1:0",
+    "bedrock/meta.llama3-1-405b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
+    "bedrock/us.meta.llama3-3-70b-instruct-v1:0",
+    "bedrock/mistral.mistral-7b-instruct-v0:2",
+    "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    "bedrock/mistral.mistral-large-2402-v1:0",
+    "bedrock/mistral.mistral-large-2407-v1:0",
+    "claude-3-5-sonnet-latest",
+    "claude-3-opus-latest",
+    "cohere/c4ai-aya-expanse-32b",
+    "cohere/c4ai-aya-expanse-8b",
+    "cohere/command",
+    "cohere/command-light",
+    "cohere/command-light-nightly",
+    "cohere/command-nightly",
+    "cohere/command-r",
+    "cohere/command-r-03-2024",
+    "cohere/command-r-08-2024",
+    "cohere/command-r-plus",
+    "cohere/command-r-plus-04-2024",
+    "cohere/command-r-plus-08-2024",
+    "cohere/command-r7b-12-2024",
+    "deepseek/deepseek-chat",
+    "deepseek/deepseek-reasoner",
+    "google-gla/gemini-1.0-pro",
+    "google-gla/gemini-1.5-flash",
+    "google-gla/gemini-1.5-flash-8b",
+    "google-gla/gemini-1.5-pro",
+    "google-gla/gemini-2.0-flash-exp",
+    "google-gla/gemini-2.0-flash-thinking-exp-01-21",
+    "google-gla/gemini-exp-1206",
+    "google-gla/gemini-2.0-flash",
+    "google-gla/gemini-2.0-flash-lite-preview-02-05",
+    "google-gla/gemini-2.0-pro-exp-02-05",
+    "google-gla/gemini-2.5-flash-preview-04-17",
+    "google-gla/gemini-2.5-pro-exp-03-25",
+    "google-gla/gemini-2.5-pro-preview-03-25",
+    "google-vertex/gemini-1.0-pro",
+    "google-vertex/gemini-1.5-flash",
+    "google-vertex/gemini-1.5-flash-8b",
+    "google-vertex/gemini-1.5-pro",
+    "google-vertex/gemini-2.0-flash-exp",
+    "google-vertex/gemini-2.0-flash-thinking-exp-01-21",
+    "google-vertex/gemini-exp-1206",
+    "google-vertex/gemini-2.0-flash",
+    "google-vertex/gemini-2.0-flash-lite-preview-02-05",
+    "google-vertex/gemini-2.0-pro-exp-02-05",
+    "google-vertex/gemini-2.5-flash-preview-04-17",
+    "google-vertex/gemini-2.5-pro-exp-03-25",
+    "google-vertex/gemini-2.5-pro-preview-03-25",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-0125",
+    "gpt-3.5-turbo-0301",
+    "gpt-3.5-turbo-0613",
+    "gpt-3.5-turbo-1106",
+    "gpt-3.5-turbo-16k",
+    "gpt-3.5-turbo-16k-0613",
+    "gpt-4",
+    "gpt-4-0125-preview",
+    "gpt-4-0314",
+    "gpt-4-0613",
+    "gpt-4-1106-preview",
+    "gpt-4-32k",
+    "gpt-4-32k-0314",
+    "gpt-4-32k-0613",
+    "gpt-4-turbo",
+    "gpt-4-turbo-2024-04-09",
+    "gpt-4-turbo-preview",
+    "gpt-4-vision-preview",
+    "gpt-4.1",
+    "gpt-4.1-2025-04-14",
+    "gpt-4.1-mini",
+    "gpt-4.1-mini-2025-04-14",
+    "gpt-4.1-nano",
+    "gpt-4.1-nano-2025-04-14",
+    "gpt-4o",
+    "gpt-4o-2024-05-13",
+    "gpt-4o-2024-08-06",
+    "gpt-4o-2024-11-20",
+    "gpt-4o-audio-preview",
+    "gpt-4o-audio-preview-2024-10-01",
+    "gpt-4o-audio-preview-2024-12-17",
+    "gpt-4o-mini",
+    "gpt-4o-mini-2024-07-18",
+    "gpt-4o-mini-audio-preview",
+    "gpt-4o-mini-audio-preview-2024-12-17",
+    "gpt-4o-mini-search-preview",
+    "gpt-4o-mini-search-preview-2025-03-11",
+    "gpt-4o-search-preview",
+    "gpt-4o-search-preview-2025-03-11",
+    "groq/distil-whisper-large-v3-en",
+    "groq/gemma2-9b-it",
+    "groq/llama-3.3-70b-versatile",
+    "groq/llama-3.1-8b-instant",
+    "groq/llama-guard-3-8b",
+    "groq/llama3-70b-8192",
+    "groq/llama3-8b-8192",
+    "groq/whisper-large-v3",
+    "groq/whisper-large-v3-turbo",
+    "groq/playai-tts",
+    "groq/playai-tts-arabic",
+    "groq/qwen-qwq-32b",
+    "groq/mistral-saba-24b",
+    "groq/qwen-2.5-coder-32b",
+    "groq/qwen-2.5-32b",
+    "groq/deepseek-r1-distill-qwen-32b",
+    "groq/deepseek-r1-distill-llama-70b",
+    "groq/llama-3.3-70b-specdec",
+    "groq/llama-3.2-1b-preview",
+    "groq/llama-3.2-3b-preview",
+    "groq/llama-3.2-11b-vision-preview",
+    "groq/llama-3.2-90b-vision-preview",
+    "mistral/codestral-latest",
+    "mistral/mistral-large-latest",
+    "mistral/mistral-moderation-latest",
+    "mistral/mistral-small-latest",
+    "o1",
+    "o1-2024-12-17",
+    "o1-mini",
+    "o1-mini-2024-09-12",
+    "o1-preview",
+    "o1-preview-2024-09-12",
+    "o3",
+    "o3-2025-04-16",
+    "o3-mini",
+    "o3-mini-2025-01-31",
+    "openai/chatgpt-4o-latest",
+    "openai/gpt-3.5-turbo",
+    "openai/gpt-3.5-turbo-0125",
+    "openai/gpt-3.5-turbo-0301",
+    "openai/gpt-3.5-turbo-0613",
+    "openai/gpt-3.5-turbo-1106",
+    "openai/gpt-3.5-turbo-16k",
+    "openai/gpt-3.5-turbo-16k-0613",
+    "openai/gpt-4",
+    "openai/gpt-4-0125-preview",
+    "openai/gpt-4-0314",
+    "openai/gpt-4-0613",
+    "openai/gpt-4-1106-preview",
+    "openai/gpt-4-32k",
+    "openai/gpt-4-32k-0314",
+    "openai/gpt-4-32k-0613",
+    "openai/gpt-4-turbo",
+    "openai/gpt-4-turbo-2024-04-09",
+    "openai/gpt-4-turbo-preview",
+    "openai/gpt-4-vision-preview",
+    "openai/gpt-4.1",
+    "openai/gpt-4.1-2025-04-14",
+    "openai/gpt-4.1-mini",
+    "openai/gpt-4.1-mini-2025-04-14",
+    "openai/gpt-4.1-nano",
+    "openai/gpt-4.1-nano-2025-04-14",
+    "openai/gpt-4o",
+    "openai/gpt-4o-2024-05-13",
+    "openai/gpt-4o-2024-08-06",
+    "openai/gpt-4o-2024-11-20",
+    "openai/gpt-4o-audio-preview",
+    "openai/gpt-4o-audio-preview-2024-10-01",
+    "openai/gpt-4o-audio-preview-2024-12-17",
+    "openai/gpt-4o-mini",
+    "openai/gpt-4o-mini-2024-07-18",
+    "openai/gpt-4o-mini-audio-preview",
+    "openai/gpt-4o-mini-audio-preview-2024-12-17",
+    "openai/gpt-4o-mini-search-preview",
+    "openai/gpt-4o-mini-search-preview-2025-03-11",
+    "openai/gpt-4o-search-preview",
+    "openai/gpt-4o-search-preview-2025-03-11",
+    "openai/o1",
+    "openai/o1-2024-12-17",
+    "openai/o1-mini",
+    "openai/o1-mini-2024-09-12",
+    "openai/o1-preview",
+    "openai/o1-preview-2024-09-12",
+    "openai/o3",
+    "openai/o3-2025-04-16",
+    "openai/o3-mini",
+    "openai/o3-mini-2025-01-31",
+    "openai/o4-mini",
+    "openai/o4-mini-2025-04-16",
+    "xai/grok-3-latest",
+]
+"""Helper alias for various compatible models usable with litellm
+completions."""
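Because both aliases are plain `Literal` unions, a type checker can flag unsupported mode or model strings at annotation time. A minimal runtime sketch of how the alias might be consumed (the `is_known_model` helper below is hypothetical, not part of the package):

    from typing import get_args

    def is_known_model(name: str) -> bool:
        # get_args() expands the Literal members of the alias into a tuple.
        return name in get_args(LanguageModelName)

    is_known_model("openai/gpt-4o")  # True
    is_known_model("not-a-model")    # False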
hammad/genai/language_models/_utils/__init__.py
@@ -0,0 +1,31 @@
+"""hammad.genai.language_models._utils"""
+
+from ._completions import (
+    parse_messages_input,
+    handle_completion_request_params,
+    handle_completion_response,
+)
+from ._structured_outputs import (
+    handle_structured_output_request_params,
+    prepare_response_model,
+    handle_structured_output_response,
+)
+from ._messages import (
+    format_tool_calls,
+    consolidate_system_messages,
+)
+from ._requests import (
+    LanguageModelRequestBuilder,
+)
+
+__all__ = [
+    "parse_messages_input",
+    "handle_completion_request_params",
+    "handle_completion_response",
+    "handle_structured_output_request_params",
+    "prepare_response_model",
+    "handle_structured_output_response",
+    "format_tool_calls",
+    "consolidate_system_messages",
+    "LanguageModelRequestBuilder",
+]
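These re-exports flatten the helper modules into a single import surface, so callers can pull everything from the subpackage directly, e.g. (hypothetical usage):

    from hammad.genai.language_models._utils import (
        parse_messages_input,
        LanguageModelRequestBuilder,
    )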
hammad/genai/language_models/_utils/_completions.py
@@ -0,0 +1,131 @@
+"""hammad.genai.language_models._utils._completions"""
+
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional
+
+from ....cache import cached
+
+try:
+    from openai.types.chat import ChatCompletionMessageParam
+except ImportError:
+    ChatCompletionMessageParam = Any
+
+from ..language_model_request import LanguageModelMessagesParam
+from ..language_model_response import LanguageModelResponse
+
+__all__ = [
+    "parse_messages_input",
+    "handle_completion_request_params",
+    "handle_completion_response",
+]
+
+
+@cached
+def parse_messages_input(
+    messages: LanguageModelMessagesParam,
+    instructions: Optional[str] = None,
+) -> List["ChatCompletionMessageParam"]:
+    """Parse various message input formats into standardized ChatCompletionMessageParam format.
+
+    Args:
+        messages: Input messages in various formats
+        instructions: Optional system instructions to prepend
+
+    Returns:
+        List of ChatCompletionMessageParam objects
+    """
+    parsed_messages: List["ChatCompletionMessageParam"] = []
+
+    # Add system instructions if provided
+    if instructions:
+        parsed_messages.append({
+            "role": "system",
+            "content": instructions
+        })
+
+    # Handle different input formats
+    if isinstance(messages, str):
+        # Simple string input
+        parsed_messages.append({
+            "role": "user",
+            "content": messages
+        })
+    elif isinstance(messages, dict):
+        # Single message dict
+        parsed_messages.append(messages)
+    elif isinstance(messages, list):
+        # List of messages
+        for msg in messages:
+            if isinstance(msg, dict):
+                parsed_messages.append(msg)
+            elif isinstance(msg, str):
+                parsed_messages.append({
+                    "role": "user",
+                    "content": msg
+                })
+    else:
+        # Fallback - try to convert to string
+        parsed_messages.append({
+            "role": "user",
+            "content": str(messages)
+        })
+
+    return parsed_messages
+
+
+@cached
+def handle_completion_request_params(params: Dict[str, Any]) -> Dict[str, Any]:
+    """Filter and process parameters for standard completion requests.
+
+    Args:
+        params: Raw request parameters
+
+    Returns:
+        Filtered parameters suitable for LiteLLM completion
+    """
+    # Remove structured output specific parameters
+    excluded_keys = {
+        "type", "instructor_mode", "response_field_name",
+        "response_field_instruction", "max_retries", "strict"
+    }
+
+    filtered_params = {
+        key: value for key, value in params.items()
+        if key not in excluded_keys and value is not None
+    }
+
+    return filtered_params
+
+
+def handle_completion_response(response: Any, model: str) -> LanguageModelResponse[str]:
+    """Convert a LiteLLM completion response to LanguageModelResponse.
+
+    Args:
+        response: LiteLLM ModelResponse object
+        model: Model name used for the request
+
+    Returns:
+        LanguageModelResponse object with string output
+    """
+    # Extract content from the response
+    content = None
+    tool_calls = None
+    refusal = None
+
+    if hasattr(response, "choices") and response.choices:
+        choice = response.choices[0]
+        if hasattr(choice, "message"):
+            message = choice.message
+            content = getattr(message, "content", None)
+            tool_calls = getattr(message, "tool_calls", None)
+            refusal = getattr(message, "refusal", None)
+
+    return LanguageModelResponse(
+        model=model,
+        output=content or "",
+        completion=response,
+        content=content,
+        tool_calls=tool_calls,
+        refusal=refusal,
+    )
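Per the normalization logic above, every supported input shape collapses into OpenAI-style message dicts, with optional instructions prepended as a system message. A short sketch of the expected behavior (results shown as comments):

    msgs = parse_messages_input("What is 2 + 2?", instructions="Answer briefly.")
    # [{"role": "system", "content": "Answer briefly."},
    #  {"role": "user", "content": "What is 2 + 2?"}]

    parse_messages_input([{"role": "user", "content": "hi"}, "and again"])
    # [{"role": "user", "content": "hi"},
    #  {"role": "user", "content": "and again"}]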
hammad/genai/language_models/_utils/_messages.py
@@ -0,0 +1,89 @@
+"""hammad.genai.language_models._utils._messages"""
+
+from typing import Any, List
+
+from ....cache import cached
+
+try:
+    from openai.types.chat import ChatCompletionMessageParam
+except ImportError:
+    ChatCompletionMessageParam = Any
+
+__all__ = [
+    "format_tool_calls",
+    "consolidate_system_messages",
+]
+
+
+@cached
+def format_tool_calls(messages: List["ChatCompletionMessageParam"]) -> List["ChatCompletionMessageParam"]:
+    """Format tool calls in messages for better conversation context.
+
+    Args:
+        messages: List of chat completion messages
+
+    Returns:
+        Messages with formatted tool calls
+    """
+    formatted_messages = []
+
+    for message in messages:
+        if message.get("role") == "assistant" and message.get("tool_calls"):
+            # Create a copy of the message
+            formatted_message = dict(message)
+
+            # Format tool calls into readable content
+            content_parts = []
+            if message.get("content"):
+                content_parts.append(message["content"])
+
+            for tool_call in message["tool_calls"]:
+                formatted_call = (
+                    f"I called the function `{tool_call['function']['name']}` "
+                    f"with the following arguments:\n{tool_call['function']['arguments']}"
+                )
+                content_parts.append(formatted_call)
+
+            formatted_message["content"] = "\n\n".join(content_parts)
+            # Remove tool_calls from the formatted message
+            formatted_message.pop("tool_calls", None)
+
+            formatted_messages.append(formatted_message)
+        else:
+            formatted_messages.append(message)
+
+    return formatted_messages
+
+
+@cached
+def consolidate_system_messages(messages: List["ChatCompletionMessageParam"]) -> List["ChatCompletionMessageParam"]:
+    """Consolidate multiple system messages into a single system message.
+
+    Args:
+        messages: List of chat completion messages
+
+    Returns:
+        Messages with consolidated system messages
+    """
+    system_parts = []
+    other_messages = []
+
+    for message in messages:
+        if message.get("role") == "system":
+            if message.get("content"):
+                system_parts.append(message["content"])
+        else:
+            other_messages.append(message)
+
+    # Create consolidated messages
+    consolidated_messages = []
+
+    if system_parts:
+        consolidated_messages.append({
+            "role": "system",
+            "content": "\n\n".join(system_parts)
+        })
+
+    consolidated_messages.extend(other_messages)
+
+    return consolidated_messages
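Based on the code above, scattered system messages merge into one leading system message joined by blank lines, with all other messages preserved after it. Expected behavior, sketched (results as comments):

    history = [
        {"role": "system", "content": "Be concise."},
        {"role": "user", "content": "Hi."},
        {"role": "system", "content": "Answer in English."},
    ]
    consolidate_system_messages(history)
    # [{"role": "system", "content": "Be concise.\n\nAnswer in English."},
    #  {"role": "user", "content": "Hi."}]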