hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. ham/__init__.py +10 -0
  2. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
  3. hammad_python-0.0.31.dist-info/RECORD +6 -0
  4. hammad/__init__.py +0 -84
  5. hammad/_internal.py +0 -256
  6. hammad/_main.py +0 -226
  7. hammad/cache/__init__.py +0 -40
  8. hammad/cache/base_cache.py +0 -181
  9. hammad/cache/cache.py +0 -169
  10. hammad/cache/decorators.py +0 -261
  11. hammad/cache/file_cache.py +0 -80
  12. hammad/cache/ttl_cache.py +0 -74
  13. hammad/cli/__init__.py +0 -33
  14. hammad/cli/animations.py +0 -573
  15. hammad/cli/plugins.py +0 -867
  16. hammad/cli/styles/__init__.py +0 -55
  17. hammad/cli/styles/settings.py +0 -139
  18. hammad/cli/styles/types.py +0 -358
  19. hammad/cli/styles/utils.py +0 -634
  20. hammad/data/__init__.py +0 -90
  21. hammad/data/collections/__init__.py +0 -49
  22. hammad/data/collections/collection.py +0 -326
  23. hammad/data/collections/indexes/__init__.py +0 -37
  24. hammad/data/collections/indexes/qdrant/__init__.py +0 -1
  25. hammad/data/collections/indexes/qdrant/index.py +0 -723
  26. hammad/data/collections/indexes/qdrant/settings.py +0 -94
  27. hammad/data/collections/indexes/qdrant/utils.py +0 -210
  28. hammad/data/collections/indexes/tantivy/__init__.py +0 -1
  29. hammad/data/collections/indexes/tantivy/index.py +0 -426
  30. hammad/data/collections/indexes/tantivy/settings.py +0 -40
  31. hammad/data/collections/indexes/tantivy/utils.py +0 -176
  32. hammad/data/configurations/__init__.py +0 -35
  33. hammad/data/configurations/configuration.py +0 -564
  34. hammad/data/models/__init__.py +0 -50
  35. hammad/data/models/extensions/__init__.py +0 -4
  36. hammad/data/models/extensions/pydantic/__init__.py +0 -42
  37. hammad/data/models/extensions/pydantic/converters.py +0 -759
  38. hammad/data/models/fields.py +0 -546
  39. hammad/data/models/model.py +0 -1078
  40. hammad/data/models/utils.py +0 -280
  41. hammad/data/sql/__init__.py +0 -24
  42. hammad/data/sql/database.py +0 -576
  43. hammad/data/sql/types.py +0 -127
  44. hammad/data/types/__init__.py +0 -75
  45. hammad/data/types/file.py +0 -431
  46. hammad/data/types/multimodal/__init__.py +0 -36
  47. hammad/data/types/multimodal/audio.py +0 -200
  48. hammad/data/types/multimodal/image.py +0 -182
  49. hammad/data/types/text.py +0 -1308
  50. hammad/formatting/__init__.py +0 -33
  51. hammad/formatting/json/__init__.py +0 -27
  52. hammad/formatting/json/converters.py +0 -158
  53. hammad/formatting/text/__init__.py +0 -63
  54. hammad/formatting/text/converters.py +0 -723
  55. hammad/formatting/text/markdown.py +0 -131
  56. hammad/formatting/yaml/__init__.py +0 -26
  57. hammad/formatting/yaml/converters.py +0 -5
  58. hammad/genai/__init__.py +0 -217
  59. hammad/genai/a2a/__init__.py +0 -32
  60. hammad/genai/a2a/workers.py +0 -552
  61. hammad/genai/agents/__init__.py +0 -59
  62. hammad/genai/agents/agent.py +0 -1973
  63. hammad/genai/agents/run.py +0 -1024
  64. hammad/genai/agents/types/__init__.py +0 -42
  65. hammad/genai/agents/types/agent_context.py +0 -13
  66. hammad/genai/agents/types/agent_event.py +0 -128
  67. hammad/genai/agents/types/agent_hooks.py +0 -220
  68. hammad/genai/agents/types/agent_messages.py +0 -31
  69. hammad/genai/agents/types/agent_response.py +0 -125
  70. hammad/genai/agents/types/agent_stream.py +0 -327
  71. hammad/genai/graphs/__init__.py +0 -125
  72. hammad/genai/graphs/_utils.py +0 -190
  73. hammad/genai/graphs/base.py +0 -1828
  74. hammad/genai/graphs/plugins.py +0 -316
  75. hammad/genai/graphs/types.py +0 -638
  76. hammad/genai/models/__init__.py +0 -1
  77. hammad/genai/models/embeddings/__init__.py +0 -43
  78. hammad/genai/models/embeddings/model.py +0 -226
  79. hammad/genai/models/embeddings/run.py +0 -163
  80. hammad/genai/models/embeddings/types/__init__.py +0 -37
  81. hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
  82. hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
  83. hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
  84. hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
  85. hammad/genai/models/language/__init__.py +0 -57
  86. hammad/genai/models/language/model.py +0 -1098
  87. hammad/genai/models/language/run.py +0 -878
  88. hammad/genai/models/language/types/__init__.py +0 -40
  89. hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
  90. hammad/genai/models/language/types/language_model_messages.py +0 -28
  91. hammad/genai/models/language/types/language_model_name.py +0 -239
  92. hammad/genai/models/language/types/language_model_request.py +0 -127
  93. hammad/genai/models/language/types/language_model_response.py +0 -217
  94. hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
  95. hammad/genai/models/language/types/language_model_settings.py +0 -89
  96. hammad/genai/models/language/types/language_model_stream.py +0 -600
  97. hammad/genai/models/language/utils/__init__.py +0 -28
  98. hammad/genai/models/language/utils/requests.py +0 -421
  99. hammad/genai/models/language/utils/structured_outputs.py +0 -135
  100. hammad/genai/models/model_provider.py +0 -4
  101. hammad/genai/models/multimodal.py +0 -47
  102. hammad/genai/models/reranking.py +0 -26
  103. hammad/genai/types/__init__.py +0 -1
  104. hammad/genai/types/base.py +0 -215
  105. hammad/genai/types/history.py +0 -290
  106. hammad/genai/types/tools.py +0 -507
  107. hammad/logging/__init__.py +0 -35
  108. hammad/logging/decorators.py +0 -834
  109. hammad/logging/logger.py +0 -1018
  110. hammad/mcp/__init__.py +0 -53
  111. hammad/mcp/client/__init__.py +0 -35
  112. hammad/mcp/client/client.py +0 -624
  113. hammad/mcp/client/client_service.py +0 -400
  114. hammad/mcp/client/settings.py +0 -178
  115. hammad/mcp/servers/__init__.py +0 -26
  116. hammad/mcp/servers/launcher.py +0 -1161
  117. hammad/runtime/__init__.py +0 -32
  118. hammad/runtime/decorators.py +0 -142
  119. hammad/runtime/run.py +0 -299
  120. hammad/service/__init__.py +0 -49
  121. hammad/service/create.py +0 -527
  122. hammad/service/decorators.py +0 -283
  123. hammad/types.py +0 -288
  124. hammad/typing/__init__.py +0 -435
  125. hammad/web/__init__.py +0 -43
  126. hammad/web/http/__init__.py +0 -1
  127. hammad/web/http/client.py +0 -944
  128. hammad/web/models.py +0 -275
  129. hammad/web/openapi/__init__.py +0 -1
  130. hammad/web/openapi/client.py +0 -740
  131. hammad/web/search/__init__.py +0 -1
  132. hammad/web/search/client.py +0 -1023
  133. hammad/web/utils.py +0 -472
  134. hammad_python-0.0.30.dist-info/RECORD +0 -135
  135. {hammad → ham}/py.typed +0 -0
  136. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
  137. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
@@ -1,421 +0,0 @@
1
- """hammad.ai.llms.utils._completions"""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import Any, Dict, List, Optional, Type, Generic, TypeVar
6
-
7
- from .....cache import cached
8
- from .....data.models import (
9
- convert_to_pydantic_model,
10
- is_pydantic_model_class,
11
- )
12
-
13
- try:
14
- from openai.types.chat import ChatCompletionMessageParam
15
- except ImportError:
16
- ChatCompletionMessageParam = Any
17
-
18
- from ..types.language_model_messages import LanguageModelMessages
19
- from ..types.language_model_response import LanguageModelResponse
20
- from ..types.language_model_request import LanguageModelRequest
21
- from ..types.language_model_name import LanguageModelName
22
- from ..types.language_model_instructor_mode import LanguageModelInstructorMode
23
-
24
- __all__ = [
25
- "format_tool_calls",
26
- "consolidate_system_messages",
27
- "parse_messages_input",
28
- "handle_completion_request_params",
29
- "handle_completion_response",
30
- "LanguageModelRequestBuilder",
31
- ]
32
-
33
-
34
- T = TypeVar("T")
35
-
36
-
37
- @cached
38
- def format_tool_calls(
39
- messages: List["ChatCompletionMessageParam"],
40
- ) -> List["ChatCompletionMessageParam"]:
41
- """Format tool calls in messages for better conversation context.
42
-
43
- Args:
44
- messages: List of chat completion messages
45
-
46
- Returns:
47
- Messages with formatted tool calls
48
- """
49
- formatted_messages = []
50
-
51
- for message in messages:
52
- if message.get("role") == "assistant" and message.get("tool_calls"):
53
- # Create a copy of the message
54
- formatted_message = dict(message)
55
-
56
- # Format tool calls into readable content
57
- content_parts = []
58
- if message.get("content"):
59
- content_parts.append(message["content"])
60
-
61
- for tool_call in message["tool_calls"]:
62
- formatted_call = (
63
- f"I called the function `{tool_call['function']['name']}` "
64
- f"with the following arguments:\n{tool_call['function']['arguments']}"
65
- )
66
- content_parts.append(formatted_call)
67
-
68
- formatted_message["content"] = "\n\n".join(content_parts)
69
- # Remove tool_calls from the formatted message
70
- formatted_message.pop("tool_calls", None)
71
-
72
- formatted_messages.append(formatted_message)
73
- else:
74
- formatted_messages.append(message)
75
-
76
- return formatted_messages
77
-
78
-
79
- @cached
80
- def consolidate_system_messages(
81
- messages: List["ChatCompletionMessageParam"],
82
- ) -> List["ChatCompletionMessageParam"]:
83
- """Consolidate multiple system messages into a single system message.
84
-
85
- Args:
86
- messages: List of chat completion messages
87
-
88
- Returns:
89
- Messages with consolidated system messages
90
- """
91
- system_parts = []
92
- other_messages = []
93
-
94
- for message in messages:
95
- if message.get("role") == "system":
96
- if message.get("content"):
97
- system_parts.append(message["content"])
98
- else:
99
- other_messages.append(message)
100
-
101
- # Create consolidated messages
102
- consolidated_messages = []
103
-
104
- if system_parts:
105
- consolidated_messages.append(
106
- {"role": "system", "content": "\n\n".join(system_parts)}
107
- )
108
-
109
- consolidated_messages.extend(other_messages)
110
-
111
- return consolidated_messages
112
-
113
-
114
- @cached
115
- def parse_messages_input(
116
- messages: LanguageModelMessages,
117
- instructions: Optional[str] = None,
118
- ) -> List["ChatCompletionMessageParam"]:
119
- """Parse various message input formats into standardized ChatCompletionMessageParam format.
120
-
121
- Args:
122
- messages: Input messages in various formats
123
- instructions: Optional system instructions to prepend
124
-
125
- Returns:
126
- List of ChatCompletionMessageParam objects
127
- """
128
- parsed_messages: List["ChatCompletionMessageParam"] = []
129
-
130
- # Add system instructions if provided
131
- if instructions:
132
- parsed_messages.append({"role": "system", "content": instructions})
133
-
134
- # Handle different input formats
135
- if isinstance(messages, str):
136
- # Simple string input
137
- parsed_messages.append({"role": "user", "content": messages})
138
- elif isinstance(messages, dict):
139
- # Single message dict
140
- parsed_messages.append(messages)
141
- elif isinstance(messages, list):
142
- # List of messages
143
- for msg in messages:
144
- if isinstance(msg, dict):
145
- parsed_messages.append(msg)
146
- elif isinstance(msg, str):
147
- parsed_messages.append({"role": "user", "content": msg})
148
- else:
149
- # Fallback - try to convert to string
150
- parsed_messages.append({"role": "user", "content": str(messages)})
151
-
152
- return parsed_messages
153
-
154
-
155
- @cached
156
- def handle_completion_request_params(params: Dict[str, Any]) -> Dict[str, Any]:
157
- """Filter and process parameters for standard completion requests.
158
-
159
- Args:
160
- params: Raw request parameters
161
-
162
- Returns:
163
- Filtered parameters suitable for LiteLLM completion
164
- """
165
- # Remove structured output specific parameters
166
- excluded_keys = {
167
- "type",
168
- "instructor_mode",
169
- "response_field_name",
170
- "response_field_instruction",
171
- "max_retries",
172
- "strict",
173
- }
174
-
175
- filtered_params = {
176
- key: value
177
- for key, value in params.items()
178
- if key not in excluded_keys and value is not None
179
- }
180
-
181
- return filtered_params
182
-
183
-
184
- def handle_completion_response(response: Any, model: str) -> LanguageModelResponse[str]:
185
- """Convert a LiteLLM completion response to LanguageModelResponse.
186
-
187
- Args:
188
- response: LiteLLM ModelResponse object
189
- model: Model name used for the request
190
-
191
- Returns:
192
- LanguageModelResponse object with string output
193
- """
194
- # Extract content from the response
195
- content = None
196
- tool_calls = None
197
- refusal = None
198
-
199
- if hasattr(response, "choices") and response.choices:
200
- choice = response.choices[0]
201
- if hasattr(choice, "message"):
202
- message = choice.message
203
- content = getattr(message, "content", None)
204
- tool_calls = getattr(message, "tool_calls", None)
205
- refusal = getattr(message, "refusal", None)
206
-
207
- return LanguageModelResponse(
208
- type="language_model",
209
- model=model,
210
- output=content or "",
211
- completion=response,
212
- content=content,
213
- tool_calls=tool_calls,
214
- refusal=refusal,
215
- )
216
-
217
-
218
- class LanguageModelRequestBuilder(Generic[T]):
219
- """A request to a language model with comprehensive parameter handling."""
220
-
221
- def __init__(
222
- self,
223
- messages: LanguageModelMessages,
224
- instructions: Optional[str] = None,
225
- model: LanguageModelName = "openai/gpt-4o-mini",
226
- **kwargs: Any,
227
- ):
228
- """Initialize a language model request.
229
-
230
- Args:
231
- messages: The input messages/content for the request
232
- instructions: Optional system instructions to prepend
233
- model: The model to use for the request
234
- **kwargs: Additional request settings
235
- """
236
- self.messages = messages
237
- self.instructions = instructions
238
- self.model = model
239
- self.settings = self._build_settings(**kwargs)
240
-
241
- # Validate settings
242
- self._validate_settings()
243
-
244
- def _build_settings(self, **kwargs: Any) -> LanguageModelRequest:
245
- """Build the complete settings dictionary from kwargs."""
246
- settings: LanguageModelRequest = {"model": self.model}
247
-
248
- # Add all provided kwargs to settings
249
- for key, value in kwargs.items():
250
- if value is not None:
251
- settings[key] = value
252
-
253
- return settings
254
-
255
- def _validate_settings(self) -> None:
256
- """Validate that the settings are compatible."""
257
- # Check if both tools and structured outputs are specified
258
- has_tools = any(
259
- key in self.settings
260
- for key in [
261
- "tools",
262
- "tool_choice",
263
- "parallel_tool_calls",
264
- "functions",
265
- "function_call",
266
- ]
267
- )
268
-
269
- has_structured_output = (
270
- "type" in self.settings and self.settings["type"] is not str
271
- )
272
-
273
- if has_tools and has_structured_output:
274
- raise ValueError(
275
- "Tools and structured outputs cannot be used together. "
276
- "Please specify either tools OR a structured output type, not both."
277
- )
278
-
279
- def is_structured_output(self) -> bool:
280
- """Check if this request is for structured output."""
281
- return "type" in self.settings and self.settings["type"] is not str
282
-
283
- def is_streaming(self) -> bool:
284
- """Check if this request is for streaming."""
285
- return self.settings.get("stream", False)
286
-
287
- def has_tools(self) -> bool:
288
- """Check if this request has tools."""
289
- return any(
290
- key in self.settings
291
- for key in [
292
- "tools",
293
- "tool_choice",
294
- "parallel_tool_calls",
295
- "functions",
296
- "function_call",
297
- ]
298
- )
299
-
300
- def get_completion_settings(self) -> Dict[str, Any]:
301
- """Get settings filtered for standard completion requests."""
302
- excluded_keys = {
303
- "type",
304
- "instructor_mode",
305
- "response_field_name",
306
- "response_field_instruction",
307
- "response_model_name",
308
- "max_retries",
309
- "strict",
310
- "validation_context",
311
- "context",
312
- "completion_kwargs_hooks",
313
- "completion_response_hooks",
314
- "completion_error_hooks",
315
- "completion_last_attempt_hooks",
316
- "parse_error_hooks",
317
- }
318
-
319
- return {
320
- key: value
321
- for key, value in self.settings.items()
322
- if key not in excluded_keys
323
- }
324
-
325
- def get_structured_output_settings(self) -> Dict[str, Any]:
326
- """Get settings filtered for structured output requests."""
327
- excluded_keys = {
328
- "tools",
329
- "tool_choice",
330
- "parallel_tool_calls",
331
- "functions",
332
- "function_call",
333
- "type",
334
- "instructor_mode",
335
- "response_field_name",
336
- "response_field_instruction",
337
- "response_model_name",
338
- "max_retries",
339
- "strict",
340
- "validation_context",
341
- "context",
342
- "completion_kwargs_hooks",
343
- "completion_response_hooks",
344
- "completion_error_hooks",
345
- "completion_last_attempt_hooks",
346
- "parse_error_hooks",
347
- }
348
-
349
- return {
350
- key: value
351
- for key, value in self.settings.items()
352
- if key not in excluded_keys
353
- }
354
-
355
- def get_output_type(self) -> Type[T]:
356
- """Get the requested output type."""
357
- return self.settings.get("type", str)
358
-
359
- def get_instructor_mode(self) -> LanguageModelInstructorMode:
360
- """Get the instructor mode for structured outputs."""
361
- return self.settings.get("instructor_mode", "tool_call")
362
-
363
- def get_response_field_name(self) -> str:
364
- """Get the response field name for structured outputs."""
365
- return self.settings.get("response_field_name", "content")
366
-
367
- def get_response_field_instruction(self) -> str:
368
- """Get the response field instruction for structured outputs."""
369
- return self.settings.get(
370
- "response_field_instruction",
371
- "A response in the correct type as requested by the user, or relevant content.",
372
- )
373
-
374
- def get_response_model_name(self) -> str:
375
- """Get the response model name for structured outputs."""
376
- return self.settings.get("response_model_name", "Response")
377
-
378
- def get_max_retries(self) -> int:
379
- """Get the maximum retries for structured outputs."""
380
- return self.settings.get("max_retries", 3)
381
-
382
- def get_strict_mode(self) -> bool:
383
- """Get the strict mode for structured outputs."""
384
- return self.settings.get("strict", True)
385
-
386
- def get_validation_context(self) -> Optional[Dict[str, Any]]:
387
- """Get the validation context for structured outputs."""
388
- return self.settings.get("validation_context")
389
-
390
- def get_context(self) -> Optional[Dict[str, Any]]:
391
- """Get the context for structured outputs."""
392
- return self.settings.get("context")
393
-
394
- def prepare_pydantic_model(self) -> Optional[Type[Any]]:
395
- """Prepare a Pydantic model for structured outputs if needed."""
396
- if not self.is_structured_output():
397
- return None
398
-
399
- output_type = self.get_output_type()
400
-
401
- if is_pydantic_model_class(output_type):
402
- return output_type
403
-
404
- # Convert to Pydantic model
405
- return convert_to_pydantic_model(
406
- target=output_type,
407
- name="Response",
408
- field_name=self.get_response_field_name(),
409
- description=self.get_response_field_instruction(),
410
- )
411
-
412
- def __repr__(self) -> str:
413
- """String representation of the request."""
414
- return (
415
- f"LanguageModelRequest("
416
- f"model={self.model}, "
417
- f"structured_output={self.is_structured_output()}, "
418
- f"streaming={self.is_streaming()}, "
419
- f"has_tools={self.has_tools()}"
420
- f")"
421
- )
@@ -1,135 +0,0 @@
1
- """hammad.genai.language_models._utils._structured_outputs"""
2
-
3
- from typing import Any, Dict, Type, TypeVar
4
-
5
- from .....cache import cached
6
- from .....data.models import (
7
- convert_to_pydantic_model,
8
- is_pydantic_model_class,
9
- )
10
- from ..types.language_model_response import LanguageModelResponse
11
-
12
- __all__ = [
13
- "handle_structured_output_request_params",
14
- "prepare_response_model",
15
- "handle_structured_output_response",
16
- ]
17
-
18
- T = TypeVar("T")
19
-
20
-
21
- @cached
22
- def handle_structured_output_request_params(params: Dict[str, Any]) -> Dict[str, Any]:
23
- """Filter and process parameters for structured output requests.
24
-
25
- Args:
26
- params: Raw request parameters
27
-
28
- Returns:
29
- Filtered parameters suitable for Instructor
30
- """
31
- # Remove tool-related parameters (not supported with structured outputs)
32
- # and structured output specific parameters
33
- excluded_keys = {
34
- "tools",
35
- "tool_choice",
36
- "parallel_tool_calls",
37
- "functions",
38
- "function_call",
39
- "type",
40
- "instructor_mode",
41
- "response_field_name",
42
- "response_field_instruction",
43
- "response_model_name",
44
- "max_retries",
45
- "strict",
46
- }
47
-
48
- filtered_params = {
49
- key: value
50
- for key, value in params.items()
51
- if key not in excluded_keys and value is not None
52
- }
53
-
54
- return filtered_params
55
-
56
-
57
- @cached
58
- def prepare_response_model(
59
- output_type: Type[T],
60
- response_field_name: str = "content",
61
- response_field_instruction: str = "A response in the correct type as requested by the user, or relevant content.",
62
- response_model_name: str = "Response",
63
- ) -> Type[Any]:
64
- """Prepare a Pydantic model for structured outputs.
65
-
66
- Args:
67
- output_type: The desired output type
68
- response_field_name: Name of the response field
69
- response_field_instruction: Description of the response field
70
- response_model_name: Name of the response model
71
-
72
- Returns:
73
- Pydantic model class suitable for Instructor
74
- """
75
- # Check if it's already a Pydantic model
76
- if is_pydantic_model_class(output_type):
77
- return output_type
78
-
79
- # Convert to Pydantic model
80
- return convert_to_pydantic_model(
81
- target=output_type,
82
- name=response_model_name,
83
- field_name=response_field_name,
84
- description=response_field_instruction,
85
- )
86
-
87
-
88
- def handle_structured_output_response(
89
- response: Any,
90
- completion: Any,
91
- model: str,
92
- output_type: Type[T],
93
- response_field_name: str = "content",
94
- ) -> LanguageModelResponse[T]:
95
- """Convert an Instructor response to LanguageModelResponse.
96
-
97
- Args:
98
- response: The structured response from Instructor
99
- completion: The raw completion object
100
- model: Model name used for the request
101
- output_type: The expected output type
102
- response_field_name: Name of the response field
103
-
104
- Returns:
105
- LanguageModelResponse object with structured output
106
- """
107
- # Extract the actual value if using converted pydantic model
108
- if not is_pydantic_model_class(output_type) and hasattr(
109
- response, response_field_name
110
- ):
111
- actual_output = getattr(response, response_field_name)
112
- else:
113
- actual_output = response
114
-
115
- # Extract content and tool calls from the completion
116
- content = None
117
- tool_calls = None
118
- refusal = None
119
-
120
- if hasattr(completion, "choices") and completion.choices:
121
- choice = completion.choices[0]
122
- if hasattr(choice, "message"):
123
- message = choice.message
124
- content = getattr(message, "content", None)
125
- tool_calls = getattr(message, "tool_calls", None)
126
- refusal = getattr(message, "refusal", None)
127
-
128
- return LanguageModelResponse(
129
- model=model,
130
- output=actual_output,
131
- completion=completion,
132
- content=content,
133
- tool_calls=tool_calls,
134
- refusal=refusal,
135
- )
@@ -1,4 +0,0 @@
1
- """hammad.genai.models.model_provider"""
2
-
3
- import litellm
4
- import instructor
@@ -1,47 +0,0 @@
1
- """hammad.genai.models.multimodal"""
2
-
3
- # simple litellm refs
4
- # thanks litellm :)
5
-
6
- from typing import TYPE_CHECKING
7
- from ..._internal import create_getattr_importer
8
-
9
-
10
- if TYPE_CHECKING:
11
- from litellm import (
12
- # images / image editing
13
- image_generation as run_image_generation_model,
14
- aimage_generation as async_run_image_generation_model,
15
- image_edit as run_image_edit_model,
16
- aimage_edit as async_run_image_edit_model,
17
- image_variation as run_image_variation_model,
18
- aimage_variation as async_run_image_variation_model,
19
- # audio / speech
20
- speech as run_tts_model,
21
- aspeech as async_run_tts_model,
22
- transcription as run_transcription_model,
23
- atranscription as async_run_transcription_model,
24
- )
25
-
26
-
27
- __all__ = (
28
- # images / image editing
29
- "run_image_generation_model",
30
- "async_run_image_generation_model",
31
- "run_image_edit_model",
32
- "async_run_image_edit_model",
33
- "run_image_variation_model",
34
- "async_run_image_variation_model",
35
- # audio / speech
36
- "run_tts_model",
37
- "async_run_tts_model",
38
- "run_transcription_model",
39
- "async_run_transcription_model",
40
- )
41
-
42
-
43
- __getattr__ = create_getattr_importer(__all__)
44
-
45
-
46
- def __dir__() -> list[str]:
47
- return list(__all__)
@@ -1,26 +0,0 @@
1
- """hammad.genai.models.reranking"""
2
-
3
- # yay litellm
4
-
5
- from typing import TYPE_CHECKING
6
- from ..._internal import create_getattr_importer
7
-
8
-
9
- if TYPE_CHECKING:
10
- from litellm import (
11
- rerank as run_reranking_model,
12
- arerank as async_run_reranking_model,
13
- )
14
-
15
-
16
- __all__ = (
17
- "run_reranking_model",
18
- "async_run_reranking_model",
19
- )
20
-
21
-
22
- __getattr__ = create_getattr_importer(__all__)
23
-
24
-
25
- def __dir__() -> list[str]:
26
- return list(__all__)
@@ -1 +0,0 @@
1
- """hammad.genai.types"""