hammad-python 0.0.14__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. hammad/__init__.py +177 -0
  2. hammad/{performance/imports.py → _internal.py} +7 -1
  3. hammad/cache/__init__.py +1 -1
  4. hammad/cli/__init__.py +3 -1
  5. hammad/cli/_runner.py +265 -0
  6. hammad/cli/animations.py +1 -1
  7. hammad/cli/plugins.py +133 -78
  8. hammad/cli/styles/__init__.py +1 -1
  9. hammad/cli/styles/utils.py +149 -3
  10. hammad/data/__init__.py +56 -29
  11. hammad/data/collections/__init__.py +27 -17
  12. hammad/data/collections/collection.py +205 -383
  13. hammad/data/collections/indexes/__init__.py +37 -0
  14. hammad/data/collections/indexes/qdrant/__init__.py +1 -0
  15. hammad/data/collections/indexes/qdrant/index.py +735 -0
  16. hammad/data/collections/indexes/qdrant/settings.py +94 -0
  17. hammad/data/collections/indexes/qdrant/utils.py +220 -0
  18. hammad/data/collections/indexes/tantivy/__init__.py +1 -0
  19. hammad/data/collections/indexes/tantivy/index.py +428 -0
  20. hammad/data/collections/indexes/tantivy/settings.py +51 -0
  21. hammad/data/collections/indexes/tantivy/utils.py +200 -0
  22. hammad/data/configurations/__init__.py +2 -2
  23. hammad/data/configurations/configuration.py +2 -2
  24. hammad/data/models/__init__.py +20 -9
  25. hammad/data/models/extensions/__init__.py +4 -0
  26. hammad/data/models/{pydantic → extensions/pydantic}/__init__.py +6 -19
  27. hammad/data/models/{pydantic → extensions/pydantic}/converters.py +143 -16
  28. hammad/data/models/{base/fields.py → fields.py} +1 -1
  29. hammad/data/models/{base/model.py → model.py} +1 -1
  30. hammad/data/models/{base/utils.py → utils.py} +1 -1
  31. hammad/data/sql/__init__.py +23 -0
  32. hammad/data/sql/database.py +578 -0
  33. hammad/data/sql/types.py +141 -0
  34. hammad/data/types/__init__.py +1 -3
  35. hammad/data/types/file.py +3 -3
  36. hammad/data/types/multimodal/__init__.py +2 -2
  37. hammad/data/types/multimodal/audio.py +2 -2
  38. hammad/data/types/multimodal/image.py +2 -2
  39. hammad/formatting/__init__.py +9 -27
  40. hammad/formatting/json/__init__.py +8 -2
  41. hammad/formatting/json/converters.py +7 -1
  42. hammad/formatting/text/__init__.py +1 -1
  43. hammad/formatting/yaml/__init__.py +1 -1
  44. hammad/genai/__init__.py +78 -0
  45. hammad/genai/agents/__init__.py +1 -0
  46. hammad/genai/agents/types/__init__.py +35 -0
  47. hammad/genai/agents/types/history.py +277 -0
  48. hammad/genai/agents/types/tool.py +490 -0
  49. hammad/genai/embedding_models/__init__.py +41 -0
  50. hammad/{ai/embeddings/client/litellm_embeddings_client.py → genai/embedding_models/embedding_model.py} +47 -142
  51. hammad/genai/embedding_models/embedding_model_name.py +77 -0
  52. hammad/genai/embedding_models/embedding_model_request.py +65 -0
  53. hammad/{ai/embeddings/types.py → genai/embedding_models/embedding_model_response.py} +3 -3
  54. hammad/genai/embedding_models/run.py +161 -0
  55. hammad/genai/language_models/__init__.py +35 -0
  56. hammad/genai/language_models/_streaming.py +622 -0
  57. hammad/genai/language_models/_types.py +276 -0
  58. hammad/genai/language_models/_utils/__init__.py +31 -0
  59. hammad/genai/language_models/_utils/_completions.py +131 -0
  60. hammad/genai/language_models/_utils/_messages.py +89 -0
  61. hammad/genai/language_models/_utils/_requests.py +202 -0
  62. hammad/genai/language_models/_utils/_structured_outputs.py +124 -0
  63. hammad/genai/language_models/language_model.py +734 -0
  64. hammad/genai/language_models/language_model_request.py +135 -0
  65. hammad/genai/language_models/language_model_response.py +219 -0
  66. hammad/genai/language_models/language_model_response_chunk.py +53 -0
  67. hammad/genai/language_models/run.py +530 -0
  68. hammad/genai/multimodal_models.py +48 -0
  69. hammad/genai/rerank_models.py +26 -0
  70. hammad/logging/__init__.py +1 -1
  71. hammad/logging/decorators.py +1 -1
  72. hammad/logging/logger.py +2 -2
  73. hammad/mcp/__init__.py +1 -1
  74. hammad/mcp/client/__init__.py +35 -0
  75. hammad/mcp/client/client.py +105 -4
  76. hammad/mcp/client/client_service.py +10 -3
  77. hammad/mcp/servers/__init__.py +24 -0
  78. hammad/{performance/runtime → runtime}/__init__.py +2 -2
  79. hammad/{performance/runtime → runtime}/decorators.py +1 -1
  80. hammad/{performance/runtime → runtime}/run.py +1 -1
  81. hammad/service/__init__.py +1 -1
  82. hammad/service/create.py +3 -8
  83. hammad/service/decorators.py +8 -8
  84. hammad/typing/__init__.py +28 -0
  85. hammad/web/__init__.py +3 -3
  86. hammad/web/http/client.py +1 -1
  87. hammad/web/models.py +53 -21
  88. hammad/web/search/client.py +99 -52
  89. hammad/web/utils.py +13 -13
  90. hammad_python-0.0.16.dist-info/METADATA +191 -0
  91. hammad_python-0.0.16.dist-info/RECORD +110 -0
  92. hammad/ai/__init__.py +0 -1
  93. hammad/ai/_utils.py +0 -142
  94. hammad/ai/completions/__init__.py +0 -45
  95. hammad/ai/completions/client.py +0 -684
  96. hammad/ai/completions/create.py +0 -710
  97. hammad/ai/completions/settings.py +0 -100
  98. hammad/ai/completions/types.py +0 -792
  99. hammad/ai/completions/utils.py +0 -486
  100. hammad/ai/embeddings/__init__.py +0 -35
  101. hammad/ai/embeddings/client/__init__.py +0 -1
  102. hammad/ai/embeddings/client/base_embeddings_client.py +0 -26
  103. hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +0 -200
  104. hammad/ai/embeddings/create.py +0 -159
  105. hammad/data/collections/base_collection.py +0 -58
  106. hammad/data/collections/searchable_collection.py +0 -556
  107. hammad/data/collections/vector_collection.py +0 -596
  108. hammad/data/databases/__init__.py +0 -21
  109. hammad/data/databases/database.py +0 -902
  110. hammad/data/models/base/__init__.py +0 -35
  111. hammad/data/models/pydantic/models/__init__.py +0 -28
  112. hammad/data/models/pydantic/models/arbitrary_model.py +0 -46
  113. hammad/data/models/pydantic/models/cacheable_model.py +0 -79
  114. hammad/data/models/pydantic/models/fast_model.py +0 -318
  115. hammad/data/models/pydantic/models/function_model.py +0 -176
  116. hammad/data/models/pydantic/models/subscriptable_model.py +0 -63
  117. hammad/performance/__init__.py +0 -36
  118. hammad/py.typed +0 -0
  119. hammad_python-0.0.14.dist-info/METADATA +0 -70
  120. hammad_python-0.0.14.dist-info/RECORD +0 -99
  121. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/WHEEL +0 -0
  122. {hammad_python-0.0.14.dist-info → hammad_python-0.0.16.dist-info}/licenses/LICENSE +0 -0
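
Taken together, the listing shows a reorganization of the package rather than isolated fixes: the old `hammad.ai` modules are removed in favor of a new `hammad.genai` package, `hammad.performance` is folded into `hammad._internal` and a top-level `hammad.runtime`, and the Pydantic helpers move under `hammad.data.models.extensions`. An illustrative sketch of the corresponding import-path changes (module paths are inferred from the file renames above, not from the project's documentation):

import hammad.runtime                          # was hammad.performance.runtime
import hammad.data.models.extensions.pydantic  # was hammad.data.models.pydantic
import hammad.genai.language_models            # new; hammad.ai.completions is removed
import hammad.genai.embedding_models           # new; hammad.ai.embeddings is removed

Two of the new files, the agent tool system and the embedding-models package initializer, appear in full below.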
hammad/genai/agents/types/tool.py +490 -0
@@ -0,0 +1,490 @@
+"""hammad.genai.types.tool
+
+Tool system for agent function calling with JSON schema generation.
+"""
+
+import asyncio
+import concurrent.futures
+import inspect
+import json
+from typing import Any, Callable, Dict, List, Optional, Union, get_type_hints, TYPE_CHECKING, Generic, TypeVar, ParamSpec, overload
+from dataclasses import dataclass
+from pydantic import BaseModel, Field, ValidationError
+
+from ....formatting.json.converters import convert_to_json_schema
+from ....data.models.extensions.pydantic.converters import (
+    get_pydantic_fields_from_function,
+    convert_to_pydantic_model
+)
+
+if TYPE_CHECKING:
+    from ...language_models.language_model_response import LanguageModelResponse
+    from ...language_models._streaming import Stream, AsyncStream
+
+# Type variables for generic tool typing
+P = ParamSpec("P")
+R = TypeVar("R")
+
+__all__ = (
+    "Tool",
+    "function_tool",
+    "ToolResponseMessage",
+    "execute_tool_calls_parallel",
+    "execute_tools_from_response",
+)
+
+
+@dataclass
+class ToolResponseMessage:
+    """Represents a tool response message for chat completion."""
+
+    tool_call_id: str
+    """ID of the tool call this response corresponds to."""
+
+    name: str
+    """Name of the tool that was called."""
+
+    content: str
+    """The result/output of the tool execution."""
+
+    role: str = "tool"
+    """Message role, always 'tool'."""
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary format for API calls."""
+        return {
+            "role": self.role,
+            "tool_call_id": self.tool_call_id,
+            "name": self.name,
+            "content": self.content
+        }
+
+
+def extract_tool_calls_from_response(
+    response: Union["LanguageModelResponse", "Stream", "AsyncStream"]
+) -> List[Any]:
+    """Extract tool calls from various response types."""
+    # Handle LanguageModelResponse
+    if hasattr(response, "get_tool_calls"):
+        tool_calls = response.get_tool_calls()
+        return tool_calls or []
+
+    # Handle Stream/AsyncStream - need to collect first
+    if hasattr(response, "collect"):
+        try:
+            if hasattr(response, "_is_consumed") and not response._is_consumed:
+                # For streams, we need to consume them first
+                if asyncio.iscoroutine(response.collect()):
+                    # Async stream
+                    loop = asyncio.get_event_loop()
+                    collected_response = loop.run_until_complete(response.collect())
+                else:
+                    # Sync stream
+                    collected_response = response.collect()
+
+                if hasattr(collected_response, "get_tool_calls"):
+                    return collected_response.get_tool_calls() or []
+            else:
+                # Already consumed, try to get tool calls directly
+                if hasattr(response, "get_tool_calls"):
+                    return response.get_tool_calls() or []
+        except Exception:
+            pass
+
+    # Check if response has tool_calls attribute directly
+    if hasattr(response, "tool_calls"):
+        return response.tool_calls or []
+
+    return []
+
+def execute_tool_calls_parallel(
+    tool: "Tool[P, R]",
+    tool_calls: List[Any],
+    context: Any = None
+) -> List[ToolResponseMessage]:
+    """Execute multiple tool calls in parallel using ThreadPoolExecutor."""
+    with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(tool_calls), 4)) as executor:
+        futures = [
+            executor.submit(tool.call_from_tool_call, call, context)
+            for call in tool_calls
+        ]
+
+        results = []
+        for future in concurrent.futures.as_completed(futures):
+            try:
+                results.append(future.result())
+            except Exception as e:
+                # Create error response
+                results.append(ToolResponseMessage(
+                    tool_call_id="unknown",
+                    name=tool.name,
+                    content=f"Tool execution failed: {str(e)}"
+                ))
+
+        return results
+
+
+@dataclass
+class Tool(Generic[P, R]):
+    """A tool that wraps a function for agent execution.
+
+    Combines concepts from both PydanticAI and OpenAI tool specifications
+    to provide a simple, internalized tool system.
+    """
+
+    name: str
+    """The name of the tool."""
+
+    description: str
+    """Description of what the tool does."""
+
+    function: Callable[P, R]
+    """The Python function to execute."""
+
+    parameters_json_schema: Dict[str, Any]
+    """JSON schema for the tool's parameters."""
+
+    takes_context: bool = False
+    """Whether the function expects a context as first parameter."""
+
+    strict: bool = True
+    """Whether to enforce strict JSON schema validation."""
+
+    def __post_init__(self):
+        """Validate the tool after initialization."""
+        if not callable(self.function):
+            raise ValueError("Tool function must be callable")
+        if not self.name:
+            raise ValueError("Tool name cannot be empty")
+        if not self.parameters_json_schema:
+            raise ValueError("Tool must have parameters JSON schema")
+
+    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
+        """Call the tool's function directly with the given arguments.
+
+        This allows using the tool as if it were the original function.
+
+        Args:
+            *args: Positional arguments to pass to the function
+            **kwargs: Keyword arguments to pass to the function
+
+        Returns:
+            The result of the function call
+        """
+        return self.function(*args, **kwargs)
+
+    def call(
+        self,
+        arguments: Union[str, Dict[str, Any]],
+        context: Any = None,
+    ) -> Any:
+        """Execute the tool with given arguments.
+
+        Args:
+            arguments: Tool arguments as JSON string or dict
+            context: Optional context to pass as first argument if takes_context=True
+            as_message: Whether to return the result as a ToolResponseMessage
+
+        Returns:
+            The result of the function call
+
+        Raises:
+            ValidationError: If arguments don't match schema
+            ValueError: If function execution fails
+        """
+        # Parse arguments if they're a JSON string
+        if isinstance(arguments, str):
+            try:
+                args_dict = json.loads(arguments)
+            except json.JSONDecodeError as e:
+                raise ValidationError(f"Invalid JSON arguments: {e}")
+        else:
+            args_dict = arguments or {}
+
+        # Get function signature and validate arguments
+        sig = inspect.signature(self.function)
+
+        # Filter out context parameter if needed
+        if self.takes_context:
+            params = {k: v for k, v in sig.parameters.items()
+                      if k not in ('self', 'cls', 'context', 'ctx')}
+            filtered_sig = sig.replace(parameters=list(params.values()))
+            bound_args = filtered_sig.bind_partial(**args_dict)
+        else:
+            bound_args = sig.bind_partial(**args_dict)
+
+        try:
+            bound_args.apply_defaults()
+        except TypeError as e:
+            raise ValidationError(f"Arguments don't match function signature: {e}")
+
+        # Execute function with or without context
+        try:
+            if self.takes_context:
+                return self.function(context, **bound_args.arguments)
+            else:
+                return self.function(**bound_args.arguments)
+        except Exception as e:
+            raise ValueError(f"Tool execution failed: {e}")
+
+    def call_from_tool_call(
+        self,
+        tool_call: Union[Dict[str, Any], Any],
+        context: Any = None
+    ) -> ToolResponseMessage:
+        """Execute tool from a tool call and return a tool response message.
+
+        Args:
+            tool_call: Tool call dict or object with function.arguments and id
+            context: Optional context to pass to function
+
+        Returns:
+            ToolResponseMessage with tool call ID and result
+        """
+        # Extract tool call information
+        if isinstance(tool_call, dict):
+            tool_call_id = tool_call.get("id") or tool_call.get("tool_call_id", "unknown")
+            if "function" in tool_call:
+                arguments = tool_call["function"].get("arguments", "{}")
+            else:
+                arguments = tool_call.get("arguments", "{}")
+        else:
+            # Assume it's a pydantic object or similar
+            tool_call_id = getattr(tool_call, "id", "unknown")
+            if hasattr(tool_call, "function"):
+                arguments = getattr(tool_call.function, "arguments", "{}")
+            else:
+                arguments = getattr(tool_call, "arguments", "{}")
+
+        # Execute the tool
+        try:
+            result = self.call(arguments, context)
+            content = str(result) if result is not None else "Tool executed successfully"
+        except Exception as e:
+            content = f"Tool execution failed: {str(e)}"
+
+        return ToolResponseMessage(
+            tool_call_id=tool_call_id,
+            name=self.name,
+            content=content
+        )
+
+    def call_from_response(
+        self,
+        response: Union["LanguageModelResponse", "Stream", "AsyncStream"],
+        context: Any = None,
+        parallel: bool = True
+    ) -> List[ToolResponseMessage]:
+        """Execute tool calls found in a language model response or stream.
+
+        Args:
+            response: LanguageModelResponse, Stream, or AsyncStream
+            context: Optional context to pass to functions
+            parallel: Whether to execute tool calls in parallel
+
+        Returns:
+            List of ToolResponseMessage objects
+        """
+        tool_calls = extract_tool_calls_from_response(response)
+
+        if not tool_calls:
+            return []
+
+        # Filter tool calls that match this tool's name
+        matching_calls = []
+        for tool_call in tool_calls:
+            if isinstance(tool_call, dict):
+                func_name = tool_call.get("function", {}).get("name")
+            else:
+                func_name = getattr(tool_call.function, "name", None) if hasattr(tool_call, "function") else None
+
+            if func_name == self.name:
+                matching_calls.append(tool_call)
+
+        if not matching_calls:
+            return []
+
+        if parallel and len(matching_calls) > 1:
+            return execute_tool_calls_parallel(self, matching_calls, context)
+        else:
+            return [self.call_from_tool_call(call, context) for call in matching_calls]
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert tool to dictionary format suitable for API calls."""
+        return {
+            "type": "function",
+            "function": {
+                "name": self.name,
+                "description": self.description,
+                "parameters": self.parameters_json_schema,
+                "strict": self.strict
+            }
+        }
+
+
+@overload
+def function_tool(
+    function: Callable[P, R],
+) -> Tool[P, R]:
+    """Overload for direct decorator usage: @function_tool"""
+    ...
+
+
+@overload
+def function_tool(
+    *,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    takes_context: bool = False,
+    strict: bool = True,
+) -> Callable[[Callable[P, R]], Tool[P, R]]:
+    """Overload for decorator with parameters: @function_tool(...)"""
+    ...
+
+
+def function_tool(
+    function: Optional[Callable[P, R]] = None,
+    *,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+    takes_context: bool = False,
+    strict: bool = True,
+) -> Union[Tool[P, R], Callable[[Callable[P, R]], Tool[P, R]]]:
+    """Decorator to create a Tool from a function.
+
+    Args:
+        func: Function to wrap (when used as @function_tool)
+        name: Override tool name (defaults to function name)
+        description: Override tool description (defaults to function docstring)
+        takes_context: Whether function expects context as first parameter
+        strict: Whether to enforce strict JSON schema validation
+
+    Returns:
+        Tool instance or decorator function
+
+    Example:
+        @function_tool
+        def my_tool(x: int, y: str = "default") -> str:
+            \"\"\"Does something useful.\"\"\"
+            return f"{x}: {y}"
+
+        # Or with parameters:
+        @function_tool(name="custom_name", takes_context=True)
+        def context_tool(ctx, value: int) -> int:
+            return value * 2
+    """
+
+    def _create_tool(target_func: Callable[P, R]) -> Tool[P, R]:
+        # Extract function metadata
+        func_name = name or target_func.__name__
+        func_description = description or (target_func.__doc__ or "").strip()
+
+        # Generate JSON schema from function signature
+        try:
+            # Try using Pydantic converter first for better schema generation
+            pydantic_fields = get_pydantic_fields_from_function(target_func)
+            if pydantic_fields:
+                # Create temporary Pydantic model to get schema
+                temp_model = convert_to_pydantic_model(
+                    target_func,
+                    name=f"{func_name}_params"
+                )
+                schema = temp_model.model_json_schema()
+                # Extract just the properties and required fields
+                parameters_schema = {
+                    "type": "object",
+                    "properties": schema.get("properties", {}),
+                }
+                if "required" in schema:
+                    parameters_schema["required"] = schema["required"]
+                if strict:
+                    parameters_schema["additionalProperties"] = False
+            else:
+                # Fallback to basic JSON schema conversion
+                parameters_schema = _generate_schema_from_signature(target_func, strict)
+        except Exception:
+            # Ultimate fallback
+            parameters_schema = _generate_schema_from_signature(target_func, strict)
+
+        return Tool[P, R](
+            name=func_name,
+            description=func_description,
+            function=target_func,
+            parameters_json_schema=parameters_schema,
+            takes_context=takes_context,
+            strict=strict
+        )
+
+    # Handle decorator usage patterns
+    if function is None:
+        # Used as @function_tool(...)
+        return _create_tool
+    else:
+        # Used as @function_tool
+        return _create_tool(function)
+
+
+def _generate_schema_from_signature(func: Callable, strict: bool = True) -> Dict[str, Any]:
+    """Generate JSON schema from function signature as fallback."""
+    sig = inspect.signature(func)
+    type_hints = get_type_hints(func)
+
+    properties = {}
+    required = []
+
+    for param_name, param in sig.parameters.items():
+        if param_name == "self" or param_name == "cls":
+            continue
+
+        param_type = type_hints.get(param_name, str)
+
+        try:
+            # Use JSON converter for type
+            param_schema = convert_to_json_schema(param_type)
+        except Exception:
+            # Ultimate fallback
+            param_schema = {"type": "string"}
+
+        properties[param_name] = param_schema
+
+        # Add to required if no default value
+        if param.default is inspect.Parameter.empty:
+            required.append(param_name)
+
+    schema = {
+        "type": "object",
+        "properties": properties,
+    }
+
+    if required:
+        schema["required"] = required
+
+    if strict:
+        schema["additionalProperties"] = False
+
+    return schema
+
+
+# Utility functions for batch tool execution
+def execute_tools_from_response(
+    tools: List[Tool],
+    response: Union["LanguageModelResponse", "Stream", "AsyncStream"],
+    context: Any = None,
+    parallel: bool = True
+) -> List[ToolResponseMessage]:
+    """Execute all matching tools from a response.
+
+    Args:
+        tools: List of tools to check for matches
+        response: LanguageModelResponse, Stream, or AsyncStream
+        context: Optional context to pass to functions
+        parallel: Whether to execute tool calls in parallel
+
+    Returns:
+        List of ToolResponseMessage objects from all executed tools
+    """
+    all_results = []
+    for tool in tools:
+        results = tool.call_from_response(response, context, parallel)
+        all_results.extend(results)
+    return all_results
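
The hunk above defines the whole public surface of the new tool system: `Tool`, the `function_tool` decorator, and the batch helpers. A brief usage sketch based only on the code shown in this hunk (the import path follows the file's location in the listing; the tool-call dict mimics the OpenAI-style shape the code expects):

from hammad.genai.agents.types.tool import function_tool

@function_tool
def get_weather(city: str, unit: str = "celsius") -> str:
    """Look up the weather for a city."""
    return f"Weather in {city} (unit={unit})"

# The decorator returns a Tool; the wrapped function is still callable directly.
print(get_weather("Paris"))

# OpenAI-style payload suitable for the tools= parameter of a chat request.
print(get_weather.to_dict())

# Executing a tool call in the form a language model would emit it.
message = get_weather.call_from_tool_call(
    {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
)
print(message.to_dict())  # {"role": "tool", "tool_call_id": "call_1", ...}

When a response carries several calls to the same tool, `call_from_response` hands them to `execute_tool_calls_parallel`, which runs them on a thread pool capped at four workers and converts any exception into a "Tool execution failed" message instead of raising.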
hammad/genai/embedding_models/__init__.py +41 -0
@@ -0,0 +1,41 @@
+"""hammad.genai.embedding_models"""
+
+from typing import TYPE_CHECKING
+from ..._internal import create_getattr_importer
+
+if TYPE_CHECKING:
+    from .embedding_model import EmbeddingModel
+    from .embedding_model_request import EmbeddingModelRequest
+    from .embedding_model_response import EmbeddingModelResponse
+    from .embedding_model_name import EmbeddingModelName
+    from .run import (
+        run_embedding_model,
+        async_run_embedding_model,
+    )
+
+
+__all__ = (
+    # hammad.genai.embedding_models.embedding_model
+    "EmbeddingModel",
+
+    # hammad.genai.embedding_models.embedding_model_request
+    "EmbeddingModelRequest",
+
+    # hammad.genai.embedding_models.embedding_model_response
+    "EmbeddingModelResponse",
+
+    # hammad.genai.embedding_models.embedding_model_name
+    "EmbeddingModelName",
+
+    # hammad.genai.embedding_models.run
+    "run_embedding_model",
+    "async_run_embedding_model",
+)
+
+
+__getattr__ = create_getattr_importer(__all__)
+
+
+def __dir__() -> list[str]:
+    """Get the attributes of the embedding_models module."""
+    return list(__all__)
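
This initializer is representative of how the new genai subpackages expose their API: real imports happen only for type checkers, while runtime attribute access goes through a module-level `__getattr__` produced by `create_getattr_importer` from `hammad/_internal.py` (the renamed `performance/imports.py`). That helper's implementation is not part of this diff; the sketch below is a hypothetical, minimal stand-in for the PEP 562 pattern it appears to implement, with the name-to-submodule mapping taken from the TYPE_CHECKING block above:

# Hypothetical illustration only -- not the actual create_getattr_importer code.
import importlib
from typing import Any

_LAZY_EXPORTS = {
    "EmbeddingModel": ".embedding_model",
    "EmbeddingModelRequest": ".embedding_model_request",
    "EmbeddingModelResponse": ".embedding_model_response",
    "EmbeddingModelName": ".embedding_model_name",
    "run_embedding_model": ".run",
    "async_run_embedding_model": ".run",
}

def __getattr__(name: str) -> Any:
    """Resolve an exported name by importing its submodule on first access."""
    try:
        module = importlib.import_module(_LAZY_EXPORTS[name], __package__)
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    return getattr(module, name)

The practical effect is that `import hammad.genai.embedding_models` stays cheap: the litellm-backed `EmbeddingModel` (renamed in this release from `ai/embeddings/client/litellm_embeddings_client.py`) is only loaded when the attribute is first touched.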