hammad-python 0.0.19__py3-none-any.whl → 0.0.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. hammad/__init__.py +7 -137
  2. hammad/_internal.py +1 -0
  3. hammad/cli/_runner.py +8 -8
  4. hammad/cli/plugins.py +55 -26
  5. hammad/cli/styles/utils.py +16 -8
  6. hammad/data/__init__.py +1 -5
  7. hammad/data/collections/__init__.py +2 -3
  8. hammad/data/collections/collection.py +41 -22
  9. hammad/data/collections/indexes/__init__.py +1 -1
  10. hammad/data/collections/indexes/qdrant/__init__.py +1 -1
  11. hammad/data/collections/indexes/qdrant/index.py +106 -118
  12. hammad/data/collections/indexes/qdrant/settings.py +14 -14
  13. hammad/data/collections/indexes/qdrant/utils.py +28 -38
  14. hammad/data/collections/indexes/tantivy/__init__.py +1 -1
  15. hammad/data/collections/indexes/tantivy/index.py +57 -59
  16. hammad/data/collections/indexes/tantivy/settings.py +8 -19
  17. hammad/data/collections/indexes/tantivy/utils.py +28 -52
  18. hammad/data/models/__init__.py +2 -7
  19. hammad/data/sql/__init__.py +1 -1
  20. hammad/data/sql/database.py +71 -73
  21. hammad/data/sql/types.py +37 -51
  22. hammad/formatting/__init__.py +2 -1
  23. hammad/formatting/json/converters.py +2 -2
  24. hammad/genai/__init__.py +96 -36
  25. hammad/genai/agents/__init__.py +47 -1
  26. hammad/genai/agents/agent.py +1298 -0
  27. hammad/genai/agents/run.py +615 -0
  28. hammad/genai/agents/types/__init__.py +29 -22
  29. hammad/genai/agents/types/agent_context.py +13 -0
  30. hammad/genai/agents/types/agent_event.py +128 -0
  31. hammad/genai/agents/types/agent_hooks.py +220 -0
  32. hammad/genai/agents/types/agent_messages.py +31 -0
  33. hammad/genai/agents/types/agent_response.py +122 -0
  34. hammad/genai/agents/types/agent_stream.py +318 -0
  35. hammad/genai/models/__init__.py +1 -0
  36. hammad/genai/models/embeddings/__init__.py +39 -0
  37. hammad/genai/{embedding_models/embedding_model.py → models/embeddings/model.py} +45 -41
  38. hammad/genai/{embedding_models → models/embeddings}/run.py +10 -8
  39. hammad/genai/models/embeddings/types/__init__.py +37 -0
  40. hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_name.py +2 -4
  41. hammad/genai/{embedding_models → models/embeddings/types}/embedding_model_response.py +11 -4
  42. hammad/genai/{embedding_models/embedding_model_request.py → models/embeddings/types/embedding_model_run_params.py} +4 -3
  43. hammad/genai/models/embeddings/types/embedding_model_settings.py +47 -0
  44. hammad/genai/models/language/__init__.py +48 -0
  45. hammad/genai/{language_models/language_model.py → models/language/model.py} +496 -204
  46. hammad/genai/{language_models → models/language}/run.py +80 -57
  47. hammad/genai/models/language/types/__init__.py +40 -0
  48. hammad/genai/models/language/types/language_model_instructor_mode.py +47 -0
  49. hammad/genai/models/language/types/language_model_messages.py +28 -0
  50. hammad/genai/{language_models/_types.py → models/language/types/language_model_name.py} +3 -40
  51. hammad/genai/{language_models → models/language/types}/language_model_request.py +17 -25
  52. hammad/genai/{language_models → models/language/types}/language_model_response.py +60 -67
  53. hammad/genai/{language_models → models/language/types}/language_model_response_chunk.py +8 -5
  54. hammad/genai/models/language/types/language_model_settings.py +89 -0
  55. hammad/genai/{language_models/_streaming.py → models/language/types/language_model_stream.py} +221 -243
  56. hammad/genai/{language_models/_utils → models/language/utils}/__init__.py +8 -11
  57. hammad/genai/models/language/utils/requests.py +421 -0
  58. hammad/genai/{language_models/_utils/_structured_outputs.py → models/language/utils/structured_outputs.py} +31 -20
  59. hammad/genai/models/model_provider.py +4 -0
  60. hammad/genai/{multimodal_models.py → models/multimodal.py} +4 -5
  61. hammad/genai/models/reranking.py +26 -0
  62. hammad/genai/types/__init__.py +1 -0
  63. hammad/genai/types/base.py +215 -0
  64. hammad/genai/{agents/types → types}/history.py +101 -88
  65. hammad/genai/{agents/types/tool.py → types/tools.py} +157 -140
  66. hammad/logging/logger.py +9 -1
  67. hammad/mcp/client/__init__.py +2 -3
  68. hammad/mcp/client/client.py +10 -10
  69. hammad/mcp/servers/__init__.py +2 -1
  70. hammad/service/decorators.py +1 -3
  71. hammad/web/models.py +1 -3
  72. hammad/web/search/client.py +10 -22
  73. {hammad_python-0.0.19.dist-info → hammad_python-0.0.21.dist-info}/METADATA +10 -2
  74. hammad_python-0.0.21.dist-info/RECORD +127 -0
  75. hammad/genai/embedding_models/__init__.py +0 -41
  76. hammad/genai/language_models/__init__.py +0 -35
  77. hammad/genai/language_models/_utils/_completions.py +0 -131
  78. hammad/genai/language_models/_utils/_messages.py +0 -89
  79. hammad/genai/language_models/_utils/_requests.py +0 -202
  80. hammad/genai/rerank_models.py +0 -26
  81. hammad_python-0.0.19.dist-info/RECORD +0 -111
  82. {hammad_python-0.0.19.dist-info → hammad_python-0.0.21.dist-info}/WHEEL +0 -0
  83. {hammad_python-0.0.19.dist-info → hammad_python-0.0.21.dist-info}/licenses/LICENSE +0 -0
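
Most of the churn above is a package reorganization: the 0.0.19 `hammad/genai/embedding_models` and `hammad/genai/language_models` packages move under a new `hammad/genai/models` namespace (`models/embeddings`, `models/language`), a full agent implementation is added under `hammad/genai/agents` (`agent.py`, `run.py`, new `types/` modules), and shared base types land in `hammad/genai/types`. As a rough sketch of what the new layout implies for imports — module paths are taken from the file list above, and whether these modules re-export public names at these locations is an assumption, not something this diff shows:

    # assumed 0.0.21 layout (module paths only, from the file list above)
    from hammad.genai.models.language import model as language_model     # was genai/language_models/language_model.py
    from hammad.genai.models.embeddings import model as embedding_model  # was genai/embedding_models/embedding_model.py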
hammad/genai/{agents/types/tool.py → types/tools.py} RENAMED
@@ -5,21 +5,32 @@ Tool system for agent function calling with JSON schema generation.
 
 import asyncio
 import concurrent.futures
+from dataclasses import dataclass
 import inspect
 import json
-from typing import Any, Callable, Dict, List, Optional, Union, get_type_hints, TYPE_CHECKING, Generic, TypeVar, ParamSpec, overload
-from dataclasses import dataclass
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Union,
+    get_type_hints,
+    TYPE_CHECKING,
+    Generic,
+    TypeVar,
+    ParamSpec,
+    overload,
+)
 from pydantic import BaseModel, Field, ValidationError
 
-from ....formatting.json.converters import convert_to_json_schema
-from ....data.models.extensions.pydantic.converters import (
+from ...formatting.json.converters import convert_to_json_schema
+from ...data.models.extensions.pydantic.converters import (
     get_pydantic_fields_from_function,
-    convert_to_pydantic_model
+    convert_to_pydantic_model,
 )
+from .base import BaseGenAIModelStream, BaseGenAIModelResponse, BaseTool
 
-if TYPE_CHECKING:
-    from ...language_models.language_model_response import LanguageModelResponse
-    from ...language_models._streaming import Stream, AsyncStream
 
 # Type variables for generic tool typing
 P = ParamSpec("P")
@@ -27,48 +38,51 @@ R = TypeVar("R")
 
 __all__ = (
     "Tool",
-    "function_tool",
+    "define_tool",
     "ToolResponseMessage",
     "execute_tool_calls_parallel",
-    "execute_tools_from_response",
+    "execute_tools_from_language_model_response",
 )
 
 
 @dataclass
 class ToolResponseMessage:
     """Represents a tool response message for chat completion."""
-    
+
     tool_call_id: str
     """ID of the tool call this response corresponds to."""
-    
+
     name: str
     """Name of the tool that was called."""
-    
+
     content: str
     """The result/output of the tool execution."""
 
     role: str = "tool"
     """Message role, always 'tool'."""
-    
+
     def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary format for API calls."""
         return {
             "role": self.role,
             "tool_call_id": self.tool_call_id,
-            "name": self.name,
-            "content": self.content
+            "content": self.content,
         }
 
 
 def extract_tool_calls_from_response(
-    response: Union["LanguageModelResponse", "Stream", "AsyncStream"]
+    response: Union["BaseGenAIModelResponse", "BaseGenAIModelStream"],
 ) -> List[Any]:
     """Extract tool calls from various response types."""
+    # ensure type is of agent or language model
+    if response.type not in ["language_model", "agent"]:
+        raise ValueError(f"Response type {response.type} is not supported")
+
     # Handle LanguageModelResponse
     if hasattr(response, "get_tool_calls"):
         tool_calls = response.get_tool_calls()
         return tool_calls or []
-    
+
     # Handle Stream/AsyncStream - need to collect first
     if hasattr(response, "collect"):
         try:
@@ -81,7 +95,7 @@ def extract_tool_calls_from_response(
             else:
                 # Sync stream
                 collected_response = response.collect()
-            
+
             if hasattr(collected_response, "get_tool_calls"):
                 return collected_response.get_tool_calls() or []
             else:
@@ -90,67 +104,62 @@ def extract_tool_calls_from_response(
             return response.get_tool_calls() or []
         except Exception:
             pass
-    
+
     # Check if response has tool_calls attribute directly
     if hasattr(response, "tool_calls"):
         return response.tool_calls or []
-    
+
     return []
 
+
 def execute_tool_calls_parallel(
-    tool: "Tool[P, R]",
-    tool_calls: List[Any],
-    context: Any = None
+    tool: "Tool", tool_calls: List[Any], context: Any = None
 ) -> List[ToolResponseMessage]:
     """Execute multiple tool calls in parallel using ThreadPoolExecutor."""
-    with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(tool_calls), 4)) as executor:
+    with concurrent.futures.ThreadPoolExecutor(
+        max_workers=min(len(tool_calls), 4)
+    ) as executor:
         futures = [
             executor.submit(tool.call_from_tool_call, call, context)
             for call in tool_calls
         ]
-        
+
         results = []
        for future in concurrent.futures.as_completed(futures):
            try:
                results.append(future.result())
            except Exception as e:
                # Create error response
-                results.append(ToolResponseMessage(
-                    tool_call_id="unknown",
-                    name=tool.name,
-                    content=f"Tool execution failed: {str(e)}"
-                ))
-        
+                results.append(
+                    ToolResponseMessage(
+                        tool_call_id="unknown",
+                        name=tool.name,
+                        content=f"Tool execution failed: {str(e)}",
+                    )
+                )
+
     return results
 
 
-@dataclass
-class Tool(Generic[P, R]):
+class Tool(BaseTool[P, R]):
     """A tool that wraps a function for agent execution.
-    
+
     Combines concepts from both PydanticAI and OpenAI tool specifications
     to provide a simple, internalized tool system.
     """
-    
-    name: str
-    """The name of the tool."""
-    
-    description: str
-    """Description of what the tool does."""
-    
-    function: Callable[P, R]
-    """The Python function to execute."""
-    
-    parameters_json_schema: Dict[str, Any]
-    """JSON schema for the tool's parameters."""
-    
-    takes_context: bool = False
+
+    takes_context: bool = Field(
+        default=False,
+        description="Whether the function expects a context as first parameter.",
+    )
     """Whether the function expects a context as first parameter."""
-    
-    strict: bool = True
+
+    strict: bool = Field(
+        default=True, description="Whether to enforce strict JSON schema validation."
+    )
     """Whether to enforce strict JSON schema validation."""
-    
-    def __post_init__(self):
+
+    def model_post_init(self, __context: Any) -> None:
         """Validate the tool after initialization."""
         if not callable(self.function):
             raise ValueError("Tool function must be callable")
@@ -158,36 +167,36 @@ class Tool(Generic[P, R]):
             raise ValueError("Tool name cannot be empty")
         if not self.parameters_json_schema:
             raise ValueError("Tool must have parameters JSON schema")
-    
+
     def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
         """Call the tool's function directly with the given arguments.
-        
+
         This allows using the tool as if it were the original function.
-        
+
         Args:
             *args: Positional arguments to pass to the function
             **kwargs: Keyword arguments to pass to the function
-        
+
         Returns:
             The result of the function call
         """
         return self.function(*args, **kwargs)
-    
+
     def call(
-        self, 
-        arguments: Union[str, Dict[str, Any]], 
+        self,
+        arguments: Union[str, Dict[str, Any]],
         context: Any = None,
     ) -> Any:
         """Execute the tool with given arguments.
-        
+
         Args:
             arguments: Tool arguments as JSON string or dict
             context: Optional context to pass as first argument if takes_context=True
             as_message: Whether to return the result as a ToolResponseMessage
-        
+
         Returns:
             The result of the function call
-        
+
         Raises:
             ValidationError: If arguments don't match schema
             ValueError: If function execution fails
@@ -200,24 +209,27 @@ class Tool(Generic[P, R]):
                 raise ValidationError(f"Invalid JSON arguments: {e}")
         else:
             args_dict = arguments or {}
-        
+
         # Get function signature and validate arguments
         sig = inspect.signature(self.function)
-        
+
         # Filter out context parameter if needed
         if self.takes_context:
-            params = {k: v for k, v in sig.parameters.items()
-                     if k not in ('self', 'cls', 'context', 'ctx')}
+            params = {
+                k: v
+                for k, v in sig.parameters.items()
+                if k not in ("self", "cls", "context", "ctx")
+            }
             filtered_sig = sig.replace(parameters=list(params.values()))
             bound_args = filtered_sig.bind_partial(**args_dict)
         else:
             bound_args = sig.bind_partial(**args_dict)
-        
+
         try:
             bound_args.apply_defaults()
         except TypeError as e:
             raise ValidationError(f"Arguments don't match function signature: {e}")
-        
+
         # Execute function with or without context
         try:
             if self.takes_context:
@@ -226,24 +238,24 @@ class Tool(Generic[P, R]):
                 return self.function(**bound_args.arguments)
         except Exception as e:
             raise ValueError(f"Tool execution failed: {e}")
-    
+
     def call_from_tool_call(
-        self, 
-        tool_call: Union[Dict[str, Any], Any], 
-        context: Any = None
+        self, tool_call: Union[Dict[str, Any], Any], context: Any = None
     ) -> ToolResponseMessage:
         """Execute tool from a tool call and return a tool response message.
-        
+
         Args:
             tool_call: Tool call dict or object with function.arguments and id
             context: Optional context to pass to function
-        
+
         Returns:
             ToolResponseMessage with tool call ID and result
         """
         # Extract tool call information
         if isinstance(tool_call, dict):
-            tool_call_id = tool_call.get("id") or tool_call.get("tool_call_id", "unknown")
+            tool_call_id = tool_call.get("id") or tool_call.get(
+                "tool_call_id", "unknown"
+            )
             if "function" in tool_call:
                 arguments = tool_call["function"].get("arguments", "{}")
             else:
@@ -255,60 +267,64 @@ class Tool(Generic[P, R]):
                 arguments = getattr(tool_call.function, "arguments", "{}")
             else:
                 arguments = getattr(tool_call, "arguments", "{}")
-        
+
         # Execute the tool
         try:
             result = self.call(arguments, context)
-            content = str(result) if result is not None else "Tool executed successfully"
+            content = (
+                str(result) if result is not None else "Tool executed successfully"
+            )
         except Exception as e:
             content = f"Tool execution failed: {str(e)}"
-        
+
         return ToolResponseMessage(
-            tool_call_id=tool_call_id,
-            name=self.name,
-            content=content
+            tool_call_id=tool_call_id, name=self.name, content=content
         )
-    
+
     def call_from_response(
-        self, 
-        response: Union["LanguageModelResponse", "Stream", "AsyncStream"],
+        self,
+        response: Union["BaseGenAIModelResponse", "BaseGenAIModelStream"],
         context: Any = None,
-        parallel: bool = True
+        parallel: bool = True,
     ) -> List[ToolResponseMessage]:
         """Execute tool calls found in a language model response or stream.
-        
+
         Args:
             response: LanguageModelResponse, Stream, or AsyncStream
             context: Optional context to pass to functions
             parallel: Whether to execute tool calls in parallel
-        
+
         Returns:
             List of ToolResponseMessage objects
         """
         tool_calls = extract_tool_calls_from_response(response)
-        
+
         if not tool_calls:
             return []
-        
+
         # Filter tool calls that match this tool's name
         matching_calls = []
         for tool_call in tool_calls:
             if isinstance(tool_call, dict):
                 func_name = tool_call.get("function", {}).get("name")
             else:
-                func_name = getattr(tool_call.function, "name", None) if hasattr(tool_call, "function") else None
-            
+                func_name = (
+                    getattr(tool_call.function, "name", None)
+                    if hasattr(tool_call, "function")
+                    else None
+                )
+
             if func_name == self.name:
                 matching_calls.append(tool_call)
-        
+
         if not matching_calls:
             return []
-        
+
         if parallel and len(matching_calls) > 1:
             return execute_tool_calls_parallel(self, matching_calls, context)
         else:
             return [self.call_from_tool_call(call, context) for call in matching_calls]
-    
+
     def to_dict(self) -> Dict[str, Any]:
         """Convert tool to dictionary format suitable for API calls."""
         return {
@@ -317,68 +333,68 @@ class Tool(Generic[P, R]):
                 "name": self.name,
                 "description": self.description,
                 "parameters": self.parameters_json_schema,
-                "strict": self.strict
-            }
+                "strict": self.strict,
+            },
         }
 
 
 @overload
-def function_tool(
+def define_tool(
     function: Callable[P, R],
-) -> Tool[P, R]:
-    """Overload for direct decorator usage: @function_tool"""
+) -> Tool:
+    """Overload for direct decorator usage: @define_tool"""
     ...
 
 
 @overload
-def function_tool(
+def define_tool(
     *,
     name: Optional[str] = None,
     description: Optional[str] = None,
     takes_context: bool = False,
     strict: bool = True,
-) -> Callable[[Callable[P, R]], Tool[P, R]]:
-    """Overload for decorator with parameters: @function_tool(...)"""
+) -> Callable[[Callable[P, R]], Tool]:
+    """Overload for decorator with parameters: @define_tool(...)"""
     ...
 
 
-def function_tool(
+def define_tool(
     function: Optional[Callable[P, R]] = None,
     *,
     name: Optional[str] = None,
     description: Optional[str] = None,
     takes_context: bool = False,
     strict: bool = True,
-) -> Union[Tool[P, R], Callable[[Callable[P, R]], Tool[P, R]]]:
+) -> Union[Tool, Callable[[Callable[P, R]], Tool]]:
    """Decorator to create a Tool from a function.
-    
+
    Args:
-        func: Function to wrap (when used as @function_tool)
+        func: Function to wrap (when used as @define_tool)
        name: Override tool name (defaults to function name)
        description: Override tool description (defaults to function docstring)
        takes_context: Whether function expects context as first parameter
        strict: Whether to enforce strict JSON schema validation
-    
+
    Returns:
        Tool instance or decorator function
-    
+
    Example:
-        @function_tool
+        @define_tool
        def my_tool(x: int, y: str = "default") -> str:
            \"\"\"Does something useful.\"\"\"
            return f"{x}: {y}"
-        
+
        # Or with parameters:
-        @function_tool(name="custom_name", takes_context=True)
+        @define_tool(name="custom_name", takes_context=True)
        def context_tool(ctx, value: int) -> int:
            return value * 2
    """
-    
-    def _create_tool(target_func: Callable[P, R]) -> Tool[P, R]:
+
+    def _create_tool(target_func: Callable[P, R]) -> Tool:
        # Extract function metadata
        func_name = name or target_func.__name__
        func_description = description or (target_func.__doc__ or "").strip()
-        
+
        # Generate JSON schema from function signature
        try:
            # Try using Pydantic converter first for better schema generation
@@ -386,8 +402,7 @@ def function_tool(
             if pydantic_fields:
                 # Create temporary Pydantic model to get schema
                 temp_model = convert_to_pydantic_model(
-                    target_func,
-                    name=f"{func_name}_params"
+                    target_func, name=f"{func_name}_params"
                 )
                 schema = temp_model.model_json_schema()
                 # Extract just the properties and required fields
@@ -405,81 +420,83 @@ def function_tool(
         except Exception:
             # Ultimate fallback
             parameters_schema = _generate_schema_from_signature(target_func, strict)
-        
-        return Tool[P, R](
+
+        return Tool(
             name=func_name,
             description=func_description,
             function=target_func,
             parameters_json_schema=parameters_schema,
             takes_context=takes_context,
-            strict=strict
+            strict=strict,
         )
-    
+
     # Handle decorator usage patterns
     if function is None:
-        # Used as @function_tool(...)
+        # Used as @define_tool(...)
         return _create_tool
     else:
-        # Used as @function_tool
+        # Used as @define_tool
         return _create_tool(function)
 
 
-def _generate_schema_from_signature(func: Callable, strict: bool = True) -> Dict[str, Any]:
+def _generate_schema_from_signature(
+    func: Callable, strict: bool = True
+) -> Dict[str, Any]:
     """Generate JSON schema from function signature as fallback."""
     sig = inspect.signature(func)
     type_hints = get_type_hints(func)
-    
+
     properties = {}
     required = []
-    
+
     for param_name, param in sig.parameters.items():
         if param_name == "self" or param_name == "cls":
             continue
-        
+
         param_type = type_hints.get(param_name, str)
-        
+
         try:
             # Use JSON converter for type
             param_schema = convert_to_json_schema(param_type)
         except Exception:
             # Ultimate fallback
             param_schema = {"type": "string"}
-        
+
         properties[param_name] = param_schema
-        
+
         # Add to required if no default value
         if param.default is inspect.Parameter.empty:
             required.append(param_name)
-    
+
     schema = {
         "type": "object",
         "properties": properties,
     }
-    
+
     if required:
         schema["required"] = required
-    
+
     if strict:
         schema["additionalProperties"] = False
-    
+
     return schema
 
 
 # Utility functions for batch tool execution
-def execute_tools_from_response(
+def execute_tools_from_language_model_response(
     tools: List[Tool],
-    response: Union["LanguageModelResponse", "Stream", "AsyncStream"],
+    response: Union["BaseGenAIModelResponse", "BaseGenAIModelStream"],
     context: Any = None,
-    parallel: bool = True
+    parallel: bool = True,
 ) -> List[ToolResponseMessage]:
     """Execute all matching tools from a response.
-    
+
     Args:
         tools: List of tools to check for matches
         response: LanguageModelResponse, Stream, or AsyncStream
         context: Optional context to pass to functions
         parallel: Whether to execute tool calls in parallel
-    
+
     Returns:
         List of ToolResponseMessage objects from all executed tools
     """
@@ -487,4 +504,4 @@ def execute_tools_from_response(
     for tool in tools:
         results = tool.call_from_response(response, context, parallel)
         all_results.extend(results)
-    return all_results
+    return all_results
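
Taken together, the hunks above rename the `function_tool` decorator to `define_tool`, rebase `Tool` from a dataclass onto the new pydantic-style `BaseTool[P, R]` (with `takes_context` and `strict` as `Field`s), and rename the batch helper to `execute_tools_from_language_model_response`. A minimal usage sketch based only on the signatures shown in this diff — the import path is assumed from the file's new location, `hammad/genai/types/tools.py`, and is not confirmed here:

    from hammad.genai.types.tools import define_tool  # import path assumed from the new file location

    @define_tool
    def add(a: int, b: int = 1) -> int:
        """Add two integers."""
        return a + b

    # __call__ still proxies straight through to the wrapped function.
    assert add(2, 3) == 5

    # An OpenAI-style tool call dict goes through call_from_tool_call.
    message = add.call_from_tool_call(
        {"id": "call_1", "function": {"name": "add", "arguments": '{"a": 2, "b": 3}'}}
    )
    print(message.to_dict())  # {'role': 'tool', 'tool_call_id': 'call_1', 'content': '5'}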
hammad/logging/logger.py CHANGED
@@ -242,7 +242,7 @@ class RichLoggerFormatter(_logging.Formatter):
 
         # Now format with the styled values
         formatted = self._style._fmt.format(**record.__dict__)
-        return formatted if formatted != 'None' else ''
+        return formatted if formatted != "None" else ""
 
     def _build_renderable_style_string(self, style_dict: dict) -> str:
         """Build a rich markup style string from a CLIStyleRenderableSettings dictionary."""
@@ -953,3 +953,11 @@ def create_logger(
         console=console,
         handlers=handlers,
     )
+
+
+# internal logger and helper
+_logger = Logger("hammad", level="warning")
+
+
+def _get_internal_logger(name: str) -> Logger:
+    return Logger(name=name, level="warning")
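
The second hunk adds a module-level `_logger` and a `_get_internal_logger` factory, both pinned to the "warning" level, presumably so other hammad modules can grab a quiet internal logger by name. Illustrative use only — the `warning()` call below assumes `Logger` exposes the usual level methods, which this diff does not show:

    from hammad.logging.logger import _get_internal_logger

    logger = _get_internal_logger("hammad.genai.agents.agent")
    logger.warning("tool call produced no content")  # assumes Logger has a warning() method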
hammad/mcp/client/__init__.py CHANGED
@@ -12,16 +12,14 @@ if TYPE_CHECKING:
         MCPClientSettings,
         MCPClientSseSettings,
         MCPClientStreamableHttpSettings,
-        MCPClientStdioSettings
+        MCPClientStdioSettings,
     )
 
 __all__ = (
     # hammad.mcp.client
     "MCPClient",
-
     # hammad.mcp.client.client_service
     "MCPClientService",
-
     # hammad.mcp.client.settings
     "MCPClientSettings",
     "MCPClientSseSettings",
@@ -31,6 +29,7 @@ __all__ = (
 
 __getattr__ = create_getattr_importer(__all__)
 
+
 def __dir__() -> list[str]:
     """Get the attributes of the client module."""
     return list(__all__)
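
For context, `create_getattr_importer(__all__)` wires the package up for lazy attribute access, and `__dir__` keeps introspection in sync with `__all__`. The generic PEP 562 pattern this builds on looks roughly like the sketch below; it is illustrative only, not hammad's implementation:

    # __init__.py of a package with a heavy `client` submodule (hypothetical layout)
    import importlib
    from typing import Any

    __all__ = ("MCPClient",)

    def __getattr__(name: str) -> Any:
        # Import the owning submodule only when the name is first accessed.
        if name in __all__:
            return getattr(importlib.import_module(".client", __package__), name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

    def __dir__() -> list[str]:
        return list(__all__)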