hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (137)
  1. ham/__init__.py +10 -0
  2. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
  3. hammad_python-0.0.31.dist-info/RECORD +6 -0
  4. hammad/__init__.py +0 -84
  5. hammad/_internal.py +0 -256
  6. hammad/_main.py +0 -226
  7. hammad/cache/__init__.py +0 -40
  8. hammad/cache/base_cache.py +0 -181
  9. hammad/cache/cache.py +0 -169
  10. hammad/cache/decorators.py +0 -261
  11. hammad/cache/file_cache.py +0 -80
  12. hammad/cache/ttl_cache.py +0 -74
  13. hammad/cli/__init__.py +0 -33
  14. hammad/cli/animations.py +0 -573
  15. hammad/cli/plugins.py +0 -867
  16. hammad/cli/styles/__init__.py +0 -55
  17. hammad/cli/styles/settings.py +0 -139
  18. hammad/cli/styles/types.py +0 -358
  19. hammad/cli/styles/utils.py +0 -634
  20. hammad/data/__init__.py +0 -90
  21. hammad/data/collections/__init__.py +0 -49
  22. hammad/data/collections/collection.py +0 -326
  23. hammad/data/collections/indexes/__init__.py +0 -37
  24. hammad/data/collections/indexes/qdrant/__init__.py +0 -1
  25. hammad/data/collections/indexes/qdrant/index.py +0 -723
  26. hammad/data/collections/indexes/qdrant/settings.py +0 -94
  27. hammad/data/collections/indexes/qdrant/utils.py +0 -210
  28. hammad/data/collections/indexes/tantivy/__init__.py +0 -1
  29. hammad/data/collections/indexes/tantivy/index.py +0 -426
  30. hammad/data/collections/indexes/tantivy/settings.py +0 -40
  31. hammad/data/collections/indexes/tantivy/utils.py +0 -176
  32. hammad/data/configurations/__init__.py +0 -35
  33. hammad/data/configurations/configuration.py +0 -564
  34. hammad/data/models/__init__.py +0 -50
  35. hammad/data/models/extensions/__init__.py +0 -4
  36. hammad/data/models/extensions/pydantic/__init__.py +0 -42
  37. hammad/data/models/extensions/pydantic/converters.py +0 -759
  38. hammad/data/models/fields.py +0 -546
  39. hammad/data/models/model.py +0 -1078
  40. hammad/data/models/utils.py +0 -280
  41. hammad/data/sql/__init__.py +0 -24
  42. hammad/data/sql/database.py +0 -576
  43. hammad/data/sql/types.py +0 -127
  44. hammad/data/types/__init__.py +0 -75
  45. hammad/data/types/file.py +0 -431
  46. hammad/data/types/multimodal/__init__.py +0 -36
  47. hammad/data/types/multimodal/audio.py +0 -200
  48. hammad/data/types/multimodal/image.py +0 -182
  49. hammad/data/types/text.py +0 -1308
  50. hammad/formatting/__init__.py +0 -33
  51. hammad/formatting/json/__init__.py +0 -27
  52. hammad/formatting/json/converters.py +0 -158
  53. hammad/formatting/text/__init__.py +0 -63
  54. hammad/formatting/text/converters.py +0 -723
  55. hammad/formatting/text/markdown.py +0 -131
  56. hammad/formatting/yaml/__init__.py +0 -26
  57. hammad/formatting/yaml/converters.py +0 -5
  58. hammad/genai/__init__.py +0 -217
  59. hammad/genai/a2a/__init__.py +0 -32
  60. hammad/genai/a2a/workers.py +0 -552
  61. hammad/genai/agents/__init__.py +0 -59
  62. hammad/genai/agents/agent.py +0 -1973
  63. hammad/genai/agents/run.py +0 -1024
  64. hammad/genai/agents/types/__init__.py +0 -42
  65. hammad/genai/agents/types/agent_context.py +0 -13
  66. hammad/genai/agents/types/agent_event.py +0 -128
  67. hammad/genai/agents/types/agent_hooks.py +0 -220
  68. hammad/genai/agents/types/agent_messages.py +0 -31
  69. hammad/genai/agents/types/agent_response.py +0 -125
  70. hammad/genai/agents/types/agent_stream.py +0 -327
  71. hammad/genai/graphs/__init__.py +0 -125
  72. hammad/genai/graphs/_utils.py +0 -190
  73. hammad/genai/graphs/base.py +0 -1828
  74. hammad/genai/graphs/plugins.py +0 -316
  75. hammad/genai/graphs/types.py +0 -638
  76. hammad/genai/models/__init__.py +0 -1
  77. hammad/genai/models/embeddings/__init__.py +0 -43
  78. hammad/genai/models/embeddings/model.py +0 -226
  79. hammad/genai/models/embeddings/run.py +0 -163
  80. hammad/genai/models/embeddings/types/__init__.py +0 -37
  81. hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
  82. hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
  83. hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
  84. hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
  85. hammad/genai/models/language/__init__.py +0 -57
  86. hammad/genai/models/language/model.py +0 -1098
  87. hammad/genai/models/language/run.py +0 -878
  88. hammad/genai/models/language/types/__init__.py +0 -40
  89. hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
  90. hammad/genai/models/language/types/language_model_messages.py +0 -28
  91. hammad/genai/models/language/types/language_model_name.py +0 -239
  92. hammad/genai/models/language/types/language_model_request.py +0 -127
  93. hammad/genai/models/language/types/language_model_response.py +0 -217
  94. hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
  95. hammad/genai/models/language/types/language_model_settings.py +0 -89
  96. hammad/genai/models/language/types/language_model_stream.py +0 -600
  97. hammad/genai/models/language/utils/__init__.py +0 -28
  98. hammad/genai/models/language/utils/requests.py +0 -421
  99. hammad/genai/models/language/utils/structured_outputs.py +0 -135
  100. hammad/genai/models/model_provider.py +0 -4
  101. hammad/genai/models/multimodal.py +0 -47
  102. hammad/genai/models/reranking.py +0 -26
  103. hammad/genai/types/__init__.py +0 -1
  104. hammad/genai/types/base.py +0 -215
  105. hammad/genai/types/history.py +0 -290
  106. hammad/genai/types/tools.py +0 -507
  107. hammad/logging/__init__.py +0 -35
  108. hammad/logging/decorators.py +0 -834
  109. hammad/logging/logger.py +0 -1018
  110. hammad/mcp/__init__.py +0 -53
  111. hammad/mcp/client/__init__.py +0 -35
  112. hammad/mcp/client/client.py +0 -624
  113. hammad/mcp/client/client_service.py +0 -400
  114. hammad/mcp/client/settings.py +0 -178
  115. hammad/mcp/servers/__init__.py +0 -26
  116. hammad/mcp/servers/launcher.py +0 -1161
  117. hammad/runtime/__init__.py +0 -32
  118. hammad/runtime/decorators.py +0 -142
  119. hammad/runtime/run.py +0 -299
  120. hammad/service/__init__.py +0 -49
  121. hammad/service/create.py +0 -527
  122. hammad/service/decorators.py +0 -283
  123. hammad/types.py +0 -288
  124. hammad/typing/__init__.py +0 -435
  125. hammad/web/__init__.py +0 -43
  126. hammad/web/http/__init__.py +0 -1
  127. hammad/web/http/client.py +0 -944
  128. hammad/web/models.py +0 -275
  129. hammad/web/openapi/__init__.py +0 -1
  130. hammad/web/openapi/client.py +0 -740
  131. hammad/web/search/__init__.py +0 -1
  132. hammad/web/search/client.py +0 -1023
  133. hammad/web/utils.py +0 -472
  134. hammad_python-0.0.30.dist-info/RECORD +0 -135
  135. {hammad → ham}/py.typed +0 -0
  136. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
  137. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
hammad/genai/types/tools.py
@@ -1,507 +0,0 @@
-"""hammad.genai.types.tool
-
-Tool system for agent function calling with JSON schema generation.
-"""
-
-import asyncio
-import concurrent.futures
-from dataclasses import dataclass
-import inspect
-import json
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    List,
-    Optional,
-    Union,
-    get_type_hints,
-    TYPE_CHECKING,
-    Generic,
-    TypeVar,
-    ParamSpec,
-    overload,
-)
-from pydantic import BaseModel, Field, ValidationError
-
-from ...formatting.json.converters import convert_to_json_schema
-from ...data.models.extensions.pydantic.converters import (
-    get_pydantic_fields_from_function,
-    convert_to_pydantic_model,
-)
-from .base import BaseGenAIModelStream, BaseGenAIModelResponse, BaseTool
-
-
-# Type variables for generic tool typing
-P = ParamSpec("P")
-R = TypeVar("R")
-
-__all__ = (
-    "Tool",
-    "define_tool",
-    "ToolResponseMessage",
-    "execute_tool_calls_parallel",
-    "execute_tools_from_language_model_response",
-)
-
-
-@dataclass
-class ToolResponseMessage:
-    """Represents a tool response message for chat completion."""
-
-    tool_call_id: str
-    """ID of the tool call this response corresponds to."""
-
-    name: str
-    """Name of the tool that was called."""
-
-    content: str
-    """The result/output of the tool execution."""
-
-    role: str = "tool"
-    """Message role, always 'tool'."""
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Convert to dictionary format for API calls."""
-        return {
-            "role": self.role,
-            "tool_call_id": self.tool_call_id,
-            "content": self.content,
-        }
-
-
-def extract_tool_calls_from_response(
-    response: Union["BaseGenAIModelResponse", "BaseGenAIModelStream"],
-) -> List[Any]:
-    """Extract tool calls from various response types."""
-    # ensure type is of agent or language model
-    if response.type not in ["language_model", "agent", "graph"]:
-        raise ValueError(f"Response type {response.type} is not supported")
-
-    # Handle LanguageModelResponse
-    if hasattr(response, "get_tool_calls"):
-        tool_calls = response.get_tool_calls()
-        return tool_calls or []
-
-    # Handle Stream/AsyncStream - need to collect first
-    if hasattr(response, "collect"):
-        try:
-            if hasattr(response, "_is_consumed") and not response._is_consumed:
-                # For streams, we need to consume them first
-                if asyncio.iscoroutine(response.collect()):
-                    # Async stream
-                    loop = asyncio.get_event_loop()
-                    collected_response = loop.run_until_complete(response.collect())
-                else:
-                    # Sync stream
-                    collected_response = response.collect()
-
-                if hasattr(collected_response, "get_tool_calls"):
-                    return collected_response.get_tool_calls() or []
-            else:
-                # Already consumed, try to get tool calls directly
-                if hasattr(response, "get_tool_calls"):
-                    return response.get_tool_calls() or []
-        except Exception:
-            pass
-
-    # Check if response has tool_calls attribute directly
-    if hasattr(response, "tool_calls"):
-        return response.tool_calls or []
-
-    return []
-
-
-def execute_tool_calls_parallel(
-    tool: "Tool", tool_calls: List[Any], context: Any = None
-) -> List[ToolResponseMessage]:
-    """Execute multiple tool calls in parallel using ThreadPoolExecutor."""
-    with concurrent.futures.ThreadPoolExecutor(
-        max_workers=min(len(tool_calls), 4)
-    ) as executor:
-        futures = [
-            executor.submit(tool.call_from_tool_call, call, context)
-            for call in tool_calls
-        ]
-
-        results = []
-        for future in concurrent.futures.as_completed(futures):
-            try:
-                results.append(future.result())
-            except Exception as e:
-                # Create error response
-                results.append(
-                    ToolResponseMessage(
-                        tool_call_id="unknown",
-                        name=tool.name,
-                        content=f"Tool execution failed: {str(e)}",
-                    )
-                )
-
-        return results
-
-
-class Tool(BaseTool[P, R]):
-    """A tool that wraps a function for agent execution.
-
-    Combines concepts from both PydanticAI and OpenAI tool specifications
-    to provide a simple, internalized tool system.
-    """
-
-    takes_context: bool = Field(
-        default=False,
-        description="Whether the function expects a context as first parameter.",
-    )
-    """Whether the function expects a context as first parameter."""
-
-    strict: bool = Field(
-        default=True, description="Whether to enforce strict JSON schema validation."
-    )
-    """Whether to enforce strict JSON schema validation."""
-
-    def model_post_init(self, __context: Any) -> None:
-        """Validate the tool after initialization."""
-        if not callable(self.function):
-            raise ValueError("Tool function must be callable")
-        if not self.name:
-            raise ValueError("Tool name cannot be empty")
-        if not self.parameters_json_schema:
-            raise ValueError("Tool must have parameters JSON schema")
-
-    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
-        """Call the tool's function directly with the given arguments.
-
-        This allows using the tool as if it were the original function.
-
-        Args:
-            *args: Positional arguments to pass to the function
-            **kwargs: Keyword arguments to pass to the function
-
-        Returns:
-            The result of the function call
-        """
-        return self.function(*args, **kwargs)
-
-    def call(
-        self,
-        arguments: Union[str, Dict[str, Any]],
-        context: Any = None,
-    ) -> Any:
-        """Execute the tool with given arguments.
-
-        Args:
-            arguments: Tool arguments as JSON string or dict
-            context: Optional context to pass as first argument if takes_context=True
-            as_message: Whether to return the result as a ToolResponseMessage
-
-        Returns:
-            The result of the function call
-
-        Raises:
-            ValidationError: If arguments don't match schema
-            ValueError: If function execution fails
-        """
-        # Parse arguments if they're a JSON string
-        if isinstance(arguments, str):
-            try:
-                args_dict = json.loads(arguments)
-            except json.JSONDecodeError as e:
-                raise ValidationError(f"Invalid JSON arguments: {e}")
-        else:
-            args_dict = arguments or {}
-
-        # Get function signature and validate arguments
-        sig = inspect.signature(self.function)
-
-        # Filter out context parameter if needed
-        if self.takes_context:
-            params = {
-                k: v
-                for k, v in sig.parameters.items()
-                if k not in ("self", "cls", "context", "ctx")
-            }
-            filtered_sig = sig.replace(parameters=list(params.values()))
-            bound_args = filtered_sig.bind_partial(**args_dict)
-        else:
-            bound_args = sig.bind_partial(**args_dict)
-
-        try:
-            bound_args.apply_defaults()
-        except TypeError as e:
-            raise ValidationError(f"Arguments don't match function signature: {e}")
-
-        # Execute function with or without context
-        try:
-            if self.takes_context:
-                return self.function(context, **bound_args.arguments)
-            else:
-                return self.function(**bound_args.arguments)
-        except Exception as e:
-            raise ValueError(f"Tool execution failed: {e}")
-
-    def call_from_tool_call(
-        self, tool_call: Union[Dict[str, Any], Any], context: Any = None
-    ) -> ToolResponseMessage:
-        """Execute tool from a tool call and return a tool response message.
-
-        Args:
-            tool_call: Tool call dict or object with function.arguments and id
-            context: Optional context to pass to function
-
-        Returns:
-            ToolResponseMessage with tool call ID and result
-        """
-        # Extract tool call information
-        if isinstance(tool_call, dict):
-            tool_call_id = tool_call.get("id") or tool_call.get(
-                "tool_call_id", "unknown"
-            )
-            if "function" in tool_call:
-                arguments = tool_call["function"].get("arguments", "{}")
-            else:
-                arguments = tool_call.get("arguments", "{}")
-        else:
-            # Assume it's a pydantic object or similar
-            tool_call_id = getattr(tool_call, "id", "unknown")
-            if hasattr(tool_call, "function"):
-                arguments = getattr(tool_call.function, "arguments", "{}")
-            else:
-                arguments = getattr(tool_call, "arguments", "{}")
-
-        # Execute the tool
-        try:
-            result = self.call(arguments, context)
-            content = (
-                str(result) if result is not None else "Tool executed successfully"
-            )
-        except Exception as e:
-            content = f"Tool execution failed: {str(e)}"
-
-        return ToolResponseMessage(
-            tool_call_id=tool_call_id, name=self.name, content=content
-        )
-
-    def call_from_response(
-        self,
-        response: Union["BaseGenAIModelResponse", "BaseGenAIModelStream"],
-        context: Any = None,
-        parallel: bool = True,
-    ) -> List[ToolResponseMessage]:
-        """Execute tool calls found in a language model response or stream.
-
-        Args:
-            response: LanguageModelResponse, Stream, or AsyncStream
-            context: Optional context to pass to functions
-            parallel: Whether to execute tool calls in parallel
-
-        Returns:
-            List of ToolResponseMessage objects
-        """
-        tool_calls = extract_tool_calls_from_response(response)
-
-        if not tool_calls:
-            return []
-
-        # Filter tool calls that match this tool's name
-        matching_calls = []
-        for tool_call in tool_calls:
-            if isinstance(tool_call, dict):
-                func_name = tool_call.get("function", {}).get("name")
-            else:
-                func_name = (
-                    getattr(tool_call.function, "name", None)
-                    if hasattr(tool_call, "function")
-                    else None
-                )
-
-            if func_name == self.name:
-                matching_calls.append(tool_call)
-
-        if not matching_calls:
-            return []
-
-        if parallel and len(matching_calls) > 1:
-            return execute_tool_calls_parallel(self, matching_calls, context)
-        else:
-            return [self.call_from_tool_call(call, context) for call in matching_calls]
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Convert tool to dictionary format suitable for API calls."""
-        return {
-            "type": "function",
-            "function": {
-                "name": self.name,
-                "description": self.description,
-                "parameters": self.parameters_json_schema,
-                "strict": self.strict,
-            },
-        }
-
-
-@overload
-def define_tool(
-    function: Callable[P, R],
-) -> Tool:
-    """Overload for direct decorator usage: @define_tool"""
-    ...
-
-
-@overload
-def define_tool(
-    *,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-    takes_context: bool = False,
-    strict: bool = True,
-) -> Callable[[Callable[P, R]], Tool]:
-    """Overload for decorator with parameters: @define_tool(...)"""
-    ...
-
-
-def define_tool(
-    function: Optional[Callable[P, R]] = None,
-    *,
-    name: Optional[str] = None,
-    description: Optional[str] = None,
-    takes_context: bool = False,
-    strict: bool = True,
-) -> Union[Tool, Callable[[Callable[P, R]], Tool]]:
-    """Decorator to create a Tool from a function.
-
-    Args:
-        func: Function to wrap (when used as @define_tool)
-        name: Override tool name (defaults to function name)
-        description: Override tool description (defaults to function docstring)
-        takes_context: Whether function expects context as first parameter
-        strict: Whether to enforce strict JSON schema validation
-
-    Returns:
-        Tool instance or decorator function
-
-    Example:
-        @define_tool
-        def my_tool(x: int, y: str = "default") -> str:
-            \"\"\"Does something useful.\"\"\"
-            return f"{x}: {y}"
-
-        # Or with parameters:
-        @define_tool(name="custom_name", takes_context=True)
-        def context_tool(ctx, value: int) -> int:
-            return value * 2
-    """
-
-    def _create_tool(target_func: Callable[P, R]) -> Tool:
-        # Extract function metadata
-        func_name = name or target_func.__name__
-        func_description = description or (target_func.__doc__ or "").strip()
-
-        # Generate JSON schema from function signature
-        try:
-            # Try using Pydantic converter first for better schema generation
-            pydantic_fields = get_pydantic_fields_from_function(target_func)
-            if pydantic_fields:
-                # Create temporary Pydantic model to get schema
-                temp_model = convert_to_pydantic_model(
-                    target_func, name=f"{func_name}_params"
-                )
-                schema = temp_model.model_json_schema()
-                # Extract just the properties and required fields
-                parameters_schema = {
-                    "type": "object",
-                    "properties": schema.get("properties", {}),
-                }
-                if "required" in schema:
-                    parameters_schema["required"] = schema["required"]
-                if strict:
-                    parameters_schema["additionalProperties"] = False
-            else:
-                # Fallback to basic JSON schema conversion
-                parameters_schema = _generate_schema_from_signature(target_func, strict)
-        except Exception:
-            # Ultimate fallback
-            parameters_schema = _generate_schema_from_signature(target_func, strict)
-
-        return Tool(
-            name=func_name,
-            description=func_description,
-            function=target_func,
-            parameters_json_schema=parameters_schema,
-            takes_context=takes_context,
-            strict=strict,
-        )
-
-    # Handle decorator usage patterns
-    if function is None:
-        # Used as @define_tool(...)
-        return _create_tool
-    else:
-        # Used as @define_tool
-        return _create_tool(function)
-
-
-def _generate_schema_from_signature(
-    func: Callable, strict: bool = True
-) -> Dict[str, Any]:
-    """Generate JSON schema from function signature as fallback."""
-    sig = inspect.signature(func)
-    type_hints = get_type_hints(func)
-
-    properties = {}
-    required = []
-
-    for param_name, param in sig.parameters.items():
-        if param_name == "self" or param_name == "cls":
-            continue
-
-        param_type = type_hints.get(param_name, str)
-
-        try:
-            # Use JSON converter for type
-            param_schema = convert_to_json_schema(param_type)
-        except Exception:
-            # Ultimate fallback
-            param_schema = {"type": "string"}
-
-        properties[param_name] = param_schema

-        # Add to required if no default value
-        if param.default is inspect.Parameter.empty:
-            required.append(param_name)
-
-    schema = {
-        "type": "object",
-        "properties": properties,
-    }
-
-    if required:
-        schema["required"] = required
-
-    if strict:
-        schema["additionalProperties"] = False
-
-    return schema
-
-
-# Utility functions for batch tool execution
-def execute_tools_from_language_model_response(
-    tools: List[Tool],
-    response: Union["BaseGenAIModelResponse", "BaseGenAIModelStream"],
-    context: Any = None,
-    parallel: bool = True,
-) -> List[ToolResponseMessage]:
-    """Execute all matching tools from a response.
-
-    Args:
-        tools: List of tools to check for matches
-        response: LanguageModelResponse, Stream, or AsyncStream
-        context: Optional context to pass to functions
-        parallel: Whether to execute tool calls in parallel
-
-    Returns:
-        List of ToolResponseMessage objects from all executed tools
-    """
-    all_results = []
-    for tool in tools:
-        results = tool.call_from_response(response, context, parallel)
-        all_results.extend(results)
-    return all_results
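The hunk above removes `hammad/genai/types/tools.py`, which defined the `Tool` wrapper and the `define_tool` decorator used for agent function calling. A minimal usage sketch of that API against hammad-python 0.0.30 (the last release that still ships this module); the tool name and arguments are made up for illustration:

```python
# Sketch against the removed 0.0.30 module shown above; import path and
# behavior are taken from the deleted hammad/genai/types/tools.py.
from hammad.genai.types.tools import define_tool


@define_tool(name="add_numbers", strict=True)
def add_numbers(x: int, y: int = 1) -> int:
    """Add two integers."""
    return x + y


# The decorator returns a Tool; calling it forwards to the wrapped function.
assert add_numbers(2, y=3) == 5

# Tool.call accepts arguments as a JSON string or a dict.
assert add_numbers.call('{"x": 2, "y": 3}') == 5

# to_dict() yields the OpenAI-style function-tool payload built from the schema.
payload = add_numbers.to_dict()
assert payload["type"] == "function"
assert payload["function"]["name"] == "add_numbers"
```

In 0.0.31 this module, along with the rest of the `hammad` package tree listed above, is removed, so the import only resolves against the 0.0.30 wheel.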
hammad/logging/__init__.py
@@ -1,35 +0,0 @@
-"""hammad.logging"""
-
-from typing import TYPE_CHECKING
-from .._internal import create_getattr_importer
-
-if TYPE_CHECKING:
-    from .logger import Logger, create_logger, create_logger_level, LoggerLevelName
-    from .decorators import (
-        trace_function,
-        trace_cls,
-        trace,
-        trace_http,
-        install_trace_http,
-    )
-
-
-__all__ = (
-    "Logger",
-    "LoggerLevelName",
-    "create_logger",
-    "create_logger_level",
-    "trace_function",
-    "trace_cls",
-    "trace",
-    "trace_http",
-    "install_trace_http",
-)
-
-
-__getattr__ = create_getattr_importer(__all__)
-
-
-def __dir__() -> list[str]:
-    """Get the attributes of the logging module."""
-    return list(__all__)
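The removed `hammad/logging/__init__.py` above exposes its public names lazily: `__getattr__ = create_getattr_importer(__all__)` defers the real imports until an attribute is first accessed, while the `if TYPE_CHECKING:` block keeps static type checkers aware of the exports. The `create_getattr_importer` helper lived in `hammad/_internal.py` (also deleted in this release) and is not shown here; the sketch below is a generic module-level `__getattr__` in the spirit of PEP 562, not that helper's actual implementation, and the name-to-submodule mapping is hypothetical:

```python
# Generic lazy-export sketch (PEP 562 module __getattr__); the removed
# create_getattr_importer may resolve names differently.
import importlib
from typing import Any

# Hypothetical mapping from exported name to the submodule defining it.
_LAZY_EXPORTS = {
    "Logger": ".logger",
    "create_logger": ".logger",
    "trace": ".decorators",
}


def __getattr__(name: str) -> Any:
    """Import the defining submodule only when the name is first accessed."""
    if name in _LAZY_EXPORTS:
        module = importlib.import_module(_LAZY_EXPORTS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__() -> list[str]:
    """Advertise the lazily resolved exports."""
    return list(_LAZY_EXPORTS)
```

The payoff of this pattern is that importing the package stays cheap; the heavier `logger` and `decorators` submodules load only when a name such as `Logger` is actually used.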