hammad-python 0.0.30__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. ham/__init__.py +10 -0
  2. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
  3. hammad_python-0.0.31.dist-info/RECORD +6 -0
  4. hammad/__init__.py +0 -84
  5. hammad/_internal.py +0 -256
  6. hammad/_main.py +0 -226
  7. hammad/cache/__init__.py +0 -40
  8. hammad/cache/base_cache.py +0 -181
  9. hammad/cache/cache.py +0 -169
  10. hammad/cache/decorators.py +0 -261
  11. hammad/cache/file_cache.py +0 -80
  12. hammad/cache/ttl_cache.py +0 -74
  13. hammad/cli/__init__.py +0 -33
  14. hammad/cli/animations.py +0 -573
  15. hammad/cli/plugins.py +0 -867
  16. hammad/cli/styles/__init__.py +0 -55
  17. hammad/cli/styles/settings.py +0 -139
  18. hammad/cli/styles/types.py +0 -358
  19. hammad/cli/styles/utils.py +0 -634
  20. hammad/data/__init__.py +0 -90
  21. hammad/data/collections/__init__.py +0 -49
  22. hammad/data/collections/collection.py +0 -326
  23. hammad/data/collections/indexes/__init__.py +0 -37
  24. hammad/data/collections/indexes/qdrant/__init__.py +0 -1
  25. hammad/data/collections/indexes/qdrant/index.py +0 -723
  26. hammad/data/collections/indexes/qdrant/settings.py +0 -94
  27. hammad/data/collections/indexes/qdrant/utils.py +0 -210
  28. hammad/data/collections/indexes/tantivy/__init__.py +0 -1
  29. hammad/data/collections/indexes/tantivy/index.py +0 -426
  30. hammad/data/collections/indexes/tantivy/settings.py +0 -40
  31. hammad/data/collections/indexes/tantivy/utils.py +0 -176
  32. hammad/data/configurations/__init__.py +0 -35
  33. hammad/data/configurations/configuration.py +0 -564
  34. hammad/data/models/__init__.py +0 -50
  35. hammad/data/models/extensions/__init__.py +0 -4
  36. hammad/data/models/extensions/pydantic/__init__.py +0 -42
  37. hammad/data/models/extensions/pydantic/converters.py +0 -759
  38. hammad/data/models/fields.py +0 -546
  39. hammad/data/models/model.py +0 -1078
  40. hammad/data/models/utils.py +0 -280
  41. hammad/data/sql/__init__.py +0 -24
  42. hammad/data/sql/database.py +0 -576
  43. hammad/data/sql/types.py +0 -127
  44. hammad/data/types/__init__.py +0 -75
  45. hammad/data/types/file.py +0 -431
  46. hammad/data/types/multimodal/__init__.py +0 -36
  47. hammad/data/types/multimodal/audio.py +0 -200
  48. hammad/data/types/multimodal/image.py +0 -182
  49. hammad/data/types/text.py +0 -1308
  50. hammad/formatting/__init__.py +0 -33
  51. hammad/formatting/json/__init__.py +0 -27
  52. hammad/formatting/json/converters.py +0 -158
  53. hammad/formatting/text/__init__.py +0 -63
  54. hammad/formatting/text/converters.py +0 -723
  55. hammad/formatting/text/markdown.py +0 -131
  56. hammad/formatting/yaml/__init__.py +0 -26
  57. hammad/formatting/yaml/converters.py +0 -5
  58. hammad/genai/__init__.py +0 -217
  59. hammad/genai/a2a/__init__.py +0 -32
  60. hammad/genai/a2a/workers.py +0 -552
  61. hammad/genai/agents/__init__.py +0 -59
  62. hammad/genai/agents/agent.py +0 -1973
  63. hammad/genai/agents/run.py +0 -1024
  64. hammad/genai/agents/types/__init__.py +0 -42
  65. hammad/genai/agents/types/agent_context.py +0 -13
  66. hammad/genai/agents/types/agent_event.py +0 -128
  67. hammad/genai/agents/types/agent_hooks.py +0 -220
  68. hammad/genai/agents/types/agent_messages.py +0 -31
  69. hammad/genai/agents/types/agent_response.py +0 -125
  70. hammad/genai/agents/types/agent_stream.py +0 -327
  71. hammad/genai/graphs/__init__.py +0 -125
  72. hammad/genai/graphs/_utils.py +0 -190
  73. hammad/genai/graphs/base.py +0 -1828
  74. hammad/genai/graphs/plugins.py +0 -316
  75. hammad/genai/graphs/types.py +0 -638
  76. hammad/genai/models/__init__.py +0 -1
  77. hammad/genai/models/embeddings/__init__.py +0 -43
  78. hammad/genai/models/embeddings/model.py +0 -226
  79. hammad/genai/models/embeddings/run.py +0 -163
  80. hammad/genai/models/embeddings/types/__init__.py +0 -37
  81. hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
  82. hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
  83. hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
  84. hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
  85. hammad/genai/models/language/__init__.py +0 -57
  86. hammad/genai/models/language/model.py +0 -1098
  87. hammad/genai/models/language/run.py +0 -878
  88. hammad/genai/models/language/types/__init__.py +0 -40
  89. hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
  90. hammad/genai/models/language/types/language_model_messages.py +0 -28
  91. hammad/genai/models/language/types/language_model_name.py +0 -239
  92. hammad/genai/models/language/types/language_model_request.py +0 -127
  93. hammad/genai/models/language/types/language_model_response.py +0 -217
  94. hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
  95. hammad/genai/models/language/types/language_model_settings.py +0 -89
  96. hammad/genai/models/language/types/language_model_stream.py +0 -600
  97. hammad/genai/models/language/utils/__init__.py +0 -28
  98. hammad/genai/models/language/utils/requests.py +0 -421
  99. hammad/genai/models/language/utils/structured_outputs.py +0 -135
  100. hammad/genai/models/model_provider.py +0 -4
  101. hammad/genai/models/multimodal.py +0 -47
  102. hammad/genai/models/reranking.py +0 -26
  103. hammad/genai/types/__init__.py +0 -1
  104. hammad/genai/types/base.py +0 -215
  105. hammad/genai/types/history.py +0 -290
  106. hammad/genai/types/tools.py +0 -507
  107. hammad/logging/__init__.py +0 -35
  108. hammad/logging/decorators.py +0 -834
  109. hammad/logging/logger.py +0 -1018
  110. hammad/mcp/__init__.py +0 -53
  111. hammad/mcp/client/__init__.py +0 -35
  112. hammad/mcp/client/client.py +0 -624
  113. hammad/mcp/client/client_service.py +0 -400
  114. hammad/mcp/client/settings.py +0 -178
  115. hammad/mcp/servers/__init__.py +0 -26
  116. hammad/mcp/servers/launcher.py +0 -1161
  117. hammad/runtime/__init__.py +0 -32
  118. hammad/runtime/decorators.py +0 -142
  119. hammad/runtime/run.py +0 -299
  120. hammad/service/__init__.py +0 -49
  121. hammad/service/create.py +0 -527
  122. hammad/service/decorators.py +0 -283
  123. hammad/types.py +0 -288
  124. hammad/typing/__init__.py +0 -435
  125. hammad/web/__init__.py +0 -43
  126. hammad/web/http/__init__.py +0 -1
  127. hammad/web/http/client.py +0 -944
  128. hammad/web/models.py +0 -275
  129. hammad/web/openapi/__init__.py +0 -1
  130. hammad/web/openapi/client.py +0 -740
  131. hammad/web/search/__init__.py +0 -1
  132. hammad/web/search/client.py +0 -1023
  133. hammad/web/utils.py +0 -472
  134. hammad_python-0.0.30.dist-info/RECORD +0 -135
  135. {hammad → ham}/py.typed +0 -0
  136. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
  137. {hammad_python-0.0.30.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
hammad/runtime/__init__.py DELETED
@@ -1,32 +0,0 @@
- """hammad.runtime"""
-
- from typing import TYPE_CHECKING
- from .._internal import create_getattr_importer
-
-
- if TYPE_CHECKING:
-     from .decorators import (
-         sequentialize_function,
-         parallelize_function,
-         update_batch_type_hints,
-     )
-     from .run import run_sequentially, run_parallel, run_with_retry
-
-
- __all__ = (
-     # hammad.performance.decorators
-     "sequentialize_function",
-     "parallelize_function",
-     "update_batch_type_hints",
-     # hammad.performance.run
-     "run_sequentially",
-     "run_parallel",
-     "run_with_retry",
- )
-
-
- __getattr__ = create_getattr_importer(__all__)
-
-
- def __dir__() -> list[str]:
-     return list(__all__)
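
The deleted `hammad/runtime/__init__.py` above exports its names lazily through `create_getattr_importer` from `hammad._internal` (also removed in this release, file 5 in the list). That helper's implementation is not part of this hunk; the snippet below is only a rough sketch of the PEP 562 module-level `__getattr__` pattern the file relies on, with an assumed name-to-submodule mapping.

```python
# Sketch only: approximates what create_getattr_importer(__all__) wires up for
# this package; the real hammad._internal implementation is not shown here.
import importlib
from typing import Any

# Assumed mapping from exported name to the submodule that defines it.
_LAZY_EXPORTS = {
    "sequentialize_function": ".decorators",
    "parallelize_function": ".decorators",
    "update_batch_type_hints": ".decorators",
    "run_sequentially": ".run",
    "run_parallel": ".run",
    "run_with_retry": ".run",
}


def __getattr__(name: str) -> Any:
    """PEP 562 hook: import the defining submodule on first attribute access."""
    try:
        module = importlib.import_module(_LAZY_EXPORTS[name], __package__)
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    return getattr(module, name)
```

This keeps `import hammad.runtime` cheap: `.decorators` and `.run` are only imported when one of the exported names is actually requested.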
hammad/runtime/decorators.py DELETED
@@ -1,142 +0,0 @@
- """hammad.runtime.decorators"""
-
- import functools
- from typing import (
-     Callable,
-     Iterable,
-     List,
-     Any,
-     TypeVar,
-     Optional,
-     Union,
-     cast,
- )
-
-
- __all__ = (
-     "sequentialize_function",
-     "parallelize_function",
-     "update_batch_type_hints",
- )
-
-
- Parameters = TypeVar("Parameters", bound=dict[str, Any])
- Return = TypeVar("Return")
-
- TaskParameters = TypeVar("TaskParameters", bound=dict[str, Any])
-
-
- def sequentialize_function():
-     """
-     Decorator to make a function that processes a single item (or argument set)
-     able to process an iterable of items (or argument sets) sequentially.
-
-     The decorated function will expect an iterable of argument sets as its
-     primary argument and will return a list of results. If the underlying
-     function raises an error, execution stops and the error propagates.
-
-     Example:
-         @sequentialize_function()
-         def process_single(data, factor):
-             return data * factor
-
-         # Now call it with a list of argument tuples
-         results = process_single([(1, 2), (3, 4)])
-         # results will be [2, 12]
-     """
-     from .run import run_sequentially
-
-     def decorator(
-         func_to_process_single_item: Callable[..., Return],
-     ) -> Callable[[Iterable[TaskParameters]], List[Return]]:
-         @functools.wraps(func_to_process_single_item)
-         def wrapper(args_list_for_func: Iterable[TaskParameters]) -> List[Return]:
-             return run_sequentially(func_to_process_single_item, args_list_for_func)
-
-         return wrapper
-
-     return decorator
-
-
- def parallelize_function(
-     max_workers: Optional[int] = None, timeout: Optional[float] = None
- ):
-     """
-     Decorator to make a function that processes a single item (or argument set)
-     able to process an iterable of items (or argument sets) in parallel.
-
-     The decorated function will expect an iterable of argument sets as its
-     primary argument and will return a list of results or exceptions,
-     maintaining the original order.
-
-     Args:
-         max_workers (Optional[int]): Max worker threads for parallel execution.
-         timeout (Optional[float]): Timeout for each individual task.
-
-     Example:
-         @parallelize_function(max_workers=4, timeout=5.0)
-         def fetch_url_content(url: str) -> str:
-             # ... implementation to fetch url ...
-             return "content"
-
-         # Now call it with a list of URLs
-         results = fetch_url_content(["http://example.com", "http://example.org"])
-         # results will be a list of contents or Exception objects.
-     """
-     from .run import run_parallel
-
-     def decorator(
-         func_to_process_single_item: Callable[..., Return],
-     ) -> Callable[[Iterable[TaskParameters]], List[Union[Return, Exception]]]:
-         @functools.wraps(func_to_process_single_item)
-         def wrapper(
-             args_list_for_func: Iterable[TaskParameters],
-         ) -> List[Union[Return, Exception]]:
-             return run_parallel(
-                 func_to_process_single_item,
-                 args_list_for_func,
-                 max_workers=max_workers,
-                 timeout=timeout,
-             )
-
-         return wrapper
-
-     return decorator
-
-
- def update_batch_type_hints():
-     """
-     Decorator that provides better IDE type hinting for functions converted from
-     single-item to batch processing. This helps IDEs understand the transformation
-     and provide accurate autocomplete and type checking.
-
-     The decorated function maintains proper type information showing it transforms
-     from Callable[[T], R] to Callable[[Iterable[T]], List[R]].
-
-     Example:
-         @typed_batch()
-         def process_url(url: str) -> dict:
-             return {"url": url, "status": "ok"}
-
-         # IDE will now correctly understand:
-         # process_url: (Iterable[str]) -> List[dict]
-         results = process_url(["http://example.com", "http://test.com"])
-     """
-     from .run import run_sequentially
-
-     def decorator(
-         func: Callable[..., Return],
-     ) -> Callable[[Iterable[TaskParameters]], List[Return]]:
-         @functools.wraps(func)
-         def wrapper(args_list: Iterable[TaskParameters]) -> List[Return]:
-             return run_sequentially(func, args_list)
-
-         # Preserve original function's type info while updating signature
-         wrapper.__annotations__ = {
-             "args_list": Iterable[TaskParameters],
-             "return": List[Return],
-         }
-
-         return cast(Callable[[Iterable[TaskParameters]], List[Return]], wrapper)
-
-     return decorator
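
For reference, a short usage sketch of the deleted decorators as they existed in 0.0.30, based only on the docstrings above; `scale` and `slow_scale` are illustrative placeholders, not package code.

```python
# Illustrative only: exercises the 0.0.30 decorators per their docstrings.
from hammad.runtime.decorators import parallelize_function, sequentialize_function


@sequentialize_function()
def scale(value: int, factor: int) -> int:
    return value * factor


@parallelize_function(max_workers=4, timeout=5.0)
def slow_scale(value: int, factor: int) -> int:
    return value * factor


# The decorated functions now take an iterable of argument sets (tuples or
# dicts) and return a list of results, in input order.
print(scale([(1, 2), (3, 4)]))  # [2, 12]
print(slow_scale([{"value": 1, "factor": 2}, {"value": 3, "factor": 4}]))  # [2, 12]
```

The parallel variant may return `Exception` objects in place of results for tasks that fail or time out, as implemented in `run_parallel` below.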
hammad/runtime/run.py DELETED
@@ -1,299 +0,0 @@
- """hammad.runtime.run"""
-
- import concurrent.futures
- import itertools
- import functools
- from typing import (
-     Callable,
-     Iterable,
-     List,
-     Any,
-     TypeVar,
-     Tuple,
-     Optional,
-     Union,
-     Type,
-     overload,
- )
-
- from tenacity import (
-     retry,
-     stop_after_attempt,
-     wait_exponential,
-     retry_if_exception_type,
-     retry_if_exception,
- )
-
-
- __all__ = (
-     "run_sequentially",
-     "run_parallel",
-     "run_with_retry",
- )
-
-
- Parameters = TypeVar("Parameters", bound=dict[str, Any])
- Return = TypeVar("Return")
-
- TaskParameters = TypeVar("TaskParameters", bound=dict[str, Any])
-
-
- def run_sequentially(
-     function: Callable[..., Return],
-     parameters: Iterable[Parameters],
-     raise_on_error: bool = False,
- ) -> List[Return]:
-     """Executes a function multiple times sequentially, using a
-     list of given parameter dictionary definitions.
-
-     If the function raised an exception at any point during
-     the call, by default the exception will be propogated/ignored
-     and the run will continue, unless the `raise_on_error` flag is
-     set to `True`.
-
-     Args:
-         function : The function to execute.
-         parameters : An iterable of parameter dictionaries to pass to the function.
-         raise_on_error : Whether to raise an exception if the function raises an exception.
-
-     Returns:
-         A list of results from the function calls."""
-     results: List[Return] = []
-
-     def execute_single_task(params: Parameters) -> Optional[Return]:
-         """Execute a single task with error handling."""
-         try:
-             if isinstance(params, dict):
-                 return function(**params)
-             else:
-                 # Handle case where params might be a single argument or tuple
-                 if isinstance(params, tuple):
-                     return function(*params)
-                 else:
-                     return function(params)
-         except Exception as e:
-             if raise_on_error:
-                 raise
-             return None
-
-     for params in itertools.chain(parameters):
-         result = execute_single_task(params)
-         if result is not None:
-             results.append(result)
-
-     return results
-
-
- def run_parallel(
-     function: Callable[..., Return],
-     parameters: Iterable[Parameters],
-     max_workers: Optional[int] = None,
-     timeout: Optional[float] = None,
-     raise_on_error: bool = False,
- ) -> List[Union[Return, Exception]]:
-     """Executes a function multiple times in parallel, using a
-     list of given parameter dictionary definitions.
-
-     Uses ThreadPoolExecutor to run tasks concurrently. Results are returned
-     in the same order as the input parameters.
-
-     Args:
-         function : The function to execute.
-         parameters : An iterable of parameter dictionaries to pass to the function.
-         max_workers : The maximum number of worker threads. If None, defaults
-             to ThreadPoolExecutor's default (typically based on CPU cores).
-         timeout : The maximum number of seconds to wait for each individual task
-             to complete. If a task exceeds this timeout, a
-             concurrent.futures.TimeoutError will be stored as its result.
-             If None, tasks will wait indefinitely for completion.
-         raise_on_error : Whether to raise an exception if the function raises an exception.
-             If False, exceptions are returned as results instead of being raised.
-
-     Returns:
-         A list where each element corresponds to the respective item in parameters.
-         - If a task executed successfully, its return value is stored.
-         - If a task raised an exception (including TimeoutError due to timeout),
-           the exception object itself is stored (unless raise_on_error is True).
-     """
-     # Materialize parameters to ensure consistent ordering and count
-     materialized_params = list(parameters)
-     if not materialized_params:
-         return []
-
-     with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
-         futures: List[concurrent.futures.Future] = []
-
-         # Submit all tasks
-         for params in materialized_params:
-             if isinstance(params, dict):
-                 future = executor.submit(function, **params)
-             elif isinstance(params, tuple):
-                 future = executor.submit(function, *params)
-             else:
-                 future = executor.submit(function, params)
-             futures.append(future)
-
-         # Collect results in order
-         results: List[Union[Return, Exception]] = [None] * len(futures)  # type: ignore
-         for i, future in enumerate(futures):
-             try:
-                 results[i] = future.result(timeout=timeout)
-             except Exception as e:
-                 if raise_on_error:
-                     raise
-                 results[i] = e
-
-         return results
-
-
- @overload
- def run_with_retry(
-     func: Callable[..., Return],
-     *,
-     max_attempts: int = 3,
-     initial_delay: float = 1.0,
-     max_delay: float = 60.0,
-     backoff: float = 2.0,
-     jitter: Optional[float] = None,
-     exceptions: Optional[Tuple[Type[Exception], ...]] = None,
-     reraise: bool = True,
-     before_retry: Optional[Callable[[Exception], None]] = None,
-     hook: Optional[Callable[[Exception, dict, dict], Tuple[dict, dict]]] = None,
- ) -> Callable[..., Return]: ...
-
-
- @overload
- def run_with_retry(
-     *,
-     max_attempts: int = 3,
-     initial_delay: float = 1.0,
-     max_delay: float = 60.0,
-     backoff: float = 2.0,
-     jitter: Optional[float] = None,
-     exceptions: Optional[Tuple[Type[Exception], ...]] = None,
-     reraise: bool = True,
-     before_retry: Optional[Callable[[Exception], None]] = None,
-     hook: Optional[Callable[[Exception, dict, dict], Tuple[dict, dict]]] = None,
- ) -> Callable[[Callable[..., Return]], Callable[..., Return]]: ...
-
-
- def run_with_retry(
-     func: Optional[Callable[..., Return]] = None,
-     *,
-     max_attempts: int = 3,
-     initial_delay: float = 1.0,
-     max_delay: float = 60.0,
-     backoff: float = 2.0,
-     jitter: Optional[float] = None,
-     exceptions: Optional[Tuple[Type[Exception], ...]] = None,
-     reraise: bool = True,
-     before_retry: Optional[Callable[[Exception], None]] = None,
-     hook: Optional[Callable[[Exception, dict, dict], Tuple[dict, dict]]] = None,
- ) -> Union[
-     Callable[..., Return], Callable[[Callable[..., Return]], Callable[..., Return]]
- ]:
-     """
-     Decorator that adds retry logic to functions using tenacity. Essential for robust parallel
-     processing when dealing with network calls, database operations, or other
-     operations that might fail transiently.
-
-     Can be used either as a decorator or as a function that takes a function as first argument.
-
-     Args:
-         func: The function to decorate (when used directly rather than as a decorator)
-         max_attempts: Maximum number of attempts (including the first try).
-         initial_delay: Initial delay between retries in seconds.
-         max_delay: Maximum delay between retries in seconds.
-         backoff: Multiplier for delay after each failed attempt.
-         jitter: If set, adds random jitter to delays between retries.
-         exceptions: Tuple of exception types to retry on. If None, retries on all exceptions.
-         reraise: Whether to reraise the last exception after all retries fail.
-         before_retry: Optional callback function to execute before each retry attempt.
-             Takes the exception as argument.
-         hook: Optional function to modify args/kwargs before retry.
-             Takes (exception, current_args_dict, current_kwargs_dict) and
-             returns (new_args_dict, new_kwargs_dict).
-
-     Example:
-         # As a decorator:
-         @run_with_retry(
-             max_attempts=3,
-             initial_delay=0.5,
-             max_delay=5.0,
-             backoff=2.0,
-             exceptions=(ConnectionError, TimeoutError),
-         )
-         def fetch_data(url: str, timeout: int = 30) -> dict:
-             return requests.get(url, timeout=timeout).json()
-
-         # As a function:
-         def fetch_data(url: str, timeout: int = 30) -> dict:
-             return requests.get(url, timeout=timeout).json()
-
-         fetch_with_retry = run_with_retry(fetch_data, max_attempts=3)
-     """
-
-     def decorator(f: Callable[..., Return]) -> Callable[..., Return]:
-         # Create retry configuration
-         wait_strategy = wait_exponential(
-             multiplier=initial_delay,
-             exp_base=backoff,
-             max=max_delay,
-         )
-
-         # Build retry arguments
-         retry_args = {
-             "stop": stop_after_attempt(max_attempts),
-             "wait": wait_strategy,
-             "retry": retry_if_exception_type(exceptions)
-             if exceptions
-             else retry_if_exception(lambda e: True),
-             "reraise": reraise,
-         }
-
-         if before_retry or hook:
-             # We need a stateful wrapper to handle callbacks with hooks
-             @functools.wraps(f)
-             def wrapper(*args, **kwargs) -> Return:
-                 # Store current args/kwargs that can be modified by hook
-                 current_args = args
-                 current_kwargs = kwargs
-
-                 def before_sleep_callback(retry_state):
-                     nonlocal current_args, current_kwargs
-
-                     # Only process if there was an exception
-                     if retry_state.outcome and retry_state.outcome.failed:
-                         exc = retry_state.outcome.exception()
-
-                         if before_retry:
-                             before_retry(exc)
-
-                         if hook:
-                             # Convert args to dict for hook
-                             args_dict = dict(enumerate(current_args))
-                             # Call hook to potentially modify arguments
-                             new_args_dict, new_kwargs = hook(
-                                 exc, args_dict, current_kwargs
-                             )
-                             # Convert back to args tuple
-                             current_args = tuple(
-                                 new_args_dict[i] for i in range(len(new_args_dict))
-                             )
-                             current_kwargs = new_kwargs
-
-                 # Create a wrapped function that uses the current args/kwargs
-                 @retry(**retry_args, before_sleep=before_sleep_callback)
-                 def retryable_func():
-                     return f(*current_args, **current_kwargs)
-
-                 return retryable_func()
-
-             return wrapper
-         else:
-             # Simple case without callbacks - use tenacity's retry decorator directly
-             return retry(**retry_args)(f)
-
-     if func is not None:
-         return decorator(func)
-     return decorator
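
The three helpers above compose naturally: `run_with_retry` hardens an individual call, while `run_parallel` fans it out over many argument sets. Below is a sketch based only on the signatures shown in this hunk; `flaky_fetch` and the URLs are placeholders, not package code.

```python
# Illustrative composition of the deleted 0.0.30 helpers.
import random

from hammad.runtime.run import run_parallel, run_with_retry


@run_with_retry(max_attempts=3, initial_delay=0.1, exceptions=(ConnectionError,))
def flaky_fetch(url: str) -> str:
    # Placeholder for a transiently failing network call.
    if random.random() < 0.5:
        raise ConnectionError(f"temporary failure for {url}")
    return f"ok: {url}"


results = run_parallel(
    flaky_fetch,
    ["http://example.com", "http://example.org"],  # one plain argument per call
    max_workers=2,
    timeout=10.0,
)
# Each entry is either a return value or the exception that exhausted its retries.
for item in results:
    print(item)
```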
hammad/service/__init__.py DELETED
@@ -1,49 +0,0 @@
- """hammad.service
-
- An optional extension to the `hammad-python` package, installable with:
-
- ```bash
- pip install hammad-python[service]
- ```
-
- TLDR: FastAPI is already so gosh darn simple, theres no need for server/client
- resources within this submodule. This module contains function/decorators for:
-
- - `@serve` - Easily launch functions as a FastAPI endpoint within a quick server.
- - `@serve_mcp` - Serve functions as MCP (Model Context Protocol) server tools.
- - `create_service` - Launch a FastAPI server from:
-     - A function
-     - A model-like object
-     - Pydantic models
-     - Dataclasses
-     - `hammad.base.model.Model`
-     - msgspec.Struct
- """
-
- from typing import TYPE_CHECKING
- from .._internal import create_getattr_importer
-
- if TYPE_CHECKING:
-     from .create import (
-         create_service,
-         async_create_service,
-     )
-     from .decorators import serve, serve_mcp
-
-
- __all__ = (
-     # hammad.service.create
-     "create_service",
-     "async_create_service",
-     # hammad.service.decorators
-     "serve",
-     "serve_mcp",
- )
-
-
- __getattr__ = create_getattr_importer(__all__)
-
-
- def __dir__() -> list[str]:
-     """Get the attributes of the create and decorators modules."""
-     return list(__all__)
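
The docstring above is the only part of the service API visible in this hunk; `serve`, `serve_mcp`, and `create_service` are defined in the (also deleted) `hammad/service/decorators.py` and `hammad/service/create.py`. The snippet below is therefore hypothetical usage inferred from that docstring alone, not the confirmed 0.0.30 API.

```python
# Hypothetical: assumes a bare @serve decorator and that create_service accepts
# a plain function, as the docstring suggests; exact signatures are not shown
# in this diff.
from hammad.service import create_service, serve


@serve
def add(a: int, b: int) -> int:
    return a + b


def health() -> dict:
    return {"status": "ok"}


create_service(health)  # assumption: launches a FastAPI server exposing health()
```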