hammad-python 0.0.11__py3-none-any.whl → 0.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hammad/__init__.py +169 -56
- hammad/_core/__init__.py +1 -0
- hammad/_core/_utils/__init__.py +4 -0
- hammad/_core/_utils/_import_utils.py +182 -0
- hammad/ai/__init__.py +59 -0
- hammad/ai/_utils.py +142 -0
- hammad/ai/completions/__init__.py +44 -0
- hammad/ai/completions/client.py +729 -0
- hammad/ai/completions/create.py +686 -0
- hammad/ai/completions/types.py +711 -0
- hammad/ai/completions/utils.py +374 -0
- hammad/ai/embeddings/__init__.py +35 -0
- hammad/ai/embeddings/client/__init__.py +1 -0
- hammad/ai/embeddings/client/base_embeddings_client.py +26 -0
- hammad/ai/embeddings/client/fastembed_text_embeddings_client.py +200 -0
- hammad/ai/embeddings/client/litellm_embeddings_client.py +288 -0
- hammad/ai/embeddings/create.py +159 -0
- hammad/ai/embeddings/types.py +69 -0
- hammad/base/__init__.py +35 -0
- hammad/{based → base}/fields.py +23 -23
- hammad/{based → base}/model.py +124 -14
- hammad/base/utils.py +280 -0
- hammad/cache/__init__.py +30 -12
- hammad/cache/base_cache.py +181 -0
- hammad/cache/cache.py +169 -0
- hammad/cache/decorators.py +261 -0
- hammad/cache/file_cache.py +80 -0
- hammad/cache/ttl_cache.py +74 -0
- hammad/cli/__init__.py +10 -2
- hammad/cli/{styles/animations.py → animations.py} +79 -23
- hammad/cli/{plugins/__init__.py → plugins.py} +85 -90
- hammad/cli/styles/__init__.py +50 -0
- hammad/cli/styles/settings.py +4 -0
- hammad/configuration/__init__.py +35 -0
- hammad/{data/types/files → configuration}/configuration.py +96 -7
- hammad/data/__init__.py +14 -26
- hammad/data/collections/__init__.py +4 -2
- hammad/data/collections/collection.py +300 -75
- hammad/data/collections/vector_collection.py +118 -12
- hammad/data/databases/__init__.py +2 -2
- hammad/data/databases/database.py +383 -32
- hammad/json/__init__.py +2 -2
- hammad/logging/__init__.py +13 -5
- hammad/logging/decorators.py +404 -2
- hammad/logging/logger.py +442 -22
- hammad/multimodal/__init__.py +24 -0
- hammad/{data/types/files → multimodal}/audio.py +21 -6
- hammad/{data/types/files → multimodal}/image.py +5 -5
- hammad/multithreading/__init__.py +304 -0
- hammad/pydantic/__init__.py +2 -2
- hammad/pydantic/converters.py +1 -1
- hammad/pydantic/models/__init__.py +2 -2
- hammad/text/__init__.py +59 -14
- hammad/text/converters.py +723 -0
- hammad/text/{utils/markdown/formatting.py → markdown.py} +25 -23
- hammad/text/text.py +12 -14
- hammad/types/__init__.py +11 -0
- hammad/{data/types/files → types}/file.py +18 -18
- hammad/typing/__init__.py +138 -84
- hammad/web/__init__.py +3 -2
- hammad/web/models.py +245 -0
- hammad/web/search/client.py +75 -23
- hammad/web/utils.py +14 -5
- hammad/yaml/__init__.py +2 -2
- hammad/yaml/converters.py +1 -1
- {hammad_python-0.0.11.dist-info → hammad_python-0.0.13.dist-info}/METADATA +4 -1
- hammad_python-0.0.13.dist-info/RECORD +85 -0
- hammad/based/__init__.py +0 -52
- hammad/based/utils.py +0 -455
- hammad/cache/_cache.py +0 -746
- hammad/data/types/__init__.py +0 -33
- hammad/data/types/files/__init__.py +0 -1
- hammad/data/types/files/document.py +0 -195
- hammad/text/utils/__init__.py +0 -1
- hammad/text/utils/converters.py +0 -229
- hammad/text/utils/markdown/__init__.py +0 -1
- hammad/text/utils/markdown/converters.py +0 -506
- hammad_python-0.0.11.dist-info/RECORD +0 -65
- {hammad_python-0.0.11.dist-info → hammad_python-0.0.13.dist-info}/WHEEL +0 -0
- {hammad_python-0.0.11.dist-info → hammad_python-0.0.13.dist-info}/licenses/LICENSE +0 -0
hammad/cache/_cache.py
DELETED
@@ -1,746 +0,0 @@
|
|
1
|
-
"""hammad.cache._cache
|
2
|
-
|
3
|
-
Contains helpful resources for creating simple cache systems, and
|
4
|
-
decorators that implement "automatic" hashing & caching of function calls.
|
5
|
-
"""
|
6
|
-
|
7
|
-
from __future__ import annotations
|
8
|
-
|
9
|
-
import hashlib
|
10
|
-
import os
|
11
|
-
from functools import wraps
|
12
|
-
import inspect
|
13
|
-
import pickle
|
14
|
-
import time
|
15
|
-
from dataclasses import dataclass
|
16
|
-
from collections import OrderedDict
|
17
|
-
from pathlib import Path
|
18
|
-
from typing import (
|
19
|
-
Any,
|
20
|
-
Callable,
|
21
|
-
TypeVar,
|
22
|
-
Tuple,
|
23
|
-
Optional,
|
24
|
-
overload,
|
25
|
-
ParamSpec,
|
26
|
-
Literal,
|
27
|
-
get_args,
|
28
|
-
TypeAlias,
|
29
|
-
Union,
|
30
|
-
overload,
|
31
|
-
)
|
32
|
-
|
33
|
-
__all__ = [
|
34
|
-
"cached",
|
35
|
-
"auto_cached",
|
36
|
-
"Cache",
|
37
|
-
"TTLCache",
|
38
|
-
"DiskCache",
|
39
|
-
"CacheType",
|
40
|
-
"CacheParams",
|
41
|
-
"CacheReturn",
|
42
|
-
"create_cache",
|
43
|
-
]
|
44
|
-
|
45
|
-
|
46
|
-
# -----------------------------------------------------------------------------
|
47
|
-
# TYPES
|
48
|
-
# -----------------------------------------------------------------------------
|
49
|
-
|
50
|
-
CacheType: TypeAlias = Literal["ttl", "disk"]
|
51
|
-
"""Type of caches that can be created using `hammad`.
|
52
|
-
|
53
|
-
- `"ttl"`: Time-to-live cache.
|
54
|
-
- `"disk"`: Disk-based cache.
|
55
|
-
"""
|
56
|
-
|
57
|
-
CacheParams = ParamSpec("CacheParams")
|
58
|
-
"""Parameter specification for cache functions."""
|
59
|
-
|
60
|
-
CacheReturn = TypeVar("CacheReturn")
|
61
|
-
"""Return type for cache functions."""
|
62
|
-
|
63
|
-
|
64
|
-
# -----------------------------------------------------------------------------
|
65
|
-
# GLOBAL (Internal) CACHE
|
66
|
-
# -----------------------------------------------------------------------------
|
67
|
-
|
68
|
-
|
69
|
-
_hammad_CACHE: None | BaseCache = None
"""Internal cache for the `hammad` package. Instantiated when needed."""


def _get_cache() -> BaseCache:
    """Return the module-wide cache, creating a default TTLCache on demand."""
    global _hammad_CACHE
    if _hammad_CACHE is None:
        # Defaults match TTLCache's own: up to 1000 entries, one-hour expiry.
        _hammad_CACHE = TTLCache(maxsize=1000, ttl=3600)
    return _hammad_CACHE
|
79
|
-
|
80
|
-
|
81
|
-
# -----------------------------------------------------------------------------
|
82
|
-
# BASE
|
83
|
-
# -----------------------------------------------------------------------------
|
84
|
-
|
85
|
-
|
86
|
-
@dataclass
class BaseCache:
    """Base class for all caches created using `hammad`.

    Subclasses must implement ``__contains__``, ``__getitem__``,
    ``__setitem__`` and ``clear``. ``make_hashable`` provides a stable,
    content-based key derivation shared by every cache backend.
    """

    type: CacheType
    """Type of cache."""

    def __post_init__(self) -> None:
        """Validate that `type` is one of the supported cache kinds."""
        if self.type not in get_args(CacheType):
            raise ValueError(f"Invalid cache type: {self.type}")

    def __contains__(self, key: str) -> bool:
        """Check if key exists in cache."""
        raise NotImplementedError("Subclasses must implement __contains__")

    def __getitem__(self, key: str) -> Any:
        """Get value for key."""
        raise NotImplementedError("Subclasses must implement __getitem__")

    def __setitem__(self, key: str, value: Any) -> None:
        """Set value for key."""
        raise NotImplementedError("Subclasses must implement __setitem__")

    def get(self, key: str, default: Any = None) -> Any:
        """Get value with default if key doesn't exist."""
        try:
            return self[key]
        except KeyError:
            return default

    def clear(self) -> None:
        """Clear all cached items."""
        raise NotImplementedError("Subclasses must implement clear")

    def _hash_repr(self, data: Any) -> str:
        """Recursively build a deterministic string representation of *data*.

        Hoisted out of ``make_hashable`` (the old code re-created this as a
        nested closure on every call). The produced strings are identical to
        the previous implementation, so derived SHA-256 keys are unchanged.
        """
        if data is None:
            return "null"

        # bool must be tested before int: bool is an int subclass.
        if isinstance(data, bool):
            return f"bool:{data}"
        if isinstance(data, int):
            return f"int:{data}"
        if isinstance(data, float):
            if data != data:  # NaN is the only value unequal to itself
                return "float:nan"
            if data == float("inf"):
                return "float:inf"
            if data == float("-inf"):
                return "float:-inf"
            return f"float:{data}"
        if isinstance(data, str):
            return f"str:{data}"
        if isinstance(data, bytes):
            return f"bytes:{data.hex()}"

        # Ordered collections keep element order; sets/dicts are sorted so
        # equal contents always hash equally regardless of insertion order.
        if isinstance(data, (list, tuple)):
            collection_type = "list" if isinstance(data, list) else "tuple"
            items = ",".join(self._hash_repr(item) for item in data)
            return f"{collection_type}:[{items}]"

        if isinstance(data, set):
            try:
                ordered = sorted(data, key=lambda x: str(x))
            except TypeError:
                # Fallback for elements whose str() values aren't comparable.
                ordered = sorted(data, key=lambda x: (type(x).__name__, str(x)))
            items = ",".join(self._hash_repr(item) for item in ordered)
            return f"set:{{{items}}}"

        if isinstance(data, dict):
            try:
                ordered_items = sorted(data.items(), key=lambda x: str(x[0]))
            except TypeError:
                # Fallback for non-comparable keys.
                ordered_items = sorted(
                    data.items(), key=lambda x: (type(x[0]).__name__, str(x[0]))
                )
            pairs = ",".join(
                f"{self._hash_repr(k)}:{self._hash_repr(v)}"
                for k, v in ordered_items
            )
            return f"dict:{{{pairs}}}"

        if isinstance(data, type):
            module = getattr(data, "__module__", "builtins")
            qualname = getattr(data, "__qualname__", data.__name__)
            return f"type:{module}.{qualname}"

        if callable(data):
            module = getattr(data, "__module__", "unknown")
            qualname = getattr(
                data, "__qualname__", getattr(data, "__name__", "unknown_callable")
            )
            try:
                # Include normalized source so edited functions hash differently.
                source = inspect.getsource(data)
                normalized_source = " ".join(source.split())
                return f"callable:{module}.{qualname}:{hash(normalized_source)}"
            except (OSError, TypeError, IndentationError):
                # Builtins / dynamically created callables have no source.
                return f"callable:{module}.{qualname}"

        if hasattr(data, "__dict__"):
            class_info = (
                f"{data.__class__.__module__}.{data.__class__.__qualname__}"
            )
            obj_dict = {"__class__": class_info, **data.__dict__}
            return f"object:{self._hash_repr(obj_dict)}"

        if hasattr(data, "__slots__"):
            class_info = (
                f"{data.__class__.__module__}.{data.__class__.__qualname__}"
            )
            slot_dict = {
                slot: getattr(data, slot, None)
                for slot in data.__slots__
                if hasattr(data, slot)
            }
            obj_dict = {"__class__": class_info, **slot_dict}
            return f"slotted_object:{self._hash_repr(obj_dict)}"

        try:
            repr_str = repr(data)
            return f"repr:{type(data).__name__}:{repr_str}"
        except Exception:
            # Ultimate fallback; id() makes this run-specific but never fails.
            return f"unknown:{type(data).__name__}:{id(data)}"

    def make_hashable(self, obj: Any) -> str:
        """
        Convert any object to a stable hash string.

        Uses SHA-256 to generate consistent hash representations.
        Handles nested structures recursively.

        Args:
            obj: Object to hash

        Returns:
            Hexadecimal hash string
        """
        return hashlib.sha256(
            self._hash_repr(obj).encode("utf-8", errors="surrogatepass")
        ).hexdigest()
|
238
|
-
|
239
|
-
|
240
|
-
# -----------------------------------------------------------------------------
|
241
|
-
# TTL CACHE
|
242
|
-
# -----------------------------------------------------------------------------
|
243
|
-
|
244
|
-
|
245
|
-
@dataclass
class TTLCache(BaseCache):
    """
    Thread-safe TTL cache implementation with LRU eviction.

    Uses OrderedDict for efficient LRU tracking and automatic cleanup
    of expired entries on access.
    """

    maxsize: int = 1000
    ttl: int = 3600
    type: Literal["ttl"] = "ttl"

    def __post_init__(self):
        """Initialize TTL cache after dataclass initialization."""
        super().__post_init__()
        self._cache: OrderedDict[str, Tuple[Any, float]] = OrderedDict()

    def __contains__(self, key: str) -> bool:
        """Report membership; stale entries are evicted on access."""
        entry = self._cache.get(key)
        if entry is None:
            return False
        _value, stamped_at = entry
        if time.time() - stamped_at > self.ttl:
            # Expired: drop the entry and report absence.
            del self._cache[key]
            return False
        # Fresh hit: refresh its LRU position.
        self._cache.move_to_end(key)
        return True

    def __getitem__(self, key: str) -> Any:
        """Return the value for *key*; raises KeyError if absent or expired."""
        if key not in self:
            raise KeyError(key)
        return self._cache[key][0]

    def __setitem__(self, key: str, value: Any) -> None:
        """Store *value* stamped with the current time, evicting as needed."""
        if len(self._cache) >= self.maxsize and key not in self._cache:
            self._cleanup_expired()
        if len(self._cache) >= self.maxsize:
            # Still at capacity after purging: evict least recently used.
            self._cache.popitem(last=False)
        self._cache[key] = (value, time.time())
        self._cache.move_to_end(key)

    def _cleanup_expired(self) -> None:
        """Drop every entry whose timestamp is older than the TTL."""
        now = time.time()
        stale = [
            k for k, (_, stamped_at) in list(self._cache.items())
            if now - stamped_at > self.ttl
        ]
        for k in stale:
            self._cache.pop(k, None)

    def clear(self) -> None:
        """Clear all cached items."""
        self._cache.clear()
|
308
|
-
|
309
|
-
|
310
|
-
# -----------------------------------------------------------------------------
|
311
|
-
# DISK CACHE
|
312
|
-
# -----------------------------------------------------------------------------
|
313
|
-
|
314
|
-
|
315
|
-
@dataclass
class DiskCache(BaseCache):
    """
    Persistent disk-based cache that stores data in a directory.

    Values are serialized with pickle, one file per key. If no cache
    directory is specified, the current working directory's ``__pycache__``
    folder is used.

    NOTE(review): pickle deserialization runs arbitrary code — this cache
    directory must never be writable by untrusted parties.
    """

    location: Optional[str] = None
    type: Literal["disk"] = "disk"

    def __post_init__(self):
        """Resolve the cache directory and ensure it exists."""
        super().__post_init__()
        if self.location is None:
            self.location = os.path.join(os.getcwd(), "__pycache__")

        self.location_path = Path(self.location)
        # parents=True so a nested, not-yet-existing location also works
        # (previously this raised FileNotFoundError for nested paths).
        self.location_path.mkdir(parents=True, exist_ok=True)

    def _get_cache_path(self, key: str) -> Path:
        """Map *key* to a filesystem-safe pickle path inside the cache dir."""
        safe_key = hashlib.sha256(key.encode("utf-8")).hexdigest()
        return self.location_path / f"cache_{safe_key}.pkl"

    def __contains__(self, key: str) -> bool:
        """Check if key exists in cache."""
        return self._get_cache_path(key).exists()

    def __getitem__(self, key: str) -> Any:
        """Load and return the value for *key*; corrupt entries are purged."""
        cache_path = self._get_cache_path(key)
        if not cache_path.exists():
            raise KeyError(key)

        try:
            with open(cache_path, "rb") as f:
                return pickle.load(f)
        except (pickle.PickleError, OSError) as e:
            # Unreadable/corrupt entry: remove it and treat as a miss.
            cache_path.unlink(missing_ok=True)
            raise KeyError(key) from e

    def __setitem__(self, key: str, value: Any) -> None:
        """Serialize *value* under *key*; partial writes are cleaned up."""
        cache_path = self._get_cache_path(key)
        try:
            with open(cache_path, "wb") as f:
                pickle.dump(value, f)
        except (pickle.PickleError, OSError) as e:
            cache_path.unlink(missing_ok=True)
            raise RuntimeError(f"Failed to cache value for key '{key}': {e}") from e

    def clear(self) -> None:
        """Delete every cache file this cache created."""
        for cache_file in self.location_path.glob("cache_*.pkl"):
            try:
                cache_file.unlink()
            except OSError:
                # Best-effort cleanup: a vanished or locked file is not fatal.
                pass
|
375
|
-
|
376
|
-
|
377
|
-
# -----------------------------------------------------------------------------
|
378
|
-
# Primary `Cache` Class -- Used For Factory Initialization
|
379
|
-
# -----------------------------------------------------------------------------
|
380
|
-
|
381
|
-
|
382
|
-
class Cache:
    """
    Helper factory class for creating cache instances.

    Example usage:
        ttl_cache = Cache(type="ttl", maxsize=100, ttl=60)
        disk_cache = Cache(type="disk", location="/tmp/cache")
    """

    @overload
    def __new__(
        cls,
        type: Literal["ttl"] = "ttl",
        *,
        maxsize: Optional[int] = None,
        ttl: Optional[int] = None,
    ) -> TTLCache:
        """
        Create a new TTL (Time To Live) cache instance.

        Args:
            type: The type of cache to create.
            maxsize: The maximum number of items to store in the cache.
            ttl: The time to live for items in the cache.

        Returns:
            A new TTL cache instance.
        """
        ...

    @overload
    def __new__(
        cls, type: Literal["disk"], *, location: Optional[str] = None
    ) -> DiskCache:
        """
        Create a new disk cache instance.

        Args:
            type: The type of cache to create.
            location: The directory to store the cache files.

        Returns:
            A new disk cache instance.
        """
        ...

    def __new__(cls, type: CacheType = "ttl", **kwargs: Any) -> BaseCache:
        """
        Create a new cache instance of the requested *type*.

        Keyword arguments not accepted by the concrete cache class, and
        explicit ``None`` values, are dropped so each class keeps its own
        defaults.
        """
        # Table-driven dispatch: per-type constructor + accepted keywords.
        accepted = {"ttl": {"maxsize", "ttl"}, "disk": {"location"}}
        factories = {"ttl": TTLCache, "disk": DiskCache}

        if type not in factories:
            supported_types_tuple = get_args(CacheType)
            raise ValueError(
                f"Unsupported cache type: {type}. Supported types are: {supported_types_tuple}"
            )

        ctor_kwargs = {
            name: value
            for name, value in kwargs.items()
            if name in accepted[type] and value is not None
        }
        return factories[type](type=type, **ctor_kwargs)
|
453
|
-
|
454
|
-
|
455
|
-
# -----------------------------------------------------------------------------
|
456
|
-
# Decorators
|
457
|
-
# -----------------------------------------------------------------------------
|
458
|
-
|
459
|
-
|
460
|
-
@overload
|
461
|
-
def cached(
|
462
|
-
func: Callable[CacheParams, CacheReturn],
|
463
|
-
) -> Callable[CacheParams, CacheReturn]:
|
464
|
-
"""Decorator with automatic key generation, using the global CACHE."""
|
465
|
-
...
|
466
|
-
|
467
|
-
|
468
|
-
@overload
|
469
|
-
def cached(
|
470
|
-
*,
|
471
|
-
key: Optional[Callable[..., str]] = None,
|
472
|
-
ttl: Optional[int] = None,
|
473
|
-
maxsize: Optional[int] = None,
|
474
|
-
cache: Optional[BaseCache] = None,
|
475
|
-
) -> Callable[[Callable[CacheParams, CacheReturn]], Callable[CacheParams, CacheReturn]]:
|
476
|
-
"""Decorator with custom key function and/or cache settings."""
|
477
|
-
...
|
478
|
-
|
479
|
-
|
480
|
-
def cached(
|
481
|
-
func: Optional[Callable[CacheParams, CacheReturn]] = None,
|
482
|
-
*,
|
483
|
-
key: Optional[Callable[..., str]] = None,
|
484
|
-
ttl: Optional[int] = None,
|
485
|
-
maxsize: Optional[int] = None,
|
486
|
-
cache: Optional[BaseCache] = None,
|
487
|
-
) -> Union[
|
488
|
-
Callable[CacheParams, CacheReturn],
|
489
|
-
Callable[[Callable[CacheParams, CacheReturn]], Callable[CacheParams, CacheReturn]],
|
490
|
-
]:
|
491
|
-
"""
|
492
|
-
Flexible caching decorator that preserves type hints and signatures.
|
493
|
-
|
494
|
-
Can be used with or without arguments:
|
495
|
-
- `@cached`: Uses automatic key generation with the global `hammad.cache.CACHE`.
|
496
|
-
- `@cached(key=custom_key_func)`: Uses a custom key generation function.
|
497
|
-
- `@cached(ttl=300, maxsize=50)`: Creates a new `TTLCache` instance specifically
|
498
|
-
for the decorated function with the given TTL and maxsize.
|
499
|
-
- `@cached(cache=my_cache_instance)`: Uses a user-provided cache instance.
|
500
|
-
|
501
|
-
Args:
|
502
|
-
func: The function to be cached (implicitly passed when used as `@cached`).
|
503
|
-
key: An optional function that takes the same arguments as `func` and
|
504
|
-
returns a string key. If `None`, a key is automatically generated.
|
505
|
-
ttl: Optional. Time-to-live in seconds. If `cache` is not provided and `ttl`
|
506
|
-
or `maxsize` is set, a new `TTLCache` is created for this function using
|
507
|
-
these settings.
|
508
|
-
maxsize: Optional. Maximum number of items in the cache. See `ttl`.
|
509
|
-
cache: Optional. A specific cache instance (conforming to `BaseCache`)
|
510
|
-
to use. If provided, `ttl` and `maxsize` arguments (intended for
|
511
|
-
creating a new per-function cache) are ignored, as the provided
|
512
|
-
cache instance manages its own lifecycle and capacity.
|
513
|
-
|
514
|
-
Returns:
|
515
|
-
The decorated function with caching capabilities.
|
516
|
-
"""
|
517
|
-
effective_cache: BaseCache = _get_cache()
|
518
|
-
|
519
|
-
if cache is not None:
|
520
|
-
effective_cache = cache
|
521
|
-
elif ttl is not None or maxsize is not None:
|
522
|
-
default_maxsize = _get_cache().maxsize
|
523
|
-
default_ttl = _get_cache().ttl
|
524
|
-
|
525
|
-
effective_cache = TTLCache(
|
526
|
-
type="ttl",
|
527
|
-
maxsize=maxsize if maxsize is not None else default_maxsize,
|
528
|
-
ttl=ttl if ttl is not None else default_ttl,
|
529
|
-
)
|
530
|
-
else:
|
531
|
-
effective_cache = _get_cache()
|
532
|
-
|
533
|
-
def decorator(
|
534
|
-
f_to_decorate: Callable[CacheParams, CacheReturn],
|
535
|
-
) -> Callable[CacheParams, CacheReturn]:
|
536
|
-
key_func_to_use: Callable[..., str]
|
537
|
-
if key is None:
|
538
|
-
sig = inspect.signature(f_to_decorate)
|
539
|
-
|
540
|
-
def auto_key_func(
|
541
|
-
*args: CacheParams.args, **kwargs: CacheParams.kwargs
|
542
|
-
) -> str:
|
543
|
-
bound_args = sig.bind(*args, **kwargs)
|
544
|
-
bound_args.apply_defaults()
|
545
|
-
|
546
|
-
key_parts = []
|
547
|
-
for param_name, param_value in bound_args.arguments.items():
|
548
|
-
key_parts.append(
|
549
|
-
f"{param_name}={effective_cache.make_hashable(param_value)}"
|
550
|
-
)
|
551
|
-
|
552
|
-
return f"{f_to_decorate.__module__}.{f_to_decorate.__qualname__}({','.join(key_parts)})"
|
553
|
-
|
554
|
-
key_func_to_use = auto_key_func
|
555
|
-
else:
|
556
|
-
key_func_to_use = key
|
557
|
-
|
558
|
-
@wraps(f_to_decorate)
|
559
|
-
def wrapper(
|
560
|
-
*args: CacheParams.args, **kwargs: CacheParams.kwargs
|
561
|
-
) -> CacheReturn:
|
562
|
-
try:
|
563
|
-
cache_key_value = key_func_to_use(*args, **kwargs)
|
564
|
-
|
565
|
-
if cache_key_value in effective_cache:
|
566
|
-
return effective_cache[cache_key_value]
|
567
|
-
|
568
|
-
result = f_to_decorate(*args, **kwargs)
|
569
|
-
effective_cache[cache_key_value] = result
|
570
|
-
return result
|
571
|
-
|
572
|
-
except Exception:
|
573
|
-
return f_to_decorate(*args, **kwargs)
|
574
|
-
|
575
|
-
setattr(wrapper, "__wrapped__", f_to_decorate)
|
576
|
-
return wrapper
|
577
|
-
|
578
|
-
if func is None:
|
579
|
-
return decorator
|
580
|
-
else:
|
581
|
-
return decorator(func)
|
582
|
-
|
583
|
-
|
584
|
-
def auto_cached(
    *,
    ignore: Optional[Tuple[str, ...]] = None,
    include: Optional[Tuple[str, ...]] = None,
    ttl: Optional[int] = None,
    maxsize: Optional[int] = None,
    cache: Optional[BaseCache] = None,
) -> Callable[[Callable[CacheParams, CacheReturn]], Callable[CacheParams, CacheReturn]]:
    """
    Advanced caching decorator with automatic parameter selection for key
    generation.

    Builds cache keys from a chosen subset of the decorated function's
    parameters and delegates the actual caching to the `cached` decorator.

    Args:
        ignore: Parameter names to exclude from key generation.
            Mutually exclusive with `include`.
        include: Parameter names to exclusively use for key generation;
            all others are ignored. Mutually exclusive with `ignore`.
        ttl: Optional time-to-live in seconds, forwarded to `cached`.
        maxsize: Optional maximum cache size, forwarded to `cached`.
        cache: Optional cache instance (conforming to `BaseCache`),
            forwarded to `cached`.

    Returns:
        A decorator that caches the results of the decorated function.

    Example:
        ```python
        @auto_cached(ignore=('debug_mode', 'logger'))
        def fetch_user_data(user_id: int, debug_mode: bool = False, logger=None):
            ...

        @auto_cached(include=('url',), ttl=30)
        def fetch_url_content(url: str, timeout: int = 10):
            ...
        ```
    """
    if ignore and include:
        raise ValueError("Cannot specify both 'ignore' and 'include' in auto_cached")

    def actual_decorator(
        func_to_decorate: Callable[CacheParams, CacheReturn],
    ) -> Callable[CacheParams, CacheReturn]:
        sig = inspect.signature(func_to_decorate)

        def auto_key_generator(
            *args: CacheParams.args, **kwargs: CacheParams.kwargs
        ) -> str:
            """Derive the cache key from the selected subset of arguments."""
            bound = sig.bind(*args, **kwargs)
            bound.apply_defaults()
            selected = dict(bound.arguments)

            if include is not None:
                selected = {n: v for n, v in selected.items() if n in include}
            elif ignore is not None:
                selected = {n: v for n, v in selected.items() if n not in ignore}

            # Hash values with the backend that will store them.
            backend = cache if cache is not None else _get_cache()
            key_parts = [
                f"{n}={backend.make_hashable(v)}"
                for n, v in sorted(selected.items())
            ]
            return f"{func_to_decorate.__module__}.{func_to_decorate.__qualname__}({','.join(key_parts)})"

        return cached(key=auto_key_generator, ttl=ttl, maxsize=maxsize, cache=cache)(
            func_to_decorate
        )

    return actual_decorator
|
677
|
-
|
678
|
-
|
679
|
-
# -----------------------------------------------------------------------------
|
680
|
-
# CACHE FACTORY
|
681
|
-
# -----------------------------------------------------------------------------
|
682
|
-
|
683
|
-
|
684
|
-
@overload
def create_cache(
    cache_type: Literal["ttl"], *, maxsize: int = 128, ttl: Optional[float] = None
) -> TTLCache: ...


@overload
def create_cache(
    cache_type: Literal["disk"],
    *,
    cache_dir: Optional[Union[str, Path]] = None,
    maxsize: int = 128,
) -> DiskCache: ...


@overload
def create_cache(cache_type: CacheType, **kwargs: Any) -> BaseCache: ...


def create_cache(cache_type: CacheType, **kwargs: Any) -> BaseCache:
    """
    Factory function to create cache instances of different types.

    Args:
        cache_type: The type of cache to create. Can be "ttl" or "disk".
        **kwargs: Additional keyword arguments specific to the cache type:
            - "ttl": `maxsize` (default 128) and `ttl` (seconds; when omitted
              or None, `TTLCache`'s own default applies).
            - "disk": `cache_dir` for the storage directory. `maxsize` is
              accepted for API symmetry but ignored — `DiskCache` is unbounded.

    Returns:
        A cache instance of the specified type.

    Raises:
        ValueError: If an unsupported cache type is provided.
        TypeError: If unexpected keyword arguments are passed.

    Examples:
        ```python
        # Create a TTL cache with custom settings
        ttl_cache = create_cache("ttl", maxsize=256, ttl=300)

        # Create a disk cache with custom directory
        disk_cache = create_cache("disk", cache_dir="/tmp/my_cache")
        ```
    """
    if cache_type == "ttl":
        maxsize = kwargs.pop("maxsize", 128)
        ttl = kwargs.pop("ttl", None)
        if kwargs:
            raise TypeError(
                f"Unexpected keyword arguments for TTL cache: {list(kwargs.keys())}"
            )
        # Only forward ttl when given: the old code always passed ttl=None,
        # overriding TTLCache's default and breaking expiry comparisons
        # (`time.time() - ts <= None` raises TypeError on every lookup).
        if ttl is None:
            return TTLCache(maxsize=maxsize)
        return TTLCache(maxsize=maxsize, ttl=ttl)
    elif cache_type == "disk":
        cache_dir = kwargs.pop("cache_dir", None)
        kwargs.pop("maxsize", None)  # accepted but unused: DiskCache is unbounded
        if kwargs:
            raise TypeError(
                f"Unexpected keyword arguments for disk cache: {list(kwargs.keys())}"
            )
        # DiskCache's constructor parameter is `location`; the old code passed
        # the nonexistent `cache_dir` and `maxsize` kwargs, so every
        # create_cache("disk", ...) call raised TypeError.
        return DiskCache(location=str(cache_dir) if cache_dir is not None else None)
    else:
        valid_types = get_args(CacheType)
        raise ValueError(
            f"Unsupported cache type: {cache_type}. Valid types are: {valid_types}"
        )
|