hammad-python 0.0.29__py3-none-any.whl → 0.0.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. ham/__init__.py +10 -0
  2. {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/METADATA +6 -32
  3. hammad_python-0.0.31.dist-info/RECORD +6 -0
  4. hammad/__init__.py +0 -84
  5. hammad/_internal.py +0 -256
  6. hammad/_main.py +0 -226
  7. hammad/cache/__init__.py +0 -40
  8. hammad/cache/base_cache.py +0 -181
  9. hammad/cache/cache.py +0 -169
  10. hammad/cache/decorators.py +0 -261
  11. hammad/cache/file_cache.py +0 -80
  12. hammad/cache/ttl_cache.py +0 -74
  13. hammad/cli/__init__.py +0 -33
  14. hammad/cli/animations.py +0 -573
  15. hammad/cli/plugins.py +0 -867
  16. hammad/cli/styles/__init__.py +0 -55
  17. hammad/cli/styles/settings.py +0 -139
  18. hammad/cli/styles/types.py +0 -358
  19. hammad/cli/styles/utils.py +0 -634
  20. hammad/data/__init__.py +0 -90
  21. hammad/data/collections/__init__.py +0 -49
  22. hammad/data/collections/collection.py +0 -326
  23. hammad/data/collections/indexes/__init__.py +0 -37
  24. hammad/data/collections/indexes/qdrant/__init__.py +0 -1
  25. hammad/data/collections/indexes/qdrant/index.py +0 -723
  26. hammad/data/collections/indexes/qdrant/settings.py +0 -94
  27. hammad/data/collections/indexes/qdrant/utils.py +0 -210
  28. hammad/data/collections/indexes/tantivy/__init__.py +0 -1
  29. hammad/data/collections/indexes/tantivy/index.py +0 -426
  30. hammad/data/collections/indexes/tantivy/settings.py +0 -40
  31. hammad/data/collections/indexes/tantivy/utils.py +0 -176
  32. hammad/data/configurations/__init__.py +0 -35
  33. hammad/data/configurations/configuration.py +0 -564
  34. hammad/data/models/__init__.py +0 -50
  35. hammad/data/models/extensions/__init__.py +0 -4
  36. hammad/data/models/extensions/pydantic/__init__.py +0 -42
  37. hammad/data/models/extensions/pydantic/converters.py +0 -759
  38. hammad/data/models/fields.py +0 -546
  39. hammad/data/models/model.py +0 -1078
  40. hammad/data/models/utils.py +0 -280
  41. hammad/data/sql/__init__.py +0 -24
  42. hammad/data/sql/database.py +0 -576
  43. hammad/data/sql/types.py +0 -127
  44. hammad/data/types/__init__.py +0 -75
  45. hammad/data/types/file.py +0 -431
  46. hammad/data/types/multimodal/__init__.py +0 -36
  47. hammad/data/types/multimodal/audio.py +0 -200
  48. hammad/data/types/multimodal/image.py +0 -182
  49. hammad/data/types/text.py +0 -1308
  50. hammad/formatting/__init__.py +0 -33
  51. hammad/formatting/json/__init__.py +0 -27
  52. hammad/formatting/json/converters.py +0 -158
  53. hammad/formatting/text/__init__.py +0 -63
  54. hammad/formatting/text/converters.py +0 -723
  55. hammad/formatting/text/markdown.py +0 -131
  56. hammad/formatting/yaml/__init__.py +0 -26
  57. hammad/formatting/yaml/converters.py +0 -5
  58. hammad/genai/__init__.py +0 -217
  59. hammad/genai/a2a/__init__.py +0 -32
  60. hammad/genai/a2a/workers.py +0 -552
  61. hammad/genai/agents/__init__.py +0 -59
  62. hammad/genai/agents/agent.py +0 -1973
  63. hammad/genai/agents/run.py +0 -1024
  64. hammad/genai/agents/types/__init__.py +0 -42
  65. hammad/genai/agents/types/agent_context.py +0 -13
  66. hammad/genai/agents/types/agent_event.py +0 -128
  67. hammad/genai/agents/types/agent_hooks.py +0 -220
  68. hammad/genai/agents/types/agent_messages.py +0 -31
  69. hammad/genai/agents/types/agent_response.py +0 -125
  70. hammad/genai/agents/types/agent_stream.py +0 -327
  71. hammad/genai/graphs/__init__.py +0 -125
  72. hammad/genai/graphs/_utils.py +0 -190
  73. hammad/genai/graphs/base.py +0 -1828
  74. hammad/genai/graphs/plugins.py +0 -316
  75. hammad/genai/graphs/types.py +0 -638
  76. hammad/genai/models/__init__.py +0 -1
  77. hammad/genai/models/embeddings/__init__.py +0 -43
  78. hammad/genai/models/embeddings/model.py +0 -226
  79. hammad/genai/models/embeddings/run.py +0 -163
  80. hammad/genai/models/embeddings/types/__init__.py +0 -37
  81. hammad/genai/models/embeddings/types/embedding_model_name.py +0 -75
  82. hammad/genai/models/embeddings/types/embedding_model_response.py +0 -76
  83. hammad/genai/models/embeddings/types/embedding_model_run_params.py +0 -66
  84. hammad/genai/models/embeddings/types/embedding_model_settings.py +0 -47
  85. hammad/genai/models/language/__init__.py +0 -57
  86. hammad/genai/models/language/model.py +0 -1098
  87. hammad/genai/models/language/run.py +0 -878
  88. hammad/genai/models/language/types/__init__.py +0 -40
  89. hammad/genai/models/language/types/language_model_instructor_mode.py +0 -47
  90. hammad/genai/models/language/types/language_model_messages.py +0 -28
  91. hammad/genai/models/language/types/language_model_name.py +0 -239
  92. hammad/genai/models/language/types/language_model_request.py +0 -127
  93. hammad/genai/models/language/types/language_model_response.py +0 -217
  94. hammad/genai/models/language/types/language_model_response_chunk.py +0 -56
  95. hammad/genai/models/language/types/language_model_settings.py +0 -89
  96. hammad/genai/models/language/types/language_model_stream.py +0 -600
  97. hammad/genai/models/language/utils/__init__.py +0 -28
  98. hammad/genai/models/language/utils/requests.py +0 -421
  99. hammad/genai/models/language/utils/structured_outputs.py +0 -135
  100. hammad/genai/models/model_provider.py +0 -4
  101. hammad/genai/models/multimodal.py +0 -47
  102. hammad/genai/models/reranking.py +0 -26
  103. hammad/genai/types/__init__.py +0 -1
  104. hammad/genai/types/base.py +0 -215
  105. hammad/genai/types/history.py +0 -290
  106. hammad/genai/types/tools.py +0 -507
  107. hammad/logging/__init__.py +0 -35
  108. hammad/logging/decorators.py +0 -834
  109. hammad/logging/logger.py +0 -1018
  110. hammad/mcp/__init__.py +0 -53
  111. hammad/mcp/client/__init__.py +0 -35
  112. hammad/mcp/client/client.py +0 -624
  113. hammad/mcp/client/client_service.py +0 -400
  114. hammad/mcp/client/settings.py +0 -178
  115. hammad/mcp/servers/__init__.py +0 -26
  116. hammad/mcp/servers/launcher.py +0 -1161
  117. hammad/runtime/__init__.py +0 -32
  118. hammad/runtime/decorators.py +0 -142
  119. hammad/runtime/run.py +0 -299
  120. hammad/service/__init__.py +0 -49
  121. hammad/service/create.py +0 -527
  122. hammad/service/decorators.py +0 -283
  123. hammad/types.py +0 -288
  124. hammad/typing/__init__.py +0 -435
  125. hammad/web/__init__.py +0 -43
  126. hammad/web/http/__init__.py +0 -1
  127. hammad/web/http/client.py +0 -944
  128. hammad/web/models.py +0 -275
  129. hammad/web/openapi/__init__.py +0 -1
  130. hammad/web/openapi/client.py +0 -740
  131. hammad/web/search/__init__.py +0 -1
  132. hammad/web/search/client.py +0 -1023
  133. hammad/web/utils.py +0 -472
  134. hammad_python-0.0.29.dist-info/RECORD +0 -135
  135. {hammad → ham}/py.typed +0 -0
  136. {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/WHEEL +0 -0
  137. {hammad_python-0.0.29.dist-info → hammad_python-0.0.31.dist-info}/licenses/LICENSE +0 -0
hammad/cache/base_cache.py DELETED
@@ -1,181 +0,0 @@
- """hammad.cache.base_cache"""
-
- from dataclasses import dataclass
- import hashlib
- import inspect
- from typing import Any, Literal, ParamSpec, TypeAlias, TypeVar, get_args
-
- __all__ = (
-     "BaseCache",
-     "CacheType",
-     "CacheParams",
-     "CacheReturn",
- )
-
-
- CacheType: TypeAlias = Literal["ttl", "file"]
- """Type of caches that can be created using `hammad`.
-
- `"ttl"`: Time-to-live cache.
- `"file"`: File-based cache.
- """
-
- CacheParams = ParamSpec("CacheParams")
- """Parameter specification for cache functions."""
-
- CacheReturn = TypeVar("CacheReturn")
- """Return type for cache functions."""
-
-
- @dataclass
- class BaseCache:
-     """Base class for all caches created using `hammad`."""
-
-     type: CacheType
-     """Type of cache."""
-
-     def __post_init__(self) -> None:
-         """Post-initialization hook."""
-         if self.type not in get_args(CacheType):
-             raise ValueError(f"Invalid cache type: {self.type}")
-
-     def __contains__(self, key: str) -> bool:
-         """Check if key exists in cache."""
-         raise NotImplementedError("Subclasses must implement __contains__")
-
-     def __getitem__(self, key: str) -> Any:
-         """Get value for key."""
-         raise NotImplementedError("Subclasses must implement __getitem__")
-
-     def __setitem__(self, key: str, value: Any) -> None:
-         """Set value for key."""
-         raise NotImplementedError("Subclasses must implement __setitem__")
-
-     def get(self, key: str, default: Any = None) -> Any:
-         """Get value with default if key doesn't exist."""
-         try:
-             return self[key]
-         except KeyError:
-             return default
-
-     def clear(self) -> None:
-         """Clear all cached items."""
-         raise NotImplementedError("Subclasses must implement clear")
-
-     def make_hashable(self, obj: Any) -> str:
-         """
-         Convert any object to a stable hash string.
-
-         Uses SHA-256 to generate consistent hash representations.
-         Handles nested structures recursively.
-
-         Args:
-             obj: Object to hash
-
-         Returns:
-             Hexadecimal hash string
-         """
-
-         def _hash_obj(data: Any) -> str:
-             """Internal recursive hashing function with memoization."""
-             # Handle None first
-             if data is None:
-                 return "null"
-
-             if isinstance(data, bool):
-                 return f"bool:{data}"
-             elif isinstance(data, int):
-                 return f"int:{data}"
-             elif isinstance(data, float):
-                 if data != data: # NaN
-                     return "float:nan"
-                 elif data == float("inf"):
-                     return "float:inf"
-                 elif data == float("-inf"):
-                     return "float:-inf"
-                 else:
-                     return f"float:{data}"
-             elif isinstance(data, str):
-                 return f"str:{data}"
-             elif isinstance(data, bytes):
-                 return f"bytes:{data.hex()}"
-
-             # Handle collections
-             elif isinstance(data, (list, tuple)):
-                 collection_type = "list" if isinstance(data, list) else "tuple"
-                 items = [_hash_obj(item) for item in data]
-                 return f"{collection_type}:[{','.join(items)}]"
-
-             elif isinstance(data, set):
-                 try:
-                     sorted_items = sorted(data, key=lambda x: str(x))
-                 except TypeError:
-                     sorted_items = sorted(
-                         data, key=lambda x: (type(x).__name__, str(x))
-                     )
-                 items = [_hash_obj(item) for item in sorted_items]
-                 return f"set:{{{','.join(items)}}}"
-
-             elif isinstance(data, dict):
-                 try:
-                     sorted_items = sorted(data.items(), key=lambda x: str(x[0]))
-                 except TypeError:
-                     # Fallback for non-comparable keys
-                     sorted_items = sorted(
-                         data.items(), key=lambda x: (type(x[0]).__name__, str(x[0]))
-                     )
-                 pairs = [f"{_hash_obj(k)}:{_hash_obj(v)}" for k, v in sorted_items]
-                 return f"dict:{{{','.join(pairs)}}}"
-
-             elif isinstance(data, type):
-                 module = getattr(data, "__module__", "builtins")
-                 qualname = getattr(data, "__qualname__", data.__name__)
-                 return f"type:{module}.{qualname}"
-
-             elif callable(data):
-                 module = getattr(data, "__module__", "unknown")
-                 qualname = getattr(
-                     data, "__qualname__", getattr(data, "__name__", "unknown_callable")
-                 )
-
-                 try:
-                     source = inspect.getsource(data)
-                     normalized_source = " ".join(source.split())
-                     return f"callable:{module}.{qualname}:{hash(normalized_source)}"
-                 except (OSError, TypeError, IndentationError):
-                     return f"callable:{module}.{qualname}"
-
-             elif hasattr(data, "__dict__"):
-                 class_info = (
-                     f"{data.__class__.__module__}.{data.__class__.__qualname__}"
-                 )
-                 obj_dict = {"__class__": class_info, **data.__dict__}
-                 return f"object:{_hash_obj(obj_dict)}"
-
-             elif hasattr(data, "__slots__"):
-                 class_info = (
-                     f"{data.__class__.__module__}.{data.__class__.__qualname__}"
-                 )
-                 slot_dict = {
-                     slot: getattr(data, slot, None)
-                     for slot in data.__slots__
-                     if hasattr(data, slot)
-                 }
-                 obj_dict = {"__class__": class_info, **slot_dict}
-                 return f"slotted_object:{_hash_obj(obj_dict)}"
-
-             else:
-                 try:
-                     repr_str = repr(data)
-                     return f"repr:{type(data).__name__}:{repr_str}"
-                 except Exception:
-                     # Ultimate fallback
-                     return f"unknown:{type(data).__name__}:{id(data)}"
-
-         # Generate the hash representation
-         hash_representation = _hash_obj(obj)
-
-         # Create final SHA-256 hash
-         return hashlib.sha256(
-             hash_representation.encode("utf-8", errors="surrogatepass")
-         ).hexdigest()
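The interesting piece of the removed module is `make_hashable`, which turns arbitrary (possibly nested, possibly unhashable) arguments into stable SHA-256 keys; the decorators further down in this diff rely on it for automatic cache-key generation. A minimal sanity-check sketch, assuming the `BaseCache`/`CacheType` definitions from the diff above are available locally (they are no longer shipped in 0.0.31):

```python
# Sketch only: exercises the removed BaseCache.make_hashable helper.
# Assumes BaseCache and CacheType from the deleted module above are in scope
# (e.g. vendored from hammad==0.0.29); they are not part of 0.0.31.
cache = BaseCache(type="ttl")  # "ttl" is a valid CacheType literal

a = cache.make_hashable({"x": 1, "y": [1, 2, 3]})
b = cache.make_hashable({"y": [1, 2, 3], "x": 1})  # same mapping, different insertion order

assert a == b        # dict items are sorted before hashing, so keys are order-independent
assert len(a) == 64  # SHA-256 hex digest
```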
hammad/cache/cache.py DELETED
@@ -1,169 +0,0 @@
- """hammad.cache.cache"""
-
- from typing import (
-     overload,
-     TYPE_CHECKING,
-     Literal,
-     Optional,
-     Any,
-     Union,
-     get_args,
- )
- from pathlib import Path
-
- from .base_cache import BaseCache, CacheType
- from .file_cache import FileCache, FileCacheLocation
- from .ttl_cache import TTLCache
-
-
- __all__ = ("Cache", "create_cache")
-
-
- class Cache:
-     """
-     Helper factory class for creating cache instances.
-
-     Example usage:
-         ttl_cache = Cache(type="ttl", maxsize=100, ttl=60)
-         file_cache = Cache(type="file", location="cache.pkl")
-     """
-
-     @overload
-     def __new__(
-         cls,
-         type: Literal["ttl"] = "ttl",
-         *,
-         maxsize: Optional[int] = None,
-         ttl: Optional[int] = None,
-     ) -> "TTLCache":
-         """
-         Create a new TTL (Time To Live) cache instance.
-
-         Args:
-             type: The type of cache to create.
-             maxsize: The maximum number of items to store in the cache.
-             ttl: The time to live for items in the cache.
-
-         Returns:
-             A new TTL cache instance.
-         """
-         ...
-
-     @overload
-     def __new__(
-         cls, type: Literal["file"], *, location: Optional["FileCacheLocation"] = None
-     ) -> "FileCache":
-         """
-         Create a new file cache instance.
-
-         Args:
-             type: The type of cache to create.
-             location: The directory to store the cache files.
-
-         Returns:
-             A new disk cache instance.
-         """
-         ...
-
-     def __new__(cls, type: "CacheType" = "ttl", **kwargs: Any) -> "BaseCache":
-         """
-         Create a new cache instance.
-         """
-         if type == "ttl":
-             from .ttl_cache import TTLCache
-
-             valid_ttl_params = {"maxsize", "ttl"}
-             ttl_constructor_kwargs = {
-                 k: v
-                 for k, v in kwargs.items()
-                 if k in valid_ttl_params and v is not None
-             }
-             return TTLCache(type=type, **ttl_constructor_kwargs)
-         elif type == "file":
-             from .file_cache import FileCache
-
-             valid_file_params = {"location"}
-             file_constructor_kwargs = {
-                 k: v
-                 for k, v in kwargs.items()
-                 if k in valid_file_params and v is not None
-             }
-             return FileCache(type=type, **file_constructor_kwargs)
-         else:
-             supported_types_tuple = get_args(CacheType)
-             raise ValueError(
-                 f"Unsupported cache type: {type}. Supported types are: {supported_types_tuple}"
-             )
-
-
- # Factory
-
-
- @overload
- def create_cache(
-     type: Literal["ttl"], *, maxsize: int = 128, ttl: Optional[float] = None
- ) -> "TTLCache": ...
-
-
- @overload
- def create_cache(
-     type: Literal["file"],
-     *,
-     location: Optional["FileCacheLocation"] = None,
-     maxsize: int = 128,
- ) -> "FileCache": ...
-
-
- @overload
- def create_cache(type: "CacheType", **kwargs: Any) -> "BaseCache": ...
-
-
- def create_cache(type: "CacheType", **kwargs: Any) -> "BaseCache":
-     """
-     Factory function to create cache instances of different types.
-
-     Args:
-         type: The type of cache to create. Can be "ttl" or "file".
-         **kwargs: Additional keyword arguments specific to the cache type.
-
-     Returns:
-         A cache instance of the specified type.
-
-     Raises:
-         ValueError: If an unsupported cache type is provided.
-
-     Examples:
-         ```python
-         # Create a TTL cache with custom settings
-         ttl_cache = create_cache("ttl", maxsize=256, ttl=300)
-
-         # Create a file cache with custom location
-         file_cache = create_cache("file", location="/tmp/my_cache", maxsize=1000)
-         ```
-     """
-     if type == "ttl":
-         from .ttl_cache import TTLCache
-
-         maxsize = kwargs.pop("maxsize", 128)
-         ttl = kwargs.pop("ttl", None)
-         if kwargs:
-             raise TypeError(
-                 f"Unexpected keyword arguments for TTL cache: {list(kwargs.keys())}"
-             )
-         return TTLCache(maxsize=maxsize, ttl=ttl)
-     elif type == "file":
-         from .file_cache import FileCache
-
-         location = kwargs.pop("location", None)
-         # FileCache doesn't support maxsize, so we just ignore it
-         kwargs.pop("maxsize", None)
-         if kwargs:
-             raise TypeError(
-                 f"Unexpected keyword arguments for file cache: {list(kwargs.keys())}"
-             )
-         return FileCache(location=location, type=type)
-     else:
-         valid_types = get_args("CacheType")
-         raise ValueError(
-             f"Unsupported cache type: {type}. Valid types are: {valid_types}"
-         )
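For reference, this is roughly how the removed factory surface was used, following the docstrings above. The import path is an assumption: it presumes hammad==0.0.29 (or earlier), where `hammad.cache` still exists and re-exports these names (its `__init__.py` is also deleted in this diff); the concrete `TTLCache`/`FileCache` classes live in the sibling `ttl_cache.py`/`file_cache.py` modules, likewise removed here.

```python
# Usage sketch for the removed Cache/create_cache factories (hammad<=0.0.29 only).
from hammad.cache import Cache, create_cache  # removed in 0.0.31

ttl_cache = Cache(type="ttl", maxsize=100, ttl=60)    # dispatches to TTLCache
file_cache = create_cache("file", location="cache/")  # dispatches to FileCache

# The concrete caches implement the mapping-style interface declared on BaseCache,
# which is how the decorators in this diff use them.
ttl_cache["answer"] = 42
assert ttl_cache.get("answer") == 42  # BaseCache.get falls back to a default on miss
```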
hammad/cache/decorators.py DELETED
@@ -1,261 +0,0 @@
- """hammad.cache.decorators"""
-
- from typing import Callable, Optional, Tuple, Union, overload, TYPE_CHECKING
- import inspect
- from functools import wraps
-
- from .base_cache import BaseCache, CacheParams, CacheReturn
- from .ttl_cache import TTLCache
-
- __all__ = (
-     "get_decorator_cache",
-     "clear_decorator_cache",
-     "cached",
-     "auto_cached",
- )
-
-
- # INTERNAL SINGLETON CACHE FOR DECORATORS
- _DECORATOR_CACHE: BaseCache | None = None
- """Internal singleton cache for decorators."""
-
-
- def get_decorator_cache() -> BaseCache:
-     """Get the internal singleton cache for decorators."""
-     global _DECORATOR_CACHE
-     if _DECORATOR_CACHE is None:
-         _DECORATOR_CACHE = TTLCache(type="ttl", maxsize=1000, ttl=3600)
-     return _DECORATOR_CACHE
-
-
- def clear_decorator_cache() -> None:
-     """Clear the internal singleton cache for decorators."""
-     global _DECORATOR_CACHE
-     if _DECORATOR_CACHE is not None:
-         _DECORATOR_CACHE.clear()
-         _DECORATOR_CACHE = None
-
-
- @overload
- def cached(
-     func: "Callable[CacheParams, CacheReturn]",
- ) -> "Callable[CacheParams, CacheReturn]":
-     """Decorator with automatic key generation, using the global CACHE."""
-     ...
-
-
- @overload
- def cached(
-     *,
-     key: Optional["Callable[..., str]"] = None,
-     ttl: Optional[int] = None,
-     maxsize: Optional[int] = None,
-     cache: Optional["BaseCache"] = None,
- ) -> (
-     "Callable[[Callable[CacheParams, CacheReturn]], Callable[CacheParams, CacheReturn]]"
- ):
-     """Decorator with custom key function and/or cache settings."""
-     ...
-
-
- def cached(
-     func: Optional["Callable[CacheParams, CacheReturn]"] = None,
-     *,
-     key: Optional[Callable[..., str]] = None,
-     ttl: Optional[int] = None,
-     maxsize: Optional[int] = None,
-     cache: Optional["BaseCache"] = None,
- ) -> Union[
-     "Callable[CacheParams, CacheReturn]",
-     "Callable[[Callable[CacheParams, CacheReturn]], Callable[CacheParams, CacheReturn]]",
- ]:
-     """
-     Flexible caching decorator that preserves type hints and signatures.
-
-     Can be used with or without arguments:
-     `@cached`: Uses automatic key generation with the global `hammad.cache.CACHE`.
-     `@cached(key=custom_key_func)`: Uses a custom key generation function.
-     `@cached(ttl=300, maxsize=50)`: Creates a new `TTLCache` instance specifically
-     for the decorated function with the given TTL and maxsize.
-     `@cached(cache=my_cache_instance)`: Uses a user-provided cache instance.
-
-     Args:
-         func: The function to be cached (implicitly passed when used as `@cached`).
-         key: An optional function that takes the same arguments as `func` and
-             returns a string key. If `None`, a key is automatically generated.
-         ttl: Optional. Time-to-live in seconds. If `cache` is not provided and `ttl`
-             or `maxsize` is set, a new `TTLCache` is created for this function using
-             these settings.
-         maxsize: Optional. Maximum number of items in the cache. See `ttl`.
-         cache: Optional. A specific cache instance (conforming to `BaseCache`)
-             to use. If provided, `ttl` and `maxsize` arguments (intended for
-             creating a new per-function cache) are ignored, as the provided
-             cache instance manages its own lifecycle and capacity.
-
-     Returns:
-         The decorated function with caching capabilities.
-     """
-
-     effective_cache: BaseCache = get_decorator_cache()
-
-     if cache is not None:
-         effective_cache = cache
-     elif ttl is not None or maxsize is not None:
-         default_maxsize = get_decorator_cache().maxsize
-         default_ttl = get_decorator_cache().ttl
-
-         effective_cache = TTLCache(
-             type="ttl",
-             maxsize=maxsize if maxsize is not None else default_maxsize,
-             ttl=ttl if ttl is not None else default_ttl,
-         )
-     else:
-         effective_cache = get_decorator_cache()
-
-     def decorator(
-         f_to_decorate: "Callable[CacheParams, CacheReturn]",
-     ) -> "Callable[CacheParams, CacheReturn]":
-         key_func_to_use: "Callable[..., str]"
-         if key is None:
-             sig = inspect.signature(f_to_decorate)
-
-             def auto_key_func(
-                 *args: CacheParams.args, **kwargs: CacheParams.kwargs
-             ) -> str:
-                 bound_args = sig.bind(*args, **kwargs)
-                 bound_args.apply_defaults()
-
-                 key_parts = []
-                 for param_name, param_value in bound_args.arguments.items():
-                     key_parts.append(
-                         f"{param_name}={effective_cache.make_hashable(param_value)}"
-                     )
-
-                 return f"{f_to_decorate.__module__}.{f_to_decorate.__qualname__}({','.join(key_parts)})"
-
-             key_func_to_use = auto_key_func
-         else:
-             key_func_to_use = key
-
-         @wraps(f_to_decorate)
-         def wrapper(
-             *args: CacheParams.args, **kwargs: CacheParams.kwargs
-         ) -> CacheReturn:
-             try:
-                 cache_key_value = key_func_to_use(*args, **kwargs)
-
-                 if cache_key_value in effective_cache:
-                     return effective_cache[cache_key_value]
-
-                 result = f_to_decorate(*args, **kwargs)
-                 effective_cache[cache_key_value] = result
-                 return result
-
-             except Exception:
-                 return f_to_decorate(*args, **kwargs)
-
-         setattr(wrapper, "__wrapped__", f_to_decorate)
-         return wrapper
-
-     if func is None:
-         return decorator
-     else:
-         return decorator(func)
-
-
- def auto_cached(
-     *,
-     ignore: Optional[Tuple[str, ...]] = None,
-     include: Optional[Tuple[str, ...]] = None,
-     ttl: Optional[int] = None,
-     maxsize: Optional[int] = None,
-     cache: Optional["BaseCache"] = None,
- ) -> (
-     "Callable[[Callable[CacheParams, CacheReturn]], Callable[CacheParams, CacheReturn]]"
- ):
-     """
-     Advanced caching decorator with automatic parameter selection for key generation.
-
-     Automatically generates cache keys based on a selection of the function's
-     parameters. This decorator internally uses the `cached` decorator.
-
-     Args:
-         ignore: A tuple of parameter names to exclude from cache key generation.
-             Cannot be used with `include`.
-         include: A tuple of parameter names to exclusively include in cache key
-             generation. All other parameters will be ignored. Cannot be used
-             with `ignore`.
-         ttl: Optional. Time-to-live in seconds. Passed to the underlying `cached`
-             decorator. If `cache` is not provided, this can lead to the creation
-             of a new `TTLCache` for the decorated function.
-         maxsize: Optional. Max cache size. Passed to `cached`. See `ttl`.
-         cache: Optional. A specific cache instance (conforming to `BaseCache`)
-             to use. This is passed directly to the underlying `cached` decorator.
-             If provided, `ttl` and `maxsize` arguments might be interpreted
-             differently by `cached` (see `cached` docstring).
-
-     Returns:
-         A decorator function that, when applied, will cache the results of
-         the decorated function.
-
-     Example:
-         ```python
-         from hammad.cache import auto_cached, create_cache
-
-         # Example of using a custom cache instance
-         my_user_cache = create_cache(cache_type="ttl", ttl=600, maxsize=50)
-
-         @auto_cached(ignore=('debug_mode', 'logger'), cache=my_user_cache)
-         def fetch_user_data(user_id: int, debug_mode: bool = False, logger: Any = None):
-             # ... expensive operation to fetch data ...
-             print(f"Fetching data for user {user_id}")
-             return {"id": user_id, "data": "some_data"}
-
-         # Example of per-function TTL without a pre-defined cache
-         @auto_cached(include=('url',), ttl=30)
-         def fetch_url_content(url: str, timeout: int = 10):
-             # ... expensive operation to fetch URL ...
-             print(f"Fetching content from {url}")
-             return f"Content from {url}"
-         ```
-     """
-
-     if ignore and include:
-         raise ValueError("Cannot specify both 'ignore' and 'include' in auto_cached")
-
-     def actual_decorator(
-         func_to_decorate: "Callable[CacheParams, CacheReturn]",
-     ) -> "Callable[CacheParams, CacheReturn]":
-         sig = inspect.signature(func_to_decorate)
-
-         def auto_key_generator(
-             *args: CacheParams.args, **kwargs: CacheParams.kwargs
-         ) -> str:
-             bound_args = sig.bind(*args, **kwargs)
-             bound_args.apply_defaults()
-
-             params_for_key = bound_args.arguments.copy()
-
-             if include is not None:
-                 params_for_key = {
-                     k: v for k, v in params_for_key.items() if k in include
-                 }
-             elif ignore is not None:
-                 params_for_key = {
-                     k: v for k, v in params_for_key.items() if k not in ignore
-                 }
-
-             # Use the effective cache's make_hashable method
-             effective_cache = cache if cache is not None else get_decorator_cache()
-             key_parts = [
-                 f"{k}={effective_cache.make_hashable(v)}"
-                 for k, v in sorted(params_for_key.items())
-             ]
-             return f"{func_to_decorate.__module__}.{func_to_decorate.__qualname__}({','.join(key_parts)})"
-
-         configured_cached_decorator = cached(
-             key=auto_key_generator, ttl=ttl, maxsize=maxsize, cache=cache
-         )
-         return configured_cached_decorator(func_to_decorate)
-
-     return actual_decorator
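The two decorators above were the module's public entry points. A short usage sketch, again assuming an older release (<=0.0.29) where `hammad.cache` still exposes them; the storage behaviour itself comes from `TTLCache`, defined in the also-deleted `ttl_cache.py`:

```python
# Usage sketch for the removed @cached / @auto_cached decorators (hammad<=0.0.29 only).
from hammad.cache import cached, auto_cached  # removed in 0.0.31

@cached(ttl=300, maxsize=50)  # per-function TTLCache, per the docstring above
def slow_square(n: int) -> int:
    print(f"computing {n} * {n}")
    return n * n

@auto_cached(ignore=("verbose",))  # 'verbose' is excluded from the cache key
def lookup_user(user_id: int, verbose: bool = False) -> dict:
    return {"id": user_id}

slow_square(4)  # computes and caches
slow_square(4)  # cache hit: no "computing" output the second time
```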
hammad/cache/file_cache.py DELETED
@@ -1,80 +0,0 @@
- """hammad.cache.file_cache"""
-
- from dataclasses import dataclass
- from typing import Any, Literal, Optional, TypeAlias, Union
- import os
- import hashlib
- import pickle
- from pathlib import Path
-
- from .base_cache import BaseCache
-
- __all__ = ("FileCache", "FileCacheLocation")
-
-
- FileCacheLocation: TypeAlias = Union[
-     # Example .pkl route
-     Literal["cache.pkl"], Literal["cache/"], str, Path
- ]
-
-
- @dataclass
- class FileCache(BaseCache):
-     """
-     Persistent disk-based cache that stores data in a directory.
-
-     Uses pickle for serialization and automatically uses __pycache__ directory
-     if no cache directory is specified.
-     """
-
-     location: Optional[str] = None
-     type: Literal["file"] = "file"
-
-     def __post_init__(self):
-         """Initialize disk cache after dataclass initialization."""
-         super().__post_init__()
-         if self.location is None:
-             self.location = os.path.join(os.getcwd(), "__pycache__")
-
-         self.location_path = Path(self.location)
-         self.location_path.mkdir(exist_ok=True)
-
-     def _get_cache_path(self, key: str) -> Path:
-         """Get the file path for a cache key."""
-         safe_key = hashlib.sha256(key.encode("utf-8")).hexdigest()
-         return self.location_path / f"cache_{safe_key}.pkl"
-
-     def __contains__(self, key: str) -> bool:
-         """Check if key exists in cache."""
-         return self._get_cache_path(key).exists()
-
-     def __getitem__(self, key: str) -> Any:
-         """Get value for key."""
-         cache_path = self._get_cache_path(key)
-         if not cache_path.exists():
-             raise KeyError(key)
-
-         try:
-             with open(cache_path, "rb") as f:
-                 return pickle.load(f)
-         except (pickle.PickleError, OSError) as e:
-             cache_path.unlink(missing_ok=True)
-             raise KeyError(key) from e
-
-     def __setitem__(self, key: str, value: Any) -> None:
-         """Set value for key."""
-         cache_path = self._get_cache_path(key)
-         try:
-             with open(cache_path, "wb") as f:
-                 pickle.dump(value, f)
-         except (pickle.PickleError, OSError) as e:
-             cache_path.unlink(missing_ok=True)
-             raise RuntimeError(f"Failed to cache value for key '{key}': {e}") from e
-
-     def clear(self) -> None:
-         """Clear all cached items."""
-         for cache_file in self.location_path.glob("cache_*.pkl"):
-             try:
-                 cache_file.unlink()
-             except OSError:
-                 pass
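`FileCache` was the persistence-backed counterpart: each value is pickled to `cache_<sha256(key)>.pkl` under the configured directory, defaulting to `./__pycache__`. A round-trip sketch, assuming the class definition from the diff above is available; the demo path is arbitrary:

```python
# Round-trip sketch for the removed FileCache (definition shown in the diff above).
fc = FileCache(type="file", location="/tmp/hammad_filecache_demo")  # directory created if missing

fc["config"] = {"retries": 3}  # pickled to cache_<sha256("config")>.pkl
assert "config" in fc
assert fc["config"] == {"retries": 3}

fc.clear()  # deletes every cache_*.pkl in the directory
```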