apple-foundation-models 0.2.2__cp312-cp312-macosx_26_0_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- apple_foundation_models-0.2.2.dist-info/METADATA +620 -0
- apple_foundation_models-0.2.2.dist-info/RECORD +20 -0
- apple_foundation_models-0.2.2.dist-info/WHEEL +5 -0
- apple_foundation_models-0.2.2.dist-info/licenses/LICENSE +21 -0
- apple_foundation_models-0.2.2.dist-info/top_level.txt +1 -0
- applefoundationmodels/__init__.py +124 -0
- applefoundationmodels/_foundationmodels.cpython-312-darwin.so +0 -0
- applefoundationmodels/_foundationmodels.pyi +43 -0
- applefoundationmodels/async_session.py +296 -0
- applefoundationmodels/base.py +65 -0
- applefoundationmodels/base_session.py +659 -0
- applefoundationmodels/constants.py +30 -0
- applefoundationmodels/error_codes.json +163 -0
- applefoundationmodels/exceptions.py +122 -0
- applefoundationmodels/libfoundation_models.dylib +0 -0
- applefoundationmodels/py.typed +2 -0
- applefoundationmodels/pydantic_compat.py +144 -0
- applefoundationmodels/session.py +265 -0
- applefoundationmodels/tools.py +285 -0
- applefoundationmodels/types.py +284 -0
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
"""
applefoundationmodels: Python bindings for Apple's FoundationModels framework

High-level Pythonic interface for accessing Apple Intelligence on-device
Foundation models.

Basic text generation:
    from applefoundationmodels import Session

    with Session() as session:
        # Check availability
        if not Session.is_ready():
            print("Apple Intelligence not available")
        else:
            # Generate response
            response = session.generate("Hello, how are you?")
            print(response.text)  # Access text via .text property

Structured output:
    from applefoundationmodels import Session

    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"}
        }
    }

    with Session() as session:
        response = session.generate("Extract: Alice is 28", schema=schema)
        print(response.parsed)  # {"name": "Alice", "age": 28}

Sync streaming:
    from applefoundationmodels import Session

    with Session() as session:
        for chunk in session.generate("Tell me a story", stream=True):
            print(chunk.content, end='', flush=True)

Async streaming:
    import asyncio
    from applefoundationmodels import AsyncSession

    async def main():
        async with AsyncSession() as session:
            async for chunk in session.generate("Tell me a story", stream=True):
                print(chunk.content, end='', flush=True)

    asyncio.run(main())
"""

# Keep in sync with the distribution version in the packaging metadata.
# The wheel ships as apple-foundation-models 0.2.2, but this constant
# previously said "0.1.0", making applefoundationmodels.__version__
# disagree with importlib.metadata.version("apple-foundation-models").
__version__ = "0.2.2"
def apple_intelligence_available() -> bool:
    """
    Report whether Apple Intelligence can be used right now.

    Convenience wrapper that answers a single question: is the Apple
    Intelligence framework present on this device and ready for immediate
    use?

    Returns:
        True if Apple Intelligence is available and ready, False otherwise

    Example:
        >>> from applefoundationmodels import apple_intelligence_available
        >>> if apple_intelligence_available():
        ...     print("Apple Intelligence is ready!")
        ... else:
        ...     print("Apple Intelligence is not available")
    """
    # Imported inside the function, mirroring the module's deferred import
    # (presumably to sidestep an import cycle with .session — confirm).
    from .session import Session

    return Session.is_ready()
# Public API exports
from .session import Session
from .async_session import AsyncSession
from .constants import (
    DEFAULT_TEMPERATURE,
    DEFAULT_MAX_TOKENS,
    MIN_TEMPERATURE,
    MAX_TEMPERATURE,
    TemperaturePreset,
)
from .types import (
    Result,
    Availability,
    SessionConfig,
    GenerationParams,
    GenerationResponse,
    StreamChunk,
    StreamCallback,
    ToolCallback,
)
# NOTE(review): this star-import pulls the package's exception classes into
# the top-level namespace, but none of them are listed in __all__ below, so
# `from applefoundationmodels import *` will re-export only the names listed
# there — confirm that omitting the exception names is intentional
# (exceptions.py may define its own __all__).
from .exceptions import *

# Explicit public API for `from applefoundationmodels import *` and for
# documentation tooling.
__all__ = [
    # Version
    "__version__",
    # Convenience functions
    "apple_intelligence_available",
    # Main classes
    "Session",
    "AsyncSession",
    # Constants
    "DEFAULT_TEMPERATURE",
    "DEFAULT_MAX_TOKENS",
    "MIN_TEMPERATURE",
    "MAX_TEMPERATURE",
    "TemperaturePreset",
    # Type definitions
    "Result",
    "Availability",
    "SessionConfig",
    "GenerationParams",
    "GenerationResponse",
    "StreamChunk",
    "StreamCallback",
    "ToolCallback",
]
|
|
Binary file
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"""Type stubs for _foundationmodels Cython extension.

Declares the Python-visible surface of the compiled extension
(`_foundationmodels.cpython-*.so`). Bodies are intentionally `...`;
the defaults shown here document the extension's behavior for type
checkers but are not themselves executed.
"""

from typing import Any, Callable, Dict, List, Optional

# Initialization and cleanup
def init() -> None: ...
def cleanup() -> None: ...
def get_version() -> str: ...

# Availability functions
# check_availability returns an int status code — presumably mapping to the
# Availability type in types.py; confirm against the extension source.
def check_availability() -> int: ...
def get_availability_reason() -> Optional[str]: ...
def is_ready() -> bool: ...

# Session management
# Returns an int — presumably an opaque session handle/id; confirm against
# the extension source.
def create_session(config: Optional[Dict[str, Any]] = None) -> int: ...

# Text generation
def generate(prompt: str, temperature: float = 1.0, max_tokens: int = 1024) -> str: ...

# Structured generation
def generate_structured(
    prompt: str,
    schema: Dict[str, Any],
    temperature: float = 1.0,
    max_tokens: int = 1024,
) -> Dict[str, Any]: ...

# Streaming generation
# The callback receives each text delta; its Optional[str] parameter suggests
# a final call with None marks end-of-stream — confirm against the extension.
def generate_stream(
    prompt: str,
    callback: Callable[[Optional[str]], None],
    temperature: float = 1.0,
    max_tokens: int = 1024,
) -> None: ...

# History management
def get_history() -> List[Any]: ...
def clear_history() -> None: ...

# Tool calling
def register_tools(tools: Dict[str, Callable]) -> None: ...
def get_transcript() -> List[Dict[str, Any]]: ...
|
@@ -0,0 +1,296 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AsyncSession API for applefoundationmodels Python bindings.
|
|
3
|
+
|
|
4
|
+
Provides async session management, text generation, and async streaming support.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
import logging
|
|
9
|
+
from typing import (
|
|
10
|
+
Optional,
|
|
11
|
+
Dict,
|
|
12
|
+
Any,
|
|
13
|
+
List,
|
|
14
|
+
AsyncIterator,
|
|
15
|
+
Union,
|
|
16
|
+
TYPE_CHECKING,
|
|
17
|
+
overload,
|
|
18
|
+
Type,
|
|
19
|
+
Coroutine,
|
|
20
|
+
cast,
|
|
21
|
+
)
|
|
22
|
+
from typing_extensions import Literal
|
|
23
|
+
|
|
24
|
+
from .base_session import BaseSession, StreamQueueItem
|
|
25
|
+
from .base import AsyncContextManagedResource
|
|
26
|
+
from .types import (
|
|
27
|
+
GenerationResponse,
|
|
28
|
+
StreamChunk,
|
|
29
|
+
)
|
|
30
|
+
from .pydantic_compat import normalize_schema
|
|
31
|
+
|
|
32
|
+
if TYPE_CHECKING:
|
|
33
|
+
from pydantic import BaseModel
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class AsyncSession(BaseSession, AsyncContextManagedResource):
    """
    Async AI session for maintaining conversation state.

    AsyncSession provides async/await support for all operations including
    streaming. Use this for async applications. Sessions maintain conversation
    history and can be configured with tools and instructions.

    Usage:
        async with AsyncSession() as session:
            response = await session.generate("Hello!")
            print(response.text)

            # Async streaming
            async for chunk in session.generate("Story", stream=True):
                print(chunk.content, end='', flush=True)

        # With configuration:
        def get_weather(location: str) -> str:
            '''Get current weather for a location.'''
            return f"Weather in {location}: 22°C"

        session = AsyncSession(
            instructions="You are a helpful assistant.",
            tools=[get_weather]
        )
        response = await session.generate("What's the weather in Paris?")
        await session.aclose()
    """

    async def _call_ffi(self, func, *args, **kwargs):
        """Execute FFI call asynchronously via a worker thread.

        Runs the blocking native callable in asyncio's default thread pool
        (asyncio.to_thread) so the event loop stays responsive while the FFI
        call is in flight. Returns whatever the callable returns.
        """
        return await asyncio.to_thread(func, *args, **kwargs)

    def _create_stream_queue_adapter(self) -> BaseSession._StreamQueueAdapter:
        """Return the async-aware queue adapter used for streaming.

        Must be called from within a running event loop: the captured loop is
        used to marshal items pushed from the FFI worker thread back onto the
        asyncio queue.
        """
        loop = asyncio.get_running_loop()
        queue: asyncio.Queue[StreamQueueItem] = asyncio.Queue()

        def push(item: StreamQueueItem) -> None:
            # Called from the FFI worker thread. run_coroutine_threadsafe
            # schedules queue.put on the session's event loop; the queue is
            # unbounded (maxsize=0), so the put completes without blocking.
            asyncio.run_coroutine_threadsafe(queue.put(item), loop)

        async def get_async() -> StreamQueueItem:
            return await queue.get()

        def get_sync() -> StreamQueueItem:
            # AsyncSession consumes the stream only asynchronously; a sync
            # read here would block the event loop, so it is rejected.
            raise RuntimeError(
                "Synchronous queue access is not supported for AsyncSession"
            )

        return BaseSession._StreamQueueAdapter(
            push=push,
            get_sync=get_sync,
            get_async=get_async,
        )

    def close(self) -> None:
        """Close the session synchronously.

        When no event loop is running, this method drives the async cleanup via
        asyncio.run(). If called while an event loop is active, a RuntimeError is
        raised to avoid nested event loops—the caller should instead await
        `aclose()` or use `async with`.
        """
        if self._closed:
            return

        try:
            # Probe for a running loop; raises RuntimeError when there is none.
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop active: safe to drive the async cleanup ourselves.
            asyncio.run(self.aclose())
            return

        raise RuntimeError(
            "AsyncSession.close() cannot be called while an event loop is running. "
            "Use await session.aclose() or 'async with AsyncSession()'."
        )

    async def aclose(self) -> None:
        """Asynchronously close the session and cleanup resources.

        Idempotent: repeated calls after the first are no-ops.

        NOTE(review): no FFI teardown is invoked here — only the closed flag
        is set via _mark_closed(); confirm that BaseSession (or the extension)
        releases native resources elsewhere.
        """
        if self._closed:
            return
        self._mark_closed()

    # ========================================================================
    # Type overloads for generate() method
    #
    # IMPORTANT: These overloads must be kept in sync with Session.generate()
    # in session.py. The signatures are identical except for:
    # - async keyword (Session: def generate() vs AsyncSession: async def generate())
    # - Return type for streaming (Iterator vs AsyncIterator)
    #
    # When modifying these overloads, update both files to maintain consistency.
    # ========================================================================

    # Type overload for non-streaming text generation
    @overload
    def generate(
        self,
        prompt: str,
        schema: None = None,
        stream: Literal[False] = False,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> Coroutine[Any, Any, GenerationResponse]: ...

    # Type overload for non-streaming structured generation
    @overload
    def generate(
        self,
        prompt: str,
        schema: Union[Dict[str, Any], Type["BaseModel"]],
        stream: Literal[False] = False,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> Coroutine[Any, Any, GenerationResponse]: ...

    # Type overload for async streaming generation (text only)
    @overload
    def generate(
        self,
        prompt: str,
        schema: None = None,
        stream: Literal[True] = True,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncIterator[StreamChunk]: ...

    # Deliberately NOT declared async: the streaming mode must hand back an
    # async iterator directly (not a coroutine), so the method returns either
    # a coroutine (from the async _impl helpers) or an async generator.
    def generate(
        self,
        prompt: str,
        schema: Optional[Union[Dict[str, Any], Type["BaseModel"]]] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> Union[Coroutine[Any, Any, GenerationResponse], AsyncIterator[StreamChunk]]:
        """
        Generate text or structured output asynchronously, with optional streaming.

        This unified async method supports three generation modes:
        1. Text generation (schema=None, stream=False) -> GenerationResponse
        2. Structured generation (schema=dict/model, stream=False) -> GenerationResponse
        3. Async streaming (schema=None, stream=True) -> AsyncIterator[StreamChunk]

        Args:
            prompt: Input text prompt
            schema: Optional JSON schema dict or Pydantic model for structured output
            stream: If True, return an async iterator of chunks
            temperature: Sampling temperature (0.0-2.0, default: DEFAULT_TEMPERATURE)
            max_tokens: Maximum tokens to generate (default: DEFAULT_MAX_TOKENS)

        Returns:
            - GenerationResponse with .text or .parsed property (if stream=False)
            - AsyncIterator[StreamChunk] yielding content deltas (if stream=True)

        Raises:
            RuntimeError: If session is closed
            GenerationError: If generation fails
            ValueError: If schema is provided with stream=True

        Examples:
            Text generation:
                >>> response = await session.generate("What is Python?")
                >>> print(response.text)

            Structured generation:
                >>> schema = {"type": "object", "properties": {"name": {"type": "string"}}}
                >>> response = await session.generate("Extract name: John", schema=schema)
                >>> print(response.parsed)

            Async streaming:
                >>> async for chunk in session.generate("Tell a story", stream=True):
                ...     print(chunk.content, end='', flush=True)
        """
        # Validation and defaulting (temperature/max_tokens) happen in the
        # shared planner on BaseSession; it also decides the mode.
        plan = self._plan_generate_call(stream, schema, temperature, max_tokens)

        if plan.mode == "stream":
            return self._generate_stream_impl(prompt, plan.temperature, plan.max_tokens)
        # The `schema is not None` check mirrors the planner's decision and
        # also narrows schema's type for the type checker.
        if plan.mode == "structured" and schema is not None:
            return self._generate_structured_impl(
                prompt, schema, plan.temperature, plan.max_tokens
            )

        return self._generate_text_impl(prompt, plan.temperature, plan.max_tokens)

    async def _generate_text_impl(
        self, prompt: str, temperature: float, max_tokens: int
    ) -> GenerationResponse:
        """Internal implementation for async text generation."""
        # The context manager captures the transcript length before the call
        # so the response can be attributed to this generation.
        async with self._async_generation_context() as start_length:
            text = await self._call_ffi(
                self._ffi.generate,
                prompt,
                temperature,
                max_tokens,
            )
            return self._build_generation_response(text, False, start_length)

    async def _generate_structured_impl(
        self,
        prompt: str,
        schema: Union[Dict[str, Any], Type["BaseModel"]],
        temperature: float,
        max_tokens: int,
    ) -> GenerationResponse:
        """Internal implementation for async structured generation."""
        async with self._async_generation_context() as start_length:
            # Accepts either a plain JSON-schema dict or a Pydantic model;
            # normalize_schema converts both to the dict form the FFI expects.
            json_schema = normalize_schema(schema)
            result = await self._call_ffi(
                self._ffi.generate_structured,
                prompt,
                json_schema,
                temperature,
                max_tokens,
            )
            # Second argument True marks the response as structured output.
            return self._build_generation_response(result, True, start_length)

    async def _generate_stream_impl(
        self, prompt: str, temperature: float, max_tokens: int
    ) -> AsyncIterator[StreamChunk]:
        """Internal implementation for async streaming generation.

        An async generator: begin/end bookkeeping is done manually (rather
        than via _async_generation_context) so _end_generation runs in the
        finally clause even if the consumer abandons the iterator early.
        """
        start_length = self._begin_generation()
        adapter = self._create_stream_queue_adapter()
        try:
            async for chunk in self._stream_chunks_async_impl(
                prompt, temperature, max_tokens, adapter
            ):
                yield chunk
        finally:
            self._end_generation(start_length)

    async def get_history(self) -> List[Dict[str, Any]]:
        """
        Get conversation history asynchronously.

        Returns:
            List of message dictionaries with 'role' and 'content' keys

        Raises:
            RuntimeError: If session is closed

        Example:
            >>> history = await session.get_history()
            >>> for msg in history:
            ...     print(f"{msg['role']}: {msg['content']}")
        """
        self._check_closed()
        result = await self._call_ffi(self._ffi.get_history)
        # The FFI is untyped; cast for the benefit of the type checker only.
        return cast(List[Dict[str, Any]], result)

    async def clear_history(self) -> None:
        """
        Clear conversation history asynchronously.

        Removes all messages from the session while keeping the session active.

        Raises:
            RuntimeError: If session is closed
        """
        self._check_closed()
        await self._call_ffi(self._ffi.clear_history)
        # Reset to current transcript length (may include persistent instructions)
        self._last_transcript_length = len(self.transcript)

    # Properties inherited from BaseSession (transcript, last_generation_transcript)
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Base classes for applefoundationmodels.
|
|
3
|
+
|
|
4
|
+
Provides base functionality for context-managed resources.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import asyncio
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from typing import TypeVar
|
|
10
|
+
|
|
11
|
+
T = TypeVar("T", bound="ContextManagedResource")
|
|
12
|
+
AT = TypeVar("AT", bound="AsyncContextManagedResource")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ContextManagedResource(ABC):
    """
    Mixin adding ``with``-statement support to closeable resources.

    Entering the ``with`` block simply hands back the resource; leaving it —
    normally or via an exception — invokes :meth:`close` exactly once.
    Concrete subclasses supply the actual cleanup by overriding ``close``.
    """

    @abstractmethod
    def close(self) -> None:
        """
        Release the resource.

        Must be implemented by subclasses; called automatically on
        context-manager exit.
        """

    def __enter__(self: T) -> T:
        """Enter the ``with`` block, yielding the resource itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Leave the ``with`` block; always closes the resource."""
        self.close()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class AsyncContextManagedResource(ABC):
    """
    Mixin adding ``async with`` support to asynchronously closeable resources.

    Entering the ``async with`` block simply hands back the resource; leaving
    it — normally or via an exception — awaits :meth:`aclose` exactly once.
    Concrete subclasses supply the actual cleanup by overriding ``aclose``.
    """

    @abstractmethod
    async def aclose(self) -> None:
        """
        Asynchronously release the resource.

        Must be implemented by subclasses; awaited automatically by
        ``__aexit__`` on context-manager exit.
        """

    async def __aenter__(self: AT) -> AT:
        """Enter the ``async with`` block, yielding the resource itself."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        """Leave the ``async with`` block; always awaits :meth:`aclose`."""
        await self.aclose()