pyview-web 0.3.0__py3-none-any.whl → 0.8.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. pyview/__init__.py +16 -6
  2. pyview/assets/js/app.js +1 -0
  3. pyview/assets/js/uploaders.js +221 -0
  4. pyview/assets/package-lock.json +16 -14
  5. pyview/assets/package.json +2 -2
  6. pyview/async_stream_runner.py +2 -1
  7. pyview/auth/__init__.py +3 -1
  8. pyview/auth/provider.py +6 -6
  9. pyview/auth/required.py +7 -10
  10. pyview/binding/__init__.py +47 -0
  11. pyview/binding/binder.py +134 -0
  12. pyview/binding/context.py +33 -0
  13. pyview/binding/converters.py +191 -0
  14. pyview/binding/helpers.py +78 -0
  15. pyview/binding/injectables.py +119 -0
  16. pyview/binding/params.py +105 -0
  17. pyview/binding/result.py +32 -0
  18. pyview/changesets/__init__.py +2 -0
  19. pyview/changesets/changesets.py +8 -3
  20. pyview/cli/commands/create_view.py +4 -3
  21. pyview/cli/main.py +1 -1
  22. pyview/components/__init__.py +72 -0
  23. pyview/components/base.py +212 -0
  24. pyview/components/lifecycle.py +85 -0
  25. pyview/components/manager.py +366 -0
  26. pyview/components/renderer.py +14 -0
  27. pyview/components/slots.py +73 -0
  28. pyview/csrf.py +4 -2
  29. pyview/events/AutoEventDispatch.py +98 -0
  30. pyview/events/BaseEventHandler.py +51 -8
  31. pyview/events/__init__.py +2 -1
  32. pyview/instrumentation/__init__.py +3 -3
  33. pyview/instrumentation/interfaces.py +57 -33
  34. pyview/instrumentation/noop.py +21 -18
  35. pyview/js.py +20 -23
  36. pyview/live_routes.py +5 -3
  37. pyview/live_socket.py +167 -44
  38. pyview/live_view.py +24 -12
  39. pyview/meta.py +14 -2
  40. pyview/phx_message.py +7 -8
  41. pyview/playground/__init__.py +10 -0
  42. pyview/playground/builder.py +118 -0
  43. pyview/playground/favicon.py +39 -0
  44. pyview/pyview.py +54 -20
  45. pyview/session.py +2 -0
  46. pyview/static/assets/app.js +2088 -806
  47. pyview/static/assets/uploaders.js +221 -0
  48. pyview/stream.py +308 -0
  49. pyview/template/__init__.py +11 -1
  50. pyview/template/live_template.py +12 -8
  51. pyview/template/live_view_template.py +338 -0
  52. pyview/template/render_diff.py +33 -7
  53. pyview/template/root_template.py +21 -9
  54. pyview/template/serializer.py +2 -5
  55. pyview/template/template_view.py +170 -0
  56. pyview/template/utils.py +3 -2
  57. pyview/uploads.py +344 -55
  58. pyview/vendor/flet/pubsub/__init__.py +3 -1
  59. pyview/vendor/flet/pubsub/pub_sub.py +10 -18
  60. pyview/vendor/ibis/__init__.py +3 -7
  61. pyview/vendor/ibis/compiler.py +25 -32
  62. pyview/vendor/ibis/context.py +13 -15
  63. pyview/vendor/ibis/errors.py +0 -6
  64. pyview/vendor/ibis/filters.py +70 -76
  65. pyview/vendor/ibis/loaders.py +6 -7
  66. pyview/vendor/ibis/nodes.py +40 -42
  67. pyview/vendor/ibis/template.py +4 -5
  68. pyview/vendor/ibis/tree.py +62 -3
  69. pyview/vendor/ibis/utils.py +14 -15
  70. pyview/ws_handler.py +116 -86
  71. {pyview_web-0.3.0.dist-info → pyview_web-0.8.0a2.dist-info}/METADATA +39 -33
  72. pyview_web-0.8.0a2.dist-info/RECORD +80 -0
  73. pyview_web-0.8.0a2.dist-info/WHEEL +4 -0
  74. pyview_web-0.8.0a2.dist-info/entry_points.txt +3 -0
  75. pyview_web-0.3.0.dist-info/LICENSE +0 -21
  76. pyview_web-0.3.0.dist-info/RECORD +0 -58
  77. pyview_web-0.3.0.dist-info/WHEEL +0 -4
  78. pyview_web-0.3.0.dist-info/entry_points.txt +0 -3
@@ -0,0 +1,221 @@
+ /**
+  * PyView External S3 Uploaders
+  *
+  * Client-side uploaders for external S3 uploads.
+  *
+  * Available uploaders:
+  * - S3: Simple POST upload to S3 using presigned POST URLs
+  * - S3Multipart: Multipart upload for large files (>5GB)
+  */
+
+ window.Uploaders = window.Uploaders || {};
+
+ // S3 Simple POST uploader
+ // Uses presigned POST URLs for direct upload to S3
+ // Works for files up to ~5GB
+ if (!window.Uploaders.S3) {
+   window.Uploaders.S3 = function (entries, onViewError) {
+     entries.forEach((entry) => {
+       let formData = new FormData();
+       let { url, fields } = entry.meta;
+
+       // Add all fields from presigned POST
+       Object.entries(fields).forEach(([key, val]) =>
+         formData.append(key, val)
+       );
+       formData.append("file", entry.file);
+
+       let xhr = new XMLHttpRequest();
+       onViewError(() => xhr.abort());
+
+       xhr.onload = () => {
+         if (xhr.status === 204 || xhr.status === 200) {
+           entry.progress(100);
+         } else {
+           entry.error(`S3 upload failed with status ${xhr.status}`);
+         }
+       };
+       xhr.onerror = () => entry.error("Network error during upload");
+
+       xhr.upload.addEventListener("progress", (event) => {
+         if (event.lengthComputable) {
+           let percent = Math.round((event.loaded / event.total) * 100);
+           if (percent < 100) {
+             entry.progress(percent);
+           }
+         }
+       });
+
+       xhr.open("POST", url, true);
+       xhr.send(formData);
+     });
+   };
+ }
+
+ // S3 Multipart uploader for large files
+ // Uploads file in chunks with retry logic and concurrency control
+ //
+ // - Exponential backoff retry (max 3 attempts per part)
+ // - Concurrency limit (max 6 parallel uploads)
+ // - Automatic cleanup on fatal errors
+ //
+ // Based on AWS best practices:
+ // https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html
+ //
+ // Server must:
+ // 1. Return metadata with: uploader="S3Multipart", upload_id, part_urls, chunk_size
+ // 2. Provide entry_complete callback to finalize the upload
+ if (!window.Uploaders.S3Multipart) {
+   window.Uploaders.S3Multipart = function (entries, onViewError) {
+     entries.forEach((entry) => {
+       const { upload_id, part_urls, chunk_size, key } = entry.meta;
+       const file = entry.file;
+       const parts = []; // Store {PartNumber, ETag} for each uploaded part
+
+       const MAX_RETRIES = 3;
+       const MAX_CONCURRENT = 6;
+       let uploadedParts = 0;
+       let activeUploads = 0;
+       let partIndex = 0;
+       let hasError = false;
+       const totalParts = part_urls.length;
+
+       console.log(`[S3Multipart] Starting upload for ${entry.file.name}`);
+       console.log(`[S3Multipart] Total parts: ${totalParts}, chunk size: ${chunk_size}`);
+       console.log(`[S3Multipart] Max concurrent uploads: ${MAX_CONCURRENT}, max retries: ${MAX_RETRIES}`);
+
+       // Add a custom method to send completion data directly
+       // This bypasses entry.progress() which only handles numbers
+       entry.complete = function(completionData) {
+         console.log(`[S3Multipart] Calling entry.complete with:`, completionData);
+         // Call pushFileProgress directly with the completion data
+         entry.view.pushFileProgress(entry.fileEl, entry.ref, completionData);
+       };
+
+       // Upload a single part with retry logic
+       const uploadPart = (index, retryCount = 0) => {
+         if (hasError) return; // Stop if we've hit a fatal error
+
+         const partNumber = index + 1;
+         const url = part_urls[index];
+         const start = index * chunk_size;
+         const end = Math.min(start + chunk_size, file.size);
+         const chunk = file.slice(start, end);
+
+         console.log(`[S3Multipart] Starting part ${partNumber}/${totalParts}, size: ${chunk.size} bytes, attempt ${retryCount + 1}`);
+
+         const xhr = new XMLHttpRequest();
+         onViewError(() => xhr.abort());
+
+         // Track upload progress within this chunk
+         xhr.upload.addEventListener("progress", (event) => {
+           if (event.lengthComputable) {
+             // Calculate overall progress: completed parts + current part's progress
+             const completedBytes = uploadedParts * chunk_size;
+             const currentPartBytes = event.loaded;
+             const totalBytes = file.size;
+             const overallPercent = Math.round(((completedBytes + currentPartBytes) / totalBytes) * 100);
+
+             // Don't report 100% until all parts complete and we send completion data
+             if (overallPercent < 100) {
+               entry.progress(overallPercent);
+             }
+           }
+         });
+
+         xhr.onload = () => {
+           activeUploads--;
+
+           if (xhr.status === 200) {
+             const etag = xhr.getResponseHeader('ETag');
+             console.log(`[S3Multipart] Part ${partNumber} succeeded, ETag: ${etag}`);
+
+             if (!etag) {
+               console.error(`[S3Multipart] Part ${partNumber} missing ETag!`);
+               entry.error(`Part ${partNumber} upload succeeded but no ETag returned`);
+               hasError = true;
+               return;
+             }
+
+             // Store the part with its ETag
+             parts.push({
+               PartNumber: partNumber,
+               ETag: etag.replace(/"/g, '')
+             });
+             uploadedParts++;
+
+             // Update progress
+             const progressPercent = Math.round((uploadedParts / totalParts) * 100);
+             console.log(`[S3Multipart] Progress: ${uploadedParts}/${totalParts} parts (${progressPercent}%)`);
+
+             if (uploadedParts < totalParts) {
+               entry.progress(progressPercent < 100 ? progressPercent : 99);
+               uploadNextPart(); // Start next part
+             } else {
+               // All parts complete!
+               const completionData = {
+                 complete: true,
+                 upload_id: upload_id,
+                 key: key,
+                 parts: parts.sort((a, b) => a.PartNumber - b.PartNumber)
+               };
+               console.log(`[S3Multipart] All parts complete! Sending completion data`);
+               entry.complete(completionData);
+             }
+           } else {
+             // Upload failed - retry with exponential backoff
+             console.error(`[S3Multipart] Part ${partNumber} failed with status ${xhr.status}, attempt ${retryCount + 1}`);
+
+             if (retryCount < MAX_RETRIES) {
+               // Exponential backoff: 1s, 2s, 4s, max 10s
+               const delay = Math.min(1000 * (2 ** retryCount), 10000);
+               console.log(`[S3Multipart] Retrying part ${partNumber} in ${delay}ms...`);
+
+               setTimeout(() => {
+                 uploadPart(index, retryCount + 1);
+               }, delay);
+             } else {
+               // Max retries exceeded - fatal error
+               console.error(`[S3Multipart] Part ${partNumber} failed after ${MAX_RETRIES} retries, aborting upload`);
+               entry.error(`Part ${partNumber} failed after ${MAX_RETRIES} attempts. Upload aborted.`);
+               hasError = true;
+             }
+           }
+         };
+
+         xhr.onerror = () => {
+           activeUploads--;
+           console.error(`[S3Multipart] Network error on part ${partNumber}, attempt ${retryCount + 1}`);
+
+           if (retryCount < MAX_RETRIES) {
+             const delay = Math.min(1000 * (2 ** retryCount), 10000);
+             console.log(`[S3Multipart] Retrying part ${partNumber} after network error in ${delay}ms...`);
+
+             setTimeout(() => {
+               uploadPart(index, retryCount + 1);
+             }, delay);
+           } else {
+             console.error(`[S3Multipart] Part ${partNumber} network error after ${MAX_RETRIES} retries, aborting upload`);
+             entry.error(`Part ${partNumber} network error after ${MAX_RETRIES} attempts. Upload aborted.`);
+             hasError = true;
+           }
+         };
+
+         xhr.open('PUT', url, true);
+         xhr.send(chunk);
+         activeUploads++;
+       };
+
+       // Upload next part if we haven't hit the concurrency limit
+       const uploadNextPart = () => {
+         while (partIndex < totalParts && activeUploads < MAX_CONCURRENT && !hasError) {
+           uploadPart(partIndex);
+           partIndex++;
+         }
+       };
+
+       // Start initial batch of uploads
+       uploadNextPart();
+     });
+   };
+ }
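Note: the uploaders above only consume metadata handed to them by the server (`url`/`fields` for S3; `upload_id`, `part_urls`, `chunk_size`, and `key` plus a completion step for S3Multipart). The sketch below is not part of this diff; it only illustrates one way that metadata could be produced with boto3. The bucket name, key layout, helper names, and 50 MB chunk size are assumptions, not pyview requirements.

import math

import boto3

s3 = boto3.client("s3")
BUCKET = "my-upload-bucket"  # assumption: replace with your bucket


def presign_simple_post(key: str, max_size: int) -> dict:
    """Metadata for the simple `S3` uploader: a presigned POST url + fields."""
    post = s3.generate_presigned_post(
        Bucket=BUCKET,
        Key=key,
        Conditions=[["content-length-range", 0, max_size]],
        ExpiresIn=3600,
    )
    # The JS uploader reads entry.meta.url and entry.meta.fields
    return {"uploader": "S3", "url": post["url"], "fields": post["fields"]}


def presign_multipart(key: str, file_size: int, chunk_size: int = 50 * 1024 * 1024) -> dict:
    """Metadata for the `S3Multipart` uploader: upload_id, part_urls, chunk_size, key."""
    mpu = s3.create_multipart_upload(Bucket=BUCKET, Key=key)
    upload_id = mpu["UploadId"]
    num_parts = math.ceil(file_size / chunk_size)
    part_urls = [
        s3.generate_presigned_url(
            "upload_part",
            Params={"Bucket": BUCKET, "Key": key, "UploadId": upload_id, "PartNumber": n},
            ExpiresIn=3600,
        )
        for n in range(1, num_parts + 1)
    ]
    return {
        "uploader": "S3Multipart",
        "key": key,
        "upload_id": upload_id,
        "part_urls": part_urls,
        "chunk_size": chunk_size,
    }


def complete_multipart(key: str, upload_id: str, parts: list[dict]) -> None:
    """Finalize using the {PartNumber, ETag} list the client sends on completion."""
    s3.complete_multipart_upload(
        Bucket=BUCKET,
        Key=key,
        UploadId=upload_id,
        MultipartUpload={"Parts": parts},
    )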
pyview/stream.py ADDED
@@ -0,0 +1,308 @@
+ """
+ Phoenix LiveView Streams implementation for pyview.
+
+ Streams provide efficient rendering of large collections by:
+ - Not keeping items in server memory (only operations are tracked)
+ - Sending only changed items over the wire
+ - Letting the client manage DOM state
+
+ Example usage:
+     @dataclass
+     class ChatContext:
+         messages: Stream[Message]
+
+     class ChatLive(LiveView[ChatContext]):
+         async def mount(self, socket, session):
+             messages = await load_messages()
+             socket.context = ChatContext(
+                 messages=Stream(messages, name="messages")
+             )
+
+         async def handle_event(self, event, payload, socket):
+             if event == "send":
+                 msg = await create_message(payload["text"])
+                 socket.context.messages.insert(msg)
+             elif event == "delete":
+                 socket.context.messages.delete_by_id(f"messages-{payload['id']}")
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Any, Callable, Generic, Iterator, TypeVar
+
+ T = TypeVar("T")
+
+
+ @dataclass
+ class StreamInsert:
+     """Represents a pending insert operation."""
+
+     dom_id: str
+     item: Any
+     at: int
+     limit: int | None
+     update_only: bool
+
+
+ @dataclass
+ class StreamOps:
+     """Pending stream operations to be sent to the client."""
+
+     ref: str
+     inserts: list[StreamInsert] = field(default_factory=list)
+     deletes: list[str] = field(default_factory=list)
+     reset: bool = False
+
+     def has_operations(self) -> bool:
+         """Check if there are any pending operations."""
+         return bool(self.inserts or self.deletes or self.reset)
+
+
+ class Stream(Generic[T]):
+     """
+     A stream collection for efficient list rendering.
+
+     Streams track operations (inserts, deletes) rather than keeping all items
+     in memory. This matches Phoenix LiveView's design where the server doesn't
+     maintain the full list - only the client does.
+
+     Args:
+         items: Initial items to populate the stream (optional)
+         name: Stream reference name (required) - used in wire format and DOM IDs
+         dom_id: Function to generate DOM ID from an item (optional)
+             Defaults to using item.id or item["id"]
+
+     Example:
+         # With dataclass items
+         stream = Stream(users, name="users")  # Uses user.id -> "users-{id}"
+
+         # With dict items
+         stream = Stream(items, name="items", dom_id=lambda x: f"item-{x['uuid']}")
+
+         # Operations
+         stream.insert(new_user)           # Append
+         stream.insert(new_user, at=0)     # Prepend
+         stream.insert(user, at=2)         # Insert at index
+         stream.delete(user)               # Delete by item
+         stream.delete_by_id("users-123")  # Delete by DOM ID
+         stream.reset(new_users)           # Clear and replace all
+     """
+
+     def __init__(
+         self,
+         items: list[T] | None = None,
+         *,
+         name: str,
+         dom_id: Callable[[T], str] | None = None,
+     ):
+         if not name:
+             raise ValueError("Stream 'name' is required")
+
+         self.name = name
+         self._dom_id_fn = dom_id or self._default_dom_id
+
+         # Pending operations (cleared after each render)
+         self._ops = StreamOps(ref=name)
+
+         # Track items for iteration (only pending inserts, not full history)
+         # This is used by templates to render the items
+         self._pending_items: list[tuple[str, T]] = []
+
+         # Initialize with items if provided
+         if items:
+             for item in items:
+                 self._do_insert(item, at=-1, limit=None, update_only=False)
+
+     def _default_dom_id(self, item: T) -> str:
+         """Default DOM ID generator - uses item.id or item['id']."""
+         if hasattr(item, "id"):
+             return f"{self.name}-{item.id}"  # type: ignore[union-attr]
+         if isinstance(item, dict) and "id" in item:
+             return f"{self.name}-{item['id']}"
+         raise ValueError(
+             f"Cannot generate DOM ID for item {item!r}. "
+             f"Item must have an 'id' attribute/key, or provide a dom_id function."
+         )
+
+     def _do_insert(self, item: T, at: int, limit: int | None, update_only: bool) -> str:
+         """Internal insert implementation."""
+         dom_id = self._dom_id_fn(item)
+         self._ops.inserts.append(
+             StreamInsert(
+                 dom_id=dom_id,
+                 item=item,
+                 at=at,
+                 limit=limit,
+                 update_only=update_only,
+             )
+         )
+         self._pending_items.append((dom_id, item))
+         return dom_id
+
+     def insert(
+         self,
+         item: T,
+         *,
+         at: int = -1,
+         limit: int | None = None,
+         update_only: bool = False,
+     ) -> str:
+         """
+         Insert or update an item in the stream.
+
+         Args:
+             item: The item to insert
+             at: Position to insert at
+                 -1 = append (end)
+                 0 = prepend (beginning)
+                 N = specific index
+             limit: Maximum items to keep (client-side enforcement)
+                 Positive = keep first N (removes from end)
+                 Negative = keep last N (removes from beginning)
+             update_only: If True, only update existing items (don't add new)
+
+         Returns:
+             The DOM ID of the inserted item
+
+         Note:
+             If an item with the same DOM ID already exists in the client DOM,
+             it will be updated in place (not moved) regardless of the 'at' value.
+         """
+         return self._do_insert(item, at, limit, update_only)
+
+     def insert_many(
+         self,
+         items: list[T],
+         *,
+         at: int = -1,
+         limit: int | None = None,
+     ) -> list[str]:
+         """
+         Insert multiple items.
+
+         Args:
+             items: List of items to insert
+             at: Position for all items (-1=append, 0=prepend, N=index)
+             limit: Maximum items to keep
+
+         Returns:
+             List of DOM IDs for inserted items
+         """
+         return [self._do_insert(item, at, limit, False) for item in items]
+
+     def delete(self, item: T) -> str:
+         """
+         Delete an item from the stream.
+
+         Args:
+             item: The item to delete (used to compute DOM ID)
+
+         Returns:
+             The DOM ID of the deleted item
+         """
+         dom_id = self._dom_id_fn(item)
+         self._ops.deletes.append(dom_id)
+         return dom_id
+
+     def delete_by_id(self, dom_id: str) -> str:
+         """
+         Delete an item by its DOM ID.
+
+         Args:
+             dom_id: The DOM ID to delete (e.g., "users-123")
+
+         Returns:
+             The DOM ID that was deleted
+         """
+         self._ops.deletes.append(dom_id)
+         return dom_id
+
+     def reset(self, items: list[T] | None = None) -> None:
+         """
+         Clear all items and optionally replace with new ones.
+
+         This sends a reset signal to the client, which removes all existing
+         stream items from the DOM before applying any new inserts.
+
+         Args:
+             items: New items to populate after clearing (optional)
+         """
+         self._ops.reset = True
+         self._ops.inserts.clear()
+         self._pending_items.clear()
+
+         if items:
+             for item in items:
+                 self._do_insert(item, at=-1, limit=None, update_only=False)
+
+     def __iter__(self) -> Iterator[tuple[str, T]]:
+         """
+         Iterate over pending items as (dom_id, item) tuples.
+
+         This is used by templates to render stream items:
+
+             {% for dom_id, user in users %}
+               <div id="{{ dom_id }}">{{ user.name }}</div>
+             {% endfor %}
+         """
+         return iter(self._pending_items)
+
+     def __len__(self) -> int:
+         """Return the number of pending items."""
+         return len(self._pending_items)
+
+     def __bool__(self) -> bool:
+         """Stream is truthy if it has pending operations or items."""
+         return bool(self._pending_items) or self._ops.has_operations()
+
+     # --- Internal methods for rendering ---
+
+     def _get_pending_ops(self) -> StreamOps | None:
+         """
+         Get pending operations and clear them.
+
+         Called by the renderer after processing the stream.
+         Returns None if there are no pending operations.
+         """
+         if not self._ops.has_operations():
+             return None
+
+         ops = self._ops
+         self._ops = StreamOps(ref=self.name)
+         self._pending_items.clear()
+         return ops
+
+     def _to_wire_format(self, ops: StreamOps) -> list:
+         """
+         Convert operations to Phoenix LiveView 0.19+/0.20 wire format.
+
+         Format: [stream_ref, [[dom_id, at, limit], ...], [delete_ids], reset?]
+
+         The Phoenix JS client expects:
+         - stream_ref: the stream name/reference
+         - inserts: array of [dom_id, at, limit] tuples
+           - dom_id: string identifier for the DOM element
+           - at: position (-1=append, 0=prepend, N=specific index)
+           - limit: max items to keep (positive=remove from start, negative=remove from end, null=no limit)
+         - deletes: array of dom_ids to remove
+         - reset: ONLY included if true (omitted = no reset, because JS checks `reset !== undefined`)
+
+         Note: update_only is stored internally but not sent over wire in 0.20 format.
+         """
+         inserts = [[ins.dom_id, ins.at, ins.limit] for ins in ops.inserts]
+
+         # Only include reset if true - JS checks `reset !== undefined` to trigger reset
+         if ops.reset:
+             return [ops.ref, inserts, ops.deletes, True]
+         else:
+             return [ops.ref, inserts, ops.deletes]
+
+     def _get_wire_format(self) -> list | None:
+         """
+         Convenience method to get wire format directly.
+
+         Returns None if no pending operations.
+         """
+         ops = self._get_pending_ops()
+         if ops is None:
+             return None
+         return self._to_wire_format(ops)
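Note: for reference, a small usage sketch (not part of the diff) exercising the `Stream` class added above, showing the wire format documented in `_to_wire_format()`. The `Message` dataclass is a hypothetical stand-in for your own item type.

from dataclasses import dataclass

from pyview.stream import Stream


@dataclass
class Message:
    id: int
    text: str


messages = Stream([Message(1, "hi"), Message(2, "hello")], name="messages")
messages.delete_by_id("messages-2")

# Pending inserts iterate as (dom_id, item) pairs for the template:
#   [("messages-1", Message(1, "hi")), ("messages-2", Message(2, "hello"))]
print(list(messages))

# Draining the pending ops yields the 0.19+/0.20 wire format; `reset` is
# omitted because it was never set:
#   ["messages", [["messages-1", -1, None], ["messages-2", -1, None]], ["messages-2"]]
print(messages._get_wire_format())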
pyview/template/__init__.py
@@ -1,5 +1,9 @@
+ import sys
+
  from pyview.vendor.ibis import Template
- from .live_template import LiveTemplate, template_file, RenderedContent, LiveRender
+
+ from .context_processor import context_processor
+ from .live_template import LiveRender, LiveTemplate, RenderedContent, template_file
  from .root_template import RootTemplate, RootTemplateContext, defaultRootTemplate
  from .utils import find_associated_css, find_associated_file
  from .context_processor import context_processor
@@ -17,3 +21,9 @@ __all__ = [
      "find_associated_file",
      "context_processor",
  ]
+
+ # T-string template support is only available on Python 3.14+
+ if sys.version_info >= (3, 14):
+     from .template_view import TemplateView
+
+     __all__.append("TemplateView")
pyview/template/live_template.py
@@ -1,10 +1,13 @@
- from pyview.vendor.ibis import Template
- from typing import Any, Union, Protocol, Optional, ClassVar
- from dataclasses import asdict, Field
- from .serializer import serialize
  import os.path
- from pyview.template.context_processor import apply_context_processors
+ from dataclasses import Field, asdict
+ from typing import Any, ClassVar, Optional, Protocol, Union
+
+ from pyview.components import SocketWithComponents
  from pyview.meta import PyViewMeta
+ from pyview.template.context_processor import apply_context_processors
+ from pyview.vendor.ibis import Template
+
+ from .serializer import serialize


  class DataclassInstance(Protocol):
@@ -47,7 +50,7 @@ class LiveTemplate:
  class RenderedContent(Protocol):
      def tree(self) -> dict[str, Any]: ...

-     def text(self) -> str: ...
+     def text(self, socket: Optional[SocketWithComponents] = None) -> str: ...


  class LiveRender:
@@ -59,7 +62,8 @@ class LiveRender:
      def tree(self) -> dict[str, Any]:
          return self.template.tree(self.assigns, self.meta)

-     def text(self) -> str:
+     def text(self, socket: Optional[SocketWithComponents] = None) -> str:
+         # socket parameter unused for Ibis templates (only used by t-string templates)
          return self.template.text(self.assigns, self.meta)


@@ -77,7 +81,7 @@ def template_file(filename: str) -> Optional[LiveTemplate]:
          if cached_mtime == mtime:
              return cached_template

-     with open(filename, "r") as f:
+     with open(filename) as f:
          t = LiveTemplate(Template(f.read(), template_id=filename))
          _cache[filename] = (mtime, t)
          return t