studyctl 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- studyctl/__init__.py +3 -0
- studyctl/calendar.py +140 -0
- studyctl/cli/__init__.py +56 -0
- studyctl/cli/_config.py +128 -0
- studyctl/cli/_content.py +462 -0
- studyctl/cli/_lazy.py +35 -0
- studyctl/cli/_review.py +491 -0
- studyctl/cli/_schedule.py +125 -0
- studyctl/cli/_setup.py +164 -0
- studyctl/cli/_shared.py +83 -0
- studyctl/cli/_state.py +69 -0
- studyctl/cli/_sync.py +156 -0
- studyctl/cli/_web.py +228 -0
- studyctl/content/__init__.py +5 -0
- studyctl/content/markdown_converter.py +271 -0
- studyctl/content/models.py +31 -0
- studyctl/content/notebooklm_client.py +434 -0
- studyctl/content/splitter.py +159 -0
- studyctl/content/storage.py +105 -0
- studyctl/content/syllabus.py +416 -0
- studyctl/history.py +982 -0
- studyctl/maintenance.py +69 -0
- studyctl/mcp/__init__.py +1 -0
- studyctl/mcp/server.py +58 -0
- studyctl/mcp/tools.py +234 -0
- studyctl/pdf.py +89 -0
- studyctl/review_db.py +277 -0
- studyctl/review_loader.py +375 -0
- studyctl/scheduler.py +242 -0
- studyctl/services/__init__.py +6 -0
- studyctl/services/content.py +39 -0
- studyctl/services/review.py +127 -0
- studyctl/settings.py +367 -0
- studyctl/shared.py +425 -0
- studyctl/state.py +120 -0
- studyctl/sync.py +229 -0
- studyctl/tui/__main__.py +33 -0
- studyctl/tui/app.py +395 -0
- studyctl/tui/study_cards.py +396 -0
- studyctl/web/__init__.py +1 -0
- studyctl/web/app.py +68 -0
- studyctl/web/routes/__init__.py +1 -0
- studyctl/web/routes/artefacts.py +57 -0
- studyctl/web/routes/cards.py +86 -0
- studyctl/web/routes/courses.py +91 -0
- studyctl/web/routes/history.py +69 -0
- studyctl/web/server.py +260 -0
- studyctl/web/static/app.js +853 -0
- studyctl/web/static/icon-192.svg +4 -0
- studyctl/web/static/icon-512.svg +4 -0
- studyctl/web/static/index.html +50 -0
- studyctl/web/static/manifest.json +21 -0
- studyctl/web/static/style.css +657 -0
- studyctl/web/static/sw.js +14 -0
- studyctl-2.0.0.dist-info/METADATA +49 -0
- studyctl-2.0.0.dist-info/RECORD +58 -0
- studyctl-2.0.0.dist-info/WHEEL +4 -0
- studyctl-2.0.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,416 @@
|
|
|
1
|
+
"""Syllabus generation, parsing, and state management for chunked audio/video."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import contextlib
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
import re
|
|
10
|
+
import tempfile
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from enum import StrEnum
|
|
13
|
+
from typing import TYPE_CHECKING, Any
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)

# Filename of the persisted syllabus state file (see read_state/write_state).
STATE_FILENAME = "syllabus_state.json"

# Strips every character except word chars, whitespace, and hyphens;
# used by title_case_name() to clean display titles.
_TITLE_CLEAN_RE = re.compile(r"[^\w\s-]")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def title_case_name(name: str) -> str:
    """Normalise an episode title for display in the NotebookLM UI.

    Strips punctuation (keeping word characters, whitespace, and hyphens),
    collapses whitespace runs, truncates to 100 characters, and title-cases
    the result. Unlike sanitize_filename(), which lowercases for filesystem
    paths, this keeps a readable capitalised form for display names.

    Args:
        name: Raw episode title.

    Returns:
        Cleaned, title-cased string of at most 100 characters.
    """
    stripped = re.sub(r"[^\w\s-]", "", name)
    collapsed = " ".join(stripped.split())
    return collapsed[:100].strip().title()
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
# Matches: Episode 1: "Title Here"\nChapters: 1, 2\nSummary: ...
# Groups: (episode number, quoted title, comma-separated chapter list, summary).
_EPISODE_RE = re.compile(
    r'Episode\s+(\d+):\s*"([^"]+)"\s*\n'
    r"Chapters?:\s*([\d,\s]+)\s*\n"
    r"Summary:\s*(.+)",
    re.IGNORECASE,
)

# Extracts the chapter number from source titles like "chapter_12";
# used by map_sources_to_chapters().
_CHAPTER_NUM_RE = re.compile(r"chapter_(\d+)", re.IGNORECASE)

# LLM prompt; build_prompt() fills {source_list} and {max_chapters}.
# The expected reply format is exactly what _EPISODE_RE parses.
SYLLABUS_PROMPT_TEMPLATE = """\
I have uploaded several sources, each representing a sequential chapter \
from a single technical eBook. Here are the chapters:

{source_list}

Please divide these chapters into a "Podcast Syllabus" consisting of \
logical chunks. Strictly limit each chunk to at most {max_chapters} \
chapters. Group them by related technical concepts.

Format your response EXACTLY as follows, one entry per chunk:

Episode 1: "Episode Title Here"
Chapters: 1, 2
Summary: One or two sentence summary.

Episode 2: "Episode Title Here"
Chapters: 3
Summary: One or two sentence summary.

Use ONLY the chapter numbers listed above. Output ONLY the syllabus."""
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class SyllabusParseError(Exception):
    """Raised when the LLM syllabus response cannot be parsed.

    Raised by parse_syllabus_response() when no episode entries match the
    expected format, or when some chapters are left unassigned.
    """
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class SyllabusStateError(Exception):
    """Raised when the state file is missing, corrupt, or invalid.

    Raised by read_state() for a missing or unreadable file and by
    SyllabusState.from_json() for structurally invalid data.
    """
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
class ChunkStatus(StrEnum):
    """Lifecycle status of a syllabus chunk's artifact generation."""

    PENDING = "pending"  # not yet started
    GENERATING = "generating"  # in progress; resumed first by get_next_chunk()
    COMPLETED = "completed"  # finished; never re-selected
    FAILED = "failed"  # last attempt failed; retried before new pending work
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@dataclass
class ChunkArtifact:
    """Tracks a single artifact (audio or video) within a chunk."""

    task_id: str = ""
    status: str = "pending"

    def to_json(self) -> dict[str, str]:
        """Serialise to a plain dict for JSON persistence."""
        return {"task_id": self.task_id, "status": self.status}

    @classmethod
    def from_json(cls, data: dict[str, str]) -> ChunkArtifact:
        """Rebuild from a parsed-JSON dict; missing keys fall back to defaults."""
        task_id = data.get("task_id", "")
        status = data.get("status", "pending")
        return cls(task_id=task_id, status=status)
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
@dataclass
class SyllabusChunk:
    """A single episode in the syllabus plan."""

    episode: int
    title: str
    chapters: list[int]
    source_ids: list[str]
    chapter_titles: list[str] = field(default_factory=list)
    status: ChunkStatus = ChunkStatus.PENDING
    artifacts: dict[str, ChunkArtifact] = field(default_factory=dict)

    def to_json(self) -> dict[str, Any]:
        """Serialise to a JSON-ready dict (enum -> value, artifacts -> dicts)."""
        serialised_artifacts = {
            name: artifact.to_json() for name, artifact in self.artifacts.items()
        }
        return {
            "episode": self.episode,
            "title": self.title,
            "chapters": self.chapters,
            "source_ids": self.source_ids,
            "chapter_titles": self.chapter_titles,
            "status": self.status.value,
            "artifacts": serialised_artifacts,
        }

    @classmethod
    def from_json(cls, data: dict[str, Any]) -> SyllabusChunk:
        """Rebuild a chunk from parsed JSON; optional keys get defaults."""
        raw_artifacts = data.get("artifacts", {})
        return cls(
            episode=data["episode"],
            title=data["title"],
            chapters=data["chapters"],
            source_ids=data["source_ids"],
            chapter_titles=data.get("chapter_titles", []),
            status=ChunkStatus(data.get("status", "pending")),
            artifacts={
                name: ChunkArtifact.from_json(raw)
                for name, raw in raw_artifacts.items()
            },
        )
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
@dataclass
class SyllabusState:
    """Root state object for the syllabus workflow."""

    notebook_id: str
    book_name: str
    created: str
    max_chapters: int
    generate_audio: bool
    generate_video: bool
    chunks: dict[int, SyllabusChunk]

    def to_json(self) -> dict[str, Any]:
        """Serialise to a JSON-ready dict; chunks are stored as a list."""
        payload: dict[str, Any] = {
            "notebook_id": self.notebook_id,
            "book_name": self.book_name,
            "created": self.created,
            "max_chapters": self.max_chapters,
            "generate_audio": self.generate_audio,
            "generate_video": self.generate_video,
        }
        payload["chunks"] = [chunk.to_json() for chunk in self.chunks.values()]
        return payload

    @classmethod
    def from_json(cls, data: dict[str, Any]) -> SyllabusState:
        """Load state from parsed JSON with structural validation.

        Raises:
            SyllabusStateError: If required fields are missing or malformed.
        """
        # Parse chunks first; a missing "chunks" key or any malformed chunk
        # entry is reported as a corrupt state file.
        try:
            parsed = [SyllabusChunk.from_json(raw) for raw in data["chunks"]]
        except (KeyError, TypeError, ValueError) as exc:
            raise SyllabusStateError(f"Corrupt state file: {exc}") from exc

        by_episode = {chunk.episode: chunk for chunk in parsed}
        try:
            return cls(
                notebook_id=data["notebook_id"],
                book_name=data["book_name"],
                created=data.get("created", ""),
                max_chapters=data.get("max_chapters", 2),
                generate_audio=data.get("generate_audio", True),
                generate_video=data.get("generate_video", True),
                chunks=by_episode,
            )
        except KeyError as exc:
            raise SyllabusStateError(f"Missing required field: {exc}") from exc
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def build_prompt(sources: list[tuple[str, str]], max_chapters: int) -> str:
    """Build the syllabus generation prompt with numbered source titles.

    Args:
        sources: List of (source_id, title) tuples, in chapter order.
        max_chapters: Maximum chapters per episode.

    Returns:
        Formatted prompt string.
    """
    numbered = []
    for index, (_, title) in enumerate(sources, start=1):
        numbered.append(f"{index}. {title}")
    return SYLLABUS_PROMPT_TEMPLATE.format(
        source_list="\n".join(numbered),
        max_chapters=max_chapters,
    )
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def parse_syllabus_response(
    response: str,
    source_map: dict[int, str],
    title_map: dict[int, str] | None = None,
) -> dict[int, SyllabusChunk]:
    """Parse LLM syllabus response into chunks.

    Accept-or-fail semantics: the result is returned only when the parsed
    episodes collectively cover every known chapter; otherwise a
    SyllabusParseError is raised.

    Args:
        response: Raw LLM response text.
        source_map: Mapping of chapter_number -> source_id.
        title_map: Mapping of chapter_number -> source title.

    Returns:
        Dict of episode_number -> SyllabusChunk.

    Raises:
        SyllabusParseError: If the response cannot be fully parsed.
    """
    logger.debug("Raw syllabus response: %s", response)
    titles = title_map if title_map else {}

    episode_matches = _EPISODE_RE.findall(response)
    if not episode_matches:
        raise SyllabusParseError("No episodes found in LLM response")

    chunks: dict[int, SyllabusChunk] = {}
    assigned: set[int] = set()

    for match in episode_matches:
        ep_str, raw_title, chapters_str = match[0], match[1], match[2]
        tokens = (part.strip() for part in chapters_str.split(","))
        chapter_nums = [int(tok) for tok in tokens if tok]
        assigned.update(chapter_nums)

        episode = int(ep_str)
        chunks[episode] = SyllabusChunk(
            episode=episode,
            title=raw_title.strip(),
            chapters=chapter_nums,
            # Chapter numbers without a known source/title are skipped.
            source_ids=[source_map[n] for n in chapter_nums if n in source_map],
            chapter_titles=[titles[n] for n in chapter_nums if n in titles],
        )

    unassigned = set(source_map) - assigned
    if unassigned:
        raise SyllabusParseError(f"Chapters {sorted(unassigned)} not assigned to any episode")

    return chunks
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def build_fixed_size_chunks(
    source_map: dict[int, str],
    max_chapters: int,
    title_map: dict[int, str] | None = None,
) -> dict[int, SyllabusChunk]:
    """Build fixed-size chapter chunks as a fallback.

    Args:
        source_map: Mapping of chapter_number -> source_id.
        max_chapters: Maximum chapters per chunk.
        title_map: Mapping of chapter_number -> source title.

    Returns:
        Dict of episode_number -> SyllabusChunk.

    Raises:
        ValueError: If max_chapters < 1 or source_map is empty.
    """
    if max_chapters < 1:
        raise ValueError("max_chapters must be >= 1")
    if not source_map:
        raise ValueError("source_map is empty")

    titles = title_map if title_map else {}
    ordered = sorted(source_map)
    chunks: dict[int, SyllabusChunk] = {}

    # Slice the ordered chapter list into consecutive groups of max_chapters.
    starts = range(0, len(ordered), max_chapters)
    for episode, start in enumerate(starts, start=1):
        group = ordered[start : start + max_chapters]
        chunks[episode] = SyllabusChunk(
            episode=episode,
            title=f"Chapters {group[0]}-{group[-1]}",
            chapters=group,
            source_ids=[source_map[n] for n in group],
            chapter_titles=[titles[n] for n in group if n in titles],
        )

    return chunks
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
def map_sources_to_chapters(
    sources: list[tuple[str, str]],
) -> tuple[dict[int, str], dict[int, str]]:
    """Map chapter numbers to source IDs and titles by parsing source titles.

    All-or-nothing: if any source title fails to parse, falls back to
    positional indexing for all sources.

    Args:
        sources: List of (source_id, title) tuples.

    Returns:
        Tuple of (chapter_number -> source_id, chapter_number -> title).
    """
    if not sources:
        return {}, {}

    id_map: dict[int, str] = {}
    title_map: dict[int, str] = {}
    for source_id, title in sources:
        found = _CHAPTER_NUM_RE.search(title or "")
        if found is None:
            logger.warning(
                "Cannot parse chapter number from '%s'; using positional fallback",
                title,
            )
            # One unparseable title invalidates the whole mapping: fall back
            # to 1-based positional numbering for every source.
            positional_ids = {pos: sid for pos, (sid, _) in enumerate(sources, start=1)}
            positional_titles = {pos: t for pos, (_, t) in enumerate(sources, start=1)}
            return positional_ids, positional_titles
        number = int(found.group(1))
        id_map[number] = source_id
        title_map[number] = title or ""

    return id_map, title_map
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
def read_state(state_path: Path) -> SyllabusState:
    """Load syllabus state from a JSON file.

    Args:
        state_path: Path to the state file.

    Returns:
        Parsed SyllabusState.

    Raises:
        SyllabusStateError: If the file is missing, corrupt, or invalid.
    """
    if not state_path.is_file():
        raise SyllabusStateError(
            f"No syllabus found at {state_path}. Run 'studyctl content syllabus' first."
        )

    try:
        raw = state_path.read_text(encoding="utf-8")
        data = json.loads(raw)
    except (json.JSONDecodeError, OSError) as exc:
        raise SyllabusStateError(f"Cannot read state file: {exc}") from exc

    # Structural validation happens in from_json (raises SyllabusStateError).
    return SyllabusState.from_json(data)
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
def write_state(state: SyllabusState, state_path: Path) -> None:
    """Atomically write syllabus state to a JSON file.

    Writes to a temp file in the target directory, fsyncs it, then renames
    it over the destination so a crash never leaves a partial file.

    Args:
        state: The state to persist.
        state_path: Target file path.
    """
    state_path.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(state.to_json(), indent=2, ensure_ascii=False)

    # Temp file lives in the same directory so os.replace stays atomic
    # (no cross-filesystem rename).
    fd, tmp_name = tempfile.mkstemp(
        dir=str(state_path.parent),
        suffix=".tmp",
        prefix=".syllabus_state_",
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as handle:
            handle.write(payload + "\n")
            handle.flush()
            os.fsync(handle.fileno())
        os.replace(tmp_name, str(state_path))
    except BaseException:
        # Best-effort cleanup of the orphaned temp file, then re-raise.
        with contextlib.suppress(OSError):
            os.unlink(tmp_name)
        raise
|
|
393
|
+
|
|
394
|
+
|
|
395
|
+
def get_next_chunk(state: SyllabusState) -> SyllabusChunk | None:
    """Select the next chunk to generate, by priority.

    Priority: GENERATING (resume interrupted) > FAILED (retry) > PENDING (new).
    Ties within a status tier are broken by lowest episode number.

    Args:
        state: Current syllabus state.

    Returns:
        The next chunk to process, or None if all are completed.
    """
    # Sort once by episode; the previous version re-sorted the chunk list
    # for every status tier, repeating loop-invariant work.
    ordered = sorted(state.chunks.values(), key=lambda c: c.episode)
    for target_status in (ChunkStatus.GENERATING, ChunkStatus.FAILED, ChunkStatus.PENDING):
        for chunk in ordered:
            if chunk.status == target_status:
                return chunk
    return None
|
|
412
|
+
|
|
413
|
+
|
|
414
|
+
def has_non_pending_chunks(state: SyllabusState) -> bool:
    """Check if any chunks have progressed beyond pending."""
    statuses = (chunk.status for chunk in state.chunks.values())
    return not all(status == ChunkStatus.PENDING for status in statuses)
|