@ztffn/presentation-generator-plugin 1.3.3 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +4 -3
- package/README.md +27 -17
- package/agents/presentation-content.md +1 -2
- package/agents/presentation-narrative.md +4 -4
- package/agents/presentation-style.md +196 -0
- package/hooks/enforce-style-schema.sh +70 -0
- package/hooks/hooks.json +49 -0
- package/hooks/pre-validate-presentation-json.sh +62 -0
- package/hooks/validate-presentation-json.sh +64 -0
- package/package.json +1 -1
- package/scripts/outline_to_graph.py +413 -0
- package/scripts/validate_draft.py +2 -1
- package/skills/graph-json-spec/SKILL.md +73 -620
- package/skills/presentation-generator/SKILL.md +82 -35
- package/skills/presentation-generator/presentation-guide.md +2 -2
- package/skills/slide-recipes/SKILL.md +326 -0
- package/agents/presentation-design.md +0 -187
|
@@ -0,0 +1,413 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
outline_to_graph.py — Deterministic markdown-to-ReactFlow converter.
|
|
4
|
+
Parses a presentation outline markdown and emits valid graph JSON with
|
|
5
|
+
correct node wrappers, positions, edges, and handles. Default visual
|
|
6
|
+
treatment on all slides; a styling agent applies visual refinements later.
|
|
7
|
+
Usage: python3 outline_to_graph.py <outline.md> [-o <output.json>]
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import sys
|
|
11
|
+
import re
|
|
12
|
+
import json
|
|
13
|
+
import argparse
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from datetime import datetime, timezone
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# ---------------------------------------------------------------------------
|
|
19
|
+
# Parsing helpers
|
|
20
|
+
# ---------------------------------------------------------------------------
|
|
21
|
+
|
|
22
|
+
def slugify(text):
|
|
23
|
+
"""Convert a title string to a kebab-case slug."""
|
|
24
|
+
text = text.lower().strip()
|
|
25
|
+
text = re.sub(r"[''']", "", text) # remove apostrophes
|
|
26
|
+
text = re.sub(r"[^a-z0-9\s-]", "", text) # remove non-alphanum
|
|
27
|
+
text = re.sub(r"[\s_]+", "-", text) # spaces/underscores to hyphens
|
|
28
|
+
text = re.sub(r"-{2,}", "-", text) # collapse multiple hyphens
|
|
29
|
+
return text.strip("-")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def parse_spine(text):
    """Return the ordered list of spine slide titles from the SPINE section.

    Matches numbered list items of the form ``1. **Title**`` and returns
    the bolded titles in document order.
    """
    item_re = re.compile(r"^\d+\.\s+\*\*(.+?)\*\*", re.MULTILINE)
    return [match.group(1).strip() for match in item_re.finditer(text)]
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def parse_drilldowns(text):
    """Map each parent spine number to its drill-down slides.

    Returns ``{parent_spine_num: [(major, minor, title), ...]}`` where a
    ``### Under Spine N`` header opens a group and each ``- **N.M** Title:``
    bullet beneath it contributes one child tuple.
    """
    header_re = re.compile(r"###\s+Under Spine (\d+)")
    child_re = re.compile(r"-\s+\*\*(\d+)\.(\d+)\*\*\s+(.+?):\s*(.*)")

    result = {}
    parent = None
    for line in text.splitlines():
        header = header_re.match(line)
        if header:
            parent = int(header.group(1))
            result.setdefault(parent, [])
            continue
        if parent is None:
            # Bullets before any "Under Spine" header are ignored.
            continue
        child = child_re.match(line)
        if child:
            entry = (int(child.group(1)), int(child.group(2)), child.group(3).strip())
            result[parent].append(entry)
    return result
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def parse_content_blocks(text):
    """Key each CONTENT PER SLIDE block by its slide identifier ('1', '2.1', ...).

    The section is split at every ``### SPINE`` header; each chunk's header
    yields the slide id and the remainder is handed to ``extract_fields``.
    """
    header_re = re.compile(
        r"^### SPINE (\d+(?:\.\d+)?)\s*:\s*(.+?)(?:\s*\(.*\))?\s*$",
        re.MULTILINE,
    )

    blocks = {}
    # Text before the first header produces a chunk with no match; skip it.
    for chunk in re.split(r"(?=^### SPINE )", text, flags=re.MULTILINE):
        header = header_re.match(chunk)
        if header is None:
            continue
        blocks[header.group(1)] = extract_fields(chunk[header.end():])
    return blocks
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def extract_fields(text):
    """Extract the labelled fields from one slide's content block.

    Recognised markers are ``**Key message:**``, ``**Content:**``,
    ``**Speaker notes:**`` and ``**Transition to next:**``.  A field's value
    runs from the end of its marker to the start of the next marker or
    ``---`` separator line (or to the end of the text).  Markers that do
    not appear are simply absent from the result.

    Returns a dict with any of the keys: ``key_message``, ``content``,
    ``speaker_notes``, ``transition``.
    """
    # Field markers, in canonical order (only the first occurrence of each
    # is used — re.search finds the earliest match).
    markers = [
        ("key_message", r"\*\*Key message:\*\*"),
        ("content", r"\*\*Content:\*\*"),
        ("speaker_notes", r"\*\*Speaker notes:\*\*"),
        ("transition", r"\*\*Transition to next:\*\*"),
    ]

    # Collect (start, end, name) for every marker present, plus every "---"
    # separator line, then sort by position so each value can be sliced up
    # to whatever comes next.
    positions = []
    for name, pattern in markers:
        m = re.search(pattern, text)
        if m:
            positions.append((m.start(), m.end(), name))
    for m in re.finditer(r"^---\s*$", text, re.MULTILINE):
        positions.append((m.start(), m.end(), "__separator__"))

    positions.sort(key=lambda item: item[0])

    fields = {}
    for i, (start, end, name) in enumerate(positions):
        if name == "__separator__":
            continue
        # Value extends to the next marker/separator, or to end of text.
        # (Replaces the original loop-and-break next-element lookup.)
        next_pos = positions[i + 1][0] if i + 1 < len(positions) else len(text)
        fields[name] = text[end:next_pos].strip()

    return fields
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def build_content_markdown(block):
    """Assemble a slide's markdown body from its key message and content."""
    key_message = block.get("key_message")
    body = block.get("content")

    lines = []
    if key_message:
        # The key message becomes a level-2 heading, followed by a blank line.
        lines.extend([f"## {key_message}", ""])
    if body:
        lines.append(body)
    return "\n".join(lines).strip()
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
# ---------------------------------------------------------------------------
|
|
140
|
+
# Graph construction
|
|
141
|
+
# ---------------------------------------------------------------------------
|
|
142
|
+
|
|
143
|
+
def build_node(node_id, title, topic, content, notes, transition, position,
               is_first=False, is_last=False):
    """Construct one ReactFlow node dict with the default visual treatment."""
    data = {
        "label": title,
        "topic": topic,
        "content": content,
        "notes": notes,
        "centered": False,
    }

    if transition:
        data["transitionToNext"] = transition

    # Title and closing slides get the centered, branded treatment.
    if is_first or is_last:
        data["centered"] = True
        data["brandFont"] = True
        data["showBranding"] = True

    # Drop empty optional fields rather than emitting blank values.
    for optional in ("notes", "content"):
        if not data.get(optional):
            del data[optional]

    return {
        "id": node_id,
        "type": "huma",
        "position": {"x": position[0], "y": position[1]},
        "data": data,
        "style": {"width": 180, "height": 70},
        "measured": {"width": 180, "height": 70},
    }
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def make_edge_pair_horizontal(source_id, target_id):
    """Return forward/backward edges linking two horizontally adjacent nodes."""
    forward = {
        "id": f"e-{source_id}-{target_id}",
        "source": source_id,
        "target": target_id,
        "sourceHandle": "s-right",
        "targetHandle": "t-left",
    }
    backward = {
        "id": f"e-{target_id}-{source_id}",
        "source": target_id,
        "target": source_id,
        "sourceHandle": "s-left",
        "targetHandle": "t-right",
    }
    return [forward, backward]
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def make_edge_pair_vertical(parent_id, child_id):
    """Return downward/upward edges linking a parent node to a child below it."""
    downward = {
        "id": f"e-{parent_id}-{child_id}",
        "source": parent_id,
        "target": child_id,
        "sourceHandle": "s-bottom",
        "targetHandle": "t-top",
    }
    upward = {
        "id": f"e-{child_id}-{parent_id}",
        "source": child_id,
        "target": parent_id,
        "sourceHandle": "s-top",
        "targetHandle": "t-bottom",
    }
    return [downward, upward]
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# ---------------------------------------------------------------------------
|
|
219
|
+
# Main pipeline
|
|
220
|
+
# ---------------------------------------------------------------------------
|
|
221
|
+
|
|
222
|
+
def parse_outline(outline_path):
    """Parse the outline markdown at *outline_path* into the graph dict.

    Expected structure: a '# Presentation Outline: <name>' title at the very
    top, then '## SPINE', '## DRILL-DOWNS' and '## CONTENT PER SLIDE'
    sections in that order.  Returns a dict with 'meta', 'nodes' and 'edges'
    keys.  Exits the process with status 1 when the title line is missing or
    no spine slides can be found.
    """
    text = Path(outline_path).read_text(encoding="utf-8")

    # 1. Extract title
    # re.match anchors at position 0, so the title must be the first line.
    title_match = re.match(r"^#\s+Presentation Outline:\s*(.+)", text, re.MULTILINE)
    if not title_match:
        print("ERROR Could not find title line (expected '# Presentation Outline: ...')",
              file=sys.stderr)
        sys.exit(1)
    deck_title = title_match.group(1).strip()

    # 2. Split into sections
    # NOTE(review): spine_section is only populated when BOTH '## SPINE' and
    # '## DRILL-DOWNS' headers exist (likewise drilldown_section needs
    # '## CONTENT PER SLIDE' after it).  If a header is missing, the fallback
    # below recovers spine titles from content headers instead — confirm that
    # outlines are always expected to contain all three sections.
    spine_section = ""
    drilldown_section = ""
    content_section = ""

    spine_match = re.search(r"^## SPINE\b.*$", text, re.MULTILINE)
    drilldown_match = re.search(r"^## DRILL-DOWNS\b.*$", text, re.MULTILINE)
    content_match = re.search(r"^## CONTENT PER SLIDE\b.*$", text, re.MULTILINE)

    if spine_match and drilldown_match:
        spine_section = text[spine_match.start():drilldown_match.start()]
    if drilldown_match and content_match:
        drilldown_section = text[drilldown_match.start():content_match.start()]
    if content_match:
        content_section = text[content_match.start():]

    # 3. Parse each section
    spine_titles = parse_spine(spine_section)
    drilldowns = parse_drilldowns(drilldown_section)
    content_blocks = parse_content_blocks(content_section)

    # Also pick up any spine slides that appear in content but not in the SPINE list
    # (e.g. Spine 8 in the test outline).  Only appends when the content block's
    # number exceeds the current spine count, so duplicates are not created.
    content_spine_nums = sorted(
        int(k) for k in content_blocks if "." not in k
    )
    for num in content_spine_nums:
        if num > len(spine_titles):
            # Extract title from content block header
            pattern = rf"^### SPINE {num}\s*:\s*(.+?)(?:\s*\(.*\))?\s*$"
            m = re.search(pattern, content_section, re.MULTILINE)
            if m:
                spine_titles.append(m.group(1).strip())

    if not spine_titles:
        print("ERROR No spine nodes found in outline", file=sys.stderr)
        sys.exit(1)

    # 4. Build nodes and edges
    # Spine nodes sit on one horizontal row (y=0), 240px apart; the first and
    # last slides get the centered/branded treatment via build_node.
    nodes = []
    edges = []
    spine_node_ids = []
    # Track x positions of spine nodes for drill-down placement
    spine_x_positions = {}

    for i, title in enumerate(spine_titles):
        spine_num = i + 1
        node_id = slugify(title)
        x = i * 240
        y = 0
        spine_x_positions[spine_num] = x

        block = content_blocks.get(str(spine_num), {})
        content = build_content_markdown(block)
        notes = block.get("speaker_notes", "")
        transition = block.get("transition", "")
        topic = f"{spine_num:02d} / {title}"

        is_first = i == 0
        is_last = i == len(spine_titles) - 1

        node = build_node(
            node_id, title, topic, content, notes, transition,
            (x, y), is_first=is_first, is_last=is_last,
        )
        nodes.append(node)
        spine_node_ids.append(node_id)

    # Spine horizontal edges
    for i in range(len(spine_node_ids) - 1):
        edges.extend(make_edge_pair_horizontal(spine_node_ids[i], spine_node_ids[i + 1]))

    # 5. Build drill-down nodes
    # Children sit on a second row (y=150) starting under their parent's x;
    # drill-downs whose parent number has no spine node are skipped.
    for parent_num, children in sorted(drilldowns.items()):
        parent_x = spine_x_positions.get(parent_num, 0)
        parent_id = spine_node_ids[parent_num - 1] if parent_num <= len(spine_node_ids) else None
        if parent_id is None:
            continue

        child_node_ids = []
        for idx, (major, minor, title) in enumerate(children):
            node_id = slugify(title)
            x = parent_x + idx * 240
            y = 150

            slide_key = f"{major}.{minor}"
            block = content_blocks.get(slide_key, {})
            content = build_content_markdown(block)
            notes = block.get("speaker_notes", "")
            transition = block.get("transition", "")
            topic = f"{major:02d}.{minor} / {title}"

            node = build_node(node_id, title, topic, content, notes, transition, (x, y))
            nodes.append(node)
            child_node_ids.append(node_id)

        # Parent to first child (vertical); siblings chain horizontally below.
        if child_node_ids:
            edges.extend(make_edge_pair_vertical(parent_id, child_node_ids[0]))

        # Sibling horizontal edges
        for i in range(len(child_node_ids) - 1):
            edges.extend(make_edge_pair_horizontal(child_node_ids[i], child_node_ids[i + 1]))

    # 6. Build output
    output = {
        "meta": {
            "name": deck_title,
            # Millisecond field is hard-coded to .000; format mimics JS toISOString().
            "updatedAt": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.000Z"),
        },
        "nodes": nodes,
        "edges": edges,
    }

    return output
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
def run_validator(json_path):
    """Run the sibling validate_draft.py against *json_path*.

    Returns True when the file validates, or when the validator script is
    missing (best-effort: absence is a warning, not a failure).
    """
    validator_path = Path(__file__).parent / "validate_draft.py"
    if not validator_path.exists():
        print(f"WARNING Validator not found at {validator_path}", file=sys.stderr)
        return True

    # Load the validator as a module and call validate() directly instead of
    # shelling out to a subprocess.
    import importlib.util
    spec = importlib.util.spec_from_file_location("validate_draft", validator_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.validate(str(json_path))
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
def main():
    """CLI entry point: convert an outline to graph JSON, then validate it."""
    parser = argparse.ArgumentParser(
        description="Convert presentation outline markdown to ReactFlow graph JSON"
    )
    parser.add_argument("outline", help="Path to outline markdown file")
    parser.add_argument("-o", "--output", help="Output JSON path (stdout if omitted)")
    args = parser.parse_args()

    outline_path = Path(args.outline)
    if not outline_path.exists():
        print(f"ERROR Outline file not found: {outline_path}", file=sys.stderr)
        sys.exit(1)

    graph = parse_outline(outline_path)
    json_str = json.dumps(graph, indent=2, ensure_ascii=False)

    # Emit the JSON either to the requested file or to stdout.
    if args.output:
        out_path = Path(args.output)
        out_path.parent.mkdir(parents=True, exist_ok=True)
        out_path.write_text(json_str + "\n", encoding="utf-8")
        print(f"Wrote {out_path}")
    else:
        print(json_str)

    # Summary goes to stderr so stdout stays machine-parseable.
    node_count = len(graph["nodes"])
    edge_count = len(graph["edges"])
    print(f"Summary: {node_count} nodes, {edge_count} edges", file=sys.stderr)

    # Validate the emitted JSON; when printing to stdout, round-trip through
    # a throwaway temp file so the validator has something to read.
    if args.output:
        valid = run_validator(args.output)
    else:
        import tempfile
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as tmp:
            tmp.write(json_str)
            tmp_name = tmp.name
        valid = run_validator(tmp_name)
        Path(tmp_name).unlink(missing_ok=True)

    sys.exit(0 if valid else 1)


if __name__ == "__main__":
    main()
|
|
@@ -21,6 +21,7 @@ ALLOWED_DATA_FIELDS = {
|
|
|
21
21
|
"inlineVideoControls", "inlineVideoAutoplay", "inlineVideoLoop",
|
|
22
22
|
"scene", "chart", "charts",
|
|
23
23
|
"sceneGroup", "focus",
|
|
24
|
+
"transitionToNext",
|
|
24
25
|
}
|
|
25
26
|
|
|
26
27
|
VALID_SOURCE_HANDLES = {"s-right", "s-left", "s-top", "s-bottom"}
|
|
@@ -77,7 +78,7 @@ def validate(path):
|
|
|
77
78
|
errors.append(f"{p}: data must be an object")
|
|
78
79
|
continue
|
|
79
80
|
|
|
80
|
-
if not str(node_data.get("label", "")).strip():
|
|
81
|
+
if not str(node_data.get("label", "")).strip() and node_data.get("type") != "chart":
|
|
81
82
|
errors.append(f"{p}: data.label is missing or empty (this is the slide title)")
|
|
82
83
|
|
|
83
84
|
unknown = set(node_data.keys()) - ALLOWED_DATA_FIELDS
|