delimit-cli 4.1.43 → 4.1.44

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2 +1,1276 @@
1
- # content_engine Pro module (stubbed in npm package)
2
- # Full implementation available on delimit.ai server
1
+ """Autonomous content engine for Delimit.
2
+
3
+ Generates terminal demo videos, uploads to YouTube, and manages tweet scheduling.
4
+ All content is value-first: tutorials, governance insights, and real demos.
5
+
6
+ Components:
7
+ - Cast generator: scripted terminal demos via asciinema
8
+ - Video renderer: HTML + puppeteer CDP screencast + ffmpeg compositing
9
+ - YouTube uploader: OAuth2 upload via google-api-python-client
10
+ - Tweet scheduler: queue-based tweet posting via tweepy
11
+ - Cron orchestrator: ties it all together on a schedule
12
+ """
13
+
14
+ import json
15
+ import logging
16
+ import os
17
+ import shutil
18
+ import subprocess
19
+ import tempfile
20
+ import time
21
+ from datetime import datetime, timezone
22
+ from pathlib import Path
23
+ from typing import Any, Dict, List, Optional
24
+
25
# Module-level logger for the content engine.
logger = logging.getLogger("delimit.content_engine")

# All persistent state lives under ~/.delimit/.
SECRETS_DIR = Path.home() / ".delimit" / "secrets"            # OAuth client + token JSON files
CONTENT_DIR = Path.home() / ".delimit" / "content"            # root for generated content
CONTENT_LOG = Path.home() / ".delimit" / "content_log.jsonl"  # presumably JSON-lines event log; writer not shown in this chunk
TWEET_QUEUE = Path.home() / ".delimit" / "tweet_queue.json"   # queued + posted tweets (list of dicts)
TWEET_SCHEDULE = Path.home() / ".delimit" / "content" / "tweet_schedule.json"  # day/week tweet rotation
VIDEO_QUEUE = Path.home() / ".delimit" / "video_queue.json"   # NOTE(review): not referenced in this chunk
ASSETS_DIR = CONTENT_DIR / "assets"   # reusable assets (e.g. the ambient music track)
VIDEOS_DIR = CONTENT_DIR / "videos"   # rendered MP4 output
CASTS_DIR = CONTENT_DIR / "casts"     # generated asciinema .cast files
36
+
37
+
38
+ # ═══════════════════════════════════════════════════════════════════════
39
+ # VIDEO SCRIPTS — each is a scripted terminal demo
40
+ # ═══════════════════════════════════════════════════════════════════════
41
+
42
# Each entry holds YouTube metadata ("title", "description", "tags",
# "category" -- YouTube category id; "28" = Science & Technology) plus
# "commands": (command_text, pause_after_seconds) pairs replayed by
# generate_cast(), and "duration_estimate": the recording length in
# seconds used by generate_video() when rendering.
VIDEO_SCRIPTS = {
    "install": {
        "title": "Install Delimit in 60 Seconds",
        "description": "Set up API governance for your AI coding workflow in under a minute. Delimit catches breaking API changes before they reach production.",
        "tags": ["delimit", "mcp", "ai governance", "claude code", "codex", "api governance", "openapi"],
        "category": "28",
        "commands": [
            ("echo '# Install Delimit CLI'", 1.5),
            ("npx delimit-cli@latest init --preset default", 4),
            ("echo ''", 0.5),
            ("echo '# Check your governance setup'", 1.5),
            ("npx delimit-cli@latest doctor", 3),
            ("echo ''", 0.5),
            ("echo '# Done! Breaking changes will be caught automatically.'", 2),
        ],
        "duration_estimate": 60,
    },
    "breaking_changes": {
        "title": "Catch Breaking API Changes Before Merge",
        "description": "Detect 23 types of breaking changes in OpenAPI specs automatically. No configuration needed.",
        "tags": ["openapi", "api", "breaking changes", "github action", "ci cd", "api governance"],
        "category": "28",
        "commands": [
            ("echo '# Detecting breaking API changes with Delimit'", 1.5),
            ("echo '# Lets compare two versions of an API spec'", 1.5),
            ("cat api/v1.yaml | head -20", 2),
            ("echo ''", 0.5),
            ("echo '# Now lint for breaking changes'", 1.5),
            ("npx delimit-cli@latest lint --old api/v1.yaml --new api/v2.yaml", 4),
            ("echo ''", 0.5),
            ("echo '# 3 breaking changes caught before merge!'", 2),
        ],
        "duration_estimate": 75,
    },
    "github_action": {
        "title": "Add API Governance to Any Repo in 30 Seconds",
        "description": "One YAML file. Zero configuration. Automatic breaking change detection on every PR.",
        "tags": ["github actions", "ci cd", "api governance", "openapi", "pull request"],
        "category": "28",
        "commands": [
            ("echo '# Add Delimit to your GitHub Actions workflow'", 1.5),
            ("echo '# Just add this to .github/workflows/api-check.yml:'", 1.5),
            # NOTE(review): YAML indentation inside this heredoc appears
            # collapsed by the diff rendering; confirm against the shipped file.
            ("cat <<'EOF'\nname: API Governance\non: [pull_request]\njobs:\n check:\n runs-on: ubuntu-latest\n steps:\n - uses: actions/checkout@v4\n - uses: delimit-ai/delimit-action@v1\n with:\n spec: api/openapi.yaml\nEOF", 3),
            ("echo ''", 0.5),
            ("echo '# Thats it. Every PR now gets breaking change detection.'", 2),
        ],
        "duration_estimate": 50,
    },
    "policy_presets": {
        "title": "3 Policy Presets for API Governance",
        "description": "Strict, default, or relaxed. Match your API governance to your team's risk tolerance.",
        "tags": ["api governance", "policy", "openapi", "breaking changes", "configuration"],
        "category": "28",
        "commands": [
            ("echo '# Delimit policy presets'", 1.5),
            ("echo ''", 0.3),
            ("echo '# Strict: every violation is an error'", 1.5),
            ("npx delimit-cli@latest init --preset strict", 3),
            ("echo ''", 0.5),
            ("echo '# Default: balanced warnings and errors'", 1.5),
            ("npx delimit-cli@latest init --preset default", 3),
            ("echo ''", 0.5),
            ("echo '# Relaxed: warnings only, nothing blocks'", 1.5),
            ("npx delimit-cli@latest init --preset relaxed", 3),
            ("echo ''", 0.5),
            ("echo '# Pick the one that fits your team.'", 2),
        ],
        "duration_estimate": 70,
    },
    "multi_model": {
        "title": "One Workspace for Every AI Coding Assistant",
        "description": "Switch between Claude Code, Codex, and Gemini CLI without losing context. Your governance rules follow you.",
        "tags": ["claude code", "codex", "gemini", "ai coding", "mcp", "context"],
        "category": "28",
        "commands": [
            ("echo '# The problem: context loss when switching AI assistants'", 1.5),
            ("echo '# Claude Code knows your API spec...'", 1.5),
            ("echo '# But Codex starts from zero.'", 1.5),
            ("echo ''", 0.5),
            ("echo '# Delimit solves this with a shared governance layer.'", 1.5),
            ("echo '# Your policies, your spec, your rules -- everywhere.'", 2),
            ("echo ''", 0.5),
            ("echo '# Setup:'", 1),
            ("npx delimit-cli@latest init", 3),
            ("echo ''", 0.5),
            ("echo '# Now every assistant sees the same governance rules.'", 2),
        ],
        "duration_estimate": 65,
    },
    "diff_engine": {
        "title": "23 Change Types Detected Automatically",
        "description": "Delimit's diff engine classifies every API change: endpoints, parameters, schemas, security, and more.",
        "tags": ["api diff", "openapi", "change detection", "semver", "api versioning"],
        "category": "28",
        "commands": [
            ("echo '# Delimit detects 23 types of API changes'", 1.5),
            ("echo '# Endpoint added, removed, or renamed'", 1),
            ("echo '# Parameter type changed'", 1),
            ("echo '# Required field added to request body'", 1),
            ("echo '# Response schema modified'", 1),
            ("echo '# Security scheme changed'", 1),
            ("echo '# ...and 17 more'", 1),
            ("echo ''", 0.5),
            ("echo '# Each change gets a semver classification:'", 1.5),
            ("echo '# MAJOR = breaking, MINOR = additive, PATCH = fix'", 2),
            ("echo ''", 0.5),
            ("npx delimit-cli@latest diff --old api/v1.yaml --new api/v2.yaml", 4),
        ],
        "duration_estimate": 70,
    },
    "zero_config": {
        "title": "Zero Configuration API Governance",
        "description": "Delimit works out of the box. No YAML to write, no rules to configure. Just point it at your spec.",
        "tags": ["zero config", "api governance", "openapi", "developer experience", "dx"],
        "category": "28",
        "commands": [
            ("echo '# Zero config API governance'", 1.5),
            ("echo '# Step 1: You have an OpenAPI spec'", 1.5),
            ("ls api/", 2),
            ("echo ''", 0.5),
            ("echo '# Step 2: Run delimit lint'", 1.5),
            ("npx delimit-cli@latest lint --old api/v1.yaml --new api/v2.yaml", 4),
            ("echo ''", 0.5),
            ("echo '# Thats it. No config files. No setup. It just works.'", 2),
        ],
        "duration_estimate": 55,
    },
    "ai_agents": {
        "title": "Why AI Agents Need API Governance",
        "description": "AI coding agents generate API changes fast. Without governance, breaking changes ship to production unchecked.",
        "tags": ["ai agents", "api governance", "ai safety", "code review", "automation"],
        "category": "28",
        "commands": [
            ("echo '# AI agents are writing code faster than humans can review'", 2),
            ("echo '# They generate API changes in seconds'", 1.5),
            ("echo '# But who checks if those changes break consumers?'", 2),
            ("echo ''", 0.5),
            ("echo '# Delimit is the governance layer for AI-generated APIs'", 2),
            ("echo ''", 0.5),
            ("echo '# Add one GitHub Action:'", 1.5),
            ("echo ' - uses: delimit-ai/delimit-action@v1'", 1),
            ("echo ' with:'", 0.5),
            ("echo ' spec: api/openapi.yaml'", 1),
            ("echo ''", 0.5),
            ("echo '# Every AI-generated PR gets breaking change detection.'", 2),
            ("echo '# Governance isnt optional. Its infrastructure.'", 2),
        ],
        "duration_estimate": 75,
    },
}
192
+
193
+
194
+ # ═══════════════════════════════════════════════════════════════════════
195
+ # CAST GENERATOR — scripted terminal demos via asciinema format
196
+ # ═══════════════════════════════════════════════════════════════════════
197
+
198
def generate_cast(script_id: str, output_path: Optional[str] = None) -> Dict[str, Any]:
    """Produce an asciinema v2 .cast file for one of the scripted demos.

    The commands are never executed: keystrokes and command output are
    synthesized so the recording merely looks like a live session.

    Args:
        script_id: Key into VIDEO_SCRIPTS.
        output_path: Destination file; defaults to CASTS_DIR/<script_id>.cast.

    Returns:
        Summary dict (path, title, event count, duration) or an error dict
        listing the available script ids.
    """
    script = VIDEO_SCRIPTS.get(script_id)
    if script is None:
        return {"error": f"Unknown script: {script_id}", "available": list(VIDEO_SCRIPTS.keys())}

    CASTS_DIR.mkdir(parents=True, exist_ok=True)
    out_file = output_path or str(CASTS_DIR / f"{script_id}.cast")

    # asciicast v2 header line (see the asciinema file-format spec).
    header = {
        "version": 2,
        "width": 100,
        "height": 30,
        "timestamp": int(time.time()),
        "env": {"TERM": "xterm-256color", "SHELL": "/bin/bash"},
        "title": script["title"],
    }

    clock = 0.5  # half a second of lead-in before the first prompt
    stream: List[List[Any]] = []

    for command, pause_after in script["commands"]:
        # Green prompt, then the command "typed" one character at a time.
        stream.append([clock, "o", "\r\n\x1b[1;32m$ \x1b[0m"])
        clock += 0.1
        for ch in command:
            stream.append([round(clock, 3), "o", ch])
            clock += 0.04  # ~40ms per keystroke reads as human typing

        # Enter key.
        clock += 0.2
        stream.append([round(clock, 3), "o", "\r\n"])
        clock += 0.3

        # Fake command output, one line per event.
        fake_output = _synthetic_output(command, script_id)
        if fake_output:
            for line in fake_output.split("\n"):
                stream.append([round(clock, 3), "o", line + "\r\n"])
                clock += 0.05

        clock += pause_after

    # One JSON object per line: header first, then each event.
    with open(out_file, "w") as fh:
        fh.write(json.dumps(header) + "\n")
        fh.writelines(json.dumps(ev) + "\n" for ev in stream)

    return {
        "cast_path": out_file,
        "script_id": script_id,
        "title": script["title"],
        "events": len(stream),
        "duration_seconds": round(clock, 1),
    }
263
+
264
+
265
+ def _synthetic_output(cmd: str, script_id: str) -> str:
266
+ """Generate realistic synthetic output for demo commands."""
267
+ if "delimit-cli@latest init" in cmd or "delimit-cli@latest init" in cmd:
268
+ preset = "default"
269
+ if "--preset strict" in cmd:
270
+ preset = "strict"
271
+ elif "--preset relaxed" in cmd:
272
+ preset = "relaxed"
273
+ return (
274
+ f"\x1b[1;36mdelimit\x1b[0m Initializing project with {preset} preset...\n"
275
+ f"\x1b[1;32m +\x1b[0m Created .delimit/policies.yml\n"
276
+ f"\x1b[1;32m +\x1b[0m Created .github/workflows/api-check.yml\n"
277
+ f"\x1b[1;32m done\x1b[0m Project initialized with {preset} policy preset."
278
+ )
279
+ elif "delimit-cli@latest doctor" in cmd:
280
+ return (
281
+ "\x1b[1;36mdelimit\x1b[0m Running diagnostics...\n"
282
+ "\x1b[1;32m pass\x1b[0m Policy file found\n"
283
+ "\x1b[1;32m pass\x1b[0m GitHub Action configured\n"
284
+ "\x1b[1;32m pass\x1b[0m OpenAPI spec detected\n"
285
+ "\x1b[1;32m pass\x1b[0m Git repository initialized\n"
286
+ "\x1b[1;36m 4/4 checks passed\x1b[0m"
287
+ )
288
+ elif "delimit-cli@latest lint" in cmd:
289
+ return (
290
+ "\x1b[1;36mdelimit\x1b[0m Linting API changes...\n"
291
+ "\n"
292
+ "\x1b[1;31m BREAKING\x1b[0m Endpoint removed: DELETE /api/v1/users/{id}\n"
293
+ "\x1b[1;31m BREAKING\x1b[0m Required field added: POST /api/v1/orders body.shipping_method\n"
294
+ "\x1b[1;31m BREAKING\x1b[0m Response type changed: GET /api/v1/products[].price (string -> number)\n"
295
+ "\x1b[1;33m WARNING\x1b[0m Endpoint deprecated: GET /api/v1/legacy/search\n"
296
+ "\n"
297
+ "\x1b[1;31m 3 breaking changes\x1b[0m | \x1b[1;33m1 warning\x1b[0m | Semver: MAJOR"
298
+ )
299
+ elif "delimit-cli@latest diff" in cmd:
300
+ return (
301
+ "\x1b[1;36mdelimit\x1b[0m Diffing API specs...\n"
302
+ "\n"
303
+ " \x1b[1;31mremoved\x1b[0m DELETE /api/v1/users/{id}\n"
304
+ " \x1b[1;31mchanged\x1b[0m POST /api/v1/orders (added required field)\n"
305
+ " \x1b[1;31mchanged\x1b[0m GET /api/v1/products (response type changed)\n"
306
+ " \x1b[1;33madded\x1b[0m POST /api/v2/users/bulk\n"
307
+ " \x1b[1;33madded\x1b[0m GET /api/v2/analytics\n"
308
+ "\n"
309
+ " \x1b[1;36m5 changes\x1b[0m: 3 breaking, 2 additive | Semver: MAJOR"
310
+ )
311
+ elif cmd.startswith("cat ") and "yaml" in cmd:
312
+ return (
313
+ "openapi: '3.0.3'\n"
314
+ "info:\n"
315
+ " title: Sample API\n"
316
+ " version: 1.0.0\n"
317
+ "paths:\n"
318
+ " /api/v1/users:\n"
319
+ " get:\n"
320
+ " summary: List users\n"
321
+ " responses:\n"
322
+ " '200':\n"
323
+ " description: OK"
324
+ )
325
+ elif cmd.startswith("ls api"):
326
+ return "openapi.yaml v1.yaml v2.yaml"
327
+ elif cmd.startswith("echo "):
328
+ return "" # echo commands render their own output
329
+ elif cmd.startswith("cat <<"):
330
+ return "" # heredoc renders itself
331
+
332
+ return ""
333
+
334
+
335
+ # ═══════════════════════════════════════════════════════════════════════
336
+ # VIDEO RENDERER — HTML + puppeteer screencast + ffmpeg
337
+ # ═══════════════════════════════════════════════════════════════════════
338
+
339
# 1920x1080 page that frames the asciinema player inside a fake macOS-style
# terminal window, with a DELIMIT header and delimit.ai watermark.
# {{TITLE}} and {{CAST_URL}} are placeholders substituted by render_video()
# via str.replace (plain string, deliberately not an f-string).
# NOTE(review): loads player assets from the jsdelivr CDN, so rendering
# requires network access.
_TERMINAL_HTML_TEMPLATE = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
background: #0d1117;
width: 1920px;
height: 1080px;
font-family: 'SF Mono', 'Fira Code', 'Consolas', monospace;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
overflow: hidden;
}
.header {
text-align: center;
margin-bottom: 24px;
}
.logo {
font-size: 28px;
font-weight: 700;
color: #58a6ff;
letter-spacing: 2px;
}
.title {
font-size: 36px;
font-weight: 600;
color: #e6edf3;
margin-top: 8px;
}
.terminal-container {
background: #161b22;
border-radius: 12px;
border: 1px solid #30363d;
width: 1600px;
height: 750px;
overflow: hidden;
box-shadow: 0 16px 48px rgba(0,0,0,0.4);
}
.terminal-header {
background: #21262d;
height: 36px;
display: flex;
align-items: center;
padding: 0 16px;
gap: 8px;
}
.dot { width: 12px; height: 12px; border-radius: 50%; }
.dot-red { background: #f85149; }
.dot-yellow { background: #d29922; }
.dot-green { background: #3fb950; }
.terminal-body {
padding: 20px;
font-size: 18px;
line-height: 1.6;
color: #e6edf3;
white-space: pre-wrap;
overflow: hidden;
height: 714px;
}
.watermark {
margin-top: 24px;
font-size: 20px;
color: #484f58;
letter-spacing: 1px;
}
#player { width: 100%; height: 100%; }
</style>
<link rel="stylesheet" type="text/css" href="https://cdn.jsdelivr.net/npm/asciinema-player@3.7.1/dist/bundle/asciinema-player.css" />
</head>
<body>
<div class="header">
<div class="logo">DELIMIT</div>
<div class="title">{{TITLE}}</div>
</div>
<div class="terminal-container">
<div class="terminal-header">
<div class="dot dot-red"></div>
<div class="dot dot-yellow"></div>
<div class="dot dot-green"></div>
</div>
<div class="terminal-body">
<div id="player"></div>
</div>
</div>
<div class="watermark">delimit.ai</div>

<script src="https://cdn.jsdelivr.net/npm/asciinema-player@3.7.1/dist/bundle/asciinema-player.min.js"></script>
<script>
const player = AsciinemaPlayer.create(
'{{CAST_URL}}',
document.getElementById('player'),
{
cols: 100,
rows: 28,
autoPlay: true,
speed: 1,
theme: 'monokai',
fit: 'width',
terminalFontFamily: "'SF Mono', 'Fira Code', 'Consolas', monospace",
terminalFontSize: '18px',
}
);
</script>
</body>
</html>"""
448
+
449
+
450
def _generate_ambient_music(output_path: str, duration: int = 120) -> Dict[str, Any]:
    """Synthesize a quiet ambient bed with ffmpeg sine-wave sources.

    Three low tones (174/261/349 Hz) are mixed, low-pass filtered, and
    attenuated, then encoded as AAC.

    Args:
        output_path: Destination audio file (AAC in whatever container
            the extension implies).
        duration: Track length in seconds.

    Returns:
        {"path", "duration"} on success, {"error": ...} on any failure.
    """
    try:
        args = ["ffmpeg", "-y"]
        for freq in (174, 261, 349):
            args += ["-f", "lavfi", "-i", f"sine=frequency={freq}:duration={duration}"]
        args += [
            "-filter_complex",
            "[0:a][1:a][2:a]amix=inputs=3,lowpass=f=300,volume=0.08",
            "-c:a", "aac", "-b:a", "128k",
            output_path,
        ]
        proc = subprocess.run(args, capture_output=True, text=True, timeout=60)
        if proc.returncode != 0:
            return {"error": f"ffmpeg failed: {proc.stderr[:500]}"}
        return {"path": output_path, "duration": duration}
    except Exception as e:
        # Covers a missing ffmpeg binary and subprocess timeouts alike.
        return {"error": str(e)}
469
+
470
+
471
def _create_puppeteer_script(html_path: str, output_path: str, duration_ms: int) -> str:
    """Create a Node.js puppeteer script for CDP screencast capture.

    Args:
        html_path: Absolute path of the player HTML page to load.
        output_path: Base path; PNG frames are written to "<output_path>_frames".
        duration_ms: Recording length in milliseconds.

    Returns:
        JavaScript source that captures frames via the Chrome DevTools
        Protocol screencast and prints {"frames": N, "framesDir": ...} as
        a single JSON line on stdout (parsed by render_video).
    """
    # Doubled braces ({{ }}) are f-string escapes for literal JS braces;
    # only html_path, output_path and duration_ms are interpolated.
    return f"""
const puppeteer = require('puppeteer');
const fs = require('fs');
const path = require('path');

(async () => {{
const browser = await puppeteer.launch({{
headless: 'new',
args: ['--no-sandbox', '--disable-setuid-sandbox', '--window-size=1920,1080'],
defaultViewport: {{ width: 1920, height: 1080 }},
}});

const page = await browser.newPage();
await page.goto('file://{html_path}', {{ waitUntil: 'networkidle0', timeout: 30000 }});

// Wait for asciinema player to load
await new Promise(r => setTimeout(r, 2000));

const framesDir = '{output_path}_frames';
if (!fs.existsSync(framesDir)) fs.mkdirSync(framesDir, {{ recursive: true }});

const client = await page.createCDPSession();

let frameCount = 0;
client.on('Page.screencastFrame', async (params) => {{
const frameFile = path.join(framesDir, `frame_${{String(frameCount).padStart(6, '0')}}.png`);
fs.writeFileSync(frameFile, Buffer.from(params.data, 'base64'));
frameCount++;
await client.send('Page.screencastFrameAck', {{ sessionId: params.sessionId }});
}});

await client.send('Page.startScreencast', {{
format: 'png',
quality: 80,
maxWidth: 1920,
maxHeight: 1080,
everyNthFrame: 1,
}});

// Record for the duration
await new Promise(r => setTimeout(r, {duration_ms}));

await client.send('Page.stopScreencast');
await browser.close();

console.log(JSON.stringify({{ frames: frameCount, framesDir }}));
}})();
"""
521
+
522
+
523
def render_video(cast_path: str, output_path: str, title: str, duration_seconds: int = 90) -> Dict[str, Any]:
    """Render a .cast file to MP4 via puppeteer screencast + ffmpeg compositing.

    Pipeline:
    1. Create HTML page embedding asciinema player with the cast file
    2. Use puppeteer CDP screencast to capture frames
    3. Generate ambient music track
    4. Combine frames + music with ffmpeg

    Args:
        cast_path: Path of the asciinema .cast file to play back.
        output_path: Destination MP4 path.
        title: Heading rendered above the terminal in the HTML page.
        duration_seconds: How long the screencast is recorded.

    Returns:
        Dict describing the rendered video on success, or {"error": ...}.
    """
    VIDEOS_DIR.mkdir(parents=True, exist_ok=True)
    ASSETS_DIR.mkdir(parents=True, exist_ok=True)

    # Everything transient (HTML page, capture script, raw frames/video)
    # lives in a temp dir that is removed when the with-block exits.
    with tempfile.TemporaryDirectory(prefix="delimit_video_") as tmpdir:
        tmpdir_path = Path(tmpdir)

        # 1. Create HTML page (placeholder substitution, not an f-string)
        html_content = _TERMINAL_HTML_TEMPLATE.replace("{{TITLE}}", title)
        html_content = html_content.replace("{{CAST_URL}}", cast_path)
        html_path = tmpdir_path / "player.html"
        html_path.write_text(html_content)

        # 2. Run puppeteer to capture frames
        puppeteer_script = _create_puppeteer_script(
            str(html_path),
            str(tmpdir_path / "video"),
            duration_seconds * 1000,
        )
        script_path = tmpdir_path / "capture.js"
        script_path.write_text(puppeteer_script)

        try:
            result = subprocess.run(
                ["node", str(script_path)],
                capture_output=True, text=True,
                timeout=duration_seconds + 60,
                # NOTE(review): clobbers any NODE_PATH already set -- assumes
                # puppeteer lives in the global node_modules; confirm on hosts.
                env={**os.environ, "NODE_PATH": "/usr/lib/node_modules"},
            )
        except subprocess.TimeoutExpired:
            return {"error": "Puppeteer capture timed out"}

        if result.returncode != 0:
            return {"error": f"Puppeteer failed: {result.stderr[:500]}"}

        # The capture script prints {"frames": N, "framesDir": ...} on stdout.
        try:
            capture_info = json.loads(result.stdout.strip())
        except (json.JSONDecodeError, ValueError):
            return {"error": f"Puppeteer output parse error: {result.stdout[:200]}"}

        frames_dir = capture_info.get("framesDir", "")
        frame_count = capture_info.get("frames", 0)

        if frame_count == 0:
            return {"error": "No frames captured"}

        # 3. Generate ambient music (cached in ASSETS_DIR across renders;
        # rendering proceeds silently if generation fails)
        music_path = str(ASSETS_DIR / "ambient.m4a")
        if not Path(music_path).exists():
            music_result = _generate_ambient_music(music_path, duration_seconds + 30)
            if "error" in music_result:
                logger.warning("Music generation failed, proceeding without: %s", music_result["error"])
                music_path = None

        # 4. Combine frames into video with ffmpeg
        # Effective fps is derived from how many frames CDP actually delivered.
        fps = max(1, frame_count // duration_seconds)
        raw_video = str(tmpdir_path / "raw.mp4")

        ffmpeg_cmd = [
            "ffmpeg", "-y",
            "-framerate", str(fps),
            "-i", f"{frames_dir}/frame_%06d.png",
            "-c:v", "libx264",
            "-pix_fmt", "yuv420p",
            "-preset", "medium",
            "-crf", "23",
            raw_video,
        ]
        sub = subprocess.run(ffmpeg_cmd, capture_output=True, text=True, timeout=120)
        if sub.returncode != 0:
            return {"error": f"ffmpeg frame assembly failed: {sub.stderr[:500]}"}

        # 5. Add music track if available (-shortest trims audio to video)
        if music_path and Path(music_path).exists():
            final_cmd = [
                "ffmpeg", "-y",
                "-i", raw_video,
                "-i", music_path,
                "-c:v", "copy",
                "-c:a", "aac",
                "-shortest",
                "-map", "0:v:0",
                "-map", "1:a:0",
                output_path,
            ]
            sub = subprocess.run(final_cmd, capture_output=True, text=True, timeout=60)
            if sub.returncode != 0:
                # Fall back to video without music
                shutil.copy2(raw_video, output_path)
        else:
            shutil.copy2(raw_video, output_path)

        return {
            "video_path": output_path,
            "title": title,
            "frames": frame_count,
            "duration_seconds": duration_seconds,
            "has_music": music_path is not None and Path(music_path).exists(),
        }
630
+
631
+
632
def generate_video(script_id: str) -> Dict[str, Any]:
    """Run the whole pipeline for one scripted demo: cast, then video.

    Args:
        script_id: Key into VIDEO_SCRIPTS.

    Returns:
        {"cast", "video", "script"} on success; an error dict for an
        unknown id; or {"cast", "video_error"} when only rendering failed.
    """
    script = VIDEO_SCRIPTS.get(script_id)
    if not script:
        return {"error": f"Unknown script: {script_id}", "available": list(VIDEO_SCRIPTS.keys())}

    cast_result = generate_cast(script_id)
    if "error" in cast_result:
        return cast_result

    target = str(VIDEOS_DIR / f"{script_id}.mp4")
    video_result = render_video(
        cast_result["cast_path"],
        target,
        script["title"],
        script.get("duration_estimate", 90),
    )

    if "error" in video_result:
        # Keep the cast result even though rendering failed.
        return {"cast": cast_result, "video_error": video_result["error"]}

    return {
        "cast": cast_result,
        "video": video_result,
        "script": {
            "id": script_id,
            "title": script["title"],
            "description": script["description"],
            "tags": script["tags"],
        },
    }
671
+
672
+
673
+ # ═══════════════════════════════════════════════════════════════════════
674
+ # YOUTUBE UPLOADER — OAuth2 via google-api-python-client
675
+ # ═══════════════════════════════════════════════════════════════════════
676
+
677
def _get_youtube_credentials():
    """Build YouTube OAuth2 credentials from files in ~/.delimit/secrets/.

    Returns a (credentials, error_message) pair where exactly one side is
    None. A refreshed access token is written back to youtube-tokens.json.
    """
    from google.oauth2.credentials import Credentials

    tokens_path = SECRETS_DIR / "youtube-tokens.json"
    client_path = SECRETS_DIR / "youtube-oauth-client.json"

    if not tokens_path.exists():
        return None, "Missing youtube-tokens.json in ~/.delimit/secrets/"
    if not client_path.exists():
        return None, "Missing youtube-oauth-client.json in ~/.delimit/secrets/"

    tokens = json.loads(tokens_path.read_text())
    client_creds = json.loads(client_path.read_text())

    # Google's client JSON nests the secret under "installed" (desktop
    # apps) or "web"; accept either layout.
    client_info = client_creds.get("installed", client_creds.get("web", {}))

    creds = Credentials(
        token=tokens.get("access_token"),
        refresh_token=tokens.get("refresh_token"),
        token_uri="https://oauth2.googleapis.com/token",
        client_id=client_info.get("client_id"),
        client_secret=client_info.get("client_secret"),
        scopes=["https://www.googleapis.com/auth/youtube.upload"],
    )

    if creds.expired or not creds.valid:
        from google.auth.transport.requests import Request

        try:
            creds.refresh(Request())
            # Persist the new access token for the next run.
            tokens["access_token"] = creds.token
            tokens_path.write_text(json.dumps(tokens, indent=2))
        except Exception as e:
            return None, f"Token refresh failed: {e}"

    return creds, None
716
+
717
+
718
def test_youtube_auth() -> Dict[str, Any]:
    """Verify that YouTube OAuth works, without uploading anything.

    The stored token may carry only the youtube.upload scope, in which
    case channels.list fails with an insufficient-scope 403. A successful
    token refresh still proves uploads will work, so that case is
    reported as authenticated.
    """
    creds, error = _get_youtube_credentials()
    if error:
        return {"authenticated": False, "error": error}

    try:
        from googleapiclient.discovery import build

        service = build("youtube", "v3", credentials=creds)
        # channels.list gives us the channel name when scope allows it.
        listing = service.channels().list(part="snippet", mine=True).execute()
        items = listing.get("items", [])
        if items:
            first = items[0]
            return {
                "authenticated": True,
                "channel": first["snippet"]["title"],
                "channel_id": first["id"],
            }
        return {"authenticated": True, "channel": "unknown (no channels found)"}
    except Exception as exc:
        message = str(exc)
        # 403 insufficient scopes means auth works but token scope is limited
        # This is expected when tokens only have youtube.upload scope
        if "insufficientPermissions" in message or "authentication scopes" in message:
            return {
                "authenticated": True,
                "note": "Token valid but limited to upload scope (channels.list requires youtube.readonly)",
                "token_valid": creds.token is not None,
            }
        return {"authenticated": False, "error": message}
753
+
754
+
755
def upload_to_youtube(video_path: str, title: str, description: str,
                      tags: List[str], category: str = "28",
                      privacy: str = "public") -> Dict[str, Any]:
    """Upload a video to YouTube via OAuth2.

    Args:
        video_path: Path to the MP4 file.
        title: Video title.
        description: Video description (CTA appended automatically).
        tags: List of tag strings.
        category: YouTube category ID (28 = Science & Technology).
        privacy: public, unlisted, or private.

    Returns:
        Dict with video_id and url on success, or error.
    """
    if not Path(video_path).exists():
        return {"error": f"Video file not found: {video_path}"}

    creds, error = _get_youtube_credentials()
    if error:
        return {"error": error}

    try:
        # Imported lazily so the module loads without google-api-python-client.
        from googleapiclient.discovery import build
        from googleapiclient.http import MediaFileUpload

        youtube = build("youtube", "v3", credentials=creds)

        # Append CTA to description
        full_description = (
            f"{description}\n\n"
            "Get started:\n"
            " npx delimit-cli@latest init\n\n"
            "GitHub Action:\n"
            " https://github.com/marketplace/actions/delimit-api-governance\n\n"
            "Docs: https://delimit.ai/docs\n"
            "GitHub: https://github.com/delimit-ai/delimit-mcp-server"
        )

        # videos.insert request body: metadata + privacy flags.
        body = {
            "snippet": {
                "title": title,
                "description": full_description,
                "tags": tags,
                "categoryId": category,
            },
            "status": {
                "privacyStatus": privacy,
                "selfDeclaredMadeForKids": False,
            },
        }

        media = MediaFileUpload(video_path, mimetype="video/mp4", resumable=True)
        request = youtube.videos().insert(part="snippet,status", body=body, media_body=media)

        # Resumable upload with progress tracking; next_chunk() returns a
        # non-None response only once the upload is complete.
        response = None
        while response is None:
            status, response = request.next_chunk()
            if status:
                logger.info("Upload progress: %d%%", int(status.progress() * 100))

        video_id = response["id"]
        result = {
            "uploaded": True,
            "video_id": video_id,
            "url": f"https://youtube.com/watch?v={video_id}",
            "title": title,
            "privacy": privacy,
        }

        # NOTE(review): _log_content_event is not defined in this chunk --
        # assumed to be a sibling helper in this module; confirm.
        _log_content_event("youtube_upload", result)
        return result

    except Exception as e:
        return {"error": f"YouTube upload failed: {e}"}
832
+
833
+
834
+ # ═══════════════════════════════════════════════════════════════════════
835
+ # TWEET SCHEDULER — queue-based tweet management
836
+ # ═══════════════════════════════════════════════════════════════════════
837
+
838
def _load_tweet_queue() -> List[Dict[str, Any]]:
    """Read the tweet queue file; an absent or unparsable file yields []."""
    if not TWEET_QUEUE.exists():
        return []
    try:
        raw = TWEET_QUEUE.read_text()
        return json.loads(raw)
    except (json.JSONDecodeError, ValueError):
        return []
846
+
847
+
848
def _save_tweet_queue(queue: List[Dict[str, Any]]):
    """Persist the tweet queue as pretty-printed JSON, creating parent dirs."""
    TWEET_QUEUE.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(queue, indent=2)
    TWEET_QUEUE.write_text(serialized)
852
+
853
+
854
def add_tweets_to_queue(tweets: List[str]) -> Dict[str, Any]:
    """Add tweets to the posting queue.

    Tweets that are empty, whitespace-only, longer than 280 characters,
    or already queued (exact text match, including within this batch)
    are skipped.

    Args:
        tweets: List of tweet text strings to queue.

    Returns:
        Dict with the number added, the unposted queue size, and the
        total queue length.
    """
    queue = _load_tweet_queue()
    # Build the dedupe set once and maintain it incrementally; the
    # original rebuilt it from the whole queue on every iteration (O(n^2)).
    existing_texts = {t["text"] for t in queue}
    added = 0
    for text in tweets:
        text = text.strip() if text else ""
        if not text or len(text) > 280:
            continue
        if text in existing_texts:
            continue
        queue.append({
            "text": text,
            "added_at": datetime.now(timezone.utc).isoformat(),
            "posted": False,
            "posted_at": None,
            "tweet_id": None,
        })
        existing_texts.add(text)
        added += 1

    _save_tweet_queue(queue)
    return {
        "added": added,
        "queue_size": len([t for t in queue if not t["posted"]]),
        "total": len(queue),
    }
884
+
885
+
886
def get_next_tweet() -> Optional[Dict[str, Any]]:
    """Return the first queue entry not yet posted, or None when empty."""
    return next(
        (entry for entry in _load_tweet_queue() if not entry.get("posted")),
        None,
    )
893
+
894
+
895
def _load_tweet_schedule() -> Dict[str, Any]:
    """Load the tweet schedule from disk.

    Returns:
        The schedule mapping, or an empty dict when the file is missing,
        unreadable, or does not contain a JSON object.
    """
    if not TWEET_SCHEDULE.exists():
        return {}
    try:
        data = json.loads(TWEET_SCHEDULE.read_text())
    except (json.JSONDecodeError, ValueError, OSError):
        # Treat unreadable or corrupt files the same as a missing schedule.
        return {}
    # The schedule must be a mapping; callers immediately call .get() on the
    # result, so any other JSON top-level type is discarded.
    return data if isinstance(data, dict) else {}
903
+
904
+
905
def _save_tweet_schedule(schedule: Dict[str, Any]):
    """Write the tweet schedule back to its JSON file, creating parents."""
    target = TWEET_SCHEDULE
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(json.dumps(schedule, indent=2))
909
+
910
+
911
def get_scheduled_tweet() -> Optional[Dict[str, Any]]:
    """Get today's scheduled tweet based on day of week and current week rotation.

    Reads the tweet schedule, determines the current day (ET timezone) and
    the current week (cycling through weeks by ISO week number modulo total
    weeks), and returns the matching tweet entry.

    Returns None if:
    - No schedule file exists
    - Today is a rest day (no tweet scheduled)
    - The tweet has status "skip_if_no_news"
    - The tweet was already posted

    NOTE(review): on success this "getter" also MUTATES the schedule file —
    today's entry is marked "posted" before the caller has actually sent the
    tweet. If the subsequent post fails, the entry is lost for the day.
    Confirm this at-most-once trade-off is intentional.
    """
    try:
        from zoneinfo import ZoneInfo
    except ImportError:
        from backports.zoneinfo import ZoneInfo  # Python <3.9

    schedule = _load_tweet_schedule()
    weeks = schedule.get("weeks", [])
    if not weeks:
        return None

    # Determine today in ET — the schedule is keyed by lowercase weekday name.
    et = ZoneInfo("America/New_York")
    now_et = datetime.now(et)
    day_name = now_et.strftime("%A").lower()  # monday, tuesday, ...

    # Cycle through weeks using ISO week number, so the rotation advances
    # automatically without any stored cursor.
    iso_week = now_et.isocalendar()[1]
    week_index = iso_week % len(weeks)
    current_week = weeks[week_index]

    # Find today's tweet in the current week; keep the index so the entry
    # can be updated in place below.
    today_tweet = None
    today_index = None
    for idx, tweet in enumerate(current_week.get("tweets", [])):
        if tweet.get("day") == day_name:
            today_tweet = tweet
            today_index = idx
            break

    if today_tweet is None:
        return None

    # Skip if already posted
    if today_tweet.get("status") == "posted":
        return None

    # Skip if conditional and no news.
    # NOTE(review): entries flagged "skip_if_no_news" are unconditionally
    # skipped here — there is no news check in this function; presumably
    # some other process rewrites the status when news exists. Confirm.
    if today_tweet.get("status") == "skip_if_no_news":
        return None

    # Build the return dict (defensive .get() defaults for optional fields).
    result = {
        "text": today_tweet.get("text", ""),
        "day_type": today_tweet.get("day_type", ""),
        "media_type": today_tweet.get("media_type", "none"),
        "media_note": today_tweet.get("media_note", ""),
        "is_thread": today_tweet.get("is_thread", False),
        "thread_tweets": today_tweet.get("thread_tweets", []),
        "youtube_link_in_reply": today_tweet.get("youtube_link_in_reply", False),
    }
    if today_tweet.get("video_script"):
        result["video_script"] = today_tweet["video_script"]

    # Mark as posted in the schedule file (see NOTE in the docstring: this
    # happens before the tweet is actually posted by the caller).
    current_week["tweets"][today_index]["status"] = "posted"
    current_week["tweets"][today_index]["posted_at"] = datetime.now(timezone.utc).isoformat()
    _save_tweet_schedule(schedule)

    return result
983
+
984
+
985
def post_next_tweet() -> Dict[str, Any]:
    """Post the next scheduled or queued tweet via the Twitter API.

    Checks the day-typed tweet schedule first. Falls back to the flat queue
    if no scheduled tweet is available for today.

    Returns:
        A status dict: either the result from ``post_tweet`` (augmented with
        schedule metadata when applicable), a thread summary, a "skipped"
        status when the daily cap applies, or an "empty" status when nothing
        is available to post.
    """
    # Project-local import, kept at call time (presumably to avoid a heavy
    # or circular module-level dependency — confirm against ai.social).
    from ai.social import post_tweet, should_post_today

    if not should_post_today():
        # The limit value is re-read here only to build the message;
        # should_post_today is assumed to enforce it — TODO confirm.
        daily_limit = int(os.environ.get("DELIMIT_DAILY_TWEETS", "8"))
        return {"status": "skipped", "reason": f"Daily posting limit reached ({daily_limit}/day)"}

    # --- Try day-typed schedule first ---
    # NOTE(review): get_scheduled_tweet marks the entry "posted" on disk
    # before we actually post here; a post_tweet failure below loses the
    # day's scheduled entry.
    scheduled = get_scheduled_tweet()
    if scheduled:
        if scheduled.get("is_thread") and scheduled.get("thread_tweets"):
            # Post thread: first tweet, then replies chained off the most
            # recently successful tweet.
            tweets_to_post = scheduled["thread_tweets"]
            first_result = post_tweet(tweets_to_post[0])
            if "error" in first_result:
                return first_result
            parent_id = first_result.get("id", "")
            posted_ids = [parent_id]
            for reply_text in tweets_to_post[1:]:
                reply_result = post_tweet(reply_text, reply_to_id=parent_id)
                # On a failed reply the text is dropped and the next reply
                # still chains to the last successful id.
                if "error" not in reply_result:
                    parent_id = reply_result.get("id", "")
                    posted_ids.append(parent_id)
            _log_content_event("scheduled_thread_posted", {
                "day_type": scheduled["day_type"],
                "tweet_count": len(tweets_to_post),
                "tweet_ids": posted_ids,
            })
            return {
                "status": "posted",
                "source": "schedule",
                "day_type": scheduled["day_type"],
                "is_thread": True,
                "tweet_ids": posted_ids,
            }
        else:
            # Single scheduled tweet
            result = post_tweet(scheduled["text"])
            if "error" not in result:
                _log_content_event("scheduled_tweet_posted", {
                    "text": scheduled["text"][:100],
                    "day_type": scheduled["day_type"],
                    "tweet_id": result.get("id"),
                })
                return {
                    **result,
                    "source": "schedule",
                    "day_type": scheduled["day_type"],
                }
            # Propagate the error result unchanged.
            return result

    # --- Fall back to flat queue ---
    queue = _load_tweet_queue()
    for i, tweet in enumerate(queue):
        if not tweet.get("posted"):
            result = post_tweet(tweet["text"])
            if "error" not in result:
                # Persist posted-state only on success; an error returns
                # without marking, so the same tweet is retried next call.
                queue[i]["posted"] = True
                queue[i]["posted_at"] = datetime.now(timezone.utc).isoformat()
                queue[i]["tweet_id"] = result.get("id")
                _save_tweet_queue(queue)
                _log_content_event("tweet_posted", {
                    "text": tweet["text"][:100],
                    "tweet_id": result.get("id"),
                })
            return result

    return {"status": "empty", "reason": "No scheduled or queued tweets available"}
1058
+
1059
+
1060
def get_tweet_queue_status() -> Dict[str, Any]:
    """Summarize the tweet queue: pending/posted counts and the next text."""
    queue = _load_tweet_queue()
    pending = [entry for entry in queue if not entry.get("posted")]
    posted_count = len(queue) - len(pending)
    next_text = pending[0]["text"][:100] if pending else None
    return {
        "pending": len(pending),
        "posted": posted_count,
        "total": len(queue),
        "next_tweet": next_text,
    }
1071
+
1072
+
1073
+ # ═══════════════════════════════════════════════════════════════════════
1074
+ # VIDEO QUEUE — manage scheduled video generation/uploads
1075
+ # ═══════════════════════════════════════════════════════════════════════
1076
+
1077
def _load_video_queue() -> List[Dict[str, Any]]:
    """Load the video queue from disk.

    Returns:
        The queued video entries, or an empty list when the queue file is
        missing, unreadable, or does not contain a JSON array.
    """
    if not VIDEO_QUEUE.exists():
        return []
    try:
        data = json.loads(VIDEO_QUEUE.read_text())
    except (json.JSONDecodeError, ValueError, OSError):
        # Treat unreadable or corrupt files the same as a missing queue.
        return []
    # Callers iterate and index dict entries; discard non-list payloads.
    return data if isinstance(data, list) else []
1085
+
1086
+
1087
def _save_video_queue(queue: List[Dict[str, Any]]):
    """Write the video queue out as indented JSON, creating parent dirs."""
    VIDEO_QUEUE.parent.mkdir(parents=True, exist_ok=True)
    payload = json.dumps(queue, indent=2)
    VIDEO_QUEUE.write_text(payload)
1091
+
1092
+
1093
def populate_video_queue() -> Dict[str, Any]:
    """Add every known video script to the queue unless already tracked.

    Returns a dict with the number of entries added and the total size.
    """
    queue = _load_video_queue()
    known_ids = {entry["script_id"] for entry in queue}

    fresh_entries = [
        {
            "script_id": sid,
            "title": spec["title"],
            "added_at": datetime.now(timezone.utc).isoformat(),
            "generated": False,
            "uploaded": False,
            "video_path": None,
            "video_id": None,
            "video_url": None,
        }
        for sid, spec in VIDEO_SCRIPTS.items()
        if sid not in known_ids
    ]

    queue.extend(fresh_entries)
    _save_video_queue(queue)
    return {"added": len(fresh_entries), "total": len(queue)}
1115
+
1116
+
1117
def get_next_video() -> Optional[Dict[str, Any]]:
    """Return the next queue entry needing work.

    Priority: an already-generated video awaiting upload comes first;
    otherwise the first video not yet generated. None when nothing is left.
    """
    queue = _load_video_queue()
    awaiting_upload = next(
        (v for v in queue if v.get("generated") and not v.get("uploaded")),
        None,
    )
    if awaiting_upload is not None:
        return awaiting_upload
    return next((v for v in queue if not v.get("generated")), None)
1129
+
1130
+
1131
def process_next_video() -> Dict[str, Any]:
    """Generate and/or upload the next video in the queue.

    Performs at most ONE action per call (one upload OR one generation),
    persisting queue state on success, then returns immediately. Intended
    to be driven repeatedly by the cron orchestrator.

    Returns:
        Dict with an ``action`` key ("uploaded", "generated",
        "generation_failed", or "none") and, where applicable, the raw
        ``result`` from the upload/generation step.
    """
    queue = _load_video_queue()

    for i, entry in enumerate(queue):
        # Upload if generated but not uploaded
        if entry.get("generated") and not entry.get("uploaded"):
            # Entries whose script no longer exists in VIDEO_SCRIPTS are
            # skipped rather than failing the whole pass.
            script = VIDEO_SCRIPTS.get(entry["script_id"])
            if not script:
                continue
            upload_result = upload_to_youtube(
                entry["video_path"],
                script["title"],
                script["description"],
                script["tags"],
                script.get("category", "28"),  # 28 = YouTube "Science & Technology"
            )
            if "error" not in upload_result:
                # Persist upload metadata only on success.
                queue[i]["uploaded"] = True
                queue[i]["video_id"] = upload_result.get("video_id")
                queue[i]["video_url"] = upload_result.get("url")
                _save_video_queue(queue)
            # Return the upload result either way; errors surface to the
            # caller and the entry stays pending for the next call.
            return {"action": "uploaded", "result": upload_result}

        # Generate if not generated
        if not entry.get("generated"):
            gen_result = generate_video(entry["script_id"])
            # Success requires a "video" payload with no nested error.
            if "video" in gen_result and "error" not in gen_result.get("video", {}):
                queue[i]["generated"] = True
                queue[i]["video_path"] = gen_result["video"]["video_path"]
                _save_video_queue(queue)
                return {"action": "generated", "result": gen_result}
            return {"action": "generation_failed", "result": gen_result}

    return {"action": "none", "reason": "All videos generated and uploaded"}
1166
+
1167
+
1168
+ # ═══════════════════════════════════════════════════════════════════════
1169
+ # CONTENT SCHEDULE — view upcoming content
1170
+ # ═══════════════════════════════════════════════════════════════════════
1171
+
1172
def _recent_content_log(limit: int = 20) -> List[Dict[str, Any]]:
    """Return up to *limit* most recent content-log entries, newest first."""
    entries: List[Dict[str, Any]] = []
    if not CONTENT_LOG.exists():
        return entries
    lines = CONTENT_LOG.read_text().strip().split("\n")
    for line in reversed(lines[-limit:]):
        try:
            entries.append(json.loads(line))
        except (json.JSONDecodeError, ValueError):
            # Skip corrupt lines rather than failing the whole report.
            pass
    return entries


def _current_schedule_info() -> Optional[Dict[str, Any]]:
    """Compute today's position in the rotating tweet schedule.

    Returns None when no schedule with weeks exists. Mirrors the
    week-rotation logic used by get_scheduled_tweet (ET day name, ISO week
    number modulo total weeks).
    """
    tweet_schedule = _load_tweet_schedule()
    if not tweet_schedule.get("weeks"):
        return None
    try:
        from zoneinfo import ZoneInfo
    except ImportError:
        from backports.zoneinfo import ZoneInfo  # Python <3.9
    et = ZoneInfo("America/New_York")
    now_et = datetime.now(et)
    total_weeks = len(tweet_schedule["weeks"])
    # Weeks rotate by ISO week number so the cycle advances automatically.
    week_index = now_et.isocalendar()[1] % total_weeks
    current_week = tweet_schedule["weeks"][week_index]
    day_name = now_et.strftime("%A").lower()
    today_entry = next(
        (t for t in current_week.get("tweets", []) if t.get("day") == day_name),
        None,
    )
    pending_scheduled = sum(
        1
        for w in tweet_schedule["weeks"]
        for t in w.get("tweets", [])
        if t.get("status") == "pending"
    )
    return {
        "total_weeks": total_weeks,
        "current_week": current_week.get("week", week_index + 1),
        "today": day_name,
        "today_day_type": today_entry.get("day_type") if today_entry else None,
        "today_status": today_entry.get("status") if today_entry else "rest",
        "pending_scheduled": pending_scheduled,
    }


def get_content_schedule() -> Dict[str, Any]:
    """Get the full content schedule: tweets, videos, and history.

    Returns:
        Dict with tweet queue status, today's tweet-schedule position (or
        None when no schedule exists), video queue counts with the next
        pending entry, the fixed posting cadence description, and the 10
        most recent content-log events.
    """
    tweet_status = get_tweet_queue_status()
    video_queue = _load_video_queue()

    pending_videos = [v for v in video_queue if not v.get("uploaded")]
    uploaded_videos = [v for v in video_queue if v.get("uploaded")]

    recent_log = _recent_content_log()
    schedule_info = _current_schedule_info()

    return {
        "tweets": tweet_status,
        "tweet_schedule": schedule_info,
        "videos": {
            "pending": len(pending_videos),
            "uploaded": len(uploaded_videos),
            "next": pending_videos[0] if pending_videos else None,
        },
        "schedule": {
            "tweets": "1x daily via day-typed schedule (9:30am ET), flat queue fallback",
            "youtube": "1x weekly (Tuesday 10am ET)",
        },
        "recent_activity": recent_log[:10],
    }
1238
+
1239
+
1240
+ # ═══════════════════════════════════════════════════════════════════════
1241
+ # LOGGING
1242
+ # ═══════════════════════════════════════════════════════════════════════
1243
+
1244
def _log_content_event(event_type: str, data: Dict[str, Any]):
    """Append a content-engine event to the JSONL log.

    Args:
        event_type: Short event name, stored under the "event" key.
        data: Extra fields merged into the entry. NOTE: keys named "ts" or
            "event" in *data* override the generated values (spread-last
            order, preserved from the original).
    """
    CONTENT_LOG.parent.mkdir(parents=True, exist_ok=True)
    entry = {
        "ts": datetime.now(timezone.utc).isoformat(),
        "event": event_type,
        **data,
    }
    # Explicit encoding so log bytes do not depend on the platform's
    # default locale encoding.
    with open(CONTENT_LOG, "a", encoding="utf-8") as f:
        f.write(json.dumps(entry) + "\n")
1254
+
1255
+
1256
+ # ═══════════════════════════════════════════════════════════════════════
1257
+ # SEED DATA — pre-populate tweet queue with value-first content
1258
+ # ═══════════════════════════════════════════════════════════════════════
1259
+
1260
# Evergreen "value-first" tweets used to bootstrap the flat posting queue via
# seed_tweet_queue(). Entries are deduplicated against the queue by exact
# text, and any entry longer than 280 characters would be silently dropped
# by add_tweets_to_queue.
SEED_TWEETS = [
    "API governance tip: The 3 most common breaking changes we catch:\n\n1. Endpoint removed without deprecation\n2. Required field added to request body\n3. Response field type changed\n\nAll detectable before merge.\n\nnpx delimit-cli init",
    "Your CI pipeline checks code quality, test coverage, and security.\n\nBut does it check if your API changes break consumers?\n\nOne line of YAML:\n - uses: delimit-ai/delimit-action@v1\n with:\n spec: api/openapi.yaml",
    "AI coding agents generate API changes faster than humans can review.\n\nThat is exactly why you need automated governance.\n\nDelimit catches breaking changes on every PR -- whether a human or an AI wrote the code.\n\ndelimit.ai",
    "Quick tip: Run `npx delimit-cli doctor` in any project to check your governance setup.\n\nIt checks for policies, specs, workflows, and git config in seconds.",
    "The problem with AI coding assistants is not capability. It is context loss.\n\nEvery time you switch from Claude to Codex to Gemini, you start from zero.\n\nThat is the real productivity killer.\n\ndelimit.ai -- one workspace for every assistant.",
    "Hot take: In 2 years, unmanaged AI agents touching production code will be as unacceptable as unmanaged SSH keys.\n\nGovernance is not optional. It is infrastructure.",
    "Use policy presets to match your team's risk tolerance:\n\n- strict: all violations are errors\n- default: balanced\n- relaxed: warnings only\n\nnpx delimit-cli init --preset strict",
    "We built Delimit because we got tired of breaking changes slipping through code review.\n\n23 change types detected. Automatic semver classification. Zero config.\n\nGitHub Action: github.com/marketplace/actions/delimit-api-governance",
    "What is your API governance process today?\n\nManual review? CI check? Nothing?\n\nNo judgment -- that is why we built this.\n\nnpx delimit-cli init",
    "Delimit detects 23 types of API changes automatically:\n\n- Endpoints added, removed, renamed\n- Parameter types changed\n- Required fields added\n- Response schemas modified\n- Security schemes changed\n\nEach gets a semver classification: MAJOR, MINOR, or PATCH.",
]
1272
+
1273
+
1274
def seed_tweet_queue() -> Dict[str, Any]:
    """Bootstrap the tweet queue with the built-in seed content."""
    seeded = add_tweets_to_queue(SEED_TWEETS)
    return seeded