@renoise/plugin 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +15 -0
- package/.claude-plugin/plugin.json +23 -0
- package/README.md +53 -0
- package/hooks/check-api-key.sh +28 -0
- package/hooks/hooks.json +28 -0
- package/hooks/session-start.sh +40 -0
- package/index.mjs +1 -0
- package/openclaw.plugin.json +22 -0
- package/package.json +22 -0
- package/skills/director/SKILL.md +269 -0
- package/skills/director/references/narrative-pacing.md +257 -0
- package/skills/director/references/style-library.md +179 -0
- package/skills/file-upload/SKILL.md +79 -0
- package/skills/file-upload/scripts/upload.mjs +103 -0
- package/skills/gemini-gen/SKILL.md +236 -0
- package/skills/gemini-gen/scripts/gemini.mjs +220 -0
- package/skills/product-sheet-generate/SKILL.md +75 -0
- package/skills/renoise-gen/SKILL.md +364 -0
- package/skills/renoise-gen/references/api-endpoints.md +142 -0
- package/skills/renoise-gen/references/video-capabilities.md +524 -0
- package/skills/renoise-gen/renoise-cli.mjs +723 -0
- package/skills/scene-generate/SKILL.md +52 -0
- package/skills/short-film-editor/SKILL.md +478 -0
- package/skills/short-film-editor/examples/mystery-package-4shot.md +260 -0
- package/skills/short-film-editor/references/continuity-guide.md +170 -0
- package/skills/short-film-editor/scripts/analyze-beats.py +271 -0
- package/skills/short-film-editor/scripts/batch-generate.sh +150 -0
- package/skills/short-film-editor/scripts/split-grid.sh +70 -0
- package/skills/tiktok-content-maker/SKILL.md +140 -0
- package/skills/tiktok-content-maker/examples/dress-demo.md +86 -0
- package/skills/tiktok-content-maker/references/ecom-prompt-guide.md +266 -0
- package/skills/video-download/SKILL.md +161 -0
- package/skills/video-download/scripts/download-video.sh +91 -0
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Beat analysis for short film editing.
|
|
4
|
+
Analyzes audio to extract BPM, beat positions, section boundaries,
|
|
5
|
+
and suggests clip cut points within Seedance's 5-15s constraint.
|
|
6
|
+
|
|
7
|
+
Usage: python3 analyze-beats.py <audio_file>
|
|
8
|
+
Output: JSON to stdout
|
|
9
|
+
|
|
10
|
+
Dependencies: pip3 install librosa soundfile numpy
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import json
|
|
14
|
+
import sys
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
try:
|
|
18
|
+
import librosa
|
|
19
|
+
import numpy as np
|
|
20
|
+
except ImportError:
|
|
21
|
+
print(
|
|
22
|
+
json.dumps({
|
|
23
|
+
"error": "Missing dependencies. Install with: pip3 install librosa soundfile numpy"
|
|
24
|
+
}),
|
|
25
|
+
file=sys.stderr,
|
|
26
|
+
)
|
|
27
|
+
sys.exit(1)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
MIN_SEGMENT_S = 5.0
|
|
31
|
+
MAX_SEGMENT_S = 15.0
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def analyze_beats(audio_path: str) -> dict:
    """Analyze an audio file and return beat/section/cut data.

    Loads the audio at 22.05 kHz, runs beat tracking, detects section
    boundaries from spectral features, labels the sections by position,
    and derives suggested cut points honoring the 5-15s constraint.

    Args:
        audio_path: Path to an audio file readable by librosa.

    Returns:
        Dict with keys: "bpm" (float), "total_duration_s" (float, seconds),
        "beats" (beat times in seconds), "sections" (labelled time ranges),
        and "suggested_cuts" (list of cut-segment dicts).
    """
    y, sr = librosa.load(audio_path, sr=22050)
    total_duration = librosa.get_duration(y=y, sr=sr)

    # Beat tracking
    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
    beat_times = librosa.frames_to_time(beat_frames, sr=sr).tolist()
    # librosa may return tempo as a scalar or a 1-element array depending
    # on version; normalize either shape to a plain float rounded to 0.1.
    bpm = float(np.round(tempo, 1)) if np.ndim(tempo) == 0 else float(np.round(tempo[0], 1))

    # Section boundary detection via spectral change
    # Use a smaller number of segments for short audio
    n_segments = max(4, min(10, int(total_duration / 10)))
    boundaries = _detect_sections(y, sr, n_segments)

    # Build sections with labels
    section_labels = _label_sections(boundaries, total_duration)

    # Generate suggested cuts respecting 5-15s constraint
    suggested_cuts = _suggest_cuts(section_labels, beat_times, total_duration)

    return {
        "bpm": bpm,
        "total_duration_s": round(total_duration, 2),
        "beats": [round(b, 3) for b in beat_times],
        "sections": section_labels,
        "suggested_cuts": suggested_cuts,
    }
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _detect_sections(y, sr, n_segments: int) -> list[float]:
    """Detect section boundaries using spectral features.

    Stacks chroma and MFCC features and segments them with librosa's
    agglomerative clustering; falls back to evenly spaced boundaries if
    clustering fails. Boundaries within 2s of the start or end are dropped.

    Args:
        y: Audio time series (as loaded by librosa).
        sr: Sample rate of `y`.
        n_segments: Target number of sections for the clustering.

    Returns:
        Sorted, de-duplicated boundary times in seconds, rounded to 2 dp.
    """
    # PERF FIX: compute the duration once up front. The original evaluated
    # librosa.get_duration() inside the filter comprehension, i.e. once per
    # candidate boundary (and again in the fallback branch).
    duration = librosa.get_duration(y=y, sr=sr)

    # Compute spectral features
    chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    features = np.vstack([chroma, mfcc])

    # Use agglomerative clustering for boundary detection
    try:
        bound_frames = librosa.segment.agglomerative(features, k=n_segments)
        bound_times = librosa.frames_to_time(bound_frames, sr=sr).tolist()
    except Exception:
        # Fallback: evenly spaced sections
        bound_times = [i * duration / n_segments for i in range(1, n_segments)]

    # Remove boundaries too close to start/end
    bound_times = [b for b in bound_times if 2.0 < b < (duration - 2.0)]

    return sorted(set(round(b, 2) for b in bound_times))
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _label_sections(boundaries: list[float], total_duration: float) -> list[dict]:
|
|
87
|
+
"""Assign labels to sections based on position in the track."""
|
|
88
|
+
labels_map = ["intro", "verse", "build", "chorus", "bridge", "verse2", "chorus2", "outro"]
|
|
89
|
+
points = [0.0] + boundaries + [total_duration]
|
|
90
|
+
sections = []
|
|
91
|
+
|
|
92
|
+
for i in range(len(points) - 1):
|
|
93
|
+
label_idx = min(i, len(labels_map) - 1)
|
|
94
|
+
sections.append({
|
|
95
|
+
"start": round(points[i], 2),
|
|
96
|
+
"end": round(points[i + 1], 2),
|
|
97
|
+
"label": labels_map[label_idx],
|
|
98
|
+
})
|
|
99
|
+
|
|
100
|
+
return sections
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _suggest_cuts(
    sections: list[dict],
    beats: list[float],
    total_duration: float,
) -> list[dict]:
    """
    Generate suggested cut points respecting 5-15s segment constraints.
    Uses section boundaries as primary cuts, then subdivides or merges as needed.
    """
    # Pass 1: keep sections that already fit; beat-subdivide the longer ones.
    pieces = []
    for section in sections:
        lo = section["start"]
        hi = section["end"]
        name = section["label"]

        if hi - lo <= MAX_SEGMENT_S:
            pieces.append({"time": lo, "end": hi, "duration": round(hi - lo, 2), "section": name})
        else:
            # Subdivide long sections at beat points
            pieces.extend(_subdivide_at_beats(lo, hi, beats, name))

    # Pass 2: absorb too-short pieces into their neighbours.
    combined = _merge_short_segments(pieces)

    # Pass 3: keep only pieces that satisfy the minimum length.
    final = []
    for piece in combined:
        length = piece["end"] - piece["time"]
        if length < MIN_SEGMENT_S:
            continue
        final.append({
            "time": round(piece["time"], 2),
            "end": round(piece["end"], 2),
            "duration": round(length, 2),
            "section": piece["section"],
        })

    # Edge case: nothing survived — fall back to evenly spaced ~10s segments.
    if not final:
        count = max(1, int(total_duration / 10))
        step = total_duration / count
        for k in range(count):
            seg_lo = k * step
            seg_hi = min((k + 1) * step, total_duration)
            final.append({
                "time": round(seg_lo, 2),
                "end": round(seg_hi, 2),
                "duration": round(seg_hi - seg_lo, 2),
                "section": f"segment_{k + 1}",
            })

    return final
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def _subdivide_at_beats(
    start: float,
    end: float,
    beats: list[float],
    label: str,
) -> list[dict]:
    """Split a long section at beat points into 5-15s segments.

    Walks the beats inside [start, end]; whenever the open segment reaches
    the maximum length, cuts at the beat closest to ~10s into the segment
    (within the 5-15s window). The remainder is emitted as a final segment,
    folded into the previous one, or emitted undersized as a last resort.
    """
    usable = [b for b in beats if start <= b <= end]

    if not usable:
        # No beats inside this section: split into equal ~10s pieces instead.
        count = max(2, int((end - start) / 10))
        step = (end - start) / count
        return [
            {
                "time": round(start + k * step, 2),
                "end": round(start + (k + 1) * step, 2),
                "duration": round(step, 2),
                "section": label,
            }
            for k in range(count)
        ]

    out = []
    cursor = start

    for b in usable:
        if b - cursor < MAX_SEGMENT_S:
            continue
        # Open segment hit the cap — cut at the beat nearest the ~10s
        # target, restricted to the legal 5-15s window from the cursor.
        ideal = cursor + 10.0
        window = [t for t in usable if cursor + MIN_SEGMENT_S <= t <= cursor + MAX_SEGMENT_S]
        if window:
            chosen = min(window, key=lambda t: abs(t - ideal))
            out.append({
                "time": round(cursor, 2),
                "end": round(chosen, 2),
                "duration": round(chosen - cursor, 2),
                "section": label,
            })
            cursor = chosen

    # Close out the remainder of the section.
    tail = end - cursor
    if tail >= MIN_SEGMENT_S:
        out.append({
            "time": round(cursor, 2),
            "end": round(end, 2),
            "duration": round(tail, 2),
            "section": label,
        })
    elif out:
        # Remainder too short to stand alone: fold into the last segment.
        out[-1]["end"] = round(end, 2)
        out[-1]["duration"] = round(end - out[-1]["time"], 2)
    else:
        # No cut was ever made: one undersized segment beats none at all.
        out.append({
            "time": round(start, 2),
            "end": round(end, 2),
            "duration": round(end - start, 2),
            "section": label,
        })

    return out
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _merge_short_segments(segments: list[dict]) -> list[dict]:
    """Merge segments shorter than MIN_SEGMENT_S with their neighbors.

    A merge only happens when the combined piece still fits MAX_SEGMENT_S.
    When a full-length segment absorbs an undersized predecessor, the
    merged piece takes the full-length segment's section label; otherwise
    the earlier label is kept. Input dicts are never mutated.
    """
    if not segments:
        return []

    out = [segments[0].copy()]

    for current in segments[1:]:
        last = out[-1]
        last_len = last["end"] - last["time"]
        cur_len = current["end"] - current["time"]
        total = last_len + cur_len

        # Equivalent to the two original branches: merge when either side
        # is undersized and the union still respects the cap.
        if (cur_len < MIN_SEGMENT_S or last_len < MIN_SEGMENT_S) and total <= MAX_SEGMENT_S:
            last["end"] = current["end"]
            last["duration"] = round(total, 2)
            if cur_len >= MIN_SEGMENT_S:
                # Only the "short previous absorbed by healthy current"
                # case adopts the current segment's label.
                last["section"] = current["section"]
            continue

        out.append(current.copy())

    return out
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def main():
    """CLI entry point: validate args, analyze the file, print JSON to stdout."""
    if len(sys.argv) < 2:
        print("Usage: python3 analyze-beats.py <audio_file>", file=sys.stderr)
        print("Supported formats: mp3, aac, wav, m4a, ogg, flac", file=sys.stderr)
        sys.exit(1)

    audio_path = sys.argv[1]
    if not Path(audio_path).exists():
        # Errors are emitted as JSON on stderr so callers can machine-parse
        # failures while stdout stays reserved for the analysis result.
        print(json.dumps({"error": f"File not found: {audio_path}"}), file=sys.stderr)
        sys.exit(1)

    result = analyze_beats(audio_path)
    # Analysis result as pretty-printed JSON on stdout (module contract).
    print(json.dumps(result, indent=2, ensure_ascii=False))
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
# Standard entry guard so the module can also be imported without side effects.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
#!/usr/bin/env bash
#
# Batch video generation for short film projects.
# Reads a prompts JSON file and sequentially creates/waits/retrieves each shot.
#
# Usage:
#   bash batch-generate.sh --project <project-id> --ratio <ratio> --prompts-file <prompts.json>
#
# Prompts JSON format:
# [
#   { "shot_id": "S1", "prompt": "...", "duration": 8 },
#   { "shot_id": "S2", "prompt": "...", "duration": 13 },
#   ...
# ]
#
# Dependencies: node, jq, and the renoise-gen skill's renoise-cli.mjs.

set -euo pipefail

# ---- Parse args ----
PROJECT=""
RATIO="16:9"
PROMPTS_FILE=""
TIMEOUT=600   # per-shot wait budget in seconds

while [[ $# -gt 0 ]]; do
  case "$1" in
    --project) PROJECT="$2"; shift 2 ;;
    --ratio) RATIO="$2"; shift 2 ;;
    --prompts-file) PROMPTS_FILE="$2"; shift 2 ;;
    --timeout) TIMEOUT="$2"; shift 2 ;;
    *) echo "Unknown arg: $1" >&2; exit 1 ;;
  esac
done

if [[ -z "$PROMPTS_FILE" ]]; then
  echo "Error: --prompts-file is required." >&2
  echo "Usage: bash batch-generate.sh --project <id> --ratio <ratio> --prompts-file <prompts.json>" >&2
  exit 1
fi

if [[ ! -f "$PROMPTS_FILE" ]]; then
  echo "Error: File not found: $PROMPTS_FILE" >&2
  exit 1
fi

# ---- Locate renoise-cli ----
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CLI="${SCRIPT_DIR}/../../renoise-gen/renoise-cli.mjs"

if [[ ! -f "$CLI" ]]; then
  echo "Error: renoise-cli.mjs not found at $CLI" >&2
  exit 1
fi

# ---- Check balance ----
echo "=== Checking balance ==="
node "$CLI" me
echo ""

# ---- Read prompts ----
SHOT_COUNT=$(jq 'length' "$PROMPTS_FILE")
echo "=== Batch generation: $SHOT_COUNT shots ==="
# BUGFIX: ${PROJECT:-'(none)'} printed literal single quotes — quote removal
# does not apply inside a ${...:-} default within double quotes.
echo "Project: ${PROJECT:-(none)}"
echo "Ratio: $RATIO"
echo "Timeout per shot: ${TIMEOUT}s"
echo ""

# ---- Results tracking ----
RESULTS=()
FAILED=0

# BUGFIX: C-style loop instead of `seq 0 $((SHOT_COUNT - 1))` — BSD seq counts
# DOWN for `seq 0 -1`, so an empty prompts file would iterate bogus indices
# on macOS. The C-style loop simply runs zero times.
for ((i = 0; i < SHOT_COUNT; i++)); do
  SHOT_ID=$(jq -r ".[$i].shot_id" "$PROMPTS_FILE")
  PROMPT=$(jq -r ".[$i].prompt" "$PROMPTS_FILE")
  DURATION=$(jq -r ".[$i].duration" "$PROMPTS_FILE")

  echo "--- [$((i + 1))/$SHOT_COUNT] $SHOT_ID (${DURATION}s) ---"

  # Build tags: "<project>,<shot>" when a project id was given, else just the shot.
  TAGS="$SHOT_ID"
  if [[ -n "$PROJECT" ]]; then
    TAGS="$PROJECT,$SHOT_ID"
  fi

  # Create task (stderr folded into the captured output for diagnostics)
  CREATE_OUTPUT=$(node "$CLI" create \
    --prompt "$PROMPT" \
    --duration "$DURATION" \
    --ratio "$RATIO" \
    --tags "$TAGS" 2>&1) || {
    echo "[FAILED] $SHOT_ID — create error:"
    echo "$CREATE_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|—|create error")
    echo ""
    echo "Stopping batch — fix the issue and re-run."
    break
  }

  # Extract task ID from create output
  TASK_ID=$(echo "$CREATE_OUTPUT" | grep -oE 'id=[0-9]+' | head -1 | cut -d= -f2)

  if [[ -z "$TASK_ID" ]]; then
    echo "[FAILED] $SHOT_ID — could not parse task ID from output:"
    echo "$CREATE_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|—|no task ID")
    break
  fi

  echo "Task created: #$TASK_ID"

  # Wait for completion
  WAIT_OUTPUT=$(node "$CLI" wait "$TASK_ID" --timeout "$TIMEOUT" 2>&1) || {
    echo "[FAILED] $SHOT_ID (task #$TASK_ID) — wait error:"
    echo "$WAIT_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|#$TASK_ID|wait timeout/error")
    echo ""
    echo "Stopping batch — the task may still be running. Check with: node renoise-cli.mjs get $TASK_ID"
    break
  }

  # Get result
  RESULT_OUTPUT=$(node "$CLI" result "$TASK_ID" 2>&1)
  VIDEO_URL=$(echo "$RESULT_OUTPUT" | jq -r '.result.videoUrl // .videoUrl // "unknown"' 2>/dev/null || echo "unknown")

  echo "[SUCCESS] $SHOT_ID → $VIDEO_URL"
  RESULTS+=("$SHOT_ID|SUCCESS|#$TASK_ID|$VIDEO_URL")
  echo ""
done

# ---- Summary ----
echo ""
echo "========================================="
echo " BATCH GENERATION SUMMARY"
echo "========================================="
printf "%-8s %-10s %-10s %s\n" "Shot" "Status" "Task" "URL"
printf "%-8s %-10s %-10s %s\n" "----" "------" "----" "---"

# BUGFIX: guard the expansion — under `set -u`, "${RESULTS[@]}" on an empty
# array aborts the whole script in bash < 4.4 (e.g. macOS's /bin/bash 3.2).
if [[ ${#RESULTS[@]} -gt 0 ]]; then
  for entry in "${RESULTS[@]}"; do
    IFS='|' read -r shot status task url <<< "$entry"
    printf "%-8s %-10s %-10s %s\n" "$shot" "$status" "$task" "$url"
  done
fi

echo ""
echo "Total: ${#RESULTS[@]}/$SHOT_COUNT completed, $FAILED failed"

if [[ $FAILED -gt 0 ]]; then
  exit 1
fi
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
#!/usr/bin/env bash
#
# Split a storyboard grid image into individual panel images.
# Uses ImageMagick to crop each cell from an NxM grid.
#
# Usage:
#   bash split-grid.sh <grid_image> <output_dir> <rows> <cols>
#
# Example:
#   bash split-grid.sh grid.png storyboard/ 2 4
#   → storyboard/S1.png, S2.png, S3.png, ..., S8.png
#
# Dependencies: ImageMagick (brew install imagemagick)

set -euo pipefail

GRID_IMAGE="${1:-}"
OUTPUT_DIR="${2:-}"
ROWS="${3:-2}"
COLS="${4:-4}"

if [[ -z "$GRID_IMAGE" || -z "$OUTPUT_DIR" ]]; then
  echo "Usage: split-grid.sh <grid_image> <output_dir> [rows] [cols]" >&2
  echo "Example: split-grid.sh grid.png storyboard/ 2 4" >&2
  exit 1
fi

# ROBUSTNESS: fail early with clear messages instead of a cryptic
# ImageMagick or arithmetic error further down.
if [[ ! -f "$GRID_IMAGE" ]]; then
  echo "Error: grid image not found: $GRID_IMAGE" >&2
  exit 1
fi
if ! [[ "$ROWS" =~ ^[1-9][0-9]*$ && "$COLS" =~ ^[1-9][0-9]*$ ]]; then
  echo "Error: rows and cols must be positive integers (got rows=$ROWS cols=$COLS)" >&2
  exit 1
fi

if ! command -v magick &>/dev/null && ! command -v convert &>/dev/null; then
  echo "Error: ImageMagick not found. Install with: brew install imagemagick" >&2
  exit 1
fi

# Determine ImageMagick command (v7: magick, v6: convert/identify)
if command -v magick &>/dev/null; then
  IDENTIFY="magick identify"
  CONVERT="magick"
else
  IDENTIFY="identify"
  CONVERT="convert"
fi

mkdir -p "$OUTPUT_DIR"

# Get image dimensions (e.g. "1920x1080"); $IDENTIFY is deliberately
# unquoted so "magick identify" splits into command + subcommand.
DIMS=$($IDENTIFY -format "%wx%h" "$GRID_IMAGE")
IMG_W=$(echo "$DIMS" | cut -dx -f1)
IMG_H=$(echo "$DIMS" | cut -dx -f2)

# Integer division: any remainder pixels on the right/bottom edges are dropped.
CELL_W=$((IMG_W / COLS))
CELL_H=$((IMG_H / ROWS))

echo "Grid: ${IMG_W}x${IMG_H}, Layout: ${ROWS}x${COLS}, Cell: ${CELL_W}x${CELL_H}"

INDEX=1
for row in $(seq 0 $((ROWS - 1))); do
  for col in $(seq 0 $((COLS - 1))); do
    X=$((col * CELL_W))
    Y=$((row * CELL_H))
    OUT_FILE="${OUTPUT_DIR}/S${INDEX}.png"

    # +repage resets the virtual canvas so each panel is a standalone image.
    $CONVERT "$GRID_IMAGE" -crop "${CELL_W}x${CELL_H}+${X}+${Y}" +repage "$OUT_FILE"

    SIZE=$(ls -lh "$OUT_FILE" | awk '{print $5}')
    echo "  S${INDEX}: ${CELL_W}x${CELL_H} @ +${X}+${Y} → ${OUT_FILE} (${SIZE})"

    INDEX=$((INDEX + 1))
  done
done

echo "Done: $((INDEX - 1)) panels extracted"
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: tiktok-content-maker
|
|
3
|
+
description: >
|
|
4
|
+
TikTok e-commerce short video script generator. Analyzes product photos,
|
|
5
|
+
generates 15s video scripts with video prompts and English dialogue.
|
|
6
|
+
Use when user says "TikTok product video", "ecommerce video",
|
|
7
|
+
"product video", "sales video", "shoot product". Do NOT use for non-ecommerce videos or
|
|
8
|
+
general creative direction (use director instead).
|
|
9
|
+
allowed-tools: Bash, Read
|
|
10
|
+
metadata:
|
|
11
|
+
author: renoise
|
|
12
|
+
version: 0.1.0
|
|
13
|
+
category: video-production
|
|
14
|
+
tags: [product, ecommerce, tiktok]
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
# Content Maker — E-commerce Short Video Script + Generation
|
|
18
|
+
|
|
19
|
+
## Overview
|
|
20
|
+
|
|
21
|
+
End-to-end e-commerce short video tool: user provides product images (+ optional model images) → analyze product info → generate 15-second TikTok script (video prompt with embedded English dialogue) → submit video generation task.
|
|
22
|
+
|
|
23
|
+
## Workflow
|
|
24
|
+
|
|
25
|
+
### Phase 1: Material Collection & Product Analysis
|
|
26
|
+
|
|
27
|
+
1. **Collect material paths**: Ask user for images
|
|
28
|
+
- `Product image path` (required): Product hero image. **Best: clean white-background product photo with no text/labels/decorations**. Images with marketing text overlays will interfere with the model.
|
|
29
|
+
- `Model image path` (optional, for analysis reference only): Shows how the product is worn/used. **Note: Model images are only used to understand product usage — they are NOT uploaded to Renoise** (privacy detection will block images containing realistic human faces).
|
|
30
|
+
|
|
31
|
+
2. **Analyze product info**:
|
|
32
|
+
- Use the `gemini-gen` skill to analyze product images — send the image(s) with a prompt requesting product analysis (type, color, material, selling points, brand tone, scene suggestions)
|
|
33
|
+
- Alternatively, view images directly via the Read tool and analyze manually
|
|
34
|
+
- Extract: product type, color, material, selling points, brand tone, applicable scenarios
|
|
35
|
+
- **(Critical) Understand correct product usage from lifestyle images**:
|
|
36
|
+
- What is the user's posture? (standing/sitting/lying/walking)
|
|
37
|
+
- Where is the product positioned on the body? (handheld/floor/table/under body)
|
|
38
|
+
- How does the product interact with the body? (hand pressure vs body weight vs wearing vs applying)
|
|
39
|
+
- Where is the usage scenario? (gym/office/home/outdoors)
|
|
40
|
+
- If the user provides a product link, use WebFetch to scrape product detail page for additional context
|
|
41
|
+
|
|
42
|
+
3. **Present analysis results** for user to confirm or supplement. Results must include a clear "**Usage description**", e.g.:
|
|
43
|
+
> Usage: Place the peanut ball on the floor/yoga mat, user lies on top of the ball, using body weight to massage the muscles along both sides of the spine. The peanut-shaped groove avoids the spine while the two ball ends work the erector spinae muscles.
|
|
44
|
+
|
|
45
|
+
### Phase 2: 15-Second Script + Prompt Generation
|
|
46
|
+
|
|
47
|
+
Based on analysis results + reference guide, generate a complete 15-second video script.
|
|
48
|
+
|
|
49
|
+
**Must reference the following guide** (Read before generating):
|
|
50
|
+
- `${CLAUDE_SKILL_DIR}/references/ecom-prompt-guide.md` — E-commerce video prompt guide
|
|
51
|
+
|
|
52
|
+
**Prompt structure (3 required components):**
|
|
53
|
+
|
|
54
|
+
#### Part A: Product Anchoring (first line of prompt)
|
|
55
|
+
|
|
56
|
+
Product appearance is conveyed by the reference image. The prompt only needs **one sentence** stating what the product is + its use case:
|
|
57
|
+
|
|
58
|
+
```
|
|
59
|
+
The product is a [brand] [product type] for [primary use case], shown in the reference image.
|
|
60
|
+
The product must match the reference image exactly in every frame. Do not invent any packaging, box, or container unless the reference image shows one.
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
**Key**: Do not repeat color, material, shape, or logo descriptions in the prompt — that information is already in the reference image. Save prompt space for the hook and visual narrative.
|
|
64
|
+
|
|
65
|
+
#### Part B: Dialogue Embedding (throughout)
|
|
66
|
+
|
|
67
|
+
Dialogue must be in English, embedded in the narrative using forced lip-sync format:
|
|
68
|
+
```
|
|
69
|
+
Spoken dialogue (say EXACTLY, word-for-word): "..."
|
|
70
|
+
Mouth clearly visible when speaking, lip-sync aligned.
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
**Dialogue style requirements**:
|
|
74
|
+
- **Best-friend casual tone**: Like recommending to a friend, not reading ad copy
|
|
75
|
+
- **High information density**: Every sentence includes specific details (numbers, comparisons, usage scenarios) — no filler
|
|
76
|
+
- **No hard sell**: Don't end with "link below" or generic CTAs. Use natural personal recommendations (e.g., "Best money I have spent this year", "Trust me just start")
|
|
77
|
+
|
|
78
|
+
**Dialogue pacing** (4 lines, matching 4 time segments):
|
|
79
|
+
```
|
|
80
|
+
[0-3s] Hook — One sentence to stop the scroll (pain point / suspense / result-first)
|
|
81
|
+
[3-8s] Selling point — Specific specs + personal experience
|
|
82
|
+
[8-12s] Scene — Where to use + portability / versatility
|
|
83
|
+
[12-15s] Close — Genuine personal recommendation, no hard sell
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
#### Part C: Visual Narrative (one continuous narrative)
|
|
87
|
+
|
|
88
|
+
**Video structure (one continuous 15-second video):**
|
|
89
|
+
```
|
|
90
|
+
[0-3s] HOOK — High-impact opening. Must: fast camera movement (whip pan / snap dolly in) + dynamic action + start speaking immediately. Never start slow.
|
|
91
|
+
[3-8s] SHOWCASE — Product display + model interaction. Camera transitions to reveal material details.
|
|
92
|
+
[8-12s] SCENE — Real-life usage scenario. Pull back to medium/wide shot.
|
|
93
|
+
[12-15s] CLOSE — Model faces camera + product in frame + natural ending. Frame holds steady.
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
**Output 3 items:**
|
|
97
|
+
|
|
98
|
+
#### 1. Video Prompt (English, with dialogue)
|
|
99
|
+
Director-dictation style paragraph (6-10 sentences, one thing per sentence), containing:
|
|
100
|
+
- Product anchoring (one sentence, Part A) at the very beginning
|
|
101
|
+
- Dialogue embedded with `Spoken dialogue (say EXACTLY, word-for-word):` format (Part B)
|
|
102
|
+
- `Mouth clearly visible when speaking, lip-sync aligned.` after each dialogue line
|
|
103
|
+
- Ad-6D Protocol elements interspersed
|
|
104
|
+
- Model appearance consistency description (gender, hair, skin tone, body type, outfit)
|
|
105
|
+
- At least 3 camera movement changes
|
|
106
|
+
- Lighting/atmosphere description
|
|
107
|
+
|
|
108
|
+
#### 2. Dialogue Script (English, with timestamps)
|
|
109
|
+
List the 4 dialogue lines separately with their time segments for easy review.
|
|
110
|
+
|
|
111
|
+
#### 3. BGM / Sound Design Suggestions
|
|
112
|
+
- Recommend music style matching the product tone
|
|
113
|
+
- Key moment sound effect cues
|
|
114
|
+
|
|
115
|
+
**Reference example**: Read `${CLAUDE_SKILL_DIR}/examples/dress-demo.md` for the latest standard output format.
|
|
116
|
+
|
|
117
|
+
### Phase 3: User Confirmation
|
|
118
|
+
|
|
119
|
+
After presenting the full script, ask the user:
|
|
120
|
+
- Whether to adjust dialogue
|
|
121
|
+
- Whether to change the scene
|
|
122
|
+
- Whether to modify prompt details
|
|
123
|
+
- Proceed to submission after confirmation
|
|
124
|
+
|
|
125
|
+
### Phase 4: Upload Materials + Submit Video Generation Task
|
|
126
|
+
|
|
127
|
+
After user confirms the script, upload the product image and submit the video generation task.
|
|
128
|
+
|
|
129
|
+
**Important rules**:
|
|
130
|
+
- Only upload product images — **never upload model/real person photos** (privacy detection will block images containing realistic human faces, error: `InputImageSensitiveContentDetected.PrivacyInformation`)
|
|
131
|
+
- Model appearance is controlled entirely by prompt text description
|
|
132
|
+
- Product images should ideally be clean white-background product photos, avoid images with marketing text overlays
|
|
133
|
+
- For batch generation: upload the product image once, reuse the material ID to submit multiple tasks with different scenes
|
|
134
|
+
|
|
135
|
+
## Important Notes
|
|
136
|
+
|
|
137
|
+
- Images support jpg/jpeg/png/webp formats
|
|
138
|
+
- Video prompts must be entirely in English
|
|
139
|
+
- Dialogue must be in English, embedded in the prompt (`Spoken dialogue (say EXACTLY, word-for-word): "..."`)
|
|
140
|
+
- **Do not output separate subtitle text** — dialogue is already in the prompt, no additional subtitle layer needed
|