@renoise/video-maker 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/plugin.json +5 -0
- package/README.md +50 -0
- package/hooks/hooks.json +16 -0
- package/hooks/session-start.sh +17 -0
- package/lib/gemini.ts +49 -0
- package/package.json +22 -0
- package/skills/director/SKILL.md +272 -0
- package/skills/director/references/narrative-pacing.md +257 -0
- package/skills/director/references/style-library.md +179 -0
- package/skills/product-sheet-generate/SKILL.md +75 -0
- package/skills/renoise-gen/SKILL.md +362 -0
- package/skills/renoise-gen/references/api-endpoints.md +138 -0
- package/skills/renoise-gen/references/video-capabilities.md +524 -0
- package/skills/renoise-gen/renoise-cli.mjs +723 -0
- package/skills/scene-generate/SKILL.md +52 -0
- package/skills/short-film-editor/SKILL.md +479 -0
- package/skills/short-film-editor/examples/mystery-package-4shot.md +260 -0
- package/skills/short-film-editor/references/continuity-guide.md +170 -0
- package/skills/short-film-editor/scripts/analyze-beats.py +271 -0
- package/skills/short-film-editor/scripts/batch-generate.sh +150 -0
- package/skills/short-film-editor/scripts/generate-storyboard-html.ts +714 -0
- package/skills/short-film-editor/scripts/split-grid.sh +70 -0
- package/skills/tiktok-content-maker/SKILL.md +143 -0
- package/skills/tiktok-content-maker/examples/dress-demo.md +86 -0
- package/skills/tiktok-content-maker/references/ecom-prompt-guide.md +261 -0
- package/skills/tiktok-content-maker/scripts/analyze-images.ts +122 -0
- package/skills/video-download/SKILL.md +161 -0
- package/skills/video-download/scripts/download-video.sh +91 -0
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Beat analysis for short film editing.
|
|
4
|
+
Analyzes audio to extract BPM, beat positions, section boundaries,
|
|
5
|
+
and suggests clip cut points within Seedance's 5-15s constraint.
|
|
6
|
+
|
|
7
|
+
Usage: python3 analyze-beats.py <audio_file>
|
|
8
|
+
Output: JSON to stdout
|
|
9
|
+
|
|
10
|
+
Dependencies: pip3 install librosa soundfile numpy
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import json
|
|
14
|
+
import sys
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
try:
|
|
18
|
+
import librosa
|
|
19
|
+
import numpy as np
|
|
20
|
+
except ImportError:
|
|
21
|
+
print(
|
|
22
|
+
json.dumps({
|
|
23
|
+
"error": "Missing dependencies. Install with: pip3 install librosa soundfile numpy"
|
|
24
|
+
}),
|
|
25
|
+
file=sys.stderr,
|
|
26
|
+
)
|
|
27
|
+
sys.exit(1)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
MIN_SEGMENT_S = 5.0
|
|
31
|
+
MAX_SEGMENT_S = 15.0
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def analyze_beats(audio_path: str) -> dict:
    """Analyze an audio file and return beat/section/cut data.

    Loads the audio at 22.05 kHz, tracks tempo and beat positions,
    detects section boundaries from spectral change, and derives
    suggested cut points within the 5-15 s segment constraint.

    Args:
        audio_path: Path to the audio file to analyze.

    Returns:
        Dict with "bpm", "total_duration_s", "beats", "sections", and
        "suggested_cuts" keys (all times in seconds).
    """
    y, sr = librosa.load(audio_path, sr=22050)
    total_duration = librosa.get_duration(y=y, sr=sr)

    # Tempo and beat positions.
    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
    beat_times = librosa.frames_to_time(beat_frames, sr=sr).tolist()
    # beat_track may return a scalar or a 1-element array depending on the
    # librosa version; normalize to a rounded plain float either way.
    raw_tempo = tempo if np.ndim(tempo) == 0 else tempo[0]
    bpm = float(np.round(raw_tempo, 1))

    # Section boundary detection via spectral change; use fewer target
    # segments for short audio.
    n_segments = max(4, min(10, int(total_duration / 10)))
    boundaries = _detect_sections(y, sr, n_segments)

    # Attach position-based labels to the detected sections.
    sections = _label_sections(boundaries, total_duration)

    # Suggested cuts respecting the 5-15 s constraint.
    cuts = _suggest_cuts(sections, beat_times, total_duration)

    return {
        "bpm": bpm,
        "total_duration_s": round(total_duration, 2),
        "beats": [round(t, 3) for t in beat_times],
        "sections": sections,
        "suggested_cuts": cuts,
    }
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _detect_sections(y, sr, n_segments: int) -> list[float]:
    """Detect section boundaries (in seconds) using spectral features.

    Stacks chroma and MFCC features, then runs librosa's agglomerative
    segmentation to find ``n_segments`` sections; falls back to evenly
    spaced boundaries if segmentation fails.

    Args:
        y: Audio time series (as returned by ``librosa.load``).
        sr: Sample rate of ``y``.
        n_segments: Target number of sections.

    Returns:
        Sorted, de-duplicated boundary times rounded to 2 decimals,
        excluding boundaries within 2 s of the start or end.
    """
    # Spectral features: harmonic content (chroma) + timbre (MFCC).
    chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    features = np.vstack([chroma, mfcc])

    # Hoisted: get_duration was previously recomputed inside the filter
    # below for every candidate boundary (loop-invariant work).
    duration = librosa.get_duration(y=y, sr=sr)

    # Agglomerative clustering for boundary detection.
    try:
        bound_frames = librosa.segment.agglomerative(features, k=n_segments)
        bound_times = librosa.frames_to_time(bound_frames, sr=sr).tolist()
    except Exception:
        # Fallback: evenly spaced sections.
        bound_times = [i * duration / n_segments for i in range(1, n_segments)]

    # Drop boundaries too close to the start/end to be useful cut points.
    bound_times = [b for b in bound_times if 2.0 < b < duration - 2.0]

    return sorted(set(round(b, 2) for b in bound_times))
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _label_sections(boundaries: list[float], total_duration: float) -> list[dict]:
|
|
87
|
+
"""Assign labels to sections based on position in the track."""
|
|
88
|
+
labels_map = ["intro", "verse", "build", "chorus", "bridge", "verse2", "chorus2", "outro"]
|
|
89
|
+
points = [0.0] + boundaries + [total_duration]
|
|
90
|
+
sections = []
|
|
91
|
+
|
|
92
|
+
for i in range(len(points) - 1):
|
|
93
|
+
label_idx = min(i, len(labels_map) - 1)
|
|
94
|
+
sections.append({
|
|
95
|
+
"start": round(points[i], 2),
|
|
96
|
+
"end": round(points[i + 1], 2),
|
|
97
|
+
"label": labels_map[label_idx],
|
|
98
|
+
})
|
|
99
|
+
|
|
100
|
+
return sections
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _suggest_cuts(
    sections: list[dict],
    beats: list[float],
    total_duration: float,
) -> list[dict]:
    """
    Generate suggested cut points respecting 5-15s segment constraints.

    Uses section boundaries as primary cuts, subdivides long sections at
    beat points, merges segments that are too short, then enforces both
    duration bounds on the result.

    Args:
        sections: Section dicts with "start", "end", "label" keys.
        beats: Beat times in seconds, ascending.
        total_duration: Total audio duration in seconds.

    Returns:
        Cut dicts with "time", "end", "duration", "section" keys.
    """
    raw_segments = []
    for sec in sections:
        start = sec["start"]
        end = sec["end"]
        duration = end - start
        label = sec["label"]

        if duration <= MAX_SEGMENT_S:
            raw_segments.append({"time": start, "end": end, "duration": round(duration, 2), "section": label})
        else:
            # Subdivide long sections at beat points
            sub_segments = _subdivide_at_beats(start, end, beats, label)
            raw_segments.extend(sub_segments)

    # Merge segments that are too short
    merged = _merge_short_segments(raw_segments)

    # Final pass: enforce BOTH bounds. Previously only the lower bound was
    # checked, so a beat-free long section could slip through at more than
    # MAX_SEGMENT_S; such segments are now split evenly. Since
    # MAX_SEGMENT_S >= 2 * MIN_SEGMENT_S, each even piece is >= MIN_SEGMENT_S.
    result = []
    for seg in merged:
        dur = seg["end"] - seg["time"]
        if dur < MIN_SEGMENT_S:
            continue
        if dur > MAX_SEGMENT_S:
            # Fewest even pieces that each fit under MAX_SEGMENT_S
            # (negated floor-division == ceiling, avoids importing math).
            n = int(-(-dur // MAX_SEGMENT_S))
            piece = dur / n
            for j in range(n):
                s = seg["time"] + j * piece
                e = seg["time"] + (j + 1) * piece
                result.append({
                    "time": round(s, 2),
                    "end": round(e, 2),
                    "duration": round(e - s, 2),
                    "section": seg["section"],
                })
            continue
        result.append({
            "time": round(seg["time"], 2),
            "end": round(seg["end"], 2),
            "duration": round(dur, 2),
            "section": seg["section"],
        })

    # Handle edge case: if no valid segments, create evenly spaced ones
    if not result:
        n = max(1, int(total_duration / 10))
        seg_dur = total_duration / n
        for i in range(n):
            s = i * seg_dur
            e = min((i + 1) * seg_dur, total_duration)
            result.append({
                "time": round(s, 2),
                "end": round(e, 2),
                "duration": round(e - s, 2),
                "section": f"segment_{i + 1}",
            })

    return result
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def _subdivide_at_beats(
    start: float,
    end: float,
    beats: list[float],
    label: str,
) -> list[dict]:
    """Split a long section at beat points into 5-15s segments.

    Args:
        start: Section start time in seconds.
        end: Section end time in seconds.
        beats: All beat times in seconds; only those inside the section
            are used.
        label: Section label carried onto every produced segment.

    Returns:
        Segment dicts with "time", "end", "duration", "section" keys.
    """
    section_beats = [b for b in beats if start <= b <= end]

    if not section_beats:
        # No beats inside the section; fall back to an even split.
        count = max(2, int((end - start) / 10))
        step = (end - start) / count
        return [
            {
                "time": round(start + k * step, 2),
                "end": round(start + (k + 1) * step, 2),
                "duration": round(step, 2),
                "section": label,
            }
            for k in range(count)
        ]

    segments: list[dict] = []
    cursor = start

    for beat in section_beats:
        if beat - cursor < MAX_SEGMENT_S:
            continue
        # A cut is due: pick the cuttable beat closest to a ~10 s target.
        target = cursor + 10.0
        window = [b for b in section_beats if cursor + MIN_SEGMENT_S <= b <= cursor + MAX_SEGMENT_S]
        if window:
            cut = min(window, key=lambda b: abs(b - target))
            segments.append({
                "time": round(cursor, 2),
                "end": round(cut, 2),
                "duration": round(cut - cursor, 2),
                "section": label,
            })
            cursor = cut

    # Close out the remainder of the section.
    if end - cursor >= MIN_SEGMENT_S:
        segments.append({
            "time": round(cursor, 2),
            "end": round(end, 2),
            "duration": round(end - cursor, 2),
            "section": label,
        })
    elif segments:
        # Remainder too short to stand alone: extend the last segment.
        tail = segments[-1]
        tail["end"] = round(end, 2)
        tail["duration"] = round(end - tail["time"], 2)
    else:
        # Nothing produced at all: emit the whole section as one segment.
        segments.append({
            "time": round(start, 2),
            "end": round(end, 2),
            "duration": round(end - start, 2),
            "section": label,
        })

    return segments
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _merge_short_segments(segments: list[dict]) -> list[dict]:
    """Merge segments shorter than MIN_SEGMENT_S with their neighbors.

    A merge only happens when the combined duration stays within
    MAX_SEGMENT_S; otherwise the segment is kept as-is.

    Args:
        segments: Segment dicts with "time", "end", "duration",
            "section" keys, in timeline order.

    Returns:
        New list of (copied) segment dicts after merging.
    """
    if not segments:
        return []

    out = [dict(segments[0])]

    for current in segments[1:]:
        last = out[-1]
        last_dur = last["end"] - last["time"]
        cur_dur = current["end"] - current["time"]
        combined = last_dur + cur_dur
        fits = combined <= MAX_SEGMENT_S

        if cur_dur < MIN_SEGMENT_S and fits:
            # Fold a too-short segment into its predecessor.
            last["end"] = current["end"]
            last["duration"] = round(combined, 2)
        elif last_dur < MIN_SEGMENT_S and fits:
            # Predecessor was too short: absorb the current segment and
            # adopt its section label.
            last["end"] = current["end"]
            last["duration"] = round(combined, 2)
            last["section"] = current["section"]
        else:
            out.append(dict(current))

    return out
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def main():
    """CLI entry point: validate args, analyze the audio file, print JSON.

    Exits with status 1 (after writing a diagnostic to stderr) on missing
    arguments, a missing file, or an analysis failure.
    """
    if len(sys.argv) < 2:
        print("Usage: python3 analyze-beats.py <audio_file>", file=sys.stderr)
        print("Supported formats: mp3, aac, wav, m4a, ogg, flac", file=sys.stderr)
        sys.exit(1)

    audio_path = sys.argv[1]
    if not Path(audio_path).exists():
        print(json.dumps({"error": f"File not found: {audio_path}"}), file=sys.stderr)
        sys.exit(1)

    # Report analysis failures (corrupt file, unsupported codec, ...) in the
    # same machine-readable JSON-on-stderr shape this script uses for its
    # other errors, instead of letting a raw traceback escape.
    try:
        result = analyze_beats(audio_path)
    except Exception as exc:
        print(json.dumps({"error": f"Analysis failed: {exc}"}), file=sys.stderr)
        sys.exit(1)

    print(json.dumps(result, indent=2, ensure_ascii=False))
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
# Run the CLI only when executed directly (not when imported as a module).
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
#!/usr/bin/env bash
#
# Batch video generation for short film projects.
# Reads a prompts JSON file and sequentially creates/waits/retrieves each shot.
#
# Usage:
#   bash batch-generate.sh --project <project-id> --ratio <ratio> --prompts-file <prompts.json>
#
# Prompts JSON format:
# [
#   { "shot_id": "S1", "prompt": "...", "duration": 8 },
#   { "shot_id": "S2", "prompt": "...", "duration": 13 },
#   ...
# ]
#
# Requires: node, jq, and the sibling renoise-gen/renoise-cli.mjs script.

set -euo pipefail

# ---- Parse args ----
PROJECT=""
RATIO="16:9"
PROMPTS_FILE=""
TIMEOUT=600

while [[ $# -gt 0 ]]; do
  case "$1" in
    --project) PROJECT="$2"; shift 2 ;;
    --ratio) RATIO="$2"; shift 2 ;;
    --prompts-file) PROMPTS_FILE="$2"; shift 2 ;;
    --timeout) TIMEOUT="$2"; shift 2 ;;
    *) echo "Unknown arg: $1"; exit 1 ;;
  esac
done

if [[ -z "$PROMPTS_FILE" ]]; then
  echo "Error: --prompts-file is required."
  echo "Usage: bash batch-generate.sh --project <id> --ratio <ratio> --prompts-file <prompts.json>"
  exit 1
fi

if [[ ! -f "$PROMPTS_FILE" ]]; then
  echo "Error: File not found: $PROMPTS_FILE"
  exit 1
fi

# ---- Locate renoise-cli ----
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CLI="${SCRIPT_DIR}/../../renoise-gen/renoise-cli.mjs"

if [[ ! -f "$CLI" ]]; then
  echo "Error: renoise-cli.mjs not found at $CLI"
  exit 1
fi

# ---- Check balance ----
echo "=== Checking balance ==="
node "$CLI" me
echo ""

# ---- Read prompts ----
SHOT_COUNT=$(jq 'length' "$PROMPTS_FILE")
echo "=== Batch generation: $SHOT_COUNT shots ==="
# Fix: ${PROJECT:-'(none)'} printed the single quotes literally.
echo "Project: ${PROJECT:-(none)}"
echo "Ratio: $RATIO"
echo "Timeout per shot: ${TIMEOUT}s"
echo ""

# ---- Results tracking ----
RESULTS=()
FAILED=0
SUCCEEDED=0

for i in $(seq 0 $((SHOT_COUNT - 1))); do
  SHOT_ID=$(jq -r ".[$i].shot_id" "$PROMPTS_FILE")
  PROMPT=$(jq -r ".[$i].prompt" "$PROMPTS_FILE")
  DURATION=$(jq -r ".[$i].duration" "$PROMPTS_FILE")

  echo "--- [$((i + 1))/$SHOT_COUNT] $SHOT_ID (${DURATION}s) ---"

  # Build tags: "<project>,<shot>" when a project is given, else just the shot.
  TAGS="$SHOT_ID"
  if [[ -n "$PROJECT" ]]; then
    TAGS="$PROJECT,$SHOT_ID"
  fi

  # Create task
  CREATE_OUTPUT=$(node "$CLI" create \
    --prompt "$PROMPT" \
    --duration "$DURATION" \
    --ratio "$RATIO" \
    --tags "$TAGS" 2>&1) || {
    echo "[FAILED] $SHOT_ID — create error:"
    echo "$CREATE_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|—|create error")
    echo ""
    echo "Stopping batch — fix the issue and re-run."
    break
  }

  # Extract task ID from create output
  TASK_ID=$(echo "$CREATE_OUTPUT" | grep -oE 'id=[0-9]+' | head -1 | cut -d= -f2)

  if [[ -z "$TASK_ID" ]]; then
    echo "[FAILED] $SHOT_ID — could not parse task ID from output:"
    echo "$CREATE_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|—|no task ID")
    break
  fi

  echo "Task created: #$TASK_ID"

  # Wait for completion
  WAIT_OUTPUT=$(node "$CLI" wait "$TASK_ID" --timeout "$TIMEOUT" 2>&1) || {
    echo "[FAILED] $SHOT_ID (task #$TASK_ID) — wait error:"
    echo "$WAIT_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|#$TASK_ID|wait timeout/error")
    echo ""
    echo "Stopping batch — the task may still be running. Check with: node renoise-cli.mjs get $TASK_ID"
    break
  }

  # Get result. Fix: this call was unguarded — under `set -e` a failure
  # here aborted the whole script without ever printing the summary.
  RESULT_OUTPUT=$(node "$CLI" result "$TASK_ID" 2>&1) || {
    echo "[FAILED] $SHOT_ID (task #$TASK_ID) — result error:"
    echo "$RESULT_OUTPUT"
    FAILED=$((FAILED + 1))
    RESULTS+=("$SHOT_ID|FAILED|#$TASK_ID|result error")
    echo ""
    continue
  }
  VIDEO_URL=$(echo "$RESULT_OUTPUT" | jq -r '.result.videoUrl // .videoUrl // "unknown"' 2>/dev/null || echo "unknown")

  echo "[SUCCESS] $SHOT_ID → $VIDEO_URL"
  SUCCEEDED=$((SUCCEEDED + 1))
  RESULTS+=("$SHOT_ID|SUCCESS|#$TASK_ID|$VIDEO_URL")
  echo ""
done

# ---- Summary ----
echo ""
echo "========================================="
echo " BATCH GENERATION SUMMARY"
echo "========================================="
printf "%-8s %-10s %-10s %s\n" "Shot" "Status" "Task" "URL"
printf "%-8s %-10s %-10s %s\n" "----" "------" "----" "---"

# ${RESULTS[@]+...} guards the empty-array case (SHOT_COUNT=0), which
# trips `set -u` on bash < 4.4 (e.g. macOS system bash 3.2).
for entry in ${RESULTS[@]+"${RESULTS[@]}"}; do
  IFS='|' read -r shot status task url <<< "$entry"
  printf "%-8s %-10s %-10s %s\n" "$shot" "$status" "$task" "$url"
done

echo ""
# Fix: the old summary reported ${#RESULTS[@]} as "completed", which
# also counted FAILED entries; report actual successes instead.
echo "Total: $SUCCEEDED/$SHOT_COUNT succeeded, $FAILED failed"

if [[ $FAILED -gt 0 ]]; then
  exit 1
fi
|