@biggora/claude-plugins 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +13 -0
- package/CLAUDE.md +55 -0
- package/LICENSE +1 -1
- package/README.md +208 -39
- package/bin/cli.js +39 -0
- package/package.json +30 -17
- package/registry/registry.json +166 -1
- package/registry/schema.json +10 -0
- package/src/commands/skills/add.js +194 -0
- package/src/commands/skills/list.js +52 -0
- package/src/commands/skills/remove.js +27 -0
- package/src/commands/skills/update.js +74 -0
- package/src/config.js +5 -0
- package/src/skills/codex-cli/SKILL.md +265 -0
- package/src/skills/commafeed-api/SKILL.md +1012 -0
- package/src/skills/gemini-cli/SKILL.md +379 -0
- package/src/skills/gemini-cli/references/commands.md +145 -0
- package/src/skills/gemini-cli/references/configuration.md +182 -0
- package/src/skills/gemini-cli/references/headless-and-scripting.md +181 -0
- package/src/skills/gemini-cli/references/mcp-and-extensions.md +254 -0
- package/src/skills/n8n-api/SKILL.md +623 -0
- package/src/skills/notebook-lm/SKILL.md +217 -0
- package/src/skills/notebook-lm/references/artifact-options.md +168 -0
- package/src/skills/notebook-lm/references/auth.md +58 -0
- package/src/skills/notebook-lm/references/workflows.md +144 -0
- package/src/skills/screen-recording/SKILL.md +309 -0
- package/src/skills/screen-recording/references/approach1-programmatic.md +311 -0
- package/src/skills/screen-recording/references/approach2-xvfb.md +232 -0
- package/src/skills/screen-recording/references/design-patterns.md +168 -0
- package/src/skills/test-mobile-app/SKILL.md +212 -0
- package/src/skills/test-mobile-app/references/report-template.md +95 -0
- package/src/skills/test-mobile-app/references/setup-appium.md +154 -0
- package/src/skills/test-mobile-app/scripts/analyze_apk.py +164 -0
- package/src/skills/test-mobile-app/scripts/check_environment.py +116 -0
- package/src/skills/test-mobile-app/scripts/generate_report.py +250 -0
- package/src/skills/test-mobile-app/scripts/run_tests.py +326 -0
- package/src/skills/test-web-ui/SKILL.md +232 -0
- package/src/skills/test-web-ui/references/test_case_schema.md +102 -0
- package/src/skills/test-web-ui/scripts/discover.py +176 -0
- package/src/skills/test-web-ui/scripts/generate_report.py +237 -0
- package/src/skills/test-web-ui/scripts/run_tests.py +296 -0
- package/src/skills/text-to-speech/SKILL.md +236 -0
- package/src/skills/text-to-speech/references/espeak-cli.md +277 -0
- package/src/skills/text-to-speech/references/kokoro-onnx.md +124 -0
- package/src/skills/text-to-speech/references/online-engines.md +128 -0
- package/src/skills/text-to-speech/references/pyttsx3-espeak.md +143 -0
- package/src/skills/tm-search/SKILL.md +240 -0
- package/src/skills/tm-search/references/field-guide.md +79 -0
- package/src/skills/tm-search/references/scraping-fallback.md +140 -0
- package/src/skills/tm-search/scripts/tm_search.py +375 -0
- package/src/skills/wp-rest-api/SKILL.md +114 -0
- package/src/skills/wp-rest-api/references/authentication.md +18 -0
- package/src/skills/wp-rest-api/references/custom-content-types.md +20 -0
- package/src/skills/wp-rest-api/references/discovery-and-params.md +20 -0
- package/src/skills/wp-rest-api/references/responses-and-fields.md +30 -0
- package/src/skills/wp-rest-api/references/routes-and-endpoints.md +36 -0
- package/src/skills/wp-rest-api/references/schema.md +22 -0
- package/src/skills/youtube-search/SKILL.md +412 -0
- package/src/skills/youtube-search/references/parsing-examples.md +159 -0
- package/src/skills/youtube-search/references/youtube-api-quota.md +85 -0
- package/src/skills/youtube-thumbnail/SKILL.md +1060 -0
- package/tests/commands/info.test.js +49 -0
- package/tests/commands/install.test.js +36 -0
- package/tests/commands/list.test.js +66 -0
- package/tests/commands/publish.test.js +182 -0
- package/tests/commands/search.test.js +45 -0
- package/tests/commands/uninstall.test.js +29 -0
- package/tests/commands/update.test.js +59 -0
- package/tests/functional/skills-lifecycle.test.js +293 -0
- package/tests/helpers/fixtures.js +63 -0
- package/tests/integration/cli.test.js +83 -0
- package/tests/skills/add.test.js +138 -0
- package/tests/skills/list.test.js +63 -0
- package/tests/skills/remove.test.js +38 -0
- package/tests/skills/update.test.js +60 -0
- package/tests/unit/config.test.js +31 -0
- package/tests/unit/registry.test.js +79 -0
- package/tests/unit/utils.test.js +150 -0
- package/tests/validation/registry-schema.test.js +112 -0
- package/tests/validation/skills-validation.test.js +96 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
# Approach 2: Virtual Display Recording (Xvfb + FFmpeg x11grab)
|
|
2
|
+
|
|
3
|
+
Capture a real running application on a virtual screen.
|
|
4
|
+
|
|
5
|
+
## How It Works
|
|
6
|
+
|
|
7
|
+
```
|
|
8
|
+
Xvfb :99 ←── virtual display (invisible, in RAM)
|
|
9
|
+
↑
|
|
10
|
+
Your app runs here (DISPLAY=:99)
|
|
11
|
+
↓
|
|
12
|
+
FFmpeg x11grab ←── records the virtual display → MP4
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
## Full Working Template
|
|
16
|
+
|
|
17
|
+
```python
|
|
18
|
+
#!/usr/bin/env python3
|
|
19
|
+
"""
|
|
20
|
+
Virtual Display Screen Recorder
|
|
21
|
+
Launches a real app on a virtual display and records it.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import subprocess, os, time, signal, shutil
|
|
25
|
+
|
|
26
|
+
WIDTH, HEIGHT = 1280, 720
|
|
27
|
+
DISPLAY_NUM = ":99"
|
|
28
|
+
FPS = 24
|
|
29
|
+
DURATION = 30 # seconds to record
|
|
30
|
+
OUTPUT = "/home/claude/recording.mp4"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def start_virtual_display():
|
|
34
|
+
"""Start Xvfb virtual display"""
|
|
35
|
+
proc = subprocess.Popen([
|
|
36
|
+
"Xvfb", DISPLAY_NUM,
|
|
37
|
+
"-screen", "0", f"{WIDTH}x{HEIGHT}x24",
|
|
38
|
+
"-ac", "-nolisten", "tcp"
|
|
39
|
+
], stderr=subprocess.DEVNULL)
|
|
40
|
+
time.sleep(1.0) # wait for display to initialize
|
|
41
|
+
print(f"✅ Virtual display {DISPLAY_NUM} started (PID {proc.pid})")
|
|
42
|
+
return proc
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def start_recording(output_path, duration=None):
|
|
46
|
+
"""Start FFmpeg recording of virtual display"""
|
|
47
|
+
cmd = [
|
|
48
|
+
"ffmpeg",
|
|
49
|
+
"-f", "x11grab",
|
|
50
|
+
"-video_size", f"{WIDTH}x{HEIGHT}",
|
|
51
|
+
"-framerate", str(FPS),
|
|
52
|
+
"-i", DISPLAY_NUM,
|
|
53
|
+
"-c:v", "libx264",
|
|
54
|
+
"-preset", "ultrafast",
|
|
55
|
+
"-pix_fmt", "yuv420p",
|
|
56
|
+
]
|
|
57
|
+
if duration:
|
|
58
|
+
cmd.extend(["-t", str(duration)])
|
|
59
|
+
cmd.extend(["-y", "-loglevel", "quiet", output_path])  # options must come BEFORE the output file; ffmpeg ignores trailing options
|
|
60
|
+
|
|
61
|
+
proc = subprocess.Popen(cmd)
|
|
62
|
+
time.sleep(0.5)
|
|
63
|
+
print(f"✅ Recording started → {output_path}")
|
|
64
|
+
return proc
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def run_app_on_display(app_command, env_extra=None):
|
|
68
|
+
"""Run an application on the virtual display"""
|
|
69
|
+
env = os.environ.copy()
|
|
70
|
+
env["DISPLAY"] = DISPLAY_NUM
|
|
71
|
+
if env_extra:
|
|
72
|
+
env.update(env_extra)
|
|
73
|
+
proc = subprocess.Popen(app_command, env=env, stderr=subprocess.DEVNULL)
|
|
74
|
+
return proc
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def record_session(app_fn, duration=30, output=OUTPUT):
|
|
78
|
+
"""
|
|
79
|
+
Full recording session:
|
|
80
|
+
1. Start virtual display
|
|
81
|
+
2. Start recording
|
|
82
|
+
3. Run app_fn(display) — your automation code
|
|
83
|
+
4. Stop recording, stop display
|
|
84
|
+
"""
|
|
85
|
+
xvfb = start_virtual_display()
|
|
86
|
+
recorder = start_recording(output, duration)
|
|
87
|
+
|
|
88
|
+
try:
|
|
89
|
+
app_fn(DISPLAY_NUM)
|
|
90
|
+
# Hold for the full recording duration after app_fn returns (ffmpeg's -t bounds the actual capture)
|
|
91
|
+
time.sleep(duration)
|
|
92
|
+
finally:
|
|
93
|
+
recorder.send_signal(signal.SIGINT)
|
|
94
|
+
recorder.wait()
|
|
95
|
+
xvfb.terminate()
|
|
96
|
+
xvfb.wait()
|
|
97
|
+
print(f"✅ Recording saved: {os.path.getsize(output):,} bytes")
|
|
98
|
+
|
|
99
|
+
return output
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
# ── EXAMPLE: Record a Python Tkinter app ─────────────────────────────────────
|
|
103
|
+
|
|
104
|
+
def my_tkinter_app_script():
|
|
105
|
+
return '''
|
|
106
|
+
import tkinter as tk
|
|
107
|
+
import time
|
|
108
|
+
|
|
109
|
+
root = tk.Tk()
|
|
110
|
+
root.title("My App Demo")
|
|
111
|
+
root.geometry("1280x720")
|
|
112
|
+
root.configure(bg="#0c0c20")
|
|
113
|
+
|
|
114
|
+
label = tk.Label(root, text="Loading...", font=("Arial", 48),
|
|
115
|
+
bg="#0c0c20", fg="white")
|
|
116
|
+
label.pack(pady=200)
|
|
117
|
+
|
|
118
|
+
def update():
|
|
119
|
+
texts = ["Detecting issues...", "Processing...", "✅ Complete!", "100% Accurate"]
|
|
120
|
+
for i, t in enumerate(texts):
|
|
121
|
+
root.after(i * 2000, lambda t=t: label.configure(text=t))
|
|
122
|
+
root.after(8000, root.destroy)
|
|
123
|
+
|
|
124
|
+
root.after(500, update)
|
|
125
|
+
root.mainloop()
|
|
126
|
+
'''
|
|
127
|
+
|
|
128
|
+
def run_demo(display):
|
|
129
|
+
# Write app to temp file
|
|
130
|
+
with open('/tmp/demo_app.py', 'w') as f:
|
|
131
|
+
f.write(my_tkinter_app_script())
|
|
132
|
+
|
|
133
|
+
env = os.environ.copy()
|
|
134
|
+
env["DISPLAY"] = display
|
|
135
|
+
subprocess.Popen(["python3", "/tmp/demo_app.py"], env=env)
|
|
136
|
+
time.sleep(10) # let app run
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
# ── EXAMPLE: Record a Chromium browser session ───────────────────────────────
|
|
140
|
+
|
|
141
|
+
def record_browser(url, duration=20, output=OUTPUT):
|
|
142
|
+
"""Record a browser navigating to a URL"""
|
|
143
|
+
xvfb = start_virtual_display()
|
|
144
|
+
recorder = start_recording(output, duration)
|
|
145
|
+
|
|
146
|
+
env = os.environ.copy()
|
|
147
|
+
env["DISPLAY"] = DISPLAY_NUM
|
|
148
|
+
|
|
149
|
+
# Launch Chromium in window mode (not headless — we WANT it visible)
|
|
150
|
+
subprocess.Popen([
|
|
151
|
+
"chromium", "--no-sandbox",
|
|
152
|
+
f"--window-size={WIDTH},{HEIGHT}",
|
|
153
|
+
"--window-position=0,0",
|
|
154
|
+
"--start-maximized",
|
|
155
|
+
url
|
|
156
|
+
], env=env, stderr=subprocess.DEVNULL)
|
|
157
|
+
|
|
158
|
+
time.sleep(duration)
|
|
159
|
+
recorder.send_signal(signal.SIGINT)
|
|
160
|
+
recorder.wait()
|
|
161
|
+
xvfb.terminate()
|
|
162
|
+
print(f"✅ Browser recording saved: {output}")
|
|
163
|
+
return output
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
# ── POST-PROCESSING ───────────────────────────────────────────────────────────
|
|
167
|
+
|
|
168
|
+
def add_narration(video_path, narration_text, output_path):
|
|
169
|
+
"""Add TTS narration to a recorded video"""
|
|
170
|
+
import pyttsx3
|
|
171
|
+
|
|
172
|
+
engine = pyttsx3.init()
|
|
173
|
+
engine.setProperty('rate', 140)
|
|
174
|
+
engine.save_to_file(narration_text, '/tmp/narration.wav')
|
|
175
|
+
engine.runAndWait()
|
|
176
|
+
|
|
177
|
+
subprocess.run([
|
|
178
|
+
'ffmpeg', '-i', '/tmp/narration.wav',
|
|
179
|
+
'-c:a', 'libmp3lame', '-b:a', '128k',
|
|
180
|
+
'/tmp/narration.mp3', '-y', '-loglevel', 'quiet'
|
|
181
|
+
])
|
|
182
|
+
|
|
183
|
+
subprocess.run([
|
|
184
|
+
'ffmpeg', '-i', video_path, '-i', '/tmp/narration.mp3',
|
|
185
|
+
'-c:v', 'copy', '-c:a', 'aac',
|
|
186
|
+
'-shortest', output_path, '-y', '-loglevel', 'quiet'
|
|
187
|
+
])
|
|
188
|
+
return output_path
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
def add_overlay_text(video_path, text, position="bottom", output_path=None):
|
|
192
|
+
"""Add text overlay to recorded video using FFmpeg"""
|
|
193
|
+
if not output_path:
|
|
194
|
+
output_path = video_path.replace('.mp4', '_overlay.mp4')
|
|
195
|
+
|
|
196
|
+
if position == "bottom":
|
|
197
|
+
vf = f"drawtext=text='{text}':fontcolor=white:fontsize=24:x=(w-text_w)/2:y=h-th-20:box=1:boxcolor=black@0.5"
|
|
198
|
+
else:
|
|
199
|
+
vf = f"drawtext=text='{text}':fontcolor=white:fontsize=24:x=20:y=20"
|
|
200
|
+
|
|
201
|
+
subprocess.run([
|
|
202
|
+
'ffmpeg', '-i', video_path, '-vf', vf,
|
|
203
|
+
output_path, '-y', '-loglevel', 'quiet'
|
|
204
|
+
])
|
|
205
|
+
return output_path
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
if __name__ == "__main__":
|
|
209
|
+
result = record_session(run_demo, duration=12, output=OUTPUT)
|
|
210
|
+
# Add narration
|
|
211
|
+
final = add_narration(result, "This is our automated UI demo.",
|
|
212
|
+
"/home/claude/final_recording.mp4")
|
|
213
|
+
shutil.copy(final, "/mnt/user-data/outputs/recording.mp4")
|
|
214
|
+
print(f"🎉 Done: /mnt/user-data/outputs/recording.mp4")
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
## When to Use This Approach
|
|
218
|
+
|
|
219
|
+
✅ When you need to show a REAL running application
|
|
220
|
+
✅ When demonstrating web UI (real browser rendering)
|
|
221
|
+
✅ When the app has complex visual state hard to reproduce with Pillow
|
|
222
|
+
|
|
223
|
+
❌ Don't use for simple text/graphic demos (too slow — use Approach 1)
|
|
224
|
+
❌ Avoid if network access to the app is needed (may be blocked)
|
|
225
|
+
|
|
226
|
+
## Important Notes
|
|
227
|
+
|
|
228
|
+
- Xvfb uses RAM for the framebuffer — 1280x720x24 ≈ 3.5MB per frame
|
|
229
|
+
- Always kill both `ffmpeg` and `Xvfb` in a `finally` block to avoid orphan processes
|
|
230
|
+
- Use `DISPLAY=:99` not `:0` to avoid conflicts with any host display
|
|
231
|
+
- FFmpeg's `-preset ultrafast` is recommended for real-time capture
|
|
232
|
+
- For longer recordings (>60s), consider `-crf 28` to reduce file size
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
# Design Patterns for Screen Recording Videos
|
|
2
|
+
|
|
3
|
+
## UI Component Library (Pillow)
|
|
4
|
+
|
|
5
|
+
### Rounded Rectangle
|
|
6
|
+
```python
|
|
7
|
+
def rounded_rect(draw, x, y, w, h, r, fill=None, outline=None, width=1):
|
|
8
|
+
draw.rectangle([x+r, y, x+w-r, y+h], fill=fill)
|
|
9
|
+
draw.rectangle([x, y+r, x+w, y+h-r], fill=fill)
|
|
10
|
+
draw.ellipse([x, y, x+2*r, y+2*r], fill=fill)
|
|
11
|
+
draw.ellipse([x+w-2*r, y, x+w, y+2*r], fill=fill)
|
|
12
|
+
draw.ellipse([x, y+h-2*r, x+2*r, y+h], fill=fill)
|
|
13
|
+
draw.ellipse([x+w-2*r, y+h-2*r, x+w, y+h], fill=fill)
|
|
14
|
+
if outline:
|
|
15
|
+
draw.arc([x, y, x+2*r, y+2*r], 180, 270, fill=outline, width=width)
|
|
16
|
+
draw.arc([x+w-2*r, y, x+w, y+2*r], 270, 360, fill=outline, width=width)
|
|
17
|
+
draw.arc([x, y+h-2*r, x+2*r, y+h], 90, 180, fill=outline, width=width)
|
|
18
|
+
draw.arc([x+w-2*r, y+h-2*r, x+w, y+h], 0, 90, fill=outline, width=width)
|
|
19
|
+
draw.line([x+r, y, x+w-r, y], fill=outline, width=width)
|
|
20
|
+
draw.line([x+r, y+h, x+w-r, y+h], fill=outline, width=width)
|
|
21
|
+
draw.line([x, y+r, x, y+h-r], fill=outline, width=width)
|
|
22
|
+
draw.line([x+w, y+r, x+w, y+h-r], fill=outline, width=width)
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
### Notification Toast
|
|
26
|
+
```python
|
|
27
|
+
def draw_toast(draw, message, t, appear_at=0, dismiss_at=3, icon="✓", color=(60,200,100)):
|
|
28
|
+
p = ease(max(0, t - appear_at), 0.4)
|
|
29
|
+
if t > dismiss_at:
|
|
30
|
+
p = 1 - ease(t - dismiss_at, 0.3)
|
|
31
|
+
if p <= 0:
|
|
32
|
+
return
|
|
33
|
+
x, y = 900, int(30 + (1-p) * -80)
|
|
34
|
+
draw.rectangle([x, y, x+340, y+60], fill=(30, 40, 30),
|
|
35
|
+
outline=color, width=2)
|
|
36
|
+
draw.text((x+15, y+18), f"{icon} {message}", fill=color)
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
### Metric Card
|
|
40
|
+
```python
|
|
41
|
+
def draw_metric(draw, x, y, label, value, unit="", trend=None):
|
|
42
|
+
draw.rectangle([x, y, x+220, y+110], fill=(20, 25, 55),
|
|
43
|
+
outline=(60, 80, 160), width=1)
|
|
44
|
+
draw.text((x+12, y+10), label, fill=(140, 140, 180))
|
|
45
|
+
draw.text((x+12, y+45), str(value), fill=(255, 255, 255))
|
|
46
|
+
if unit:
|
|
47
|
+
draw.text((x+12+len(str(value))*18, y+55), unit, fill=(140, 140, 180))
|
|
48
|
+
if trend == "up":
|
|
49
|
+
draw.text((x+180, y+10), "↑", fill=(60, 200, 100))
|
|
50
|
+
elif trend == "down":
|
|
51
|
+
draw.text((x+180, y+10), "↓", fill=(255, 80, 80))
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### Simple Bar Chart
|
|
55
|
+
```python
|
|
56
|
+
def draw_bar_chart(draw, data, x, y, w, h, t, animate=True):
|
|
57
|
+
"""data = [(label, value), ...]"""
|
|
58
|
+
max_val = max(v for _, v in data)
|
|
59
|
+
bar_w = w // len(data) - 10
|
|
60
|
+
|
|
61
|
+
for i, (label, val) in enumerate(data):
|
|
62
|
+
p = ease(t, 1.0) if animate else 1.0
|
|
63
|
+
bar_h = int((val / max_val) * h * p)
|
|
64
|
+
bx = x + i * (bar_w + 10)
|
|
65
|
+
# Bar
|
|
66
|
+
draw.rectangle([bx, y+h-bar_h, bx+bar_w, y+h], fill=(60, 120, 255))
|
|
67
|
+
# Label
|
|
68
|
+
draw.text((bx, y+h+8), label, fill=(140, 140, 180))
|
|
69
|
+
# Value
|
|
70
|
+
if p > 0.7:
|
|
71
|
+
draw.text((bx, y+h-bar_h-24), str(val), fill=(240, 240, 255))
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
## Transition Patterns
|
|
75
|
+
|
|
76
|
+
### Fade between scenes
|
|
77
|
+
```python
|
|
78
|
+
def cross_fade(draw_a, draw_b, t, fade_start, fade_duration, img_size):
|
|
79
|
+
if t < fade_start:
|
|
80
|
+
draw_a(...)
|
|
81
|
+
elif t < fade_start + fade_duration:
|
|
82
|
+
p = ease(t - fade_start, fade_duration)
|
|
83
|
+
# Render both on separate images and blend
|
|
84
|
+
img_a = render_scene_a(t)
|
|
85
|
+
img_b = render_scene_b(t - fade_start)
|
|
86
|
+
return Image.blend(img_a, img_b, p)
|
|
87
|
+
else:
|
|
88
|
+
draw_b(...)
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Slide transition
|
|
92
|
+
```python
|
|
93
|
+
def slide_in(draw, content_fn, t, direction="left", duration=0.5):
|
|
94
|
+
p = ease(t, duration)
|
|
95
|
+
if direction == "left":
|
|
96
|
+
offset_x = int((1 - p) * 1280)
|
|
97
|
+
# Draw content shifted right, moving left
|
|
98
|
+
# Use PIL paste with offset
|
|
99
|
+
elif direction == "up":
|
|
100
|
+
offset_y = int((1 - p) * 720)
|
|
101
|
+
```
|
|
102
|
+
|
|
103
|
+
## Narration Script Patterns
|
|
104
|
+
|
|
105
|
+
### Per-scene narration timing
|
|
106
|
+
```python
|
|
107
|
+
SCENES = [
|
|
108
|
+
{"id": "intro", "duration": 4, "narration": "Welcome to our demo."},
|
|
109
|
+
{"id": "feature", "duration": 6, "narration": "Our system detects issues automatically."},
|
|
110
|
+
]
|
|
111
|
+
|
|
112
|
+
def build_full_narration(scenes):
|
|
113
|
+
"""Concatenate all narrations for single TTS call"""
|
|
114
|
+
# Add natural pauses between scenes
|
|
115
|
+
parts = []
|
|
116
|
+
for s in scenes:
|
|
117
|
+
parts.append(s["narration"])
|
|
118
|
+
parts.append("...") # pause
|
|
119
|
+
return " ".join(parts)
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
### Voice settings
|
|
123
|
+
```python
|
|
124
|
+
engine = pyttsx3.init()
|
|
125
|
+
|
|
126
|
+
# Speed: 100=slow, 150=normal, 200=fast
|
|
127
|
+
engine.setProperty('rate', 140)
|
|
128
|
+
|
|
129
|
+
# Volume: 0.0-1.0
|
|
130
|
+
engine.setProperty('volume', 0.9)
|
|
131
|
+
|
|
132
|
+
# List available voices (usually 1-2 with espeak)
|
|
133
|
+
voices = engine.getProperty('voices')
|
|
134
|
+
for v in voices:
|
|
135
|
+
print(v.id, v.name, v.languages)
|
|
136
|
+
# Set voice by ID
|
|
137
|
+
engine.setProperty('voice', voices[0].id)
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
## File Size Optimization
|
|
141
|
+
|
|
142
|
+
```python
|
|
143
|
+
# For web delivery: compress output
|
|
144
|
+
subprocess.run([
|
|
145
|
+
'ffmpeg', '-i', 'output.mp4',
|
|
146
|
+
'-c:v', 'libx264', '-crf', '28', # 23=good, 28=smaller, 35=small
|
|
147
|
+
'-preset', 'slow', # slow=better compression
|
|
148
|
+
'-c:a', 'aac', '-b:a', '96k',
|
|
149
|
+
'output_compressed.mp4', '-y', '-loglevel', 'quiet'
|
|
150
|
+
])
|
|
151
|
+
|
|
152
|
+
# For GIF (social media / documentation)
|
|
153
|
+
subprocess.run([
|
|
154
|
+
'ffmpeg', '-i', 'output.mp4',
|
|
155
|
+
'-vf', 'fps=10,scale=960:-1:flags=lanczos',
|
|
156
|
+
'-loop', '0',
|
|
157
|
+
'demo.gif', '-y', '-loglevel', 'quiet'
|
|
158
|
+
])
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
## Checklist Before Rendering
|
|
162
|
+
|
|
163
|
+
- [ ] All scenes have `draw_fn` implemented
|
|
164
|
+
- [ ] `total_duration = sum(s["duration"] for s in SCENES)` is correct
|
|
165
|
+
- [ ] `make_frame` returns `np.array(img)` (not PIL Image)
|
|
166
|
+
- [ ] Audio `.with_duration(total_duration)` called
|
|
167
|
+
- [ ] Output path is in `/home/claude/` first, then copied to `/mnt/user-data/outputs/`
|
|
168
|
+
- [ ] `apt-get install -y espeak-ng` was run if pyttsx3 is used
|
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: test-mobile-app
|
|
3
|
+
description: >
|
|
4
|
+
Automated mobile application testing skill. Use this skill whenever the user
|
|
5
|
+
wants to test a mobile app (Android or iOS), write test cases, analyze app
|
|
6
|
+
structure, run automated UI tests via emulator, or generate test reports.
|
|
7
|
+
Trigger when user mentions: "test my app", "run tests", "UI testing",
|
|
8
|
+
"write test cases", "check app functionality", "test on emulator",
|
|
9
|
+
"mobile QA", "test coverage", "use case testing", "user scenario testing",
|
|
10
|
+
or any combination of mobile + test/check/verify/validate.
|
|
11
|
+
Also trigger when user uploads or references an APK, .ipa, or a mobile
|
|
12
|
+
project folder (React Native, Flutter, Android, iOS) and asks what to do next.
|
|
13
|
+
Always use this skill for any mobile app QA task — even partial ones like
|
|
14
|
+
"just write some use cases" or "show me what tests I should run".
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
# Mobile App Testing Skill
|
|
18
|
+
|
|
19
|
+
This skill enables Claude to perform end-to-end mobile application testing:
|
|
20
|
+
1. **Analyze** the app structure and infer user-facing functionality
|
|
21
|
+
2. **Generate** use cases from an end-user perspective
|
|
22
|
+
3. **Write** concrete test scenarios with expected results
|
|
23
|
+
4. **Execute** tests via Appium + Android emulator (or interpret results statically)
|
|
24
|
+
5. **Produce** a structured HTML/Markdown test report
|
|
25
|
+
|
|
26
|
+
---
|
|
27
|
+
|
|
28
|
+
## Phase 1 — App Analysis
|
|
29
|
+
|
|
30
|
+
### What to collect
|
|
31
|
+
|
|
32
|
+
Before generating use cases, gather as much context as possible:
|
|
33
|
+
|
|
34
|
+
- **Source code** (Android/Java/Kotlin, iOS/Swift, React Native, Flutter)
|
|
35
|
+
- **APK file** — use `androguard` to extract Activity list, permissions, Manifest
|
|
36
|
+
- **Screenshots** — analyze UI from images
|
|
37
|
+
- **Description** — what the app does, target audience
|
|
38
|
+
|
|
39
|
+
### APK Analysis (Android)
|
|
40
|
+
|
|
41
|
+
Read `scripts/analyze_apk.py` for full script. Quick usage:
|
|
42
|
+
```bash
|
|
43
|
+
python3 scripts/analyze_apk.py path/to/app.apk
|
|
44
|
+
```
|
|
45
|
+
Outputs: package name, activities, permissions, strings → feeds into use case generation.
|
|
46
|
+
|
|
47
|
+
### Source Code Analysis
|
|
48
|
+
|
|
49
|
+
If source is available, scan for:
|
|
50
|
+
- Screen/Activity/Fragment/Page names → each is a potential use case surface
|
|
51
|
+
- Navigation graphs (React Navigation, NavController)
|
|
52
|
+
- API endpoints called (network requests)
|
|
53
|
+
- Form fields, validation logic
|
|
54
|
+
- Authentication flows
|
|
55
|
+
|
|
56
|
+
---
|
|
57
|
+
|
|
58
|
+
## Phase 2 — Use Case Generation
|
|
59
|
+
|
|
60
|
+
### Methodology
|
|
61
|
+
|
|
62
|
+
Think from the perspective of a **real end user** — not a developer.
|
|
63
|
+
Ask: *"What would a person actually do with this app?"*
|
|
64
|
+
|
|
65
|
+
Use case format:
|
|
66
|
+
```
|
|
67
|
+
UC-<N>: <Short Title>
|
|
68
|
+
Actor: End User
|
|
69
|
+
Precondition: <What must be true before this action>
|
|
70
|
+
Steps:
|
|
71
|
+
1. <action>
|
|
72
|
+
2. <action>
|
|
73
|
+
...
|
|
74
|
+
Expected outcome: <what the user sees/gets>
|
|
75
|
+
Priority: High / Medium / Low
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### Use Case Categories to Always Cover
|
|
79
|
+
|
|
80
|
+
1. **Onboarding** — first launch, tutorial, permissions prompt
|
|
81
|
+
2. **Authentication** — registration, login, logout, password reset
|
|
82
|
+
3. **Core Feature Flow** — the primary value action of the app (1-3 flows)
|
|
83
|
+
4. **Data Entry** — any form: required fields, validation, error states
|
|
84
|
+
5. **Navigation** — bottom nav, back button, deep links
|
|
85
|
+
6. **Empty States** — what happens when there's no data
|
|
86
|
+
7. **Error Handling** — no internet, server error, invalid input
|
|
87
|
+
8. **Settings / Profile** — change preferences, update data
|
|
88
|
+
9. **Notifications** — if the app uses push notifications
|
|
89
|
+
10. **Accessibility** — basic: is text readable, are tap targets big enough
|
|
90
|
+
|
|
91
|
+
Aim for **15–30 use cases** depending on app complexity.
|
|
92
|
+
|
|
93
|
+
---
|
|
94
|
+
|
|
95
|
+
## Phase 3 — Test Scenario Writing
|
|
96
|
+
|
|
97
|
+
For each use case, write a test scenario:
|
|
98
|
+
|
|
99
|
+
```
|
|
100
|
+
TEST-<N>: <Title>
|
|
101
|
+
Related UC: UC-<N>
|
|
102
|
+
Type: Functional | UI | Regression | Smoke
|
|
103
|
+
Steps:
|
|
104
|
+
1. Launch app
|
|
105
|
+
2. <specific action with exact input data>
|
|
106
|
+
3. ...
|
|
107
|
+
Assertions:
|
|
108
|
+
- Element <locator> is visible
|
|
109
|
+
- Text "<expected>" is displayed
|
|
110
|
+
- Screen navigates to <ScreenName>
|
|
111
|
+
- No crash / error dialog
|
|
112
|
+
Expected Result: PASS / FAIL criteria
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
### Test Types to Include
|
|
116
|
+
|
|
117
|
+
| Type | When to use |
|
|
118
|
+
|------|-------------|
|
|
119
|
+
| **Smoke** | Quick sanity — does app launch, core screens load? |
|
|
120
|
+
| **Functional** | Does feature X work correctly? |
|
|
121
|
+
| **UI/Visual** | Are elements present, correctly labeled, accessible? |
|
|
122
|
+
| **Edge Case** | Empty fields, special characters, very long strings |
|
|
123
|
+
| **Regression** | After a change — did existing features break? |
|
|
124
|
+
|
|
125
|
+
---
|
|
126
|
+
|
|
127
|
+
## Phase 4 — Test Execution
|
|
128
|
+
|
|
129
|
+
### Environment Setup
|
|
130
|
+
|
|
131
|
+
Read `references/setup-appium.md` for full Appium + emulator setup.
|
|
132
|
+
|
|
133
|
+
**Quick check:**
|
|
134
|
+
```bash
|
|
135
|
+
python3 scripts/check_environment.py
|
|
136
|
+
```
|
|
137
|
+
This verifies: adb, emulator, Appium server, Python client.
|
|
138
|
+
|
|
139
|
+
### Running Tests
|
|
140
|
+
|
|
141
|
+
```bash
|
|
142
|
+
# Run all tests
|
|
143
|
+
python3 scripts/run_tests.py --apk path/to/app.apk --output results/
|
|
144
|
+
|
|
145
|
+
# Run smoke tests only
|
|
146
|
+
python3 scripts/run_tests.py --apk path/to/app.apk --suite smoke --output results/
|
|
147
|
+
|
|
148
|
+
# Run on specific device
|
|
149
|
+
python3 scripts/run_tests.py --apk path/to/app.apk --device emulator-5554 --output results/
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
### Test Execution Without Emulator (Static Mode)
|
|
153
|
+
|
|
154
|
+
If no emulator is available, Claude can:
|
|
155
|
+
1. Analyze source code / screenshots statically
|
|
156
|
+
2. Write all test scenarios
|
|
157
|
+
3. Mark execution status as `MANUAL_REQUIRED`
|
|
158
|
+
4. Generate a report with all test cases ready to be run manually
|
|
159
|
+
|
|
160
|
+
Use `--static` flag:
|
|
161
|
+
```bash
|
|
162
|
+
python3 scripts/run_tests.py --static --output results/
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
---
|
|
166
|
+
|
|
167
|
+
## Phase 5 — Report Generation
|
|
168
|
+
|
|
169
|
+
```bash
|
|
170
|
+
python3 scripts/generate_report.py --results results/ --output test_report.html
|
|
171
|
+
```
|
|
172
|
+
|
|
173
|
+
Report includes:
|
|
174
|
+
- Summary: total tests, passed, failed, skipped
|
|
175
|
+
- Per-test details: steps, assertions, actual vs expected, screenshots
|
|
176
|
+
- Use case coverage matrix
|
|
177
|
+
- Issues found (with severity: Critical / Major / Minor)
|
|
178
|
+
- Environment info (device, OS, app version)
|
|
179
|
+
|
|
180
|
+
Read `references/report-template.md` for report structure details.
|
|
181
|
+
|
|
182
|
+
---
|
|
183
|
+
|
|
184
|
+
## Workflow Summary
|
|
185
|
+
|
|
186
|
+
```
|
|
187
|
+
1. Receive app (APK / source / description / screenshots)
|
|
188
|
+
↓
|
|
189
|
+
2. Run analyze_apk.py OR inspect source code
|
|
190
|
+
↓
|
|
191
|
+
3. Generate use cases (UC-1...UC-N) — show to user, ask for feedback
|
|
192
|
+
↓
|
|
193
|
+
4. Write test scenarios (TEST-1...TEST-N) — derive from use cases
|
|
194
|
+
↓
|
|
195
|
+
5. Check environment (check_environment.py)
|
|
196
|
+
↓
|
|
197
|
+
6a. Emulator available → run_tests.py → capture results
|
|
198
|
+
6b. No emulator → static mode → mark for manual execution
|
|
199
|
+
↓
|
|
200
|
+
7. generate_report.py → HTML report → present to user
|
|
201
|
+
```
|
|
202
|
+
|
|
203
|
+
---
|
|
204
|
+
|
|
205
|
+
## Important Notes
|
|
206
|
+
|
|
207
|
+
- **Always show use cases to the user before writing tests** — they know their app best.
|
|
208
|
+
- **Locators**: Prefer `accessibility id` > `resource-id` > `xpath`. Never use index-based xpath.
|
|
209
|
+
- **Waits**: Always use explicit waits (`WebDriverWait`), never `time.sleep`.
|
|
210
|
+
- **Screenshots**: Capture on every assertion failure automatically.
|
|
211
|
+
- **Crash detection**: After every interaction, check for crash dialogs (`scripts/crash_detector.py`).
|
|
212
|
+
- **Language**: Generate use cases and reports in the language the user is using.
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
# Test Report Structure
|
|
2
|
+
|
|
3
|
+
## tests.json Format
|
|
4
|
+
|
|
5
|
+
This is the input format for `run_tests.py`:
|
|
6
|
+
|
|
7
|
+
```json
|
|
8
|
+
{
|
|
9
|
+
"app": "com.example.myapp",
|
|
10
|
+
"version": "1.2.3",
|
|
11
|
+
"generated_at": "2025-01-01T12:00:00",
|
|
12
|
+
"use_cases": [
|
|
13
|
+
{
|
|
14
|
+
"id": "UC-1",
|
|
15
|
+
"title": "User Login with valid credentials",
|
|
16
|
+
"priority": "High"
|
|
17
|
+
}
|
|
18
|
+
],
|
|
19
|
+
"tests": [
|
|
20
|
+
{
|
|
21
|
+
"id": "TEST-001",
|
|
22
|
+
"title": "Successful login with valid email and password",
|
|
23
|
+
"related_uc": "UC-1",
|
|
24
|
+
"type": "smoke",
|
|
25
|
+
"steps": [
|
|
26
|
+
{ "action": "Launch app", "type": "wait", "seconds": 2 },
|
|
27
|
+
{ "action": "Tap Login button", "type": "tap", "locator": "Login" },
|
|
28
|
+
{ "action": "Enter email", "type": "type", "locator": "Email field", "value": "user@test.com" },
|
|
29
|
+
{ "action": "Enter password", "type": "type", "locator": "Password field", "value": "password123" },
|
|
30
|
+
{ "action": "Tap Submit", "type": "tap", "locator": "Submit" },
|
|
31
|
+
{ "action": "Assert home screen visible", "type": "assert_text", "expected": "Welcome" },
|
|
32
|
+
{ "action": "Assert no errors", "type": "assert_no_error" }
|
|
33
|
+
],
|
|
34
|
+
"assertions": [
|
|
35
|
+
"Home screen is displayed",
|
|
36
|
+
"User name is shown",
|
|
37
|
+
"No error messages"
|
|
38
|
+
]
|
|
39
|
+
}
|
|
40
|
+
]
|
|
41
|
+
}
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
## Step Action Types
|
|
45
|
+
|
|
46
|
+
| type | Required fields | Description |
|
|
47
|
+
|------|----------------|-------------|
|
|
48
|
+
| `tap` | `locator` | Tap by accessibility id |
|
|
49
|
+
| `tap_id` | `locator` | Tap by resource-id |
|
|
50
|
+
| `type` | `locator`, `value` | Type text into field (by accessibility id) |
|
|
51
|
+
| `type_id` | `locator`, `value` | Type text into field (by resource-id) |
|
|
52
|
+
| `scroll_down` | — | Swipe screen upward |
|
|
53
|
+
| `back` | — | Press hardware back button |
|
|
54
|
+
| `wait` | `seconds` | Pause execution |
|
|
55
|
+
| `assert_visible` | `locator` | Assert element present by accessibility id |
|
|
56
|
+
| `assert_text` | `expected` | Assert text exists anywhere on screen |
|
|
57
|
+
| `assert_no_error` | — | Assert no error/crash text on screen |
|
|
58
|
+
|
|
59
|
+
## results.json Format (output)
|
|
60
|
+
|
|
61
|
+
```json
|
|
62
|
+
{
|
|
63
|
+
"timestamp": "2025-01-01T12:05:00",
|
|
64
|
+
"mode": "automated",
|
|
65
|
+
"apk": "app.apk",
|
|
66
|
+
"total": 10,
|
|
67
|
+
"passed": 7,
|
|
68
|
+
"failed": 2,
|
|
69
|
+
"errors": 0,
|
|
70
|
+
"manual": 1,
|
|
71
|
+
"tests": [
|
|
72
|
+
{
|
|
73
|
+
"test_id": "TEST-001",
|
|
74
|
+
"title": "Successful login",
|
|
75
|
+
"status": "PASS",
|
|
76
|
+
"steps_log": ["Step 1: Launch app", "Step 2: Tap Login"],
|
|
77
|
+
"assertions": [
|
|
78
|
+
{ "check": "Text present: 'Welcome'", "passed": true }
|
|
79
|
+
],
|
|
80
|
+
"error": null,
|
|
81
|
+
"screenshot_path": null,
|
|
82
|
+
"duration_ms": 3420,
|
|
83
|
+
"issues_found": []
|
|
84
|
+
}
|
|
85
|
+
]
|
|
86
|
+
}
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
## Issue Severity Levels
|
|
90
|
+
|
|
91
|
+
| Severity | Criteria |
|
|
92
|
+
|----------|----------|
|
|
93
|
+
| **Critical** | App crash, data loss, cannot complete core flow |
|
|
94
|
+
| **Major** | Feature broken, incorrect data displayed, error not handled |
|
|
95
|
+
| **Minor** | Visual glitch, misleading label, minor UX issue |
|