@heylemon/lemonade 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/build-info.json +3 -3
- package/dist/canvas-host/a2ui/.bundle.hash +1 -1
- package/package.json +1 -1
- package/skills/brave-search/SKILL.md +57 -0
- package/skills/brave-search/content.js +86 -0
- package/skills/brave-search/package.json +14 -0
- package/skills/brave-search/search.js +179 -0
- package/skills/caldav-calendar/SKILL.md +104 -0
- package/skills/frontend-design/SKILL.md +39 -0
- package/skills/self-improving-agent/SKILL.md +128 -0
- package/skills/stock-analysis/SKILL.md +131 -0
- package/skills/stock-analysis/scripts/analyze_stock.py +2532 -0
- package/skills/stock-analysis/scripts/dividends.py +365 -0
- package/skills/stock-analysis/scripts/hot_scanner.py +565 -0
- package/skills/stock-analysis/scripts/portfolio.py +528 -0
- package/skills/stock-analysis/scripts/rumor_scanner.py +330 -0
- package/skills/stock-analysis/scripts/watchlist.py +318 -0
- package/skills/tavily-search/SKILL.md +38 -0
- package/skills/tavily-search/scripts/extract.mjs +59 -0
- package/skills/tavily-search/scripts/search.mjs +101 -0
- package/skills/youtube-watcher/SKILL.md +46 -0
- package/skills/youtube-watcher/scripts/get_transcript.py +81 -0
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
#!/usr/bin/env node

// Extract page content for one or more URLs via the Tavily Extract API.
// Each extracted page is printed as a Markdown section; any URLs the API
// could not extract are listed at the end.

function usage() {
  console.error(`Usage: extract.mjs "url1" ["url2" ...]`);
  process.exit(2);
}

const args = process.argv.slice(2);
if (args.length === 0 || args[0] === "-h" || args[0] === "--help") usage();

// Reject unrecognized flags explicitly (consistent with search.mjs) instead
// of silently dropping them, so a typo like `--hepl` is not ignored.
for (const a of args) {
  if (a === "-h" || a === "--help") usage();
  if (a.startsWith("-")) {
    console.error(`Unknown arg: ${a}`);
    usage();
  }
}

const urls = args.filter(a => !a.startsWith("-"));

if (urls.length === 0) {
  console.error("No URLs provided");
  usage();
}

// API key comes from the environment so it never appears in argv or shell history.
const apiKey = (process.env.TAVILY_API_KEY ?? "").trim();
if (!apiKey) {
  console.error("Missing TAVILY_API_KEY");
  process.exit(1);
}

const resp = await fetch("https://api.tavily.com/extract", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    api_key: apiKey,
    urls: urls,
  }),
});

if (!resp.ok) {
  const text = await resp.text().catch(() => "");
  throw new Error(`Tavily Extract failed (${resp.status}): ${text}`);
}

const data = await resp.json();

const results = data.results ?? [];
const failed = data.failed_results ?? [];

for (const r of results) {
  const url = String(r?.url ?? "").trim();
  const content = String(r?.raw_content ?? "").trim();

  console.log(`# ${url}\n`);
  console.log(content || "(no content extracted)");
  console.log("\n---\n");
}

if (failed.length > 0) {
  console.log("## Failed URLs\n");
  for (const f of failed) {
    // Fields may be absent on failed entries; avoid printing "undefined".
    console.log(`- ${f?.url ?? "(unknown url)"}: ${f?.error ?? "(no error detail)"}`);
  }
}
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
#!/usr/bin/env node

// Query the Tavily Search API and print an optional AI-generated answer
// followed by a Markdown list of sources.

function usage() {
  console.error(`Usage: search.mjs "query" [-n 5] [--deep] [--topic general|news] [--days 7]`);
  process.exit(2);
}

// Parse an integer flag value. A missing value falls back to the historical
// default; an unparseable value aborts via usage() instead of letting NaN
// leak into the request (JSON.stringify(NaN) serializes as null, and
// `.slice(0, NaN)` would silently print zero sources).
function parseIntFlag(value, fallback, flag) {
  if (value === undefined) return fallback;
  const parsed = Number.parseInt(value, 10);
  if (Number.isNaN(parsed)) {
    console.error(`Invalid number for ${flag}: ${value}`);
    usage();
  }
  return parsed;
}

const args = process.argv.slice(2);
if (args.length === 0 || args[0] === "-h" || args[0] === "--help") usage();

const query = args[0];
let n = 5;
let searchDepth = "basic";
let topic = "general";
let days = null;

for (let i = 1; i < args.length; i++) {
  const a = args[i];
  if (a === "-n") {
    n = parseIntFlag(args[i + 1], 5, "-n");
    i++;
    continue;
  }
  if (a === "--deep") {
    searchDepth = "advanced";
    continue;
  }
  if (a === "--topic") {
    topic = args[i + 1] ?? "general";
    i++;
    continue;
  }
  if (a === "--days") {
    days = parseIntFlag(args[i + 1], 7, "--days");
    i++;
    continue;
  }
  console.error(`Unknown arg: ${a}`);
  usage();
}

// API key comes from the environment so it never appears in argv or shell history.
const apiKey = (process.env.TAVILY_API_KEY ?? "").trim();
if (!apiKey) {
  console.error("Missing TAVILY_API_KEY");
  process.exit(1);
}

const body = {
  api_key: apiKey,
  query: query,
  search_depth: searchDepth,
  topic: topic,
  max_results: Math.max(1, Math.min(n, 20)), // clamp to the API's 1..20 range
  include_answer: true,
  include_raw_content: false,
};

// `days` only applies to news searches.
if (topic === "news" && days) {
  body.days = days;
}

const resp = await fetch("https://api.tavily.com/search", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
  },
  body: JSON.stringify(body),
});

if (!resp.ok) {
  const text = await resp.text().catch(() => "");
  throw new Error(`Tavily Search failed (${resp.status}): ${text}`);
}

const data = await resp.json();

// Print AI-generated answer if available
if (data.answer) {
  console.log("## Answer\n");
  console.log(data.answer);
  console.log("\n---\n");
}

// Print results
const results = (data.results ?? []).slice(0, n);
console.log("## Sources\n");

for (const r of results) {
  const title = String(r?.title ?? "").trim();
  const url = String(r?.url ?? "").trim();
  const content = String(r?.content ?? "").trim();
  const score = r?.score ? ` (relevance: ${(r.score * 100).toFixed(0)}%)` : "";

  if (!title || !url) continue;
  console.log(`- **${title}**${score}`);
  console.log(`  ${url}`);
  if (content) {
    console.log(`  ${content.slice(0, 300)}${content.length > 300 ? "..." : ""}`);
  }
  console.log();
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: youtube-watcher
|
|
3
|
+
description: Fetch and read transcripts from YouTube videos. Use when you need to summarize a video, answer questions about its content, or extract information from it.
|
|
4
|
+
author: Michael Gathara
|
|
5
|
+
version: 1.0.0
|
|
6
|
+
triggers:
|
|
7
|
+
- "watch youtube"
|
|
8
|
+
- "summarize video"
|
|
9
|
+
- "video transcript"
|
|
10
|
+
- "youtube summary"
|
|
11
|
+
- "analyze video"
|
|
12
|
+
metadata: {"lemonade":{"emoji":"📺","requires":{"bins":["yt-dlp"]},"install":[{"id":"brew","kind":"brew","formula":"yt-dlp","bins":["yt-dlp"],"label":"Install yt-dlp (brew)"},{"id":"pip","kind":"pip","package":"yt-dlp","bins":["yt-dlp"],"label":"Install yt-dlp (pip)"}]}}
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
# YouTube Watcher
|
|
16
|
+
|
|
17
|
+
Fetch transcripts from YouTube videos to enable summarization, QA, and content extraction.
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
### Get Transcript
|
|
22
|
+
|
|
23
|
+
```bash
|
|
24
|
+
python3 {baseDir}/scripts/get_transcript.py "https://www.youtube.com/watch?v=VIDEO_ID"
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Examples
|
|
28
|
+
|
|
29
|
+
**Summarize a video:**
|
|
30
|
+
|
|
31
|
+
1. Get the transcript:
|
|
32
|
+
```bash
|
|
33
|
+
python3 {baseDir}/scripts/get_transcript.py "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
|
|
34
|
+
```
|
|
35
|
+
2. Read the output and summarize it for the user.
|
|
36
|
+
|
|
37
|
+
**Find specific information:**
|
|
38
|
+
|
|
39
|
+
1. Get the transcript.
|
|
40
|
+
2. Search the text for keywords or answer the user's question based on the content.
|
|
41
|
+
|
|
42
|
+
## Notes
|
|
43
|
+
|
|
44
|
+
- Requires `yt-dlp` to be installed and available in the PATH.
|
|
45
|
+
- Works with videos that have closed captions (CC) or auto-generated subtitles.
|
|
46
|
+
- If a video has no subtitles, the script will fail with an error message.
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import argparse
|
|
3
|
+
import os
|
|
4
|
+
import re
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
import tempfile
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
def clean_vtt(content: str) -> str:
    """
    Clean WebVTT content to plain text.

    Strips the WEBVTT header, cue numbers, timestamp lines, NOTE/STYLE
    lines, and inline markup tags, and collapses consecutive duplicate
    lines (auto-generated captions repeat each line as the cue rolls).

    Args:
        content: Raw WebVTT file contents.

    Returns:
        Plain transcript text, one caption line per output line.
    """
    # Hoisted so the patterns are compiled once, not per line.
    timestamp_pattern = re.compile(r'\d{2}:\d{2}:\d{2}\.\d{3}\s-->\s\d{2}:\d{2}:\d{2}\.\d{3}')
    tag_pattern = re.compile(r'<[^>]+>')  # inline cue markup, e.g. <c> or <00:00:01.000>

    text_lines = []

    for line in content.splitlines():
        line = line.strip()
        if not line or line == 'WEBVTT' or line.isdigit():
            continue
        if timestamp_pattern.match(line):
            continue
        if line.startswith('NOTE') or line.startswith('STYLE'):
            continue

        # Strip inline tags BEFORE the duplicate check so a tagged line
        # (e.g. "<c>hello</c>") collapses against its plain twin, and drop
        # lines that are empty once the tags are gone.
        line = tag_pattern.sub('', line).strip()
        if not line:
            continue
        if text_lines and text_lines[-1] == line:
            continue

        text_lines.append(line)

    return '\n'.join(text_lines)
|
|
38
|
+
|
|
39
|
+
def get_transcript(url: str):
    """Download the English subtitle track for ``url`` with yt-dlp and print
    it as cleaned plain text.

    Exits the process with status 1 when yt-dlp is missing, when it fails,
    or when the video provides no subtitles.
    """
    with tempfile.TemporaryDirectory() as workdir:
        # Subtitle-only fetch: no media download, English track, fixed stem
        # so the resulting .vtt lands inside the temp dir.
        yt_dlp_cmd = [
            "yt-dlp",
            "--write-subs",
            "--write-auto-subs",
            "--skip-download",
            "--sub-lang", "en",
            "--output", "subs",
            url,
        ]

        try:
            subprocess.run(yt_dlp_cmd, cwd=workdir, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            print(f"Error running yt-dlp: {e.stderr.decode()}", file=sys.stderr)
            sys.exit(1)
        except FileNotFoundError:
            print("Error: yt-dlp not found. Please install it.", file=sys.stderr)
            sys.exit(1)

        subtitle_files = list(Path(workdir).glob("*.vtt"))
        if not subtitle_files:
            print("No subtitles found.", file=sys.stderr)
            sys.exit(1)

        raw_vtt = subtitle_files[0].read_text(encoding='utf-8')
        print(clean_vtt(raw_vtt))
|
|
72
|
+
|
|
73
|
+
def main():
    """CLI entry point: parse the video URL argument and print its transcript."""
    parser = argparse.ArgumentParser(description="Fetch YouTube transcript.")
    parser.add_argument("url", help="YouTube video URL")
    parsed = parser.parse_args()
    get_transcript(parsed.url)


if __name__ == "__main__":
    main()
|