create-bubblelab-app 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.js +42 -1
- package/package.json +1 -1
- package/templates/basic/package.json +5 -3
- package/templates/basic/src/index.ts +26 -7
- package/templates/reddit-scraper/.env.example +10 -0
- package/templates/reddit-scraper/.tmp/bubble-script-1759870622580-qdld7i.ts +103 -0
- package/templates/reddit-scraper/README.md +254 -0
- package/templates/reddit-scraper/package.json +23 -0
- package/templates/reddit-scraper/src/index.ts +58 -0
- package/templates/reddit-scraper/src/reddit-news-flow.ts +93 -0
- package/templates/reddit-scraper/tsconfig.json +20 -0
package/bin/cli.js
CHANGED

@@ -33,6 +33,11 @@ async function main() {
         value: 'basic',
         description: 'AI agent that researches weather using web search',
       },
+      {
+        title: 'Reddit News Scraper',
+        value: 'reddit-scraper',
+        description: 'AI agent that scrapes Reddit and summarizes news',
+      },
     ],
     initial: 0,
   },
@@ -47,6 +52,18 @@ async function main() {
     ],
     initial: 0,
   },
+  {
+    type: 'password',
+    name: 'googleApiKey',
+    message: 'Enter your Google API key (required for AI models):',
+    validate: (value) =>
+      value.length > 0 ? true : 'Google API key is required',
+  },
+  {
+    type: 'password',
+    name: 'firecrawlApiKey',
+    message: 'Enter your Firecrawl API key (optional, press Enter to skip):',
+  },
 ]);
 
 if (!response.projectName) {
@@ -54,7 +71,13 @@ async function main() {
   process.exit(1);
 }
 
-  const {
+  const {
+    projectName,
+    template,
+    packageManager,
+    googleApiKey,
+    firecrawlApiKey,
+  } = response;
   const targetDir = path.join(process.cwd(), projectName);
 
   // Check if directory exists
@@ -77,6 +100,24 @@ async function main() {
   packageJson.name = projectName;
   fs.writeFileSync(packageJsonPath, JSON.stringify(packageJson, null, 2));
 
+  // Create .env file with API keys
+  console.log(pc.cyan('🔑 Creating .env file...\n'));
+  const envContent = [
+    '# BubbleLab Configuration',
+    '# Google API Key (required for AI models)',
+    `GOOGLE_API_KEY=${googleApiKey}`,
+    '',
+    '# Firecrawl API Key (optional, for advanced web scraping)',
+    firecrawlApiKey
+      ? `FIRECRAWL_API_KEY=${firecrawlApiKey}`
+      : '# FIRECRAWL_API_KEY=your_firecrawl_api_key_here',
+    '',
+    '# Other optional configurations',
+    '# CITY=New York',
+    '',
+  ].join('\n');
+  fs.writeFileSync(path.join(targetDir, '.env'), envContent);
+
   // Install dependencies
   console.log(pc.cyan('📦 Installing dependencies...\n'));
   try {
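For reference, when a Google key is entered at the prompt and Firecrawl is skipped, the `envContent` template above writes a scaffolded `.env` along these lines (the key value is a placeholder for whatever the user typed):

```env
# BubbleLab Configuration
# Google API Key (required for AI models)
GOOGLE_API_KEY=<key entered at the prompt>

# Firecrawl API Key (optional, for advanced web scraping)
# FIRECRAWL_API_KEY=your_firecrawl_api_key_here

# Other optional configurations
# CITY=New York
```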
package/package.json
CHANGED

-  "version": "0.1.0",
+  "version": "0.1.2",

package/templates/basic/package.json
CHANGED

@@ -3,14 +3,16 @@
   "version": "0.1.0",
   "type": "module",
   "private": true,
+  "description": "BubbleLab AI agent application",
   "scripts": {
     "dev": "tsx src/index.ts",
     "build": "tsc",
-    "start": "node dist/index.js"
+    "start": "node dist/index.js",
+    "typecheck": "tsc --noEmit"
   },
   "dependencies": {
-    "@bubblelab/bubble-core": "^0.1.
-    "@bubblelab/bubble-runtime": "^0.1.
+    "@bubblelab/bubble-core": "^0.1.1",
+    "@bubblelab/bubble-runtime": "^0.1.4"
   },
   "devDependencies": {
     "@types/node": "^20.12.12",
package/templates/basic/src/index.ts
CHANGED

@@ -11,7 +11,16 @@
 
 import { BubbleRunner } from '@bubblelab/bubble-runtime';
 import { BubbleFactory } from '@bubblelab/bubble-core';
-import {
+import { readFileSync } from 'fs';
+import { fileURLToPath } from 'url';
+import { dirname, join } from 'path';
+import { config } from 'dotenv';
+import { CredentialType } from '@bubblelab/shared-schemas';
+
+// Load environment variables from .env file
+config();
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
 
 async function main() {
   console.log('🫧 BubbleLab Weather Agent\n');
@@ -21,11 +30,13 @@ async function main() {
   await bubbleFactory.registerDefaults();
   console.log('✅ BubbleFactory initialized\n');
 
-  // Step 2:
-  const
-  console.log('✅ BubbleRunner created\n');
+  // Step 2: Read the flow code as a string
+  const flowCode = readFileSync(join(__dirname, 'weather-flow.ts'), 'utf-8');
 
-  // Step 3:
+  // Step 3: Create a BubbleRunner with your flow code
+  const runner = new BubbleRunner(flowCode, bubbleFactory);
+
+  // Step 4: (Optional) Modify bubble parameters dynamically
   const bubbles = runner.getParsedBubbles();
   const bubbleIds = Object.keys(bubbles).map(Number);
 
@@ -39,12 +50,17 @@ async function main() {
       `What is the current weather in ${city}? Find information from the web and provide a detailed report.`
     );
   }
+  // Inject the credentials
+  runner.injector.injectCredentials(bubbles, [], {
+    [CredentialType.GOOGLE_GEMINI_CRED]: process.env.GOOGLE_GEMINI_CRED,
+    [CredentialType.FIRECRAWL_API_KEY]: process.env.FIRECRAWL_API_KEY,
+  });
 
-  // Step
+  // Step 5: Execute the flow
   console.log('🤖 Running AI agent...\n');
   const result = await runner.runAll();
 
-  // Step
+  // Step 6: Display results
   console.log('📊 Results:');
   console.log('─'.repeat(50));
   console.log(JSON.stringify(result, null, 2));
@@ -63,6 +79,9 @@ async function main() {
     console.log('\n📈 Execution Summary:');
     console.log(summary);
   }
+
+  // Force exit to close any lingering connections (AI model HTTP clients, etc.)
+  process.exit(0);
 }
 
 // Run the example
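The basic template's `weather-flow.ts` itself does not appear in this diff. As a rough sketch of the kind of file the updated `index.ts` loads, modeled on the `RedditNewsFlow` class added further down (the class name, payload field, and prompt here are illustrative guesses, not the template's actual contents):

```typescript
import {
  BubbleFlow,
  AIAgentBubble,
  type WebhookEvent,
} from '@bubblelab/bubble-core';

// Hypothetical payload shape; the real template may differ.
interface WeatherPayload extends WebhookEvent {
  city: string;
}

export class WeatherFlow extends BubbleFlow<'webhook/http'> {
  async handle(payload: WeatherPayload) {
    // One AI step: ask the agent to research the weather via web search.
    const result = await new AIAgentBubble({
      message: `What is the current weather in ${payload.city}? Find information from the web and provide a detailed report.`,
      model: { model: 'google/gemini-2.5-flash' },
    }).action();

    return { report: result.data?.response };
  }
}
```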
package/templates/reddit-scraper/.env.example
ADDED

@@ -0,0 +1,10 @@
+# Google Gemini API Key (required for AI agent)
+# Get your key at: https://aistudio.google.com/app/apikey
+GOOGLE_API_KEY=your_google_api_key_here
+
+# Optional: Specify subreddit to scrape (default: worldnews)
+SUBREDDIT=worldnews
+
+# Optional: Number of posts to scrape (default: 10)
+POST_LIMIT=10
+
package/templates/reddit-scraper/.tmp/bubble-script-1759870622580-qdld7i.ts
ADDED

@@ -0,0 +1,103 @@
+/**
+ * Reddit News Scraper Flow
+ *
+ * This is a simple BubbleFlow that scrapes Reddit and summarizes news posts.
+ */
+import {
+  BubbleFlow,
+  RedditScrapeTool,
+  AIAgentBubble,
+  type WebhookEvent,
+} from '@bubblelab/bubble-core';
+import { CredentialType } from '@bubblelab/shared-schemas';
+import { config } from 'dotenv';
+
+// Load environment variables from .env file
+config();
+
+/**
+ * Payload interface for the Reddit news flow
+ */
+interface RedditNewsPayload extends WebhookEvent {
+  subreddit: string;
+  limit: number;
+}
+
+/**
+ * RedditNewsFlow - Scrapes Reddit and summarizes news
+ *
+ * This flow demonstrates:
+ * - Using RedditScrapeTool to scrape subreddit posts
+ * - Using AIAgentBubble to analyze and summarize content
+ * - Simple 2-step workflow
+ */
+export class RedditNewsFlow extends BubbleFlow<'webhook/http'> {
+  async handle(payload: RedditNewsPayload) {
+    const subreddit = payload.subreddit || 'worldnews';
+    this.logger?.logLine(36, 'Statement: VariableDeclaration');
+    const limit = payload.limit || 10;
+    this.logger?.logLine(37, 'Statement: VariableDeclaration');
+
+    // Step 1: Scrape Reddit for posts
+    const scrapeResult = await new RedditScrapeTool({
+      subreddit: subreddit,
+      sort: 'hot',
+      limit: limit
+    }, {logger: this.logger, variableId: 417, dependencyGraph: {"name":"reddit-scrape-tool","uniqueId":"417","variableId":417,"variableName":"scrapeResult","nodeType":"tool","dependencies":[]}, currentUniqueId: "417"}).action();
+    this.logger?.logLine(44, 'Statement: VariableDeclaration');
+
+    if (!scrapeResult.success || !scrapeResult.data?.posts) {
+      throw new Error('Failed to scrape Reddit or no posts found.');
+      this.logger?.logLine(47, 'Statement: ThrowStatement');
+    }
+    this.logger?.logLine(48, 'Statement: IfStatement');
+
+    const posts = scrapeResult.data.posts;
+    this.logger?.logLine(50, 'Statement: VariableDeclaration');
+
+    // Format posts for AI
+    const postsText = posts
+      .map(
+        (
+          post: {
+            title: string;
+            score: number;
+            selftext: string;
+            postUrl: string;
+          },
+          i: number
+        ) =>
+          `${i + 1}. "${post.title}" (${post.score} upvotes)\n   ${post.selftext || 'No description'}\n   URL: ${post.postUrl}`
+      )
+      .join('\n\n');
+    this.logger?.logLine(66, 'Statement: VariableDeclaration');
+
+    // Step 2: Summarize the news using AI
+    const summaryResult = await new AIAgentBubble({
+      message: `Here are the top ${posts.length} posts from r/${subreddit}:
+
+${postsText}
+
+Please provide:
+1. A summary of the top 5 most important/popular news items
+2. Key themes or trends you notice
+3. A one-paragraph executive summary
+
+Format the response in a clear, readable way.`,
+      model: {
+        model: 'google/gemini-2.5-flash',
+      },
+      tools: []
+    }, {logger: this.logger, variableId: 422, dependencyGraph: {"name":"ai-agent","uniqueId":"422","variableId":422,"variableName":"summaryResult","nodeType":"service","dependencies":[]}, currentUniqueId: "422"}).action();
+    this.logger?.logLine(84, 'Statement: VariableDeclaration');
+
+    return {
+      subreddit,
+      postsScraped: posts.length,
+      summary: summaryResult.data?.response,
+      timestamp: new Date().toISOString(),
+      status: 'success',
+    };
+    this.logger?.logLine(92, 'Statement: ReturnStatement');
+  }
+}
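The `.tmp` script above appears to be BubbleRunner's instrumented copy of `reddit-news-flow.ts` (the clean source appears later in this diff): each statement is followed by a `this.logger?.logLine(...)` call, and each bubble constructor receives a second context argument. A sketch of that context's shape, inferred from the two instrumented calls above (this is not a documented API, and the field types are assumptions):

```typescript
// Inferred from the instrumented constructor calls above; not an official type.
interface InjectedBubbleContext {
  logger?: { logLine(line: number, label: string): void };
  variableId: number;       // e.g. 417, 422
  currentUniqueId: string;  // e.g. '417', '422'
  dependencyGraph: {
    name: string;           // e.g. 'reddit-scrape-tool', 'ai-agent'
    uniqueId: string;
    variableId: number;
    variableName: string;   // e.g. 'scrapeResult', 'summaryResult'
    nodeType: 'tool' | 'service';
    dependencies: unknown[];
  };
}
```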
package/templates/reddit-scraper/README.md
ADDED

@@ -0,0 +1,254 @@
+# BubbleLab Reddit News Scraper
+
+Welcome to your BubbleLab Reddit scraper! This template demonstrates how to build AI agents that scrape and analyze Reddit content using BubbleLab's workflow engine.
+
+## 🎯 What This Does
+
+This example creates an AI agent that:
+
+- Scrapes posts from any Reddit subreddit
+- Uses Google Gemini AI to analyze and summarize content
+- Identifies key themes and trends in the posts
+- Exports results to a JSON file
+
+## 🚀 Quick Start
+
+### 1. Set Up Environment Variables
+
+Your `.env` file was created during setup. You can modify it to customize the scraper:
+
+```env
+GOOGLE_API_KEY=your_google_api_key_here
+
+# Optional: Specify subreddit to scrape (default: worldnews)
+SUBREDDIT=worldnews
+
+# Optional: Number of posts to scrape (default: 10)
+POST_LIMIT=10
+```
+
+#### Get API Keys
+
+- **Google Gemini API**: https://aistudio.google.com/app/apikey (Free tier available)
+
+### 2. Run the Example
+
+```bash
+npm run dev
+```
+
+or
+
+```bash
+pnpm dev
+```
+
+You should see output like:
+
+```
+🫧 BubbleLab Reddit News Scraper
+
+✅ BubbleFactory initialized
+
+📊 Configuration:
+   Subreddit: r/worldnews
+   Post Limit: 10
+
+🤖 Running Reddit scraper...
+
+📊 Results:
+──────────────────────────────────────────────────────────────────────
+
+📝 NEWS SUMMARY:
+
+[AI-generated summary of Reddit posts]
+
+──────────────────────────────────────────────────────────────────────
+
+✅ Scraped 10 posts from r/worldnews
+💾 Results exported to: reddit-news-worldnews-1234567890.json
+```
+
+## 📚 Project Structure
+
+```
+my-bubblelab-app/
+├── src/
+│   ├── index.ts              # Main entry point
+│   └── reddit-news-flow.ts   # Reddit scraper flow definition
+├── package.json
+├── tsconfig.json
+├── .env
+└── README.md
+```
+
+## 🧩 Understanding the Code
+
+### The Flow (reddit-news-flow.ts)
+
+This is a simple 2-step workflow:
+
+```typescript
+export class RedditNewsFlow extends BubbleFlow<'webhook/http'> {
+  async handle(payload: RedditNewsPayload) {
+    // Step 1: Scrape Reddit for posts
+    const scrapeResult = await new RedditScrapeTool({
+      subreddit: 'worldnews',
+      sort: 'hot',
+      limit: 10,
+    }).action();
+
+    // Step 2: Summarize the news using AI
+    const summaryResult = await new AIAgentBubble({
+      message: `Summarize these Reddit posts...`,
+      model: { model: 'google/gemini-2.5-flash' },
+    }).action();
+
+    return { summary: summaryResult.data?.response };
+  }
+}
+```
+
+**Key components:**
+
+1. **RedditScrapeTool** - Scrapes posts from Reddit
+   - `subreddit`: Which subreddit to scrape
+   - `sort`: Sorting method ('hot', 'new', 'top')
+   - `limit`: Number of posts to fetch
+
+2. **AIAgentBubble** - AI-powered analysis
+   - `message`: The task/prompt for the AI
+   - `model`: Which AI model to use
+
+### Running the Flow (index.ts)
+
+The `index.ts` file shows how to execute the flow:
+
+```typescript
+const runner = new BubbleRunner(flowCode, bubbleFactory);
+const result = await runner.runAll({ subreddit, limit });
+```
+
+## 🎨 Customization
+
+### Change the Subreddit
+
+Set the `SUBREDDIT` environment variable:
+
+```bash
+SUBREDDIT="technology" npm run dev
+```
+
+Or modify `.env`:
+
+```env
+SUBREDDIT=technology
+```
+
+### Scrape More Posts
+
+Change the `POST_LIMIT`:
+
+```bash
+POST_LIMIT=20 npm run dev
+```
+
+### Change Sort Order
+
+Edit `src/reddit-news-flow.ts`:
+
+```typescript
+const scrapeResult = await new RedditScrapeTool({
+  subreddit: subreddit,
+  sort: 'new', // Options: 'hot', 'new', 'top', 'rising'
+  limit: limit,
+}).action();
+```
+
+### Customize AI Analysis
+
+Modify the AI prompt in `src/reddit-news-flow.ts`:
+
+```typescript
+const summaryResult = await new AIAgentBubble({
+  message: `Analyze these posts and focus on technology trends...`,
+  model: { model: 'google/gemini-2.5-flash' },
+}).action();
+```
+
+## 🔧 Development
+
+### Build for Production
+
+```bash
+npm run build
+npm start
+```
+
+### Project Scripts
+
+- `npm run dev` - Run with hot reload (tsx)
+- `npm run build` - Compile TypeScript to JavaScript
+- `npm start` - Run compiled JavaScript
+- `npm run typecheck` - Check TypeScript types
+
+## 📖 Next Steps
+
+### Use Cases
+
+This template can be extended for:
+
+- **Lead generation** - Find potential customers discussing pain points
+- **Market research** - Analyze trends and sentiment in specific communities
+- **Content curation** - Collect and summarize relevant discussions
+- **Competitive intelligence** - Monitor what people say about competitors
+- **Community management** - Track important discussions in your community
+
+### Example: Lead Finder
+
+Extend the flow to find frustrated users:
+
+```typescript
+const agentResult = await new AIAgentBubble({
+  systemPrompt: `Identify users who are frustrated with their current solution.`,
+  message: `Analyze this post: "${post.title}" - ${post.selftext}`,
+  model: {
+    model: 'google/gemini-2.5-flash',
+    jsonMode: true,
+  },
+}).action();
+```
+
+### Learn More
+
+- [BubbleLab Documentation](https://github.com/bubblelabai/BubbleLab)
+- [Examples & Tutorials](https://github.com/bubblelabai/BubbleLab/tree/main/examples)
+
+## 🐛 Troubleshooting
+
+### Error: API Key Not Found
+
+Make sure `.env` file exists and contains a valid `GOOGLE_API_KEY`.
+
+### Error: Module Not Found
+
+Run `npm install` to install dependencies.
+
+### No Posts Found
+
+- Check that the subreddit name is spelled correctly
+- Try increasing the `limit` parameter
+- Verify you have internet connection
+
+## 💬 Support
+
+- **Issues**: [GitHub Issues](https://github.com/bubblelabai/BubbleLab/issues)
+- **Discussions**: [GitHub Discussions](https://github.com/bubblelabai/BubbleLab/discussions)
+
+## 📄 License
+
+Apache-2.0 © Bubble Lab, Inc.
+
+---
+
+**Happy Scraping! 🫧**
package/templates/reddit-scraper/package.json
ADDED

@@ -0,0 +1,23 @@
+{
+  "name": "my-bubblelab-app",
+  "version": "0.1.0",
+  "type": "module",
+  "private": true,
+  "description": "BubbleLab AI agent application",
+  "scripts": {
+    "dev": "tsx src/index.ts",
+    "build": "tsc",
+    "start": "node dist/index.js",
+    "typecheck": "tsc --noEmit"
+  },
+  "dependencies": {
+    "@bubblelab/bubble-core": "^0.1.1",
+    "@bubblelab/bubble-runtime": "^0.1.4",
+    "dotenv": "^16.4.5"
+  },
+  "devDependencies": {
+    "@types/node": "^20.12.12",
+    "tsx": "^4.20.3",
+    "typescript": "^5.4.5"
+  }
+}
package/templates/reddit-scraper/src/index.ts
ADDED

@@ -0,0 +1,58 @@
+import { BubbleRunner } from '@bubblelab/bubble-runtime';
+import { BubbleFactory } from '@bubblelab/bubble-core';
+import { readFileSync } from 'fs';
+import { fileURLToPath } from 'url';
+import { dirname, join } from 'path';
+import { CredentialType } from '@bubblelab/shared-schemas';
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+
+async function main() {
+  // Step 1: Create a BubbleFactory and register defaults
+  const bubbleFactory = new BubbleFactory();
+  await bubbleFactory.registerDefaults();
+
+  // Step 2: Read the flow code as a string
+  const flowCode = readFileSync(
+    join(__dirname, 'reddit-news-flow.ts'),
+    'utf-8'
+  );
+
+  // Step 3: Create a BubbleRunner with your flow code
+  const runner = new BubbleRunner(flowCode, bubbleFactory);
+  // Step 4: (Optional) Modify bubble parameters dynamically
+  const subreddit = 'worldnews';
+  const limit = 10;
+
+  // Step 5: Set the credentials
+  runner.injector.injectCredentials(runner.getParsedBubbles(), [], {
+    [CredentialType.GOOGLE_GEMINI_CRED]: process.env.GOOGLE_API_KEY,
+  });
+
+  const result = await runner.runAll({
+    subreddit,
+    limit,
+  });
+
+  if (result.error) {
+    console.error('❌ Error:', result.error);
+    process.exit(1);
+  } else {
+    console.log('✅ Reddit scraper executed successfully');
+    console.log(JSON.stringify(result.data, null, 2));
+  }
+
+  const summary = runner.getLogger()?.getExecutionSummary();
+  if (summary) {
+    console.log(summary);
+  }
+
+  // Force exit to close any lingering connections (AI model HTTP clients, etc.)
+  process.exit(0);
+}
+
+// Run the example
+main().catch((error) => {
+  console.error('❌ Error:', error);
+  process.exit(1);
+});
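On success, the `result.data` logged above should mirror the object returned by `RedditNewsFlow.handle()` in `reddit-news-flow.ts` (shown next). A sketch of that shape, with field types inferred from the flow code rather than from a published typing:

```typescript
// Inferred from the return statement of RedditNewsFlow.handle(); not an official type.
interface RedditNewsResult {
  subreddit: string;
  postsScraped: number;
  summary: string | undefined; // summaryResult.data?.response
  timestamp: string;           // new Date().toISOString()
  status: 'success';
}
```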
package/templates/reddit-scraper/src/reddit-news-flow.ts
ADDED

@@ -0,0 +1,93 @@
+/**
+ * Reddit News Scraper Flow
+ *
+ * This is a simple BubbleFlow that scrapes Reddit and summarizes news posts.
+ */
+import {
+  BubbleFlow,
+  RedditScrapeTool,
+  AIAgentBubble,
+  type WebhookEvent,
+} from '@bubblelab/bubble-core';
+import { config } from 'dotenv';
+
+// Load environment variables from .env file
+config();
+
+/**
+ * Payload interface for the Reddit news flow
+ */
+interface RedditNewsPayload extends WebhookEvent {
+  subreddit: string;
+  limit: number;
+}
+
+/**
+ * RedditNewsFlow - Scrapes Reddit and summarizes news
+ *
+ * This flow demonstrates:
+ * - Using RedditScrapeTool to scrape subreddit posts
+ * - Using AIAgentBubble to analyze and summarize content
+ * - Simple 2-step workflow
+ */
+export class RedditNewsFlow extends BubbleFlow<'webhook/http'> {
+  async handle(payload: RedditNewsPayload) {
+    const subreddit = payload.subreddit || 'worldnews';
+    const limit = payload.limit || 10;
+
+    // Step 1: Scrape Reddit for posts
+    const scrapeResult = await new RedditScrapeTool({
+      subreddit: subreddit,
+      sort: 'hot',
+      limit: limit,
+    }).action();
+
+    if (!scrapeResult.success || !scrapeResult.data?.posts) {
+      throw new Error('Failed to scrape Reddit or no posts found.');
+    }
+
+    const posts = scrapeResult.data.posts;
+
+    // Format posts for AI
+    const postsText = posts
+      .map(
+        (
+          post: {
+            title: string;
+            score: number;
+            selftext: string;
+            postUrl: string;
+          },
+          i: number
+        ) =>
+          `${i + 1}. "${post.title}" (${post.score} upvotes)\n   ${post.selftext || 'No description'}\n   URL: ${post.postUrl}`
+      )
+      .join('\n\n');
+
+    // Step 2: Summarize the news using AI
+    const summaryResult = await new AIAgentBubble({
+      message: `Here are the top ${posts.length} posts from r/${subreddit}:
+
+${postsText}
+
+Please provide:
+1. A summary of the top 5 most important/popular news items
+2. Key themes or trends you notice
+3. A one-paragraph executive summary
+
+Format the response in a clear, readable way.`,
+      model: {
+        model: 'google/gemini-2.5-flash',
+      },
+      tools: [],
+    }).action();
+
+    return {
+      subreddit,
+      postsScraped: posts.length,
+      summary: summaryResult.data?.response,
+      timestamp: new Date().toISOString(),
+      status: 'success',
+    };
+  }
+}
package/templates/reddit-scraper/tsconfig.json
ADDED

@@ -0,0 +1,20 @@
+{
+  "compilerOptions": {
+    "target": "ES2022",
+    "module": "ES2022",
+    "lib": ["ES2022"],
+    "moduleResolution": "node",
+    "outDir": "./dist",
+    "rootDir": "./src",
+    "strict": true,
+    "esModuleInterop": true,
+    "skipLibCheck": true,
+    "forceConsistentCasingInFileNames": true,
+    "resolveJsonModule": true,
+    "declaration": true,
+    "declarationMap": true,
+    "sourceMap": true
+  },
+  "include": ["src/**/*"],
+  "exclude": ["node_modules", "dist"]
+}