thepopebot 1.1.2 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/api/index.js CHANGED
@@ -1,12 +1,8 @@
1
- const paths = require('../lib/paths');
2
- const { render_md } = require('../lib/utils/render-md');
3
1
  const { createJob } = require('../lib/tools/create-job');
4
- const { setWebhook, sendMessage, downloadFile, reactToMessage, startTypingIndicator } = require('../lib/tools/telegram');
5
- const { isWhisperEnabled, transcribeAudio } = require('../lib/tools/openai');
6
- const { chat, getApiKey } = require('../lib/claude');
7
- const { toolDefinitions, toolExecutors } = require('../lib/claude/tools');
8
- const { getHistory, updateHistory } = require('../lib/claude/conversation');
2
+ const { setWebhook, sendMessage } = require('../lib/tools/telegram');
9
3
  const { getJobStatus } = require('../lib/tools/github');
4
+ const { getTelegramAdapter } = require('../lib/channels');
5
+ const { chat, summarizeJob, addToThread } = require('../lib/ai');
10
6
 
11
7
  // Bot token from env, can be overridden by /telegram/register
12
8
  let telegramBotToken = null;
@@ -57,55 +53,6 @@ function extractJobId(branchName) {
57
53
  return branchName.slice(4);
58
54
  }
59
55
 
60
- /**
61
- * Summarize a completed job using Claude
62
- * @param {Object} results - Job results from webhook payload
63
- * @returns {Promise<string>} The message to send to Telegram
64
- */
65
- async function summarizeJob(results) {
66
- try {
67
- const apiKey = getApiKey();
68
-
69
- // System prompt from JOB_SUMMARY.md (supports {{includes}})
70
- const systemPrompt = render_md(paths.jobSummaryMd);
71
-
72
- // User message: structured job results
73
- const userMessage = [
74
- results.job ? `## Task\n${results.job}` : '',
75
- results.commit_message ? `## Commit Message\n${results.commit_message}` : '',
76
- results.changed_files?.length ? `## Changed Files\n${results.changed_files.join('\n')}` : '',
77
- results.status ? `## Status\n${results.status}` : '',
78
- results.merge_result ? `## Merge Result\n${results.merge_result}` : '',
79
- results.pr_url ? `## PR URL\n${results.pr_url}` : '',
80
- results.run_url ? `## Run URL\n${results.run_url}` : '',
81
- results.log ? `## Agent Log\n${results.log}` : '',
82
- ].filter(Boolean).join('\n\n');
83
-
84
- const response = await fetch('https://api.anthropic.com/v1/messages', {
85
- method: 'POST',
86
- headers: {
87
- 'Content-Type': 'application/json',
88
- 'x-api-key': apiKey,
89
- 'anthropic-version': '2023-06-01',
90
- },
91
- body: JSON.stringify({
92
- model: process.env.EVENT_HANDLER_MODEL || 'claude-sonnet-4-20250514',
93
- max_tokens: 1024,
94
- system: systemPrompt,
95
- messages: [{ role: 'user', content: userMessage }],
96
- }),
97
- });
98
-
99
- if (!response.ok) throw new Error(`Claude API error: ${response.status}`);
100
-
101
- const result = await response.json();
102
- return (result.content?.[0]?.text || '').trim() || 'Job finished.';
103
- } catch (err) {
104
- console.error('Failed to summarize job:', err);
105
- return 'Job finished.';
106
- }
107
- }
108
-
109
56
  // ─────────────────────────────────────────────────────────────────────────────
110
57
  // Route handlers
111
58
  // ─────────────────────────────────────────────────────────────────────────────
@@ -142,100 +89,42 @@ async function handleTelegramRegister(request) {
142
89
  }
143
90
 
144
91
/**
 * Telegram webhook route handler.
 *
 * Always answers HTTP 200 (`{ ok: true }`): Telegram re-delivers any update
 * that gets a non-2xx response, so returning an error here would cause a
 * retry loop on malformed or rejected payloads.
 *
 * @param {Request} request - Incoming webhook request from Telegram
 * @returns {Promise<Response>} JSON `{ ok: true }` in every case
 */
async function handleTelegramWebhook(request) {
  const botToken = getTelegramBotToken();
  if (!botToken) return Response.json({ ok: true });

  const adapter = getTelegramAdapter(botToken);

  // receive() parses/validates the update; it can throw on a malformed
  // body (e.g. invalid JSON). Swallow and ack so Telegram does not retry.
  let normalized = null;
  try {
    normalized = await adapter.receive(request);
  } catch (err) {
    console.error('Failed to parse Telegram update:', err);
    return Response.json({ ok: true });
  }
  if (!normalized) return Response.json({ ok: true });

  // Process message asynchronously (don't block the webhook response)
  processChannelMessage(adapter, normalized).catch((err) => {
    console.error('Failed to process message:', err);
  });

  return Response.json({ ok: true });
}
215
106
 
216
107
/**
 * Process a normalized message through the AI layer with channel UX.
 *
 * Flow: acknowledge receipt → show a processing indicator → run the AI →
 * send the response (or an apology on failure) → stop the indicator.
 *
 * @param {ChannelAdapter} adapter - Channel adapter for the message's source
 * @param {{ threadId: string, text: string, attachments: Array, metadata: object }} normalized
 */
async function processChannelMessage(adapter, normalized) {
  // Acknowledgment (e.g. a reaction emoji) is cosmetic — never let a
  // failure here prevent the message from actually being processed.
  await Promise.resolve(adapter.acknowledge(normalized.metadata)).catch((err) => {
    console.error('Failed to acknowledge message:', err);
  });
  const stopIndicator = adapter.startProcessingIndicator(normalized.metadata);

  try {
    const response = await chat(normalized.threadId, normalized.text, normalized.attachments);
    await adapter.sendResponse(normalized.threadId, response, normalized.metadata);
  } catch (err) {
    console.error('Failed to process message with AI:', err);
    // Best-effort apology; ignore delivery failures of the error message.
    await adapter
      .sendResponse(
        normalized.threadId,
        'Sorry, I encountered an error processing your message.',
        normalized.metadata
      )
      .catch(() => {});
  } finally {
    // Always stop the typing/processing indicator, success or not.
    stopIndicator();
  }
}
241
130
 
@@ -276,10 +165,8 @@ async function handleGithubWebhook(request) {
276
165
 
277
166
  await sendMessage(botToken, TELEGRAM_CHAT_ID, message);
278
167
 
279
- // Add the summary to chat memory so Claude has context in future conversations
280
- const history = getHistory(TELEGRAM_CHAT_ID);
281
- history.push({ role: 'assistant', content: message });
282
- updateHistory(TELEGRAM_CHAT_ID, history);
168
+ // Add the summary to chat memory so the agent has context in future conversations
169
+ await addToThread(TELEGRAM_CHAT_ID, message);
283
170
 
284
171
  console.log(`Notified chat ${TELEGRAM_CHAT_ID} about job ${jobId.slice(0, 8)}`);
285
172
 
package/bin/dev.sh ADDED
@@ -0,0 +1,30 @@
1
#!/bin/bash
# Rebuild a throwaway dev project that consumes this package via a file: link.
# Usage: dev.sh [target-dir]   (default: /tmp/thepopebot)
set -e

PACKAGE_DIR="$(cd "$(dirname "$0")/.." && pwd)"
DEV_DIR="${1:-/tmp/thepopebot}"
# mktemp is POSIX-available everywhere; uuidgen is not installed by default
# on many Linux distributions.
ENV_BACKUP="$(mktemp -u /tmp/env.XXXXXX)"

# Preserve an existing .env across the wipe-and-rebuild below.
HAS_ENV=false
if [ -f "$DEV_DIR/.env" ]; then
  mv "$DEV_DIR/.env" "$ENV_BACKUP"
  HAS_ENV=true
fi

# If the script dies midway, put the .env backup back so it is not lost.
restore_env() {
  if [ "$HAS_ENV" = true ] && [ -f "$ENV_BACKUP" ]; then
    mkdir -p "$DEV_DIR"
    mv "$ENV_BACKUP" "$DEV_DIR/.env" || true
  fi
}
trap restore_env EXIT

rm -rf "$DEV_DIR"
mkdir -p "$DEV_DIR"
cd "$DEV_DIR"

node "$PACKAGE_DIR/bin/cli.js" init

# Point the generated package.json at the local checkout.
# BSD sed (macOS) requires `-i ''`; GNU sed (Linux) requires a bare `-i`.
if sed --version >/dev/null 2>&1; then
  sed -i "s|\"thepopebot\": \".*\"|\"thepopebot\": \"file:$PACKAGE_DIR\"|" package.json
else
  sed -i '' "s|\"thepopebot\": \".*\"|\"thepopebot\": \"file:$PACKAGE_DIR\"|" package.json
fi

rm -rf node_modules package-lock.json
npm install --install-links

if [ "$HAS_ENV" = true ]; then
  mv "$ENV_BACKUP" .env
  echo "Restored .env from previous build"
else
  npm run setup
fi
package/config/index.js CHANGED
@@ -22,6 +22,11 @@ function withThepopebot(nextConfig = {}) {
22
22
  '@grammyjs/parse-mode',
23
23
  'node-cron',
24
24
  'uuid',
25
+ '@langchain/langgraph',
26
+ '@langchain/anthropic',
27
+ '@langchain/core',
28
+ 'zod',
29
+ 'better-sqlite3',
25
30
  ],
26
31
  };
27
32
  }
@@ -0,0 +1,38 @@
1
+ const { createReactAgent } = require('@langchain/langgraph/prebuilt');
2
+ const { createModel } = require('./model');
3
+ const { createJobTool, getJobStatusTool } = require('./tools');
4
+ const { createCheckpointer } = require('./memory');
5
+ const paths = require('../paths');
6
+ const { render_md } = require('../utils/render-md');
7
+
8
// Module-level cache for the agent singleton.
let _agent = null;

/**
 * Lazily build and cache the LangGraph agent singleton.
 *
 * createReactAgent wires the model, tools and checkpointer together and
 * runs the tool-call loop automatically; the system prompt comes from
 * CHATBOT.md (rendered with {{includes}} support).
 *
 * @returns {object} The cached createReactAgent instance
 */
function getAgent() {
  if (_agent) return _agent;

  _agent = createReactAgent({
    llm: createModel(),
    tools: [createJobTool, getJobStatusTool],
    checkpointSaver: createCheckpointer(),
    prompt: render_md(paths.chatbotMd),
  });
  return _agent;
}

/**
 * Drop the cached agent so the next getAgent() call rebuilds it
 * (e.g., when config changes).
 */
function resetAgent() {
  _agent = null;
}
37
+
38
+ module.exports = { getAgent, resetAgent };
@@ -0,0 +1,147 @@
1
+ const { HumanMessage, AIMessage } = require('@langchain/core/messages');
2
+ const { getAgent } = require('./agent');
3
+ const { createModel } = require('./model');
4
+ const paths = require('../paths');
5
+ const { render_md } = require('../utils/render-md');
6
+
7
+ /**
8
+ * Process a chat message through the LangGraph agent.
9
+ *
10
+ * @param {string} threadId - Conversation thread ID (from channel adapter)
11
+ * @param {string} message - User's message text
12
+ * @param {Array} [attachments=[]] - Normalized attachments from adapter
13
+ * @returns {Promise<string>} AI response text
14
+ */
15
+ async function chat(threadId, message, attachments = []) {
16
+ const agent = getAgent();
17
+
18
+ // Build content blocks: text + any image attachments as base64 vision
19
+ const content = [];
20
+
21
+ if (message) {
22
+ content.push({ type: 'text', text: message });
23
+ }
24
+
25
+ for (const att of attachments) {
26
+ if (att.category === 'image') {
27
+ content.push({
28
+ type: 'image_url',
29
+ image_url: {
30
+ url: `data:${att.mimeType};base64,${att.data.toString('base64')}`,
31
+ },
32
+ });
33
+ }
34
+ // Documents: future handling
35
+ }
36
+
37
+ // If only text and no attachments, simplify to a string
38
+ const messageContent = content.length === 1 && content[0].type === 'text'
39
+ ? content[0].text
40
+ : content;
41
+
42
+ const result = await agent.invoke(
43
+ { messages: [new HumanMessage({ content: messageContent })] },
44
+ { configurable: { thread_id: threadId } }
45
+ );
46
+
47
+ const lastMessage = result.messages[result.messages.length - 1];
48
+
49
+ // LangChain message content can be a string or an array of content blocks
50
+ if (typeof lastMessage.content === 'string') {
51
+ return lastMessage.content;
52
+ }
53
+
54
+ // Extract text from content blocks
55
+ return lastMessage.content
56
+ .filter((block) => block.type === 'text')
57
+ .map((block) => block.text)
58
+ .join('\n');
59
+ }
60
+
61
+ /**
62
+ * Process a chat message with streaming (for channels that support it).
63
+ *
64
+ * @param {string} threadId - Conversation thread ID
65
+ * @param {string} message - User's message text
66
+ * @returns {AsyncIterableIterator<string>} Stream of text chunks
67
+ */
68
+ async function* chatStream(threadId, message) {
69
+ const agent = getAgent();
70
+
71
+ const stream = await agent.stream(
72
+ { messages: [new HumanMessage(message)] },
73
+ { configurable: { thread_id: threadId }, streamMode: 'messages' }
74
+ );
75
+
76
+ for await (const [message, metadata] of stream) {
77
+ if (message.content && typeof message.content === 'string') {
78
+ yield message.content;
79
+ }
80
+ }
81
+ }
82
+
83
+ /**
84
+ * One-shot summarization with a different system prompt and no memory.
85
+ * Used for job completion summaries sent via GitHub webhook.
86
+ *
87
+ * @param {object} results - Job results from webhook payload
88
+ * @returns {Promise<string>} Summary text
89
+ */
90
+ async function summarizeJob(results) {
91
+ try {
92
+ const model = createModel({ maxTokens: 1024 });
93
+ const systemPrompt = render_md(paths.jobSummaryMd);
94
+
95
+ const userMessage = [
96
+ results.job ? `## Task\n${results.job}` : '',
97
+ results.commit_message ? `## Commit Message\n${results.commit_message}` : '',
98
+ results.changed_files?.length ? `## Changed Files\n${results.changed_files.join('\n')}` : '',
99
+ results.status ? `## Status\n${results.status}` : '',
100
+ results.merge_result ? `## Merge Result\n${results.merge_result}` : '',
101
+ results.pr_url ? `## PR URL\n${results.pr_url}` : '',
102
+ results.run_url ? `## Run URL\n${results.run_url}` : '',
103
+ results.log ? `## Agent Log\n${results.log}` : '',
104
+ ]
105
+ .filter(Boolean)
106
+ .join('\n\n');
107
+
108
+ const response = await model.invoke([
109
+ ['system', systemPrompt],
110
+ ['human', userMessage],
111
+ ]);
112
+
113
+ const text =
114
+ typeof response.content === 'string'
115
+ ? response.content
116
+ : response.content
117
+ .filter((block) => block.type === 'text')
118
+ .map((block) => block.text)
119
+ .join('\n');
120
+
121
+ return text.trim() || 'Job finished.';
122
+ } catch (err) {
123
+ console.error('Failed to summarize job:', err);
124
+ return 'Job finished.';
125
+ }
126
+ }
127
+
128
+ /**
129
+ * Inject a message into a thread's memory so the agent has context
130
+ * for future conversations (e.g., job completion summaries).
131
+ *
132
+ * @param {string} threadId - Conversation thread ID
133
+ * @param {string} text - Message text to inject as an assistant message
134
+ */
135
+ async function addToThread(threadId, text) {
136
+ try {
137
+ const agent = getAgent();
138
+ await agent.updateState(
139
+ { configurable: { thread_id: threadId } },
140
+ { messages: [new AIMessage(text)] }
141
+ );
142
+ } catch (err) {
143
+ console.error('Failed to add message to thread:', err);
144
+ }
145
+ }
146
+
147
+ module.exports = { chat, chatStream, summarizeJob, addToThread };
@@ -0,0 +1,39 @@
1
+ const path = require('path');
2
+ const fs = require('fs');
3
+ const paths = require('../paths');
4
+
5
+ /**
6
+ * Create a LangGraph checkpointer based on environment configuration.
7
+ *
8
+ * Config env vars:
9
+ * MEMORY_BACKEND — "memory" (default), "sqlite"
10
+ * MEMORY_PATH — SQLite file path (default: "data/memory.sqlite")
11
+ *
12
+ * @returns {import('@langchain/langgraph').BaseCheckpointSaver}
13
+ */
14
+ function createCheckpointer() {
15
+ const backend = process.env.MEMORY_BACKEND || 'memory';
16
+
17
+ switch (backend) {
18
+ case 'memory': {
19
+ const { MemorySaver } = require('@langchain/langgraph');
20
+ return new MemorySaver();
21
+ }
22
+ case 'sqlite': {
23
+ const { SqliteSaver } = require('@langchain/langgraph-checkpoint-sqlite');
24
+ const dbPath = process.env.MEMORY_PATH || path.join(paths.dataDir, 'memory.sqlite');
25
+
26
+ // Ensure the data directory exists
27
+ const dir = path.dirname(dbPath);
28
+ if (!fs.existsSync(dir)) {
29
+ fs.mkdirSync(dir, { recursive: true });
30
+ }
31
+
32
+ return SqliteSaver.fromConnString(dbPath);
33
+ }
34
+ default:
35
+ throw new Error(`Unknown memory backend: ${backend}`);
36
+ }
37
+ }
38
+
39
+ module.exports = { createCheckpointer };
@@ -0,0 +1,44 @@
1
+ const { ChatAnthropic } = require('@langchain/anthropic');
2
+
3
+ const DEFAULT_MODEL = 'claude-sonnet-4-20250514';
4
+
5
+ /**
6
+ * Create a LangChain chat model based on environment configuration.
7
+ *
8
+ * Config env vars:
9
+ * LLM_PROVIDER — "anthropic" (default). Future: "openai", "google", etc.
10
+ * LLM_MODEL — Model name override (e.g. "claude-sonnet-4-20250514")
11
+ * ANTHROPIC_API_KEY — Required for anthropic provider
12
+ *
13
+ * @param {object} [options]
14
+ * @param {number} [options.maxTokens=4096] - Max tokens for the response
15
+ * @returns {import('@langchain/core/language_models/chat_models').BaseChatModel}
16
+ */
17
+ function createModel(options = {}) {
18
+ const provider = process.env.LLM_PROVIDER || 'anthropic';
19
+ const modelName = process.env.LLM_MODEL || DEFAULT_MODEL;
20
+ const maxTokens = options.maxTokens || 4096;
21
+
22
+ switch (provider) {
23
+ case 'anthropic': {
24
+ const apiKey = process.env.ANTHROPIC_API_KEY;
25
+ if (!apiKey) {
26
+ throw new Error('ANTHROPIC_API_KEY environment variable is required');
27
+ }
28
+ return new ChatAnthropic({
29
+ modelName,
30
+ maxTokens,
31
+ anthropicApiKey: apiKey,
32
+ });
33
+ }
34
+ // Future providers:
35
+ // case 'openai': {
36
+ // const { ChatOpenAI } = require('@langchain/openai');
37
+ // return new ChatOpenAI({ modelName, maxTokens });
38
+ // }
39
+ default:
40
+ throw new Error(`Unknown LLM provider: ${provider}`);
41
+ }
42
+ }
43
+
44
+ module.exports = { createModel };
@@ -0,0 +1,49 @@
1
+ const { tool } = require('@langchain/core/tools');
2
+ const { z } = require('zod');
3
+ const { createJob } = require('../tools/create-job');
4
+ const { getJobStatus } = require('../tools/github');
5
+
6
// Input schema for create_job: a single free-form description field.
const createJobInputSchema = z.object({
  job_description: z
    .string()
    .describe(
      'Detailed job description including context and requirements. Be specific about what needs to be done.'
    ),
});

/**
 * LangChain tool wrapper around createJob(). Returns a JSON string with
 * the new job's id and branch so the model can reference them later.
 */
const createJobTool = tool(
  async (input) => {
    const { job_id, branch } = await createJob(input.job_description);
    return JSON.stringify({ success: true, job_id, branch });
  },
  {
    name: 'create_job',
    description:
      'Create an autonomous job for thepopebot to execute. Use this tool liberally - if the user asks for ANY task to be done, create a job. Jobs can handle code changes, file updates, research tasks, web scraping, data analysis, or anything requiring autonomous work. When the user explicitly asks for a job, ALWAYS use this tool. Returns the job ID and branch name.',
    schema: createJobInputSchema,
  }
);
28
+
29
/**
 * LangChain tool wrapper around getJobStatus(). Returns the raw status
 * payload serialized as a JSON string.
 */
const getJobStatusTool = tool(
  async (input) => JSON.stringify(await getJobStatus(input.job_id)),
  {
    name: 'get_job_status',
    description:
      'Check status of running jobs. Returns list of active workflow runs with timing and current step. Use when user asks about job progress, running jobs, or job status.',
    schema: z.object({
      job_id: z
        .string()
        .optional()
        .describe(
          'Optional: specific job ID to check. If omitted, returns all running jobs.'
        ),
    }),
  }
);
48
+
49
+ module.exports = { createJobTool, getJobStatusTool };
@@ -0,0 +1,56 @@
1
+ /**
2
+ * Base channel adapter interface.
3
+ * Every chat channel (Telegram, Slack, web, etc.) implements this contract.
4
+ */
5
+ class ChannelAdapter {
6
+ /**
7
+ * Handle an incoming webhook request from this channel.
8
+ * Returns normalized message data or null if no action needed.
9
+ *
10
+ * @param {Request} request - Incoming HTTP request
11
+ * @returns {Promise<{ threadId: string, text: string, attachments: Array, metadata: object } | null>}
12
+ *
13
+ * Attachments array (may be empty) — only non-text content that the LLM needs to see:
14
+ * { category: "image", mimeType: "image/png", data: Buffer } — send to LLM as vision
15
+ * { category: "document", mimeType: "application/pdf", data: Buffer } — future: extract/attach
16
+ *
17
+ * The adapter downloads authenticated files and normalizes them.
18
+ * Voice/audio messages are fully resolved by the adapter — transcribed to text
19
+ * and included in the `text` field. They are NOT passed as attachments.
20
+ */
21
+ async receive(request) {
22
+ throw new Error('Not implemented');
23
+ }
24
+
25
+ /**
26
+ * Called when message is received — adapter shows acknowledgment.
27
+ * Telegram: thumbs up reaction. Slack: emoji reaction. Web: no-op.
28
+ */
29
+ async acknowledge(metadata) {}
30
+
31
+ /**
32
+ * Called while AI is processing — adapter shows activity.
33
+ * Telegram: typing indicator. Slack: typing indicator. Web: no-op (streaming handles this).
34
+ * Returns a stop function.
35
+ */
36
+ startProcessingIndicator(metadata) {
37
+ return () => {};
38
+ }
39
+
40
+ /**
41
+ * Send a complete (non-streaming) response back to the channel.
42
+ */
43
+ async sendResponse(threadId, text, metadata) {
44
+ throw new Error('Not implemented');
45
+ }
46
+
47
+ /**
48
+ * Whether this channel supports real streaming (e.g., web chat via Vercel AI SDK).
49
+ * If true, the AI layer provides a stream instead of a complete response.
50
+ */
51
+ get supportsStreaming() {
52
+ return false;
53
+ }
54
+ }
55
+
56
+ module.exports = { ChannelAdapter };
@@ -0,0 +1,17 @@
1
+ const { TelegramAdapter } = require('./telegram');
2
+
3
// Cached adapter instance, keyed implicitly by its bot token.
let _telegramAdapter = null;

/**
 * Get the Telegram channel adapter (lazy singleton).
 * A fresh adapter is built whenever the bot token differs from the
 * cached one (e.g. after /telegram/register swaps tokens at runtime).
 *
 * @param {string} botToken - Telegram bot token
 * @returns {TelegramAdapter}
 */
function getTelegramAdapter(botToken) {
  const stale = !_telegramAdapter || _telegramAdapter.botToken !== botToken;
  if (stale) {
    _telegramAdapter = new TelegramAdapter(botToken);
  }
  return _telegramAdapter;
}
16
+
17
+ module.exports = { getTelegramAdapter };
@@ -0,0 +1,146 @@
1
+ const { ChannelAdapter } = require('./base');
2
+ const {
3
+ sendMessage,
4
+ downloadFile,
5
+ reactToMessage,
6
+ startTypingIndicator,
7
+ } = require('../tools/telegram');
8
+ const { isWhisperEnabled, transcribeAudio } = require('../tools/openai');
9
+
10
/**
 * Telegram implementation of the ChannelAdapter contract.
 * Normalizes Telegram webhook updates (text, voice, audio, photos,
 * documents) into the channel-agnostic message shape consumed by the
 * AI layer, and maps acknowledgment/typing/response back to Telegram.
 */
class TelegramAdapter extends ChannelAdapter {
  /**
   * @param {string} botToken - Telegram bot token used for all API calls
   */
  constructor(botToken) {
    super();
    this.botToken = botToken;
  }

  /**
   * Parse a Telegram webhook update into normalized message data.
   * Handles: text, voice/audio (transcribed), photos, documents.
   * Returns null if the update should be ignored.
   *
   * Security gates (in order): webhook secret header, verification-code
   * handshake, configured-chat allowlist. All rejections return null so
   * the caller can still answer HTTP 200 and avoid Telegram retries.
   */
  async receive(request) {
    const { TELEGRAM_WEBHOOK_SECRET, TELEGRAM_CHAT_ID, TELEGRAM_VERIFICATION } = process.env;

    // Validate secret token if configured
    if (TELEGRAM_WEBHOOK_SECRET) {
      const headerSecret = request.headers.get('x-telegram-bot-api-secret-token');
      if (headerSecret !== TELEGRAM_WEBHOOK_SECRET) {
        return null;
      }
    }

    const update = await request.json();
    // Treat edits like new messages; other update kinds are ignored.
    const message = update.message || update.edited_message;

    if (!message || !message.chat || !this.botToken) return null;

    const chatId = String(message.chat.id);
    let text = message.text || null;
    const attachments = [];

    // Check for verification code — works even before TELEGRAM_CHAT_ID is set
    if (TELEGRAM_VERIFICATION && text === TELEGRAM_VERIFICATION) {
      await sendMessage(this.botToken, chatId, `Your chat ID:\n<code>${chatId}</code>`);
      return null;
    }

    // Security: if no TELEGRAM_CHAT_ID configured, ignore all messages
    if (!TELEGRAM_CHAT_ID) return null;

    // Security: only accept messages from configured chat
    if (chatId !== TELEGRAM_CHAT_ID) return null;

    // Voice messages → transcribe to text
    if (message.voice) {
      if (!isWhisperEnabled()) {
        await sendMessage(
          this.botToken,
          chatId,
          'Voice messages are not supported. Please set OPENAI_API_KEY to enable transcription.'
        );
        return null;
      }
      try {
        const { buffer, filename } = await downloadFile(this.botToken, message.voice.file_id);
        text = await transcribeAudio(buffer, filename);
      } catch (err) {
        // Transcription failure: tell the user and drop the update.
        console.error('Failed to transcribe voice:', err);
        await sendMessage(this.botToken, chatId, 'Sorry, I could not transcribe your voice message.');
        return null;
      }
    }

    // Audio messages → transcribe to text (only if no text/transcript yet)
    if (message.audio && !text) {
      if (!isWhisperEnabled()) {
        await sendMessage(
          this.botToken,
          chatId,
          'Audio messages are not supported. Please set OPENAI_API_KEY to enable transcription.'
        );
        return null;
      }
      try {
        const { buffer, filename } = await downloadFile(this.botToken, message.audio.file_id);
        text = await transcribeAudio(buffer, filename);
      } catch (err) {
        console.error('Failed to transcribe audio:', err);
        await sendMessage(this.botToken, chatId, 'Sorry, I could not transcribe your audio message.');
        return null;
      }
    }

    // Photo → download largest size, add as image attachment
    // (assumes Telegram lists photo sizes smallest→largest, so the last
    // entry is the largest — confirm against the Bot API PhotoSize docs)
    if (message.photo && message.photo.length > 0) {
      try {
        const largest = message.photo[message.photo.length - 1];
        const { buffer } = await downloadFile(this.botToken, largest.file_id);
        // NOTE(review): mimeType is hard-coded to image/jpeg here; Telegram
        // photos are presumably always JPEG — verify.
        attachments.push({ category: 'image', mimeType: 'image/jpeg', data: buffer });
        // Use caption as text if no text yet
        if (!text && message.caption) text = message.caption;
      } catch (err) {
        // Best-effort: a failed photo download does not kill the message.
        console.error('Failed to download photo:', err);
      }
    }

    // Document → download, add as document attachment
    if (message.document) {
      try {
        // filename is destructured but currently unused.
        const { buffer, filename } = await downloadFile(this.botToken, message.document.file_id);
        const mimeType = message.document.mime_type || 'application/octet-stream';
        attachments.push({ category: 'document', mimeType, data: buffer });
        if (!text && message.caption) text = message.caption;
      } catch (err) {
        // Best-effort: a failed document download does not kill the message.
        console.error('Failed to download document:', err);
      }
    }

    // Nothing actionable
    if (!text && attachments.length === 0) return null;

    return {
      threadId: chatId,
      text: text || '',
      attachments,
      metadata: { messageId: message.message_id, chatId },
    };
  }

  /**
   * Acknowledge receipt with an emoji reaction; failures are swallowed
   * because the reaction is purely cosmetic.
   */
  async acknowledge(metadata) {
    await reactToMessage(this.botToken, metadata.chatId, metadata.messageId).catch(() => {});
  }

  /**
   * Start Telegram's typing indicator; returns the stop function
   * provided by startTypingIndicator.
   */
  startProcessingIndicator(metadata) {
    return startTypingIndicator(this.botToken, metadata.chatId);
  }

  /**
   * Send the final response text to the chat (threadId doubles as the
   * Telegram chat id); sendMessage handles splitting long messages.
   */
  async sendResponse(threadId, text, metadata) {
    await sendMessage(this.botToken, threadId, text);
  }

  // Telegram has no token-streaming UI; responses are sent whole.
  get supportsStreaming() {
    return false;
  }
}
145
+
146
+ module.exports = { TelegramAdapter };
package/lib/paths.js CHANGED
@@ -25,6 +25,9 @@ module.exports = {
25
25
  // Logs
26
26
  logsDir: path.join(PROJECT_ROOT, 'logs'),
27
27
 
28
+ // Data (SQLite memory, etc.)
29
+ dataDir: path.join(PROJECT_ROOT, 'data'),
30
+
28
31
  // .env
29
32
  envFile: path.join(PROJECT_ROOT, '.env'),
30
33
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "thepopebot",
3
- "version": "1.1.2",
3
+ "version": "1.2.0",
4
4
  "description": "Create autonomous AI agents with a two-layer architecture: Next.js Event Handler + Docker Agent.",
5
5
  "bin": {
6
6
  "thepopebot": "./bin/cli.js"
@@ -20,6 +20,7 @@
20
20
  "templates/"
21
21
  ],
22
22
  "scripts": {
23
+ "dev": "bash bin/dev.sh",
23
24
  "postinstall": "node bin/postinstall.js",
24
25
  "test": "echo \"No tests yet\" && exit 0"
25
26
  },
@@ -36,14 +37,20 @@
36
37
  "license": "MIT",
37
38
  "dependencies": {
38
39
  "@grammyjs/parse-mode": "^2.2.0",
40
+ "@langchain/anthropic": "^1.3.17",
41
+ "@langchain/core": "^1.1.24",
42
+ "@langchain/langgraph": "^1.1.4",
43
+ "@langchain/langgraph-checkpoint-sqlite": "^1.0.1",
44
+ "better-sqlite3": "^12.6.2",
45
+ "chalk": "^5.3.0",
39
46
  "dotenv": "^16.3.1",
40
47
  "grammy": "^1.39.3",
41
- "node-cron": "^3.0.3",
42
- "uuid": "^9.0.0",
43
- "chalk": "^5.3.0",
44
48
  "inquirer": "^9.2.12",
49
+ "node-cron": "^3.0.3",
45
50
  "open": "^10.0.0",
46
- "ora": "^8.0.1"
51
+ "ora": "^8.0.1",
52
+ "uuid": "^9.0.0",
53
+ "zod": "^4.3.6"
47
54
  },
48
55
  "peerDependencies": {
49
56
  "next": ">=15.0.0",
@@ -1,76 +0,0 @@
1
- /**
2
- * In-memory conversation history management per Telegram chat.
3
- * - Keyed by chat_id
4
- * - 30-minute TTL per conversation
5
- * - Max 20 messages per conversation
6
- */
7
-
8
- const MAX_MESSAGES = 20;
9
- const TTL_MS = 30 * 60 * 1000; // 30 minutes
10
- const CLEANUP_INTERVAL_MS = 5 * 60 * 1000; // 5 minutes
11
-
12
- // Map<chatId, { messages: Array, lastAccess: number }>
13
- const conversations = new Map();
14
-
15
- /**
16
- * Get conversation history for a chat
17
- * @param {string} chatId - Telegram chat ID
18
- * @returns {Array} - Message history array
19
- */
20
- function getHistory(chatId) {
21
- const entry = conversations.get(chatId);
22
- if (!entry) return [];
23
-
24
- // Check if expired
25
- if (Date.now() - entry.lastAccess > TTL_MS) {
26
- conversations.delete(chatId);
27
- return [];
28
- }
29
-
30
- entry.lastAccess = Date.now();
31
- return entry.messages;
32
- }
33
-
34
- /**
35
- * Update conversation history for a chat
36
- * @param {string} chatId - Telegram chat ID
37
- * @param {Array} messages - New message history
38
- */
39
- function updateHistory(chatId, messages) {
40
- // Trim to max messages (keep most recent)
41
- const trimmed = messages.slice(-MAX_MESSAGES);
42
-
43
- conversations.set(chatId, {
44
- messages: trimmed,
45
- lastAccess: Date.now(),
46
- });
47
- }
48
-
49
- /**
50
- * Clear conversation history for a chat
51
- * @param {string} chatId - Telegram chat ID
52
- */
53
- function clearHistory(chatId) {
54
- conversations.delete(chatId);
55
- }
56
-
57
- /**
58
- * Clean up expired conversations
59
- */
60
- function cleanupExpired() {
61
- const now = Date.now();
62
- for (const [chatId, entry] of conversations) {
63
- if (now - entry.lastAccess > TTL_MS) {
64
- conversations.delete(chatId);
65
- }
66
- }
67
- }
68
-
69
- // Start cleanup interval
70
- setInterval(cleanupExpired, CLEANUP_INTERVAL_MS);
71
-
72
- module.exports = {
73
- getHistory,
74
- updateHistory,
75
- clearHistory,
76
- };
@@ -1,142 +0,0 @@
1
- const paths = require('../paths');
2
- const { render_md } = require('../utils/render-md');
3
-
4
// Model used when EVENT_HANDLER_MODEL is not set in the environment.
const DEFAULT_MODEL = 'claude-sonnet-4-20250514';

// Web search tool definition (Anthropic built-in, executed server-side).
// Frozen so the shared definition cannot be mutated accidentally; it is
// only ever read when merged into the per-call tool list.
const WEB_SEARCH_TOOL = Object.freeze({
  type: 'web_search_20250305',
  name: 'web_search',
  max_uses: 5,
});
12
-
13
/**
 * Read the Anthropic API key from the environment.
 * @returns {string} API key
 * @throws {Error} when ANTHROPIC_API_KEY is unset or empty
 */
function getApiKey() {
  const key = process.env.ANTHROPIC_API_KEY;
  if (!key) {
    throw new Error('ANTHROPIC_API_KEY environment variable is required');
  }
  return key;
}
23
-
24
/**
 * Make a single Anthropic Messages API call.
 * @param {Array} messages - Conversation messages
 * @param {Array} tools - Tool definitions
 * @returns {Promise<Object>} Parsed API response body
 * @throws {Error} on any non-2xx HTTP status
 */
async function callClaude(messages, tools) {
  const apiKey = getApiKey();
  const model = process.env.EVENT_HANDLER_MODEL || DEFAULT_MODEL;
  const systemPrompt = render_md(paths.chatbotMd);

  const payload = {
    model,
    max_tokens: 4096,
    system: systemPrompt,
    messages,
    // Server-side web search is always offered alongside the caller's tools.
    tools: [WEB_SEARCH_TOOL, ...tools],
  };

  const response = await fetch('https://api.anthropic.com/v1/messages', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
      'anthropic-beta': 'web-search-2025-03-05',
    },
    body: JSON.stringify(payload),
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`Claude API error: ${response.status} ${error}`);
  }

  return response.json();
}
62
-
63
/**
 * Run one conversational turn with Claude, executing client-side tool
 * calls in a loop until the model stops requesting them.
 * @param {string} userMessage - User's message
 * @param {Array} history - Conversation history
 * @param {Array} toolDefinitions - Available tools
 * @param {Object} toolExecutors - Tool executor functions
 * @returns {Promise<{response: string, history: Array}>}
 */
async function chat(userMessage, history, toolDefinitions, toolExecutors) {
  const messages = [...history, { role: 'user', content: userMessage }];

  let response = await callClaude(messages, toolDefinitions);
  let assistantContent = response.content;
  messages.push({ role: 'assistant', content: assistantContent });

  // Keep looping as long as Claude wants client-side tools executed.
  while (response.stop_reason === 'tool_use') {
    const toolResults = [];

    for (const block of assistantContent) {
      if (block.type !== 'tool_use') continue;
      // web_search is a server-side tool executed by Anthropic — skip it.
      if (block.name === 'web_search') continue;

      const executor = toolExecutors[block.name];
      let result;
      if (!executor) {
        result = { error: `Unknown tool: ${block.name}` };
      } else {
        try {
          result = await executor(block.input);
        } catch (err) {
          // Surface executor failures to the model instead of aborting.
          result = { error: err.message };
        }
      }

      toolResults.push({
        type: 'tool_result',
        tool_use_id: block.id,
        content: JSON.stringify(result),
      });
    }

    // Nothing client-side to run — the turn is complete.
    if (toolResults.length === 0) break;

    // Tool results go back to Claude as a user turn.
    messages.push({ role: 'user', content: toolResults });

    response = await callClaude(messages, toolDefinitions);
    assistantContent = response.content;
    messages.push({ role: 'assistant', content: assistantContent });
  }

  // Concatenate only the text blocks from the final assistant message.
  const responseText = assistantContent
    .filter((block) => block.type === 'text')
    .map((block) => block.text)
    .join('\n');

  return { response: responseText, history: messages };
}
138
-
139
module.exports = { chat, getApiKey };
@@ -1,54 +0,0 @@
1
- const { createJob } = require('../tools/create-job');
2
- const { getJobStatus } = require('../tools/github');
3
-
4
// Tool schemas advertised to Claude. Key order is preserved because the
// array is serialized as-is into the Messages API request.
const toolDefinitions = [
  {
    name: 'create_job',
    description:
      'Create an autonomous job for thepopebot to execute. Use this tool liberally - if the user asks for ANY task to be done, create a job. Jobs can handle code changes, file updates, research tasks, web scraping, data analysis, or anything requiring autonomous work. When the user explicitly asks for a job, ALWAYS use this tool. Returns the job ID and branch name.',
    input_schema: {
      type: 'object',
      properties: {
        job_description: {
          type: 'string',
          description:
            'Detailed job description including context and requirements. Be specific about what needs to be done.',
        },
      },
      required: ['job_description'],
    },
  },
  {
    name: 'get_job_status',
    description:
      'Check status of running jobs. Returns list of active workflow runs with timing and current step. Use when user asks about job progress, running jobs, or job status.',
    input_schema: {
      type: 'object',
      properties: {
        job_id: {
          type: 'string',
          description:
            'Optional: specific job ID to check. If omitted, returns all running jobs.',
        },
      },
      required: [],
    },
  },
];
38
-
39
// Client-side implementations for each tool schema above, keyed by name.
const toolExecutors = {
  // Kick off an autonomous job and report its identifiers.
  create_job: async (input) => {
    const { job_id, branch } = await createJob(input.job_description);
    return { success: true, job_id, branch };
  },

  // Look up workflow status, optionally scoped to a single job ID.
  get_job_status: async (input) => getJobStatus(input.job_id),
};

module.exports = { toolDefinitions, toolExecutors };