@projectservan8n/cnapse 0.4.0 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,378 @@
1
+ /**
2
+ * Provider Selector - Used in TUI for /provider command
3
+ * - Shows provider list with API key status
4
+ * - Prompts for API key if needed
5
+ * - Shows model list with recommendations
6
+ * - For Ollama: shows model availability status
7
+ */
8
+
9
+ import React, { useState, useEffect } from 'react';
10
+ import { Box, Text, useInput } from 'ink';
11
+ import TextInput from 'ink-text-input';
12
+ import Spinner from 'ink-spinner';
13
+ import { getConfig, setProvider, setModel, setApiKey } from '../lib/config.js';
14
+ import { checkOllamaStatus, hasModel, OllamaStatus } from '../lib/ollama.js';
15
+
16
/** Props for the ProviderSelector TUI overlay (opened via /provider). */
interface ProviderSelectorProps {
  /** Dismiss the overlay (Esc anywhere, or automatically after selection). */
  onClose: () => void;
  /** Invoked with the confirmed provider id and model id. */
  onSelect: (provider: string, model: string) => void;
}
20
+
21
/** One selectable model within a provider's catalog. */
interface ModelConfig {
  /** Backend model identifier, passed verbatim to the provider API. */
  id: string;
  /** Human-readable label shown in the list. */
  name: string;
  /** Marked with '*' in the UI and used as the default cursor position. */
  recommended?: boolean;
}
26
+
27
/** Static description of a supported AI provider. */
interface ProviderConfig {
  /** Stable provider id; also used as the key into config.apiKeys. */
  id: 'ollama' | 'openrouter' | 'anthropic' | 'openai';
  /** Display name. */
  name: string;
  /** One-line blurb shown under the highlighted row. */
  description: string;
  /** When true, the wizard inserts an API-key prompt before model selection. */
  needsApiKey: boolean;
  /** Curated model list for this provider. */
  models: ModelConfig[];
}
34
+
35
/**
 * Static catalog of supported providers and their curated model lists.
 * Model ids must match exactly what each backend expects.
 */
const PROVIDERS: ProviderConfig[] = [
  // Local inference via the Ollama daemon — the only key-less provider.
  {
    id: 'ollama',
    name: 'Ollama',
    description: 'Local AI - Free, private',
    needsApiKey: false,
    models: [
      { id: 'qwen2.5:0.5b', name: 'Qwen 2.5 0.5B (fast)', recommended: true },
      { id: 'qwen2.5:1.5b', name: 'Qwen 2.5 1.5B' },
      { id: 'qwen2.5:7b', name: 'Qwen 2.5 7B (quality)' },
      { id: 'llama3.2:1b', name: 'Llama 3.2 1B' },
      { id: 'llama3.2:3b', name: 'Llama 3.2 3B' },
      { id: 'codellama:7b', name: 'Code Llama 7B' },
      { id: 'llava:7b', name: 'LLaVA 7B (vision)' },
    ],
  },
  // Aggregator — model ids are namespaced "vendor/model".
  {
    id: 'openrouter',
    name: 'OpenRouter',
    description: 'Many models, pay-per-use',
    needsApiKey: true,
    models: [
      { id: 'qwen/qwen-2.5-coder-32b-instruct', name: 'Qwen Coder 32B', recommended: true },
      { id: 'anthropic/claude-3.5-sonnet', name: 'Claude 3.5 Sonnet' },
      { id: 'openai/gpt-4o', name: 'GPT-4o' },
      { id: 'openai/gpt-4o-mini', name: 'GPT-4o Mini' },
      { id: 'google/gemini-pro-1.5', name: 'Gemini Pro 1.5' },
    ],
  },
  {
    id: 'anthropic',
    name: 'Anthropic',
    description: 'Claude - Best reasoning',
    needsApiKey: true,
    models: [
      { id: 'claude-3-5-sonnet-20241022', name: 'Claude 3.5 Sonnet', recommended: true },
      { id: 'claude-3-opus-20240229', name: 'Claude 3 Opus' },
      { id: 'claude-3-haiku-20240307', name: 'Claude 3 Haiku' },
    ],
  },
  {
    id: 'openai',
    name: 'OpenAI',
    description: 'GPT models',
    needsApiKey: true,
    models: [
      { id: 'gpt-4o', name: 'GPT-4o', recommended: true },
      { id: 'gpt-4o-mini', name: 'GPT-4o Mini' },
      { id: 'gpt-4-turbo', name: 'GPT-4 Turbo' },
    ],
  },
];
87
+
88
/** Wizard state: provider list → optional API-key prompt → model list → done (or Ollama error). */
type Step = 'provider' | 'apiKey' | 'model' | 'ollamaError' | 'done';
89
+
90
/**
 * Interactive provider/model picker rendered as an Ink overlay.
 *
 * Wizard flow: pick a provider → (when the provider needs one and no key is
 * saved) enter an API key → pick a model → confirmation screen. For Ollama,
 * daemon reachability and per-model availability are checked live and
 * surfaced inline.
 */
export function ProviderSelector({ onClose, onSelect }: ProviderSelectorProps) {
  const config = getConfig();
  const [step, setStep] = useState<Step>('provider');
  // Start the cursor on the currently-configured provider when possible.
  const [providerIndex, setProviderIndex] = useState(() => {
    const idx = PROVIDERS.findIndex(p => p.id === config.provider);
    return idx >= 0 ? idx : 0;
  });
  const [modelIndex, setModelIndex] = useState(0);
  const [apiKeyInput, setApiKeyInput] = useState('');
  const [selectedProvider, setSelectedProvider] = useState<ProviderConfig | null>(null);

  // Ollama status — null until checked; reset when returning to the provider list.
  const [ollamaStatus, setOllamaStatus] = useState<OllamaStatus | null>(null);
  const [checkingOllama, setCheckingOllama] = useState(false);

  // Check Ollama status when selecting Ollama provider
  useEffect(() => {
    if (step === 'model' && selectedProvider?.id === 'ollama' && !ollamaStatus) {
      setCheckingOllama(true);
      checkOllamaStatus().then(status => {
        setOllamaStatus(status);
        setCheckingOllama(false);

        // If Ollama isn't running, show error
        if (!status.running) {
          setStep('ollamaError');
        }
      });
    }
  }, [step, selectedProvider, ollamaStatus]);

  // Keyboard driver for every step except the API-key form (ink-text-input
  // handles its own keystrokes; Esc still falls through to here).
  useInput((input, key) => {
    if (key.escape) {
      onClose();
      return;
    }

    if (step === 'provider') {
      // Up/down wrap around the list ends.
      if (key.upArrow) {
        setProviderIndex(prev => (prev > 0 ? prev - 1 : PROVIDERS.length - 1));
      } else if (key.downArrow) {
        setProviderIndex(prev => (prev < PROVIDERS.length - 1 ? prev + 1 : 0));
      } else if (key.return) {
        const provider = PROVIDERS[providerIndex]!;
        setSelectedProvider(provider);

        // Find recommended or current model
        const currentIdx = provider.models.findIndex(m => m.id === config.model);
        const recommendedIdx = provider.models.findIndex(m => m.recommended);
        setModelIndex(currentIdx >= 0 ? currentIdx : (recommendedIdx >= 0 ? recommendedIdx : 0));

        // Check if we need API key
        if (provider.needsApiKey) {
          const apiKeyProvider = provider.id as 'openrouter' | 'anthropic' | 'openai';
          if (!config.apiKeys[apiKeyProvider]) {
            setStep('apiKey');
          } else {
            setStep('model');
          }
        } else {
          setStep('model');
        }
      }
    } else if (step === 'model' && selectedProvider) {
      if (key.upArrow) {
        setModelIndex(prev => (prev > 0 ? prev - 1 : selectedProvider.models.length - 1));
      } else if (key.downArrow) {
        setModelIndex(prev => (prev < selectedProvider.models.length - 1 ? prev + 1 : 0));
      } else if (key.return) {
        const model = selectedProvider.models[modelIndex]!;

        // For Ollama, warn if model not available
        // NOTE(review): this branch is intentionally empty — selection of a
        // not-installed model is allowed; the UI shows a "pull" hint instead.
        if (selectedProvider.id === 'ollama' && ollamaStatus && !hasModel(ollamaStatus, model.id)) {
          // Still allow selection, but they'll need to pull it
        }

        // Persist the choice, notify the parent, then auto-dismiss.
        setProvider(selectedProvider.id);
        setModel(model.id);
        setStep('done');
        onSelect(selectedProvider.id, model.id);

        // Brief delay to show confirmation
        // NOTE(review): timer is never cleared; if the overlay unmounts
        // within 1.5s, onClose still fires — confirm the parent tolerates it.
        setTimeout(() => onClose(), 1500);
      } else if (key.leftArrow || input === 'b') {
        setStep('provider');
        setOllamaStatus(null); // Reset Ollama status
      }
    } else if (step === 'ollamaError') {
      if (key.return || input === 'b') {
        setStep('provider');
        setOllamaStatus(null);
      }
    }
  });

  // Save the trimmed key and advance to model selection; blank input is ignored.
  const handleApiKeySubmit = (value: string) => {
    if (value.trim() && selectedProvider) {
      setApiKey(selectedProvider.id as 'openrouter' | 'anthropic' | 'openai', value.trim());
      setStep('model');
    }
  };

  // Provider selection
  if (step === 'provider') {
    return (
      <Box flexDirection="column" borderStyle="round" borderColor="cyan" padding={1} width={60}>
        <Box marginBottom={1}>
          <Text bold color="cyan">Select Provider</Text>
        </Box>
        <Box marginBottom={1}>
          <Text color="gray" dimColor>Arrows to navigate, Enter to select</Text>
        </Box>

        {PROVIDERS.map((provider, index) => {
          const isSelected = index === providerIndex;
          const isCurrent = provider.id === config.provider;
          // Ollama never needs a key, so it always counts as "has key".
          const hasKey = provider.needsApiKey && provider.id !== 'ollama'
            ? !!config.apiKeys[provider.id as 'openrouter' | 'anthropic' | 'openai']
            : true;

          return (
            <Box key={provider.id} flexDirection="column">
              <Text color={isSelected ? 'cyan' : 'white'}>
                {isSelected ? '❯ ' : ' '}
                {provider.name}
                {isCurrent && <Text color="green"> (current)</Text>}
                {provider.needsApiKey && !hasKey && <Text color="red"> (needs key)</Text>}
                {provider.needsApiKey && hasKey && !isCurrent && <Text color="yellow"> (key saved)</Text>}
              </Text>
              {isSelected && (
                <Text color="gray"> {provider.description}</Text>
              )}
            </Box>
          );
        })}

        <Box marginTop={1}>
          <Text color="gray" dimColor>Press Esc to cancel</Text>
        </Box>
      </Box>
    );
  }

  // API Key input
  if (step === 'apiKey' && selectedProvider) {
    return (
      <Box flexDirection="column" borderStyle="round" borderColor="cyan" padding={1} width={60}>
        <Box marginBottom={1}>
          <Text bold color="cyan">Enter API Key</Text>
        </Box>
        <Text><Text color="green">✓</Text> Provider: {selectedProvider.name}</Text>
        <Box marginTop={1} flexDirection="column">
          {/* Provider-specific hint for where to obtain a key. */}
          <Text color="gray" dimColor>
            {selectedProvider.id === 'openrouter' && 'Get key: openrouter.ai/keys'}
            {selectedProvider.id === 'anthropic' && 'Get key: console.anthropic.com'}
            {selectedProvider.id === 'openai' && 'Get key: platform.openai.com/api-keys'}
          </Text>
          <Box marginTop={1}>
            <Text color="cyan">❯ </Text>
            <TextInput
              value={apiKeyInput}
              onChange={setApiKeyInput}
              onSubmit={handleApiKeySubmit}
              mask="*"
            />
          </Box>
        </Box>
        <Box marginTop={1}>
          <Text color="gray" dimColor>Press Esc to cancel</Text>
        </Box>
      </Box>
    );
  }

  // Ollama error
  if (step === 'ollamaError' && ollamaStatus) {
    return (
      <Box flexDirection="column" borderStyle="round" borderColor="red" padding={1} width={60}>
        <Box marginBottom={1}>
          <Text bold color="red">Ollama Not Available</Text>
        </Box>
        <Text color="red">{ollamaStatus.error}</Text>
        <Box marginTop={1} flexDirection="column">
          {/* Remediation steps differ: not installed vs. installed-but-stopped. */}
          {!ollamaStatus.installed && (
            <>
              <Text>1. Install Ollama from https://ollama.ai</Text>
              <Text>2. Run: ollama pull qwen2.5:0.5b</Text>
              <Text>3. Try again</Text>
            </>
          )}
          {ollamaStatus.installed && !ollamaStatus.running && (
            <>
              <Text>1. Start Ollama: ollama serve</Text>
              <Text>2. Or run any model: ollama run qwen2.5:0.5b</Text>
              <Text>3. Try again</Text>
            </>
          )}
        </Box>
        <Box marginTop={1}>
          <Text color="gray" dimColor>Press Enter or B to go back</Text>
        </Box>
      </Box>
    );
  }

  // Model selection
  if (step === 'model' && selectedProvider) {
    const isOllama = selectedProvider.id === 'ollama';

    return (
      <Box flexDirection="column" borderStyle="round" borderColor="cyan" padding={1} width={60}>
        <Box marginBottom={1}>
          <Text bold color="cyan">Select Model</Text>
        </Box>
        <Text><Text color="green">✓</Text> Provider: {selectedProvider.name}</Text>

        {isOllama && checkingOllama && (
          <Box marginY={1}>
            <Text color="cyan"><Spinner type="dots" /></Text>
            <Text> Checking Ollama status...</Text>
          </Box>
        )}

        {isOllama && ollamaStatus && ollamaStatus.running && (
          <Text color="green">✓ Ollama running ({ollamaStatus.models.length} models installed)</Text>
        )}

        <Box marginTop={1} marginBottom={1}>
          <Text color="gray" dimColor>Arrows to navigate, Enter to select, B to go back</Text>
        </Box>

        {selectedProvider.models.map((model, index) => {
          const isSelected = index === modelIndex;
          const isCurrent = model.id === config.model && selectedProvider.id === config.provider;

          // Check if Ollama model is available
          let modelStatus = '';
          if (isOllama && ollamaStatus) {
            const available = hasModel(ollamaStatus, model.id);
            modelStatus = available ? ' (installed)' : ' (not installed)';
          }

          return (
            <Text key={model.id} color={isSelected ? 'cyan' : 'white'}>
              {isSelected ? '❯ ' : ' '}
              {model.name}
              {model.recommended && <Text color="yellow"> *</Text>}
              {isCurrent && <Text color="green"> (current)</Text>}
              {isOllama && ollamaStatus && (
                hasModel(ollamaStatus, model.id)
                  ? <Text color="green">{modelStatus}</Text>
                  : <Text color="red">{modelStatus}</Text>
              )}
            </Text>
          );
        })}

        {isOllama && (
          <Box marginTop={1} flexDirection="column">
            <Text color="gray" dimColor>* = Recommended</Text>
            {/* Show the pull command when the highlighted model is missing. */}
            {ollamaStatus && !hasModel(ollamaStatus, selectedProvider.models[modelIndex]?.id || '') && (
              <Text color="yellow">Run: ollama pull {selectedProvider.models[modelIndex]?.id}</Text>
            )}
          </Box>
        )}

        <Box marginTop={1}>
          <Text color="gray" dimColor>Press Esc to cancel</Text>
        </Box>
      </Box>
    );
  }

  // Done
  if (step === 'done' && selectedProvider) {
    return (
      <Box flexDirection="column" borderStyle="round" borderColor="green" padding={1} width={60}>
        <Text color="green" bold>Configuration Updated!</Text>
        <Text><Text color="green">✓</Text> Provider: {selectedProvider.name}</Text>
        <Text><Text color="green">✓</Text> Model: {selectedProvider.models[modelIndex]?.name}</Text>
        {selectedProvider.id === 'ollama' && ollamaStatus && !hasModel(ollamaStatus, selectedProvider.models[modelIndex]?.id || '') && (
          <Text color="yellow">Remember to run: ollama pull {selectedProvider.models[modelIndex]?.id}</Text>
        )}
      </Box>
    );
  }

  // Unreached in normal flow (every Step has a branch above), but keeps the
  // component total for the type checker.
  return null;
}
@@ -0,0 +1,15 @@
1
/**
 * Custom hooks for C-napse
 *
 * Barrel module: re-exports each hook together with its public types so
 * consumers can import from a single path.
 */

// AI conversation state (transcript, processing flag, errors).
export { useChat } from './useChat.js';
export type { ChatMessage, UseChatResult } from './useChat.js';

// Screen-vision helpers.
export { useVision } from './useVision.js';
export type { UseVisionResult } from './useVision.js';

// Remote control via Telegram bot.
export { useTelegram } from './useTelegram.js';
export type { UseTelegramResult } from './useTelegram.js';

// Multi-step task automation.
export { useTasks } from './useTasks.js';
export type { UseTasksResult } from './useTasks.js';
@@ -0,0 +1,149 @@
1
+ /**
2
+ * Chat Hook - AI conversation management
3
+ */
4
+
5
+ import { useState, useCallback, useRef, useEffect } from 'react';
6
+ import { chat, Message } from '../lib/api.js';
7
+ import { getScreenDescription } from '../lib/screen.js';
8
+
9
/** One entry in the chat transcript. */
export interface ChatMessage {
  /** Unique id (stringified timestamp; the welcome banner uses '0'). */
  id: string;
  /** Author of the entry; 'system' entries are informational banners. */
  role: 'user' | 'assistant' | 'system';
  /** Message body text. */
  content: string;
  /** When the entry was created. */
  timestamp: Date;
  /** True while an assistant placeholder is awaiting the API response. */
  isStreaming?: boolean;
}
16
+
17
/** Public surface returned by useChat. */
export interface UseChatResult {
  /** Full transcript, including the welcome banner and system notices. */
  messages: ChatMessage[];
  /** True while a sendMessage round-trip is in flight (blocks re-entry). */
  isProcessing: boolean;
  /** Last error message, or null; cleared on the next send / clear. */
  error: string | null;
  /** Send user text to the AI; resolves when the reply (or error) lands. */
  sendMessage: (content: string) => Promise<void>;
  /** Append an informational system-role entry to the transcript. */
  addSystemMessage: (content: string) => void;
  /** Reset the transcript to just the welcome banner and clear the error. */
  clearMessages: () => void;
}
25
+
26
// Initial system banner shown on startup and restored by clearMessages().
// Note: timestamp is fixed at module-load time, not per session.
const WELCOME_MESSAGE: ChatMessage = {
  id: '0',
  role: 'system',
  content: 'Welcome to C-napse! Type your message and press Enter.\n\nShortcuts: Ctrl+H for help, Ctrl+P for provider',
  timestamp: new Date(),
};
32
+
33
+ export function useChat(screenWatch: boolean = false): UseChatResult {
34
+ const [messages, setMessages] = useState<ChatMessage[]>([WELCOME_MESSAGE]);
35
+ const [isProcessing, setIsProcessing] = useState(false);
36
+ const [error, setError] = useState<string | null>(null);
37
+ const screenContextRef = useRef<string | null>(null);
38
+
39
+ // Screen watching effect
40
+ useEffect(() => {
41
+ if (!screenWatch) {
42
+ screenContextRef.current = null;
43
+ return;
44
+ }
45
+
46
+ const checkScreen = async () => {
47
+ const desc = await getScreenDescription();
48
+ if (desc) {
49
+ screenContextRef.current = desc;
50
+ }
51
+ };
52
+
53
+ checkScreen();
54
+ const interval = setInterval(checkScreen, 5000);
55
+ return () => clearInterval(interval);
56
+ }, [screenWatch]);
57
+
58
+ const addSystemMessage = useCallback((content: string) => {
59
+ setMessages(prev => [
60
+ ...prev,
61
+ {
62
+ id: Date.now().toString(),
63
+ role: 'system',
64
+ content,
65
+ timestamp: new Date(),
66
+ },
67
+ ]);
68
+ }, []);
69
+
70
+ const sendMessage = useCallback(async (content: string) => {
71
+ if (!content.trim() || isProcessing) return;
72
+
73
+ setError(null);
74
+
75
+ // Add user message
76
+ const userMsg: ChatMessage = {
77
+ id: Date.now().toString(),
78
+ role: 'user',
79
+ content,
80
+ timestamp: new Date(),
81
+ };
82
+
83
+ // Add assistant placeholder
84
+ const assistantId = (Date.now() + 1).toString();
85
+ const assistantMsg: ChatMessage = {
86
+ id: assistantId,
87
+ role: 'assistant',
88
+ content: '',
89
+ timestamp: new Date(),
90
+ isStreaming: true,
91
+ };
92
+
93
+ setMessages(prev => [...prev, userMsg, assistantMsg]);
94
+ setIsProcessing(true);
95
+
96
+ try {
97
+ // Build message history
98
+ const apiMessages: Message[] = messages
99
+ .filter(m => m.role === 'user' || m.role === 'assistant')
100
+ .slice(-10)
101
+ .map(m => ({ role: m.role as 'user' | 'assistant', content: m.content }));
102
+
103
+ // Add screen context if watching
104
+ let finalContent = content;
105
+ if (screenWatch && screenContextRef.current) {
106
+ finalContent = `[Screen context: ${screenContextRef.current}]\n\n${content}`;
107
+ }
108
+
109
+ apiMessages.push({ role: 'user', content: finalContent });
110
+
111
+ const response = await chat(apiMessages);
112
+
113
+ // Update assistant message
114
+ setMessages(prev =>
115
+ prev.map(m =>
116
+ m.id === assistantId
117
+ ? { ...m, content: response.content || '(no response)', isStreaming: false }
118
+ : m
119
+ )
120
+ );
121
+ } catch (err) {
122
+ const errorMsg = err instanceof Error ? err.message : 'Unknown error';
123
+ setError(errorMsg);
124
+ setMessages(prev =>
125
+ prev.map(m =>
126
+ m.id === assistantId
127
+ ? { ...m, content: `Error: ${errorMsg}`, isStreaming: false }
128
+ : m
129
+ )
130
+ );
131
+ } finally {
132
+ setIsProcessing(false);
133
+ }
134
+ }, [messages, isProcessing, screenWatch]);
135
+
136
+ const clearMessages = useCallback(() => {
137
+ setMessages([WELCOME_MESSAGE]);
138
+ setError(null);
139
+ }, []);
140
+
141
+ return {
142
+ messages,
143
+ isProcessing,
144
+ error,
145
+ sendMessage,
146
+ addSystemMessage,
147
+ clearMessages,
148
+ };
149
+ }
@@ -0,0 +1,63 @@
1
+ /**
2
+ * Tasks Hook - Multi-step task automation
3
+ */
4
+
5
+ import { useState, useCallback } from 'react';
6
+ import { parseTask, executeTask, formatTask, getTaskMemoryStats, clearTaskMemory, Task, TaskStep } from '../lib/tasks.js';
7
+
8
/** Public surface returned by useTasks. */
export interface UseTasksResult {
  /** True while a task is being parsed or executed. */
  isRunning: boolean;
  /** The task currently (or most recently) run, updated per step. */
  currentTask: Task | null;
  /** Step in progress; null when idle or finished. */
  currentStep: TaskStep | null;
  /** Last failure message, or null; cleared at the start of each run. */
  error: string | null;
  /** Parse and execute a natural-language task; rejects on failure. */
  run: (description: string) => Promise<Task>;
  /** Render a task as display text (delegates to lib/tasks). */
  format: (task: Task) => string;
  /** Snapshot of the learned task-pattern memory. */
  getMemoryStats: () => { patternCount: number; totalUses: number; topPatterns: string[] };
  /** Erase the learned task-pattern memory. */
  clearMemory: () => void;
}
18
+
19
+ export function useTasks(onProgress?: (task: Task, step: TaskStep) => void): UseTasksResult {
20
+ const [isRunning, setIsRunning] = useState(false);
21
+ const [currentTask, setCurrentTask] = useState<Task | null>(null);
22
+ const [currentStep, setCurrentStep] = useState<TaskStep | null>(null);
23
+ const [error, setError] = useState<string | null>(null);
24
+
25
+ const run = useCallback(async (description: string): Promise<Task> => {
26
+ setIsRunning(true);
27
+ setError(null);
28
+
29
+ try {
30
+ // Parse the task
31
+ const task = await parseTask(description);
32
+ setCurrentTask(task);
33
+
34
+ // Execute with progress callback
35
+ const result = await executeTask(task, (updatedTask, step) => {
36
+ setCurrentTask({ ...updatedTask });
37
+ setCurrentStep(step);
38
+ onProgress?.(updatedTask, step);
39
+ });
40
+
41
+ setCurrentTask(result);
42
+ return result;
43
+ } catch (err) {
44
+ const errorMsg = err instanceof Error ? err.message : 'Task failed';
45
+ setError(errorMsg);
46
+ throw err;
47
+ } finally {
48
+ setIsRunning(false);
49
+ setCurrentStep(null);
50
+ }
51
+ }, [onProgress]);
52
+
53
+ return {
54
+ isRunning,
55
+ currentTask,
56
+ currentStep,
57
+ error,
58
+ run,
59
+ format: formatTask,
60
+ getMemoryStats: getTaskMemoryStats,
61
+ clearMemory: clearTaskMemory,
62
+ };
63
+ }
@@ -0,0 +1,91 @@
1
+ /**
2
+ * Telegram Hook - Remote PC control via Telegram bot
3
+ */
4
+
5
+ import { useState, useCallback, useEffect, useRef } from 'react';
6
+ import { getTelegramBot, TelegramMessage } from '../services/telegram.js';
7
+
8
/** Public surface returned by useTelegram. */
export interface UseTelegramResult {
  /** True while the bot is running. */
  isEnabled: boolean;
  /** True during start() while the bot is connecting. */
  isStarting: boolean;
  /** Last start/stop/runtime error message, or null. */
  error: string | null;
  /** Most recent inbound Telegram message, or null before the first. */
  lastMessage: TelegramMessage | null;
  /** Start if stopped, stop if started. */
  toggle: () => Promise<void>;
  /** Start the bot; no-op when already enabled. Rejects on failure. */
  start: () => Promise<void>;
  /** Stop the bot; no-op when not enabled. Rejects on failure. */
  stop: () => Promise<void>;
}
17
+
18
+ export function useTelegram(onMessage?: (msg: TelegramMessage) => void): UseTelegramResult {
19
+ const [isEnabled, setIsEnabled] = useState(false);
20
+ const [isStarting, setIsStarting] = useState(false);
21
+ const [error, setError] = useState<string | null>(null);
22
+ const [lastMessage, setLastMessage] = useState<TelegramMessage | null>(null);
23
+ const onMessageRef = useRef(onMessage);
24
+
25
+ // Keep callback ref updated
26
+ useEffect(() => {
27
+ onMessageRef.current = onMessage;
28
+ }, [onMessage]);
29
+
30
+ const start = useCallback(async () => {
31
+ if (isEnabled) return;
32
+
33
+ setIsStarting(true);
34
+ setError(null);
35
+
36
+ try {
37
+ const bot = getTelegramBot();
38
+
39
+ // Setup event handlers
40
+ bot.on('message', (msg: TelegramMessage) => {
41
+ setLastMessage(msg);
42
+ onMessageRef.current?.(msg);
43
+ });
44
+
45
+ bot.on('error', (err: Error) => {
46
+ setError(err.message);
47
+ });
48
+
49
+ await bot.start();
50
+ setIsEnabled(true);
51
+ } catch (err) {
52
+ const errorMsg = err instanceof Error ? err.message : 'Failed to start Telegram bot';
53
+ setError(errorMsg);
54
+ throw err;
55
+ } finally {
56
+ setIsStarting(false);
57
+ }
58
+ }, [isEnabled]);
59
+
60
+ const stop = useCallback(async () => {
61
+ if (!isEnabled) return;
62
+
63
+ try {
64
+ const bot = getTelegramBot();
65
+ await bot.stop();
66
+ setIsEnabled(false);
67
+ } catch (err) {
68
+ const errorMsg = err instanceof Error ? err.message : 'Failed to stop Telegram bot';
69
+ setError(errorMsg);
70
+ throw err;
71
+ }
72
+ }, [isEnabled]);
73
+
74
+ const toggle = useCallback(async () => {
75
+ if (isEnabled) {
76
+ await stop();
77
+ } else {
78
+ await start();
79
+ }
80
+ }, [isEnabled, start, stop]);
81
+
82
+ return {
83
+ isEnabled,
84
+ isStarting,
85
+ error,
86
+ lastMessage,
87
+ toggle,
88
+ start,
89
+ stop,
90
+ };
91
+ }