apexbot 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,315 @@
+ "use strict";
+ /**
+ * AI Agent Manager - handles AI model interactions
+ * Supports: Ollama (local), Claude (Anthropic), GPT (OpenAI), Gemini (Google)
+ * Default: Ollama for 100% free, privacy-focused operation
+ */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.AgentManager = void 0;
+ const generative_ai_1 = require("@google/generative-ai");
+ class AgentManager {
+ config = null;
+ googleClient = null;
+ defaultSystemPrompt = `You are ApexBot, a helpful AI assistant running locally via Ollama. You are 100% free, private, and self-hosted.
+
+ Your goal is to act as a fully autonomous agent, not just a chatbot. Follow these principles:
+
+ 1. **EXPLORE:** Analyze code and context thoroughly before responding.
+ 2. **PLAN:** Outline a clear plan before implementing changes.
+ 3. **ACT:** Provide complete, working code without placeholders like "// ... rest of code".
+ 4. **VERIFY:** Test your suggestions mentally and point out edge cases.
+
+ CODING STANDARDS:
+ - TypeScript / Node.js with strict typing
+ - Modular, clean, modern ES6+ syntax
+ - Explicit error handling
+ - Safety first for financial/crypto logic (arbitrage, Kelly criterion)
+
+ INTERACTION STYLE:
+ - Be direct and actionable
+ - Proactively identify issues and offer fixes
+ - Keep responses concise but complete
+ - Start with context analysis when working with code
+
+ You are running locally on the user's machine. No data leaves their computer. You are free, private, and open-source.`;
+ constructor(config) {
+ if (config) {
+ this.configure(config);
+ }
+ }
+ configure(config) {
+ this.config = {
+ maxTokens: 4096,
+ temperature: 0.7,
+ systemPrompt: this.defaultSystemPrompt,
+ ...config,
+ };
+ // Initialize client based on provider
+ if (config.provider === 'google') {
+ // apiKey is optional in AgentConfig (Ollama doesn't need it),
+ // so only initialize Google client if we have an API key (from config or env).
+ const gkey = config.apiKey || process.env.GOOGLE_API_KEY;
+ if (typeof gkey === 'string' && gkey.length > 0) {
+ this.googleClient = new generative_ai_1.GoogleGenerativeAI(gkey);
+ }
+ else {
+ console.warn('[Agent] Google provider selected but no API key provided — Gemini will be unavailable until configured.');
+ }
+ }
+ // Note: Kimi provider uses a generic HTTP endpoint (apiUrl) and apiKey
+ if (config.provider === 'kimi') {
+ // nothing to initialize here; processWithKimi will use fetch + config.apiUrl
+ }
+ console.log(`[Agent] Configured with ${config.provider} (${config.model || 'default model'})`);
+ }
+ async process(session, message) {
+ if (!this.config) {
+ console.warn('[Agent] Not configured');
+ return { text: '⚠️ Agent not configured. Please set up an AI provider.' };
+ }
+ const userText = message.text || '';
+ // Handle slash commands
+ if (userText.startsWith('/')) {
+ return this.handleCommand(session, userText);
+ }
+ // Build conversation history
+ const history = this.buildHistory(session, userText);
+ try {
+ let response;
+ switch (this.config.provider) {
+ case 'ollama':
+ response = await this.processWithOllama(history);
+ break;
+ case 'google':
+ response = await this.processWithGemini(history);
+ break;
+ case 'anthropic':
+ response = await this.processWithClaude(history);
+ break;
+ case 'openai':
+ response = await this.processWithOpenAI(history);
+ break;
+ case 'kimi':
+ response = await this.processWithKimi(history);
+ break;
+ default:
+ response = { text: '❌ Unknown AI provider' };
+ }
+ // Save to session
+ session.messages.push({ role: 'user', content: userText, timestamp: Date.now() });
+ session.messages.push({ role: 'assistant', content: response.text, timestamp: Date.now() });
+ session.messageCount += 2;
+ session.lastActivity = Date.now();
+ return response;
+ }
+ catch (e) {
+ console.error('[Agent] Error:', e);
+ // Escape special characters for Telegram
+ const errorMsg = (e.message || 'Unknown error').slice(0, 200).replace(/[_*\[\]()~`>#+=|{}.!-]/g, '\\$&');
+ return { text: `Error: ${errorMsg}` };
+ }
+ }
+ async processWithKimi(history) {
+ // Generic Kimi 2.5 integration wrapper. This implementation is intentionally
+ // generic: it will POST to a user-provided `apiUrl` (in config or env) and
+ // expects a JSON response with a `text` field. Adjust to Kimi's official SDK
+ // or API spec if available.
+ const cfg = this.config;
+ if (!cfg)
+ throw new Error('Agent not configured');
+ const apiKey = cfg.apiKey || process.env.KIMI_API_KEY;
+ const apiUrl = cfg.apiUrl || process.env.KIMI_API_URL;
+ if (!apiKey || !apiUrl) {
+ return { text: '⚠️ Kimi provider not configured. Set config.apiUrl and apiKey (or KIMI_API_URL/KIMI_API_KEY).' };
+ }
+ // Build prompt from history (system prompt + conversation)
+ const systemPrompt = history.find(m => m.role === 'system')?.content || '';
+ const userAndAssistant = history
+ .filter(m => m.role !== 'system')
+ .map(m => `${m.role.toUpperCase()}: ${m.content}`)
+ .join('\n\n');
+ const payload = {
+ model: cfg.model || 'kimi-2.5',
+ prompt: `${systemPrompt}\n\n${userAndAssistant}`,
+ max_tokens: cfg.maxTokens || 1024,
+ temperature: cfg.temperature || 0.2,
+ };
+ // Use global fetch (Node 18+) to call the endpoint
+ const res = await fetch(apiUrl, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ Authorization: `Bearer ${apiKey}`,
+ },
+ body: JSON.stringify(payload),
+ });
+ if (!res.ok) {
+ const body = await res.text().catch(() => '');
+ throw new Error(`Kimi API error: ${res.status} ${res.statusText} ${body}`);
+ }
+ const data = await res.json().catch(() => ({}));
+ const text = data.text || data.output || (data.choices && data.choices[0]?.text) || '';
+ return {
+ text: String(text || '').trim() || '⚠️ Kimi returned an empty response.',
+ };
+ }
+ async processWithOllama(history) {
+ const cfg = this.config;
+ if (!cfg)
+ throw new Error('Agent not configured');
+ const apiUrl = cfg.apiUrl || 'http://localhost:11434';
+ const model = cfg.model || 'llama3.2';
+ const temperature = cfg.temperature ?? 0.7;
+ // Build messages array for Ollama chat API
+ const messages = history.map(m => ({
+ role: m.role,
+ content: m.content,
+ }));
+ try {
+ const res = await fetch(`${apiUrl}/api/chat`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ model,
+ messages,
+ stream: false,
+ options: {
+ temperature,
+ num_predict: cfg.maxTokens || 2048,
+ },
+ }),
+ });
+ if (!res.ok) {
+ const err = await res.text().catch(() => res.statusText);
+ throw new Error(`Ollama error: ${err}`);
+ }
+ const data = await res.json();
+ const text = data.message?.content || '';
+ return {
+ text: String(text).trim() || '🤔 No response from model',
+ usage: {
+ inputTokens: data.prompt_eval_count || 0,
+ outputTokens: data.eval_count || 0,
+ },
+ };
+ }
+ catch (error) {
+ if (error.code === 'ECONNREFUSED' || error.message?.includes('fetch failed')) {
+ return {
+ text: `⚠️ Ollama not running.\n\nPlease:\n1. Install Ollama: https://ollama.com\n2. Pull a model: ollama pull llama3.2\n3. Start Ollama: ollama serve`,
+ };
+ }
+ throw error;
+ }
+ }
+ buildHistory(session, currentMessage) {
+ const history = [];
+ // System prompt
+ const systemPrompt = session.systemPrompt || this.config?.systemPrompt || this.defaultSystemPrompt;
+ history.push({ role: 'system', content: systemPrompt, timestamp: 0 });
+ // Previous messages
+ for (const msg of session.messages) {
+ if (msg.role !== 'system') {
+ history.push(msg);
+ }
+ }
+ // Current message
+ history.push({ role: 'user', content: currentMessage, timestamp: Date.now() });
+ return history;
+ }
+ async processWithGemini(history) {
+ if (!this.googleClient) {
+ throw new Error('Google client not initialized');
+ }
+ // Get system prompt and prepend to first user message
+ const systemPrompt = history.find(m => m.role === 'system')?.content || '';
+ const conversationHistory = history.filter(m => m.role !== 'system');
+ // Build contents array for generateContent
+ const contents = conversationHistory.map((m, idx) => ({
+ role: m.role === 'user' ? 'user' : 'model',
+ parts: [{
+ text: idx === 0 && systemPrompt
+ ? `[System: ${systemPrompt}]\n\n${m.content}`
+ : m.content
+ }],
+ }));
+ const model = this.googleClient.getGenerativeModel({
+ model: this.config?.model || 'gemini-2.0-flash',
+ });
+ const result = await model.generateContent({
+ contents: contents,
+ generationConfig: {
+ maxOutputTokens: this.config?.maxTokens,
+ temperature: this.config?.temperature,
+ },
+ });
+ const response = result.response;
+ return {
+ text: response.text(),
+ usage: {
+ inputTokens: response.usageMetadata?.promptTokenCount || 0,
+ outputTokens: response.usageMetadata?.candidatesTokenCount || 0,
+ },
+ };
+ }
+ async processWithClaude(history) {
+ // TODO: Implement Anthropic Claude API
+ // Requires @anthropic-ai/sdk
+ return { text: '⚠️ Claude integration not yet implemented. Use Google/Gemini for now.' };
+ }
+ async processWithOpenAI(history) {
+ // TODO: Implement OpenAI API
+ // Requires openai package
+ return { text: '⚠️ OpenAI integration not yet implemented. Use Google/Gemini for now.' };
+ }
+ handleCommand(session, command) {
+ const [cmd, ...args] = command.slice(1).split(' ');
+ switch (cmd.toLowerCase()) {
+ case 'status':
+ return {
+ text: `📊 *Session Status*\n` +
+ `Session ID: \`${session.id}\`\n` +
+ `Messages: ${session.messageCount}\n` +
+ `Model: ${session.model || this.config?.model || 'default'}\n` +
+ `Provider: ${this.config?.provider || 'not configured'}\n` +
+ `Created: ${new Date(session.createdAt).toLocaleString()}\n` +
+ `Last activity: ${new Date(session.lastActivity).toLocaleString()}`,
+ };
+ case 'new':
+ case 'reset':
+ // Reset session (keep only system prompt)
+ const systemMsgs = session.messages.filter(m => m.role === 'system');
+ session.messages = systemMsgs;
+ session.messageCount = 0;
+ return { text: '🔄 Conversation reset. Let\'s start fresh!' };
+ case 'help':
+ return {
+ text: `📖 *ApexBot Commands*\n\n` +
+ `/status - Show session info\n` +
+ `/new - Reset conversation\n` +
+ `/model <name> - Change AI model\n` +
+ `/help - Show this help\n\n` +
+ `Just send a message to chat with me!`,
+ };
+ case 'model':
+ if (args.length > 0) {
+ session.model = args.join(' ');
+ return { text: `✅ Model changed to: ${session.model}` };
+ }
+ return { text: `Current model: ${session.model || this.config?.model || 'default'}` };
+ default:
+ return { text: `❓ Unknown command: /${cmd}\nType /help for available commands.` };
+ }
+ }
+ getStatus() {
+ return {
+ provider: this.config?.provider || 'none',
+ model: this.config?.model || 'none',
+ configured: this.config !== null,
+ };
+ }
+ }
+ exports.AgentManager = AgentManager;
+ exports.default = AgentManager;
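
The compiled AgentManager above accepts a config with provider, model, apiUrl, apiKey, maxTokens, temperature, and systemPrompt, and falls back to a local Ollama endpoint when none is given. A minimal usage sketch follows; the import path is illustrative and the session shape is inferred from the fields the class reads, not a published type:

import AgentManager from './agent'; // illustrative path

const agent = new AgentManager({
  provider: 'ollama',
  model: 'llama3.2',
  apiUrl: 'http://localhost:11434',
});

// Minimal session shape inferred from what process() reads and writes.
const session = {
  id: 'demo',
  messages: [] as { role: string; content: string; timestamp: number }[],
  messageCount: 0,
  createdAt: Date.now(),
  lastActivity: Date.now(),
};

const reply = await agent.process(session, { text: 'Hello!' });
console.log(reply.text);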
@@ -0,0 +1,24 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.Backtester = void 0;
+ class Backtester {
+ engine;
+ ticks;
+ constructor(engine, ticks) {
+ this.engine = engine;
+ this.ticks = ticks;
+ }
+ run() {
+ // Naive approach: feed ticks sequentially; when the engine places an order we simulate an immediate fill.
+ for (const t of this.ticks) {
+ // feed market update
+ // @ts-ignore - use same event signature
+ (require('../eventBus').emit)('market:update', { marketId: t.marketId, price: t.price, probModel: t.price });
+ // In a realistic backtest we'd capture orders via event listeners and simulate fills. Simple approach:
+ // if EV is positive and an order is placed, pnl = (outcome - price) * (stake / price).
+ // For simplicity, a stake S purchased at price p buys shares = S / p; the payoff if outcome = 1 is shares * 1.
+ }
+ }
+ }
+ exports.Backtester = Backtester;
+ exports.default = Backtester;
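
The Backtester above only replays ticks; fill simulation is left as a comment. As a worked illustration of the payoff arithmetic those comments describe (a hedged sketch only; `SimOrder` and `simulateFillPnl` are hypothetical names, not part of the package): a stake S bought at price p yields S / p shares, so pnl = (outcome - price) * (stake / price).

// Illustrative helper, not part of the package.
interface SimOrder {
  stake: number;   // currency amount spent
  price: number;   // price paid per share, in (0, 1)
}

// Each share pays 1 if the market resolves YES (outcome = 1) and 0 otherwise, so:
//   pnl = outcome * (stake / price) - stake = (outcome - price) * (stake / price)
function simulateFillPnl(order: SimOrder, outcome: 0 | 1): number {
  const shares = order.stake / order.price;
  return outcome * shares - order.stake;
}

// Example: a 10-unit stake at price 0.40 buys 25 shares.
console.log(simulateFillPnl({ stake: 10, price: 0.4 }, 1)); // 15
console.log(simulateFillPnl({ stake: 10, price: 0.4 }, 0)); // -10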
@@ -0,0 +1,72 @@
+ "use strict";
+ /**
+ * Channel Manager - handles all messaging channels
+ */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ChannelManager = void 0;
+ class ChannelManager {
+ channels = new Map();
+ gateway;
+ constructor(gateway) {
+ this.gateway = gateway;
+ }
+ register(channel) {
+ this.channels.set(channel.name, channel);
+ console.log(`[Channels] Registered: ${channel.name}`);
+ }
+ async connect(name) {
+ const channel = this.channels.get(name);
+ if (!channel) {
+ throw new Error(`Channel not found: ${name}`);
+ }
+ await channel.connect();
+ }
+ async connectAll() {
+ const promises = Array.from(this.channels.values()).map(async (ch) => {
+ try {
+ await ch.connect();
+ }
+ catch (e) {
+ console.error(`[Channels] Failed to connect ${ch.name}:`, e);
+ }
+ });
+ await Promise.all(promises);
+ }
+ async disconnect(name) {
+ const channel = this.channels.get(name);
+ if (channel) {
+ await channel.disconnect();
+ }
+ }
+ async disconnectAll() {
+ const promises = Array.from(this.channels.values()).map(ch => ch.disconnect());
+ await Promise.all(promises);
+ }
+ async send(channelName, to, text, replyTo) {
+ const channel = this.channels.get(channelName);
+ if (!channel) {
+ // For webchat, broadcast via gateway
+ if (channelName === 'webchat') {
+ this.gateway.broadcast({ type: 'response', text, to });
+ return;
+ }
+ throw new Error(`Channel not found: ${channelName}`);
+ }
+ await channel.send(to, text, replyTo);
+ }
+ getStatus() {
+ const status = {};
+ for (const [name, channel] of this.channels) {
+ status[name] = channel.status;
+ }
+ return status;
+ }
+ get(name) {
+ return this.channels.get(name);
+ }
+ list() {
+ return Array.from(this.channels.keys());
+ }
+ }
+ exports.ChannelManager = ChannelManager;
+ exports.default = ChannelManager;
@@ -0,0 +1,136 @@
+ "use strict";
+ /**
+ * Discord Channel - Bot integration via discord.js
+ */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.DiscordChannel = void 0;
+ const discord_js_1 = require("discord.js");
+ const eventBus_1 = require("../core/eventBus");
+ class DiscordChannel {
+ name = 'discord';
+ status = 'disconnected';
+ client;
+ config;
+ constructor(config) {
+ this.config = {
+ allowDMs: true,
+ requireMention: true,
+ ...config,
+ };
+ this.client = new discord_js_1.Client({
+ intents: [
+ discord_js_1.GatewayIntentBits.Guilds,
+ discord_js_1.GatewayIntentBits.GuildMessages,
+ discord_js_1.GatewayIntentBits.MessageContent,
+ discord_js_1.GatewayIntentBits.DirectMessages,
+ ],
+ });
+ this.setupHandlers();
+ }
+ setupHandlers() {
+ this.client.on('ready', () => {
+ console.log(`[Discord] Logged in as ${this.client.user?.tag}`);
+ this.status = 'connected';
+ (0, eventBus_1.emit)('channel:status', { channel: 'discord', status: 'connected' });
+ });
+ this.client.on('messageCreate', async (message) => {
+ // Ignore bot messages
+ if (message.author.bot)
+ return;
+ const isGroup = message.guild !== null;
+ const isDM = message.channel.type === 1; // DM channel
+ // Check DM permission
+ if (isDM && !this.config.allowDMs)
+ return;
+ // Check guild permission
+ if (isGroup && this.config.allowedGuilds?.length) {
+ if (!this.config.allowedGuilds.includes(message.guild.id))
+ return;
+ }
+ // Check channel permission
+ if (this.config.allowedChannels?.length) {
+ if (!this.config.allowedChannels.includes(message.channel.id))
+ return;
+ }
+ // In guilds, check for mention
+ if (isGroup && this.config.requireMention) {
+ const mentioned = message.mentions.has(this.client.user.id) ||
+ message.content.toLowerCase().includes(`@${this.client.user?.username?.toLowerCase()}`);
+ if (!mentioned)
+ return;
+ }
+ // Clean text (remove bot mention)
+ let cleanText = message.content
+ .replace(new RegExp(`<@!?${this.client.user?.id}>`, 'g'), '')
+ .trim();
+ // Emit message
+ (0, eventBus_1.emit)('channel:message', {
+ channel: 'discord',
+ from: isDM ? message.author.id : message.channel.id,
+ text: cleanText,
+ id: message.id,
+ isGroup,
+ groupId: isGroup ? message.channel.id : undefined,
+ timestamp: message.createdTimestamp,
+ });
+ });
+ this.client.on('error', (error) => {
+ console.error('[Discord] Error:', error);
+ this.status = 'error';
+ (0, eventBus_1.emit)('channel:status', { channel: 'discord', status: 'error', data: error });
+ });
+ }
+ async connect() {
+ this.status = 'connecting';
+ try {
+ await this.client.login(this.config.botToken);
+ }
+ catch (e) {
+ this.status = 'error';
+ (0, eventBus_1.emit)('channel:status', { channel: 'discord', status: 'error', data: e });
+ throw e;
+ }
+ }
+ async disconnect() {
+ this.client.destroy();
+ this.status = 'disconnected';
+ (0, eventBus_1.emit)('channel:status', { channel: 'discord', status: 'disconnected' });
+ }
+ async send(to, text, replyTo) {
+ const channel = await this.client.channels.fetch(to);
+ if (!channel || !('send' in channel)) {
+ throw new Error(`Cannot send to channel: ${to}`);
+ }
+ const textChannel = channel;
+ // Split long messages (Discord limit: 2000)
+ const maxLength = 2000;
+ if (text.length <= maxLength) {
+ await textChannel.send(text);
+ }
+ else {
+ const chunks = this.splitMessage(text, maxLength);
+ for (const chunk of chunks) {
+ await textChannel.send(chunk);
+ }
+ }
+ }
+ splitMessage(text, maxLength) {
+ const chunks = [];
+ let remaining = text;
+ while (remaining.length > 0) {
+ if (remaining.length <= maxLength) {
+ chunks.push(remaining);
+ break;
+ }
+ let splitAt = remaining.lastIndexOf('\n', maxLength);
+ if (splitAt < maxLength / 2) {
+ splitAt = maxLength;
+ }
+ chunks.push(remaining.slice(0, splitAt));
+ remaining = remaining.slice(splitAt).trim();
+ }
+ return chunks;
+ }
+ }
+ exports.DiscordChannel = DiscordChannel;
+ exports.default = DiscordChannel;
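
The two channel classes above are wired together through ChannelManager: register() keys each channel by its name, connectAll() logs every channel in, and getStatus() reports per-channel state. A minimal wiring sketch, assuming illustrative import paths, a stub gateway object (anything exposing broadcast()), and a hypothetical DISCORD_BOT_TOKEN environment variable:

import { ChannelManager } from './channels/manager';   // illustrative paths
import { DiscordChannel } from './channels/discord';

const gateway = { broadcast: (msg: unknown) => console.log('[webchat]', msg) };
const channels = new ChannelManager(gateway);

channels.register(new DiscordChannel({
  botToken: process.env.DISCORD_BOT_TOKEN ?? '',  // hypothetical env var name
  requireMention: true,                           // only respond when mentioned in guilds
}));

await channels.connectAll();        // attempts login for every registered channel
console.log(channels.getStatus());  // e.g. { discord: 'connected' }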