@feardread/fear 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. package/FEAR.js +459 -0
  2. package/FEARServer.js +280 -0
  3. package/controllers/agent.js +438 -0
  4. package/controllers/auth/index.js +345 -0
  5. package/controllers/auth/token.js +50 -0
  6. package/controllers/blog.js +105 -0
  7. package/controllers/brand.js +10 -0
  8. package/controllers/cart.js +425 -0
  9. package/controllers/category.js +9 -0
  10. package/controllers/coupon.js +63 -0
  11. package/controllers/crud/crud.js +508 -0
  12. package/controllers/crud/index.js +36 -0
  13. package/controllers/email.js +34 -0
  14. package/controllers/enquiry.js +65 -0
  15. package/controllers/events.js +9 -0
  16. package/controllers/order.js +125 -0
  17. package/controllers/payment.js +31 -0
  18. package/controllers/product.js +147 -0
  19. package/controllers/review.js +247 -0
  20. package/controllers/tag.js +10 -0
  21. package/controllers/task.js +10 -0
  22. package/controllers/upload.js +41 -0
  23. package/controllers/user.js +401 -0
  24. package/index.js +7 -0
  25. package/libs/agent/index.js +561 -0
  26. package/libs/agent/modules/ai/ai.js +285 -0
  27. package/libs/agent/modules/ai/chat.js +518 -0
  28. package/libs/agent/modules/ai/config.js +688 -0
  29. package/libs/agent/modules/ai/operations.js +787 -0
  30. package/libs/agent/modules/analyze/api.js +546 -0
  31. package/libs/agent/modules/analyze/dorks.js +395 -0
  32. package/libs/agent/modules/ccard/README.md +454 -0
  33. package/libs/agent/modules/ccard/audit.js +479 -0
  34. package/libs/agent/modules/ccard/checker.js +674 -0
  35. package/libs/agent/modules/ccard/payment-processors.json +16 -0
  36. package/libs/agent/modules/ccard/validator.js +629 -0
  37. package/libs/agent/modules/code/analyzer.js +303 -0
  38. package/libs/agent/modules/code/jquery.js +1093 -0
  39. package/libs/agent/modules/code/react.js +1536 -0
  40. package/libs/agent/modules/code/refactor.js +499 -0
  41. package/libs/agent/modules/crypto/exchange.js +564 -0
  42. package/libs/agent/modules/net/proxy.js +409 -0
  43. package/libs/agent/modules/security/cve.js +442 -0
  44. package/libs/agent/modules/security/monitor.js +360 -0
  45. package/libs/agent/modules/security/scanner.js +300 -0
  46. package/libs/agent/modules/security/vulnerability.js +506 -0
  47. package/libs/agent/modules/security/web.js +465 -0
  48. package/libs/agent/modules/utils/browser.js +492 -0
  49. package/libs/agent/modules/utils/colorizer.js +285 -0
  50. package/libs/agent/modules/utils/manager.js +478 -0
  51. package/libs/cloud/index.js +228 -0
  52. package/libs/config/db.js +21 -0
  53. package/libs/config/validator.js +82 -0
  54. package/libs/db/index.js +318 -0
  55. package/libs/emailer/imap.js +126 -0
  56. package/libs/emailer/info.js +41 -0
  57. package/libs/emailer/smtp.js +77 -0
  58. package/libs/handler/async.js +3 -0
  59. package/libs/handler/error.js +66 -0
  60. package/libs/handler/index.js +161 -0
  61. package/libs/logger/index.js +49 -0
  62. package/libs/logger/morgan.js +24 -0
  63. package/libs/passport/passport.js +109 -0
  64. package/libs/search/api.js +384 -0
  65. package/libs/search/features.js +219 -0
  66. package/libs/search/service.js +64 -0
  67. package/libs/swagger/config.js +18 -0
  68. package/libs/swagger/index.js +35 -0
  69. package/libs/validator/index.js +254 -0
  70. package/models/blog.js +31 -0
  71. package/models/brand.js +12 -0
  72. package/models/cart.js +14 -0
  73. package/models/category.js +11 -0
  74. package/models/coupon.js +9 -0
  75. package/models/customer.js +0 -0
  76. package/models/enquiry.js +29 -0
  77. package/models/events.js +13 -0
  78. package/models/order.js +94 -0
  79. package/models/product.js +32 -0
  80. package/models/review.js +14 -0
  81. package/models/tag.js +10 -0
  82. package/models/task.js +11 -0
  83. package/models/user.js +68 -0
  84. package/package.json +12 -0
  85. package/routes/agent.js +615 -0
  86. package/routes/auth.js +13 -0
  87. package/routes/blog.js +19 -0
  88. package/routes/brand.js +15 -0
  89. package/routes/cart.js +105 -0
  90. package/routes/category.js +16 -0
  91. package/routes/coupon.js +15 -0
  92. package/routes/enquiry.js +14 -0
  93. package/routes/events.js +16 -0
  94. package/routes/mail.js +170 -0
  95. package/routes/order.js +19 -0
  96. package/routes/product.js +22 -0
  97. package/routes/review.js +11 -0
  98. package/routes/task.js +12 -0
  99. package/routes/user.js +17 -0
package/libs/agent/modules/ai/config.js
@@ -0,0 +1,688 @@
+ // modules/ai/config.js - Enhanced AI Configuration & Provider Management with Ollama
+ const OpenAI = require('openai');
+ const Anthropic = require('@anthropic-ai/sdk');
+ const { GoogleGenerativeAI } = require('@google/generative-ai');
+ const axios = require('axios');
+ const colorizer = require('../utils/colorizer');
+
+ const AIConfig = function () {
+   // Provider instances
+   this.anthropic = null;
+   this.openai = null;
+   this.googleAi = null;
+   this.ollama = null;
+
+   // API keys
+   this.anthropicKey = process.env.ANTHROPIC_API_KEY || '';
+   this.openaiKey = process.env.OPENAI_API_KEY || '';
+   this.googleKey = process.env.GOOGLE_API_KEY || process.env.GEMINI_API_KEY || '';
+
+   // Ollama configuration
+   this.ollamaHost = process.env.OLLAMA_HOST || 'http://localhost:11434';
+   this.ollamaModel = process.env.OLLAMA_MODEL || 'code-ai';
+
+   // Current provider
+   this.provider = process.env.AI_PROVIDER || 'openai';
+
+   // Model configurations
+   this.models = {
+     anthropic: 'claude-sonnet-4-5-20250929',
+     openai: 'gpt-4o',
+     google: 'gemini-2.0-flash-exp',
+     ollama: this.ollamaModel
+   };
+
+   // Ollama model presets
+   this.ollamaPresets = {
+     'chat-ai': { base: 'llama3-chatqa:8b', temperature: 0.7, ctx: 8192 },
+     'code-ai': { base: 'qwen2.5-coder:7b', temperature: 0.2, ctx: 8192 }
+   };
+
+   // Google safety settings
+   this.googleSafetySettings = [
+     {
+       category: 'HARM_CATEGORY_HARASSMENT',
+       threshold: 'BLOCK_NONE',
+     },
+     {
+       category: 'HARM_CATEGORY_HATE_SPEECH',
+       threshold: 'BLOCK_NONE',
+     },
+     {
+       category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+       threshold: 'BLOCK_NONE',
+     },
+     {
+       category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
+       threshold: 'BLOCK_NONE',
+     }
+   ];
+
+   // Initialize providers if keys exist
+   this.initializeProviders();
+ };
+
+ AIConfig.prototype = {
+
+   initializeProviders() {
+     // Initialize Anthropic
+     if (this.anthropicKey) {
+       try {
+         this.anthropic = new Anthropic({ apiKey: this.anthropicKey });
+         console.log(colorizer.dim(' ✓ Anthropic initialized'));
+       } catch (err) {
+         console.log(colorizer.warning(' ✗ Anthropic SDK error: ' + err.message));
+       }
+     }
+
+     // Initialize OpenAI
+     if (this.openaiKey) {
+       try {
+         this.openai = new OpenAI({ apiKey: this.openaiKey });
+         console.log(colorizer.dim(' ✓ OpenAI initialized'));
+       } catch (err) {
+         console.log(colorizer.warning(' ✗ OpenAI SDK error: ' + err.message));
+       }
+     }
+
+     // Initialize Google Gemini
+     if (this.googleKey) {
+       try {
+         this.googleAi = new GoogleGenerativeAI(this.googleKey);
+         console.log(colorizer.dim(' ✓ Google Gemini initialized'));
+       } catch (err) {
+         console.log(colorizer.warning(' ✗ Google SDK error: ' + err.message));
+       }
+     }
+
+     // Initialize Ollama (check if server is available)
+     this.checkOllamaAvailability();
+
+     // Auto-select first available provider if current is not configured
+     if (!this.isConfigured()) {
+       if (this.openai) this.provider = 'openai';
+       else if (this.googleAi) this.provider = 'google';
+       else if (this.anthropic) this.provider = 'anthropic';
+       else if (this.ollama) this.provider = 'ollama';
+     }
+   },
+
+   async checkOllamaAvailability() {
+     try {
+       const response = await axios.get(`${this.ollamaHost}/api/tags`, { timeout: 2000 });
+       if (response.status === 200) {
+         this.ollama = {
+           available: true,
+           models: response.data.models || []
+         };
+         console.log(colorizer.dim('Ollama initialized'));
+
+         // Check if our custom models exist
+         const modelNames = this.ollama.models.map(m => m.name);
+         if (modelNames.includes('chat-ai') || modelNames.includes('code-ai')) {
+           console.log(colorizer.dim('Found custom models: ' +
+             modelNames.filter(m => m === 'chat-ai' || m === 'code-ai').join(', ')));
+         }
+       }
+     } catch (err) {
+       // Ollama not available - silent fail
+       this.ollama = null;
+     }
+   },
+
+   setup(args) {
+     if (args.length === 0) {
+       return this.showSetupInfo();
+     }
+
+     const provider = args[0]?.toLowerCase();
+     const key = args[1];
+
+     // Special handling for Ollama (no key required)
+     if (provider === 'ollama') {
+       return this.setupOllama(key || 'chat-ai', args[2]);
+     }
+
+     if (!key) {
+       console.log(colorizer.error('Please provide an API key\n'));
+       return Promise.resolve();
+     }
+
+     switch (provider) {
+       case 'anthropic':
+       case 'claude':
+         return this.setupAnthropic(key);
+
+       case 'openai':
+       case 'gpt':
+         return this.setupOpenAI(key);
+
+       case 'google':
+       case 'gemini':
+         return this.setupGoogle(key);
+
+       default:
+         console.log(colorizer.error('Unknown provider. Use "anthropic", "openai", "google", or "ollama"\n'));
+         return Promise.resolve();
+     }
+   },
+
+   async setupOllama(model, host) {
+     try {
+       const ollamaHost = host || this.ollamaHost;
+
+       // Test connection
+       const response = await axios.get(`${ollamaHost}/api/tags`, { timeout: 3000 });
+
+       if (response.status !== 200) {
+         throw new Error('Ollama server not responding');
+       }
+
+       const availableModels = response.data.models || [];
+       const modelNames = availableModels.map(m => m.name);
+
+       // Check if requested model exists
+       if (!modelNames.includes(model)) {
+         console.log(colorizer.warning(`Model "${model}" not found on Ollama server`));
+         console.log(colorizer.info('Available models:'));
+         modelNames.forEach(m => console.log(colorizer.dim(' • ' + m)));
+         console.log();
+         console.log(colorizer.info('To create custom models, run:'));
+         console.log(colorizer.dim(' ollama create chat-ai -f Modelfile.chat-ai'));
+         console.log(colorizer.dim(' ollama create code-ai -f Modelfile.code-ai'));
+         console.log();
+         return Promise.resolve(false);
+       }
+
+       this.ollamaHost = ollamaHost;
+       this.ollamaModel = model;
+       this.models.ollama = model;
+       this.ollama = {
+         available: true,
+         models: availableModels
+       };
+       this.provider = 'ollama';
+
+       console.log(colorizer.success('Ollama configured successfully!'));
+       console.log(colorizer.cyan('Host: ') + colorizer.bright(ollamaHost));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(model));
+       console.log();
+
+       return Promise.resolve(true);
+     } catch (err) {
+       console.log(colorizer.error('Failed to configure Ollama: ' + err.message));
+       console.log(colorizer.info('Make sure Ollama is running:'));
+       console.log(colorizer.dim(' ollama serve'));
+       console.log();
+       return Promise.resolve(false);
+     }
+   },
+
+   setupGoogle(key) {
+     try {
+       this.googleKey = key;
+       this.googleAi = new GoogleGenerativeAI(key);
+       this.provider = 'google';
+
+       console.log(colorizer.success('Google Gemini configured successfully!'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.google));
+       console.log();
+
+       return Promise.resolve(true);
+     } catch (err) {
+       console.log(colorizer.error('Failed to configure Google: ' + err.message + '\n'));
+       return Promise.resolve(false);
+     }
+   },
+
+   setupAnthropic(key) {
+     try {
+       this.anthropicKey = key;
+       this.anthropic = new Anthropic({ apiKey: key });
+       this.provider = 'anthropic';
+
+       console.log(colorizer.success('Anthropic Claude configured successfully!'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.anthropic));
+       console.log();
+
+       return Promise.resolve(true);
+     } catch (err) {
+       console.log(colorizer.error('Failed to configure Anthropic: ' + err.message + '\n'));
+       return Promise.resolve(false);
+     }
+   },
+
+   setupOpenAI(key) {
+     try {
+       this.openaiKey = key;
+       this.openai = new OpenAI({ apiKey: key });
+       this.provider = 'openai';
+
+       console.log(colorizer.success('OpenAI configured successfully!'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.openai));
+       console.log();
+
+       return Promise.resolve(true);
+     } catch (err) {
+       console.log(colorizer.error('Failed to configure OpenAI: ' + err.message + '\n'));
+       return Promise.resolve(false);
+     }
+   },
+
+   showSetupInfo() {
+     console.log(colorizer.header('AI API Configuration'));
+     console.log(colorizer.separator());
+     console.log(colorizer.cyan('Usage: ') + colorizer.bright('ai-setup <provider> <api-key|model>'));
+     console.log();
+
+     console.log(colorizer.section('Available Providers'));
+     console.log(colorizer.bullet('anthropic (claude) - Anthropic Claude'));
+     console.log(colorizer.dim(' Model: ' + this.models.anthropic));
+     console.log(colorizer.bullet('openai (gpt) - OpenAI GPT'));
+     console.log(colorizer.dim(' Model: ' + this.models.openai));
+     console.log(colorizer.bullet('google (gemini) - Google Gemini'));
+     console.log(colorizer.dim(' Model: ' + this.models.google));
+     console.log(colorizer.bullet('ollama - Local Ollama'));
+     console.log(colorizer.dim(' Model: ' + this.models.ollama + ' (customizable)'));
+     console.log();
+
+     console.log(colorizer.section('Examples'));
+     console.log(colorizer.dim(' ai-setup anthropic sk-ant-your-key-here'));
+     console.log(colorizer.dim(' ai-setup openai sk-your-openai-key-here'));
+     console.log(colorizer.dim(' ai-setup google your-gemini-key-here'));
+     console.log(colorizer.dim(' ai-setup ollama chat-ai'));
+     console.log(colorizer.dim(' ai-setup ollama code-ai'));
+     console.log();
+
+     console.log(colorizer.section('Ollama Setup'));
+     console.log(colorizer.info('Create custom Ollama models:'));
+     console.log();
+     console.log(colorizer.dim('1. Chat Model (llama3-chatqa):'));
+     console.log(colorizer.dim(' cat > Modelfile.chat-ai << \'EOF\''));
+     console.log(colorizer.dim(' FROM llama3-chatqa:8b'));
+     console.log(colorizer.dim(' PARAMETER num_ctx 8192'));
+     console.log(colorizer.dim(' PARAMETER temperature 0.7'));
+     console.log(colorizer.dim(' EOF'));
+     console.log(colorizer.dim(' ollama create chat-ai -f Modelfile.chat-ai'));
+     console.log();
+     console.log(colorizer.dim('2. Code Model (qwen2.5-coder):'));
+     console.log(colorizer.dim(' cat > Modelfile.code-ai << \'EOF\''));
+     console.log(colorizer.dim(' FROM qwen2.5-coder:7b'));
+     console.log(colorizer.dim(' PARAMETER num_ctx 8192'));
+     console.log(colorizer.dim(' PARAMETER temperature 0.2'));
+     console.log(colorizer.dim(' TEMPLATE """{{- if .Suffix }}<|fim_prefix|>{{ .Prompt }}<|fim_suffix|>{{ .Suffix }}<|fim_middle|>{{ else }}User: {{ .Prompt }}'));
+     console.log(colorizer.dim(' Assistant:{{ end }}"""'));
+     console.log(colorizer.dim(' EOF'));
+     console.log(colorizer.dim(' ollama create code-ai -f Modelfile.code-ai'));
+     console.log();
+
+     console.log(colorizer.section('Environment Variables'));
+     console.log(colorizer.dim(' export ANTHROPIC_API_KEY=your-key'));
+     console.log(colorizer.dim(' export OPENAI_API_KEY=your-key'));
+     console.log(colorizer.dim(' export GOOGLE_API_KEY=your-key'));
+     console.log(colorizer.dim(' export OLLAMA_HOST=http://localhost:11434'));
+     console.log(colorizer.dim(' export OLLAMA_MODEL=chat-ai'));
+     console.log(colorizer.dim(' export AI_PROVIDER=anthropic|openai|google|ollama'));
+     console.log();
+
+     console.log(colorizer.section('Current Status'));
+     console.log(colorizer.cyan(' Provider: ') + colorizer.bright(this.getProviderName()));
+     console.log(colorizer.cyan(' Status: ') +
+       (this.isConfigured() ? colorizer.green('Configured') : colorizer.red('Not configured')));
+
+     if (!this.isConfigured() && (this.anthropic || this.openai || this.googleAi || this.ollama)) {
+       console.log();
+       console.log(colorizer.info('Other providers available:'));
+       if (this.anthropic && this.provider !== 'anthropic') {
+         console.log(colorizer.dim(' Run "ai-provider anthropic" to switch'));
+       }
+       if (this.openai && this.provider !== 'openai') {
+         console.log(colorizer.dim(' Run "ai-provider openai" to switch'));
+       }
+       if (this.googleAi && this.provider !== 'google') {
+         console.log(colorizer.dim(' Run "ai-provider google" to switch'));
+       }
+       if (this.ollama && this.provider !== 'ollama') {
+         console.log(colorizer.dim(' Run "ai-provider ollama" to switch'));
+       }
+     }
+     console.log();
+
+     return Promise.resolve();
+   },
+
+   setProvider(args) {
+     const provider = args[0]?.toLowerCase();
+
+     if (!provider) {
+       return this.showProviderInfo();
+     }
+
+     // Normalize provider names
+     const normalizedProvider = provider === 'claude' ? 'anthropic' :
+       provider === 'gpt' ? 'openai' :
+       provider === 'gemini' ? 'google' :
+       provider;
+
+     if (normalizedProvider === 'anthropic' && this.anthropic) {
+       this.provider = 'anthropic';
+       console.log(colorizer.success('Switched to Anthropic Claude'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.anthropic));
+       console.log();
+     } else if (normalizedProvider === 'openai' && this.openai) {
+       this.provider = 'openai';
+       console.log(colorizer.success('Switched to OpenAI GPT'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.openai));
+       console.log();
+     } else if (normalizedProvider === 'google' && this.googleAi) {
+       this.provider = 'google';
+       console.log(colorizer.success('Switched to Google Gemini'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.google));
+       console.log();
+     } else if (normalizedProvider === 'ollama' && this.ollama) {
+       this.provider = 'ollama';
+       console.log(colorizer.success('Switched to Ollama'));
+       console.log(colorizer.cyan('Model: ') + colorizer.bright(this.models.ollama));
+       console.log(colorizer.cyan('Host: ') + colorizer.dim(this.ollamaHost));
+       console.log();
+     } else {
+       console.log(colorizer.error(provider + ' is not configured. Use ai-setup first.\n'));
+     }
+
+     return Promise.resolve();
+   },
+
+   showProviderInfo() {
+     console.log(colorizer.header('AI Provider Management'));
+     console.log(colorizer.separator());
+     console.log(colorizer.cyan('Usage: ') + colorizer.bright('ai-provider <anthropic|openai|google|ollama>'));
+     console.log();
+
+     console.log(colorizer.section('Current Provider'));
+     console.log(colorizer.cyan(' Active: ') + colorizer.bright(this.getProviderName()));
+     console.log(colorizer.cyan(' Model: ') + colorizer.dim(this.getModel()));
+     console.log();
+
+     console.log(colorizer.section('Available Providers'));
+     console.log(colorizer.cyan(' Anthropic Claude: ') +
+       (this.anthropic ? colorizer.green('Available') : colorizer.red('✗ Not configured')));
+     if (this.anthropic) {
+       console.log(colorizer.dim(' Model: ' + this.models.anthropic));
+     }
+
+     console.log(colorizer.cyan(' OpenAI GPT: ') +
+       (this.openai ? colorizer.green('Available') : colorizer.red('✗ Not configured')));
+     if (this.openai) {
+       console.log(colorizer.dim(' Model: ' + this.models.openai));
+     }
+
+     console.log(colorizer.cyan(' Google Gemini: ') +
+       (this.googleAi ? colorizer.green('Available') : colorizer.red('✗ Not configured')));
+     if (this.googleAi) {
+       console.log(colorizer.dim(' Model: ' + this.models.google));
+     }
+
+     console.log(colorizer.cyan(' Ollama (Local): ') +
+       (this.ollama ? colorizer.green('Available') : colorizer.red('✗ Not configured')));
+     if (this.ollama) {
+       console.log(colorizer.dim(' Model: ' + this.models.ollama));
+       console.log(colorizer.dim(' Host: ' + this.ollamaHost));
+       console.log(colorizer.dim(' Available models: ' +
+         this.ollama.models.map(m => m.name).join(', ')));
+     }
+     console.log();
+
+     return Promise.resolve();
+   },
+
+   isConfigured() {
+     if (this.provider === 'openai') return !!this.openai;
+     if (this.provider === 'google') return !!this.googleAi;
+     if (this.provider === 'anthropic') return !!this.anthropic;
+     if (this.provider === 'ollama') return !!this.ollama;
+     return false;
+   },
+
+   getProviderName() {
+     if (this.provider === 'openai') return 'OpenAI GPT';
+     if (this.provider === 'google') return 'Google Gemini';
+     if (this.provider === 'anthropic') return 'Anthropic Claude';
+     if (this.provider === 'ollama') return 'Ollama (Local)';
+     return 'Unknown';
+   },
+
+   getProvider() {
+     return this.provider;
+   },
+
+   getClient() {
+     if (this.provider === 'openai') return this.openai;
+     if (this.provider === 'google') return this.googleAi;
+     if (this.provider === 'anthropic') return this.anthropic;
+     if (this.provider === 'ollama') return this.ollama;
+     return null;
+   },
+
+   getModel() {
+     return this.models[this.provider];
+   },
+
+   // Ollama API call
+   async callOllama(prompt, maxTokens = 4096) {
+     if (!this.ollama) {
+       throw new Error('Ollama not configured');
+     }
+
+     try {
+       const response = await axios.post(`${this.ollamaHost}/api/generate`, {
+         model: this.ollamaModel,
+         prompt: prompt,
+         stream: false,
+         options: {
+           num_predict: maxTokens,
+           temperature: this.ollamaPresets[this.ollamaModel]?.temperature || 0.7,
+           num_ctx: this.ollamaPresets[this.ollamaModel]?.ctx || 8192
+         }
+       });
+
+       return response.data.response;
+     } catch (err) {
+       throw new Error('Ollama request failed: ' + err.message);
+     }
+   },
+
+   // Ollama streaming call
+   async callOllamaStream(prompt, maxTokens = 4096, onChunk) {
+     if (!this.ollama) {
+       throw new Error('Ollama not configured');
+     }
+
+     try {
+       const response = await axios.post(`${this.ollamaHost}/api/generate`, {
+         model: this.ollamaModel,
+         prompt: prompt,
+         stream: true,
+         options: {
+           num_predict: maxTokens,
+           temperature: this.ollamaPresets[this.ollamaModel]?.temperature || 0.7,
+           num_ctx: this.ollamaPresets[this.ollamaModel]?.ctx || 8192
+         }
+       }, {
+         responseType: 'stream'
+       });
+
+       let fullText = '';
+
+       return new Promise((resolve, reject) => {
+         response.data.on('data', (chunk) => {
+           try {
+             const lines = chunk.toString().split('\n').filter(line => line.trim());
+
+             for (const line of lines) {
+               const data = JSON.parse(line);
+               if (data.response) {
+                 fullText += data.response;
+                 if (onChunk) {
+                   onChunk(data.response);
+                 }
+               }
+
+               if (data.done) {
+                 resolve(fullText);
+               }
+             }
+           } catch (err) {
+             // Ignore parse errors for incomplete chunks
+           }
+         });
+
+         response.data.on('error', (err) => {
+           reject(new Error('Ollama stream error: ' + err.message));
+         });
+       });
+     } catch (err) {
+       throw new Error('Ollama stream request failed: ' + err.message);
+     }
+   },
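For reference, both Ollama helpers above parse the JSON bodies returned by Ollama's /api/generate endpoint. A rough sketch of the shapes the code relies on (field values are illustrative only):

// Non-streaming (stream: false): a single JSON object; callOllama returns data.response.
// Streaming (stream: true): newline-delimited JSON objects; callOllamaStream appends each
// chunk's "response" field and resolves once a line arrives with "done": true.
const singleResponse = { model: 'code-ai', response: '...generated text...', done: true };
const streamedLine = { model: 'code-ai', response: ' next token', done: false };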
+
+   // Google Gemini API call
+   async callGoogle(prompt, maxTokens = 4096) {
+     if (!this.googleAi) {
+       throw new Error('Google Gemini not configured');
+     }
+
+     try {
+       const model = this.googleAi.getGenerativeModel({
+         model: this.models.google,
+         safetySettings: this.googleSafetySettings,
+         generationConfig: {
+           maxOutputTokens: maxTokens,
+           temperature: 0.7,
+           topP: 0.95,
+         }
+       });
+
+       const result = await model.generateContent(prompt);
+       const response = await result.response;
+       return response.text();
+     } catch (err) {
+       if (err.message.includes('SAFETY')) {
+         throw new Error('Content blocked by safety filters. Try rephrasing your query.');
+       }
+       throw err;
+     }
+   },
+
+   // Google Gemini streaming call
+   async callGoogleStream(prompt, maxTokens = 4096, onChunk) {
+     if (!this.googleAi) {
+       throw new Error('Google Gemini not configured');
+     }
+
+     try {
+       const model = this.googleAi.getGenerativeModel({
+         model: this.models.google,
+         safetySettings: this.googleSafetySettings,
+         generationConfig: {
+           maxOutputTokens: maxTokens,
+           temperature: 0.7,
+           topP: 0.95,
+         }
+       });
+
+       const result = await model.generateContentStream(prompt);
+       let fullText = '';
+
+       for await (const chunk of result.stream) {
+         const chunkText = chunk.text();
+         fullText += chunkText;
+         if (onChunk) {
+           onChunk(chunkText);
+         }
+       }
+
+       return fullText;
+     } catch (err) {
+       if (err.message.includes('SAFETY')) {
+         throw new Error('Content blocked by safety filters. Try rephrasing your query.');
+       }
+       throw err;
+     }
+   },
+
+   // Anthropic API call
+   callAnthropic(prompt, maxTokens = 4096) {
+     if (!this.anthropic) {
+       return Promise.reject(new Error('Anthropic not configured'));
+     }
+
+     return this.anthropic.messages.create({
+       model: this.models.anthropic,
+       max_tokens: maxTokens,
+       messages: [{
+         role: 'user',
+         content: prompt
+       }]
+     }).then(message => message.content[0].text);
+   },
+
+   // OpenAI API call
+   callOpenAI(prompt, maxTokens = 4096) {
+     if (!this.openai) {
+       return Promise.reject(new Error('OpenAI not configured'));
+     }
+
+     return this.openai.chat.completions.create({
+       model: this.models.openai,
+       max_tokens: maxTokens,
+       messages: [{
+         role: 'system',
+         content: 'You are an expert cybersecurity analyst and software development consultant.'
+       }, {
+         role: 'user',
+         content: prompt
+       }]
+     }).then(completion => completion.choices[0].message.content);
+   },
+
+   // Universal call method
+   call(prompt, maxTokens = 4096) {
+     if (!this.isConfigured()) {
+       return Promise.reject(new Error('AI not configured. Use ai-setup first.'));
+     }
+
+     if (this.provider === 'openai') {
+       return this.callOpenAI(prompt, maxTokens);
+     } else if (this.provider === 'google') {
+       return this.callGoogle(prompt, maxTokens);
+     } else if (this.provider === 'anthropic') {
+       return this.callAnthropic(prompt, maxTokens);
+     } else if (this.provider === 'ollama') {
+       return this.callOllama(prompt, maxTokens);
+     }
+
+     return Promise.reject(new Error('Invalid provider configuration'));
+   },
+
+   // Streaming call method
+   async callStream(prompt, maxTokens = 4096, onChunk) {
+     if (!this.isConfigured()) {
+       throw new Error('AI not configured. Use ai-setup first.');
+     }
+
+     if (this.provider === 'google') {
+       return this.callGoogleStream(prompt, maxTokens, onChunk);
+     } else if (this.provider === 'ollama') {
+       return this.callOllamaStream(prompt, maxTokens, onChunk);
+     } else {
+       // For non-streaming providers, simulate streaming
+       const response = await this.call(prompt, maxTokens);
+       if (onChunk) {
+         onChunk(response);
+       }
+       return response;
+     }
+   }
+ };
+
+ module.exports = AIConfig;
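Taken together, the module is consumed roughly as follows. This is a minimal sketch of hypothetical consumer code, not part of the published package; the require path is an assumption based on the file layout above, and the 'chat-ai' model presumes the custom Ollama model has been created.

// Hypothetical consumer of the AIConfig module shown in this diff.
const AIConfig = require('@feardread/fear/libs/agent/modules/ai/config');

const ai = new AIConfig(); // reads API keys, OLLAMA_HOST and AI_PROVIDER from the environment

async function main() {
  // Optional runtime configuration instead of environment variables
  await ai.setup(['ollama', 'chat-ai']);

  if (!ai.isConfigured()) return;

  // Non-streaming call: resolves with the full completion text
  const text = await ai.call('Summarize the OWASP Top 10 in one paragraph.', 1024);
  console.log(text);

  // Streaming call: Google and Ollama stream natively; other providers
  // resolve once and invoke the callback with the whole response
  await ai.callStream('List three Node.js logging libraries.', 512, chunk => {
    process.stdout.write(chunk);
  });
}

main().catch(err => console.error(err.message));

Provider selection can also be driven entirely by AI_PROVIDER plus the matching API key environment variable, in which case the setup() call is unnecessary.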