apexify.js 4.0.7 → 4.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai/ApexAI.d.ts +7 -7
- package/dist/ai/ApexAI.d.ts.map +1 -1
- package/dist/ai/ApexAI.js +60 -30
- package/dist/ai/ApexAI.js.map +1 -1
- package/dist/ai/functions/draw.d.ts +1 -1
- package/dist/ai/functions/draw.d.ts.map +1 -1
- package/dist/ai/functions/draw.js +18 -4
- package/dist/ai/functions/draw.js.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts +1 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.js +5 -5
- package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
- package/dist/ai/modals-chat/Gemini-flash.d.ts +8 -1
- package/dist/ai/modals-chat/Gemini-flash.d.ts.map +1 -1
- package/dist/ai/modals-chat/Gemini-flash.js +118 -51
- package/dist/ai/modals-chat/Gemini-flash.js.map +1 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts +8 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts.map +1 -1
- package/dist/ai/modals-chat/Gemini-pro.js +115 -51
- package/dist/ai/modals-chat/Gemini-pro.js.map +1 -1
- package/lib/ai/ApexAI.ts +73 -35
- package/lib/ai/functions/draw.ts +21 -5
- package/lib/ai/functions/generateVoiceResponse.ts +6 -5
- package/lib/ai/modals-chat/Gemini-flash.ts +140 -52
- package/lib/ai/modals-chat/Gemini-pro.ts +139 -53
- package/package.json +3 -2
package/lib/ai/ApexAI.ts
CHANGED
@@ -22,9 +22,6 @@ import { imageTools } from "./buttons/drawMenu";
 import { ImageModals } from './functions/validOptions';
 import { apexai, geminiFlash, geminiPro, facebook_ai, yi_34b, starChat } from "./modals-chat/modals";

-/**
- * Configuration options for the Gemini Flash functionality.
- */
 export interface Options {
   /**
    * Configuration options related to voice functionality.
@@ -82,7 +79,9 @@ export interface Options {
       imgStyle?: string | "3d-model" | "analog-film" | "anime" | "cinematic" | "comic-book" | "digital-art" | "enhance" | "isometric" | "fantasy-art" | "isometric" | "line-art" | "low-poly" | "neon-punk" | "origami" | "photographic" | "pixel-art" | "texture" | "craft-clay";
       negative_prompt?: string;
       sampler?: string | "DPM++ 2M Karras" | "DPM++ SDE Karras" | "DPM++ 2M SDE Exponential" | "DPM++ 2M SDE Karras" | "Euler a" | "Euler" | "LMS" | "Heun" | "DPM2" | "DPM2 a" | "DPM++ 2S a" | "DPM++ 2M" | "DPM++ SDE" | "DPM++ 2M SDE" | "DPM++ 2M SDE Heun" | "DPM++ 2M SDE Heun Karras" | "DPM++ 2M SDE Heun Exponential" | "DPM++ 3M SDE" | "DPM++ 3M SDE Karras" | "DPM++ 3M SDE Exponential" | "DPM fast" | "DPM adaptive" | "LMS Karras" | "DPM2 Karras" | "DPM2 a Karras" | "DPM++ 2S a Karras" | "Restart" | "DDIM" | "PLMS" | "UniPC";
-
+      width?: number;
+      height?: number;
+    };
   };
   /**
    * Configuration options related to chat functionality.
@@ -106,8 +105,8 @@ export interface Options {
     personality?: string | any;
     API_KEY?: string;
     memory?: {
-      memoryOn
-      id
+      memoryOn?: boolean;
+      id?: string;
     };
     typeWriting?: {
       enable?: boolean;
@@ -138,8 +137,9 @@ export interface Options {
    */
   others?: {
     messageType?: {
-      type
-      intialContent
+      type?: string;
+      intialContent?: string;
+      sendAs?: string | "embed" | "content";
     };
     buttons?: any[];
     keywords?: string[];
@@ -179,7 +179,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
   await filters(message.client);

   let usermsg: string = '';
-
+  let RespondMessage: any;
   const {
     voice: {
       textVoice: {
@@ -208,7 +208,9 @@ export async function ApexAI (message: any, aiOptions: Options) {
         sampler: sampler = 'DDIM',
         steps: steps = 20,
         seed: seed = -1,
-        imgStyle: imgStyle = 'enhance'
+        imgStyle: imgStyle = 'enhance',
+        width: width = 512,
+        height: height = 512,
       } = {},
     } = {},
     chat: {
@@ -275,10 +277,10 @@ export async function ApexAI (message: any, aiOptions: Options) {

   await message.channel?.sendTyping();

-
+  usermsg = message.content;

   if (
-    message.attachments
+    message.attachments?.some((attachment: any) =>
       attachment.contentType.startsWith("audio/")
     )
   ) {
@@ -319,10 +321,10 @@ export async function ApexAI (message: any, aiOptions: Options) {
   if (message.attachments.size > 0) {
     if (attachment.name.endsWith('.pdf')) {
       const pdfContent = await readPdf(attachment.url);
-      usermsg += pdfContent
+      usermsg += `\n\n This is the pdf file content:\n\n ${pdfContent}`;
     } else {
       const txtContent = await readTextFile(attachment.url);
-      usermsg += txtContent
+      usermsg += `\n\n This is the .txt file content:\n\n ${txtContent}`;
     }
   }
   }
@@ -390,9 +392,10 @@ export async function ApexAI (message: any, aiOptions: Options) {

   if (drawValid) {

-    usermsg = `Rewrite this text below in more descriptive way make it clear to be visualized correctly and enhance it and use stronger words please\n\n\n ${usermsg}`
-
-
+    usermsg = `Rewrite this text below in more descriptive way make it clear to be visualized correctly and enhance it and use stronger words please and please return the response with nothing else in just the enhanced prompt nothing more nothing less\n\n\n ${usermsg}`
+
+    if (enhancerOn) {
+      usermsg += await hercai.question({ model: "v3", content: usermsg});
     }

     return await aiImagine(
@@ -405,7 +408,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
       nsfwKeyWords,
       deepCheck,
       aiOptions.imagine?.enhancer,
-      buttons
+      buttons,
+      RespondMessage
     );

   } else if (aiOptions.voice) {
@@ -427,7 +431,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
       nsfwKeyWords,
       deepCheck,
       aiOptions.imagine?.enhancer,
-      buttons
+      buttons,
+      RespondMessage
     );
   }

@@ -456,22 +461,49 @@ export async function ApexAI (message: any, aiOptions: Options) {
     } else if (chatModal === 'starChat') {
       response = await starChat(usermsg);
     } else if (chatModal === 'gemini-flash') {
-      response = await geminiFlash(
+      response = await geminiFlash(
+        {
+          userId: message.author.id,
+          serverName: message.guild.name,
+          serverId: message.guild.id,
+          channelName: message.channel.name,
+          attachment: attachment,
+          db: memoryOn
+        },
+        {
+          userMsg: usermsg,
+          API_KEY: API_KEY,
+          AiPersonality: personality
+        }
+      );
     } else if (chatModal === 'gemini-pro') {
-      response = await geminiPro(
+      response = await geminiPro(
+        {
+          userId: message.author.id,
+          serverName: message.guild.name,
+          serverId: message.guild.id,
+          channelName: message.channel.name,
+          attachment: attachment,
+          db: memoryOn
+        },
+        {
+          userMsg: usermsg,
+          API_KEY: API_KEY,
+          AiPersonality: personality
+        }
+      );
     } else if (chatModal === 'v3' || chatModal === 'v3-32k' || chatModal === 'turbo' || chatModal === 'turbo-16k' || chatModal === 'gemini') {
       if (!memoryOn) {

         const personalityFilePath = path.join(process.cwd(), personality);
         const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
         const personalityString = personalityContent.split('\n').join(' ');
-
+        hercai.betaDrawImage
         response = await hercai.question({
           model: chatModal,
           content: usermsg,
           personality: personalityString
         });
-
         response = response.reply;
       } else {
         response = await hercai.betaQuestion({
@@ -487,21 +519,26 @@ export async function ApexAI (message: any, aiOptions: Options) {
     if (msgType === 'reply') {
       if (error.response && error.response.status === 429) {
         console.error("Too many requests. Please try again later.");
-
+        RespondMessage = await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
+        return;
       } else if (error.response && error.response.status === 500) {
         console.error("Internal server error. Please try again later.");
-
+        RespondMessage = await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
+        return;
       } else {
         console.error("The Api is on a cool down for 10 seconds", error.message);
-
+        RespondMessage = await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
+        return;
       }
     } else if (msgType === 'send') {
       if (error.response && error.response.status === 429) {
         console.error("Too many requests. Please try again later.");
-
+        RespondMessage = await message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
+        return;
       } else if (error.response && error.response.status === 500) {
         console.error("Internal server error. Please try again later.");
-
+        RespondMessage = await message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
+        return;
       } else {
         await message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
         console.error("The Api is on a cool down for 10 seconds", error.message);
@@ -525,13 +562,13 @@ export async function ApexAI (message: any, aiOptions: Options) {
         }
       }
       for (const part of parts) {
-        await typeWriter(message.channel, part, speed, delay, buttons);
+        RespondMessage = await typeWriter(message.channel, part, speed, delay, buttons);
       }
     }

   } else {
     if (response.length <= 2000) {
-
+      RespondMessage = await message.reply({
         content: response,
         components: buttons,
         allowedMentions: { repliedUser: false },
@@ -548,7 +585,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
         }
       }
       for (const part of parts) {
-
+        RespondMessage = await message.reply({
           content: part,
           components: buttons,
           allowedMentions: { repliedUser: false },
@@ -559,7 +596,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
   } else if (msgType === 'send') {
     if (typeWritingEnable) {
       if (response.length <= 2000) {
-
+        RespondMessage = await typeWriter(message.channel, response, speed, delay, buttons);
       } else {
         let parts: string[] = [];
         while (typeof response === 'string' && response.length > 0) {
@@ -572,12 +609,12 @@ export async function ApexAI (message: any, aiOptions: Options) {
           }
         }
         for (const part of parts) {
-
+          RespondMessage = await typeWriter(message.channel, part, speed, delay, buttons);
         }
       }
     } else {
       if (response.length <= 2000) {
-
+        RespondMessage = await message.channel.send({
           content: response,
           components: buttons
         });
@@ -593,7 +630,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
         }
       }
       for (const part of parts) {
-
+        RespondMessage = await message.channel.send({
           content: part,
           components: buttons
         });
@@ -602,4 +639,5 @@ export async function ApexAI (message: any, aiOptions: Options) {
     }
   }
   }
+  return await RespondMessage;
 };
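The changes above add `width`/`height` under `imagine.enhancer`, type the `memory` and `messageType` fields, and introduce `sendAs`; `ApexAI` now also returns the message it sent (`RespondMessage`). Below is a minimal consumer sketch of the updated `Options` shape. Only the field names visible in the interface diff come from the package; the import path, event wiring, the `chatModal` key name, and all concrete values are assumptions for illustration.

```ts
// Hypothetical usage of the 4.0.9 Options shape (sketch only, not taken from the diff verbatim).
import { ApexAI } from "apexify.js"; // assumes the package root re-exports ApexAI

export async function handleMessage(message: any) {
  const sent = await ApexAI(message, {
    chat: {
      chatModal: "gemini-flash",                          // assumed key name (matches the destructured variable)
      personality: "./personality.txt",                   // placeholder path, read via fs.readFileSync
      memory: { memoryOn: true, id: message.author?.id }, // fields are now typed: boolean / string
    },
    imagine: {
      enhancer: {
        imgStyle: "enhance",
        sampler: "DDIM",
        width: 768,                                       // new in 4.0.9, defaults to 512
        height: 768,                                      // new in 4.0.9, defaults to 512
      },
    },
    others: {
      messageType: { type: "reply", intialContent: "", sendAs: "content" }, // sendAs is new
    },
  } as any); // cast keeps the sketch independent of exact exported typings
  return sent; // 4.0.9 ends with `return await RespondMessage;`, so the sent reply is returned here
}
```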
package/lib/ai/functions/draw.ts
CHANGED
@@ -16,7 +16,8 @@ async function aiImagine(
   nsfwKeyWords: string[],
   deepCheck: boolean,
   enhancer: any,
-  buttons: any[]
+  buttons: any[],
+  RespondMessage: any
 ) {
   const maxRetryAttempts = 4;
   const retryInterval = 5000;
@@ -63,6 +64,19 @@ async function aiImagine(
         prompt: translatedText.text,
       }),
     );
+  } else if (imageModel === 'prodia') {
+    response = await retry(() =>
+      hercai.betaDrawImage({
+        prompt: translatedText.text,
+        negative_prompt: enhancer.negative_prompt,
+        sampler: enhancer.sampler,
+        image_style: enhancer.imgStyle,
+        width: enhancer.width,
+        height: enhancer.height,
+        steps: enhancer.steps,
+        scale: enhancer.cfgScale,
+      }),
+    );
   } else if (validOptions.validProdiaModals.includes(imageModel)) {
     const generateResponse = await sdk.generate({
       model: imageModel,
@@ -73,6 +87,8 @@ async function aiImagine(
       sampler: enhancer.sampler,
       seed: enhancer.seed,
       steps: enhancer.steps,
+      width: enhancer.width,
+      height: enhancer.height
     });

     await message.channel?.sendTyping();
@@ -740,14 +756,14 @@ async function aiImagine(
         }
       }
     }
-    const allRows = [...buttons,
-    await message.reply({
+    const allRows = [...buttons, row1, row2]
+    RespondMessage = await message.reply({
       files: attachData,
       components: allRows,
       allowedMentions: { repliedUser: false },
     });

-    return imageUrls;
+    return { imageUrls, RespondMessage };
   } catch (error: any) {
     console.error("Error in drawImage:", error.message);
     if (error.response) {
@@ -758,7 +774,7 @@ async function aiImagine(
       content: "An error occurred while processing the images.",
       allowedMentions: { repliedUser: false },
     });
-    return [];
+    return { imageUrls: [], RespondMessage };
   }
   }

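Note that `aiImagine` now takes `RespondMessage` as a final parameter and returns `{ imageUrls, RespondMessage }` instead of the bare array, so internal call sites need to destructure the result. A sketch of the adjustment follows; the import path, the exported status of `aiImagine`, and the surrounding variable declarations are assumptions mirroring the `aiVoice` change below, not part of this diff.

```ts
// Hypothetical internal call site adapting to the 4.0.9 return shape.
import { aiImagine } from "./draw"; // assumed local import inside the package

async function imagineAndReport(
  message: any, numOfImages: number, finalText: string, hercai: any, imageModal: string,
  nsfw: boolean, nsfwKeyWords: string[], deepCheck: boolean, enhancer: any,
  buttons: any[], RespondMessage: any,
) {
  // 4.0.7 returned the image URLs directly; 4.0.9 also hands back the Discord reply it sent.
  const result = await aiImagine(
    message, numOfImages, finalText, hercai, imageModal,
    nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage,
  );
  // result has the shape { imageUrls, RespondMessage } on both the success and error paths.
  return { count: result?.imageUrls?.length ?? 0, reply: result?.RespondMessage };
}
```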
package/lib/ai/functions/generateVoiceResponse.ts
CHANGED
@@ -27,7 +27,8 @@ async function aiVoice(
   nsfwKeyWords: string[],
   deepCheck: boolean,
   enhancer: any,
-  buttons: any[]
+  buttons: any[],
+  RespondMessage: any
 ) {
   if (message.author.bot || isProcessing || !message.guild) {
     return;
@@ -39,7 +40,7 @@ async function aiVoice(
   let msg = message.content;

   if (drawValid) {
-    return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons);
+    return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage);
   }

   if (message.attachments.size > 0) {
@@ -79,7 +80,7 @@ async function aiVoice(
       "User-Agent": "stagefright/1.2 (Linux;Android 5.0)",
     },
   });
-  await message.reply({
+  RespondMessage = await message.reply({
     components: buttons,
     files: [
       {
@@ -120,7 +121,7 @@ async function aiVoice(
     const response = await axios.request(options);
     const audioData = response.data.audioStream;
     fs.writeFileSync("output.ogg", Buffer.from(audioData, "base64"));
-    await message.reply({
+    RespondMessage = await message.reply({
       components: buttons,
       files: [
         {
@@ -160,7 +161,7 @@ async function aiVoice(
     try {
       const response = await axios.request(options);
       const audioUrl = response.data.result.audio_url;
-      await message.reply({
+      RespondMessage = await message.reply({
         content: audioUrl,
         components: buttons,
         allowedMentions: { repliedUser: false },
package/lib/ai/modals-chat/Gemini-flash.ts
CHANGED
@@ -1,17 +1,40 @@
-import { GoogleGenerativeAI } from "@google/generative-ai";
+import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
 import path from 'path';
 import fs from 'fs';
 import config from './config';
 import { converter } from "../../canvas/utils/general functions";
+import { connect } from "verse.db";
+import axios from "axios";

 let currentApiKeyIndex = 0;

-export async function geminiFlash(message: any, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {
+export async function geminiFlash(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {
+
+  let db: any;
+
+  if (message.db) {
+    db = new connect({ adapter: 'json', dataPath: `${message.serverId}_ChatHistory` });
+  }

   try {
-
+    let apiKeyIndex = currentApiKeyIndex;
+    let genAI: any;
+    while (apiKeyIndex < config.apiKeys.length) {
+      const validateKey = await axios.get(`https://generativelanguage.googleapis.com/v1beta/models?key=${config.apiKeys[apiKeyIndex]}`);
+      if (validateKey.status === 200) {
+        genAI = new GoogleGenerativeAI(config.apiKeys[apiKeyIndex]);
+
+        break;
+      } else {
+        apiKeyIndex++;
+      }
+    }

+    if (apiKeyIndex === config.apiKeys.length) {
+      return 'All provided API keys are invalid.';
+    }
     let personalityString: string = '';
+
     if (AI.AiPersonality) {
       const personalityFilePath = path.join(process.cwd(), AI.AiPersonality);
       const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
@@ -22,49 +45,83 @@ export async function geminiFlash(message: any, AI: { AiPersonality: string | nu
       maxOutputTokens: 750,
     };

-    const
-
+    const safetySettings = [
+      {
+        category: HarmCategory.HARM_CATEGORY_HARASSMENT,
+        threshold: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+      },
+      {
+        category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+      },
+      {
+        category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+        threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
+      },
+      {
+        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
+      },
+    ];
+
+    const systemInstruction = `${personalityString}\n And working on discord in serverName: ${message.serverName} at channelName: ${message.channelName} and responding to the userName: <@${message.userId}>`;
+    const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash-latest", systemInstruction, generationConfig, safetySettings });
+
+    const defaultHistory = [
+      {
+        role: "user",
+        parts: [{ text: `${personalityString}\n And working on discord in serverName: ${message.serverName} at channelName: ${message.channelName} and responding to the userName: <@${message.userId}>` }],
+      },
+      {
+        role: "model",
+        parts: [{ text: `I will greet the user with their name: <@${message.userId}>. Feel free to ask me anything! 😊` }],
+      },
+      {
+        role: "user",
+        parts: [{ text: "Any question regarding JEDI STUDIO, verse.db, apexify.js, or djs-builder?" }],
+      },
+      {
+        role: "model",
+        parts: [{ text: `You should reply with visiting documentation online at jedi-studio.com. The development team consists of 8 people, led by Marco. The CEO/founder is Kimo.` }],
+      },
+      {
+        role: "user",
+        parts: [{ text: "How was I developed or programmed?" }],
+      },
+      {
+        role: "model",
+        parts: [{ text: `You were made by JEDI STUDIO under the development of jedi.tsx, one of the CEOs for JEDI STUDIO.` }],
+      },
+      {
+        role: "user",
+        parts: [{ text: "Tell me about verse.db." }],
+      },
+      {
+        role: "model",
+        parts: [{ text: `verse.db is a database that enables users to manage data for SQL, JSON, YAML, and SQOL. It's a new brand by JEDI STUDIO, and SOOL will support more database management with comprehensive features and security.` }],
+      },
+    ];
+
+    let historyData: any[] | undefined = undefined;
+
+    if (message.db) {
+      db = new connect({ adapter: 'json', dataPath: `${message.serverId}_ChatHistory` });
+
+      const data = await db.find(`${message.userId}_chatHistory`, { userId: message.userId });
+
+      if (!data.results?.history || data.results?.history?.length === 0) {
+        await db.update(`${message.userId}_chatHistory`, { userId: message.userId }, { $set: { history: [] } }, true);
+      } else {
+        historyData = data.results?.history || undefined;
+      }
+    }

     const chat = model.startChat({
-      history:
-        {
-          role: "user",
-          parts: [{ text: `${personalityString}\n And working on discord in serverName: ${message.guild.name} at channelName: ${message.channel.name} and responding to the userName: ${message.author.username}` }],
-        },
-        {
-          role: "model",
-          parts: [{ text: `I will greet the user with their name: <@${message.author.id}>. Feel free to ask me anything! 😊` }],
-        },
-        {
-          role: "user",
-          parts: [{ text: "Any question regarding JEDI STUDIO, verse.db, apexify.js, or djs-builder?" }],
-        },
-        {
-          role: "model",
-          parts: [{ text: `You should reply with visiting documentation online at jedi-studio.com. The development team consists of 8 people, led by Marco. The CEO/founder is Kimo.` }],
-        },
-        {
-          role: "user",
-          parts: [{ text: "How was I developed or programmed?" }],
-        },
-        {
-          role: "model",
-          parts: [{ text: `You were made by JEDI STUDIO under the development of jedi.tsx, one of the CEOs for JEDI STUDIO.` }],
-        },
-        {
-          role: "user",
-          parts: [{ text: "Tell me about verse.db." }],
-        },
-        {
-          role: "model",
-          parts: [{ text: `verse.db is a database that enables users to manage data for SQL, JSON, YAML, and SQOL. It's a new brand by JEDI STUDIO, and SOOL will support more database management with comprehensive features and security.` }],
-        },
-      ],
+      history: historyData || defaultHistory,
       generationConfig
-
+    });

-    const
-    const imgURL = attachment?.url || null;
+    const imgURL = message.attachment?.url || null;
     let result: any;

     if (imgURL) {
@@ -79,20 +136,51 @@ export async function geminiFlash(message: any, AI: { AiPersonality: string | nu
     } else {
       result = await chat.sendMessage(AI.userMsg);
     }
-    const response = result.response;

-
+    const response = await result.response.text();
+
+    if (message.db) {
+
+      const updateQuery_1 = {
+        $push: {
+          "history": {
+            role: "user",
+            parts: [{ text: `${AI.userMsg}` }]
+          }
+        },
+      };
+
+      historyData = await db.update(`${message.userId}_chatHistory`,
+        { userId: message.userId },
+        updateQuery_1, true
+      );
+
+      const updateQuery_2 = {
+        $push: {
+          "history": {
+            role: "model",
+            parts: [{ text: `${response}` }]
+          }
+        },
+      };
+
+      historyData = await db.update(`${message.userId}_chatHistory`,
+        { userId: message.userId },
+        updateQuery_2,
+        true
+      );
+    }
+
+    return response;
   } catch (e: any) {
     if (e.message) {
       if (e.message === '[GoogleGenerativeAI Error]: Error fetching from https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent: [400 Bad Request] User location is not supported for the API use.') {
-        return
-      } else if (e.response && (e.response.status === 429 || e.response.status ===
-
-
-
-
-        return 'Ai is on a cooldown for the rest of the day. Either provide your API key or wait for tomorrow. Check ai.google.dev for apikeys';
-      }
+        return `The hoster/bot owner/the used host isn't supported by gemini.`;
+      } else if (e.response && (e.response.status === 429 || e.response.status === 403)) {
+        return 'Ai is on a cooldown for the rest of the day. Either provide your own API key or wait for tomorrow. Check ai.google.dev for free apikeys';
+      } else if (e.message === '[GoogleGenerativeAI Error]: Candidate was blocked due to SAFETY') {
+        console.error(e);
+        return `Due to safety enabled by gemini you have been blocked.`;
       } else {
         console.error(e);
         return `Try again later please... Either API is on a cooldown or an internal server error has occurred. If issue persists please contact the bot developer or owner of the npm package.`;