apexify.js 4.4.35 → 4.5.0
- package/README.md +26 -199
- package/dist/ai/ApexAI.d.ts +18 -5
- package/dist/ai/ApexAI.d.ts.map +1 -1
- package/dist/ai/ApexAI.js +33 -170
- package/dist/ai/ApexAI.js.map +1 -1
- package/dist/ai/ApexModules.d.ts.map +1 -1
- package/dist/ai/ApexModules.js +37 -125
- package/dist/ai/ApexModules.js.map +1 -1
- package/dist/ai/functions/draw.d.ts +6 -1
- package/dist/ai/functions/draw.d.ts.map +1 -1
- package/dist/ai/functions/draw.js +18 -59
- package/dist/ai/functions/draw.js.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts +6 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.js +2 -2
- package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
- package/dist/ai/functions/validOptions.d.ts +11 -4
- package/dist/ai/functions/validOptions.d.ts.map +1 -1
- package/dist/ai/functions/validOptions.js +15 -14
- package/dist/ai/functions/validOptions.js.map +1 -1
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts +7 -0
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js +28 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts +5 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js +30 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-flash.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-flash.js → gemini/Gemini-flash.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-flash.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-pro.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-pro.js → gemini/Gemini-pro.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-pro.js.map +1 -0
- package/dist/ai/modals-chat/gemini/config.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/config.js.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.js.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts +9 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.js +58 -0
- package/dist/ai/modals-chat/groq/chatgroq.js.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts +8 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js +64 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js.map +1 -0
- package/dist/ai/modals-chat/groq/whisper.d.ts.map +1 -0
- package/dist/ai/modals-chat/{whisper.js → groq/whisper.js} +2 -2
- package/dist/ai/modals-chat/groq/whisper.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModals.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModels.js.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts +7 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.js +88 -0
- package/dist/ai/modals-chat/others/otherModels.js.map +1 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts +8 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts.map +1 -0
- package/dist/ai/modals-chat/{bing.js → rsn/rsnChat.js} +22 -8
- package/dist/ai/modals-chat/rsn/rsnChat.js.map +1 -0
- package/dist/ai/modals-images/cartoon.js +1 -1
- package/dist/ai/modals-images/cartoon.js.map +1 -1
- package/dist/ai/modals-images/flux.js +1 -1
- package/dist/ai/modals-images/flux.js.map +1 -1
- package/dist/ai/utils.d.ts +4 -7
- package/dist/ai/utils.d.ts.map +1 -1
- package/dist/ai/utils.js +6 -12
- package/dist/ai/utils.js.map +1 -1
- package/dist/index.d.ts +11 -6
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +6 -3
- package/dist/index.js.map +1 -1
- package/lib/ai/ApexAI.ts +92 -189
- package/lib/ai/ApexModules.ts +43 -135
- package/lib/ai/functions/draw.ts +24 -68
- package/lib/ai/functions/generateVoiceResponse.ts +3 -2
- package/lib/ai/functions/validOptions.ts +23 -22
- package/lib/ai/modals-chat/electronHub/imageModels.ts +26 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/cartoon.ts +3 -3
- package/lib/ai/modals-chat/freesedgpt/chat.ts +31 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/flux.ts +3 -3
- package/lib/ai/modals-chat/{Gemini-flash.ts → gemini/Gemini-flash.ts} +2 -2
- package/lib/ai/modals-chat/{Gemini-pro.ts → gemini/Gemini-pro.ts} +2 -2
- package/lib/ai/modals-chat/groq/chatgroq.ts +68 -0
- package/lib/ai/modals-chat/groq/imageAnalyzer.ts +68 -0
- package/lib/ai/modals-chat/{whisper.ts → groq/whisper.ts} +2 -2
- package/lib/ai/modals-chat/hercai/chatModels.ts +20 -0
- package/lib/ai/modals-chat/others/otherModels.ts +99 -0
- package/lib/ai/modals-chat/{mixtral.ts → rsn/rsnChat.ts} +26 -8
- package/lib/ai/utils.ts +7 -12
- package/lib/index.ts +5 -3
- package/package.json +1 -1
- package/dist/ai/functions/imageAnalysis.d.ts +0 -2
- package/dist/ai/functions/imageAnalysis.d.ts.map +0 -1
- package/dist/ai/functions/imageAnalysis.js +0 -45
- package/dist/ai/functions/imageAnalysis.js.map +0 -1
- package/dist/ai/functions/readImagess.d.ts +0 -2
- package/dist/ai/functions/readImagess.d.ts.map +0 -1
- package/dist/ai/functions/readImagess.js +0 -45
- package/dist/ai/functions/readImagess.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.js.map +0 -1
- package/dist/ai/modals-chat/apexChat.d.ts +0 -2
- package/dist/ai/modals-chat/apexChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/apexChat.js +0 -32
- package/dist/ai/modals-chat/apexChat.js.map +0 -1
- package/dist/ai/modals-chat/bard.d.ts +0 -7
- package/dist/ai/modals-chat/bard.d.ts.map +0 -1
- package/dist/ai/modals-chat/bard.js +0 -48
- package/dist/ai/modals-chat/bard.js.map +0 -1
- package/dist/ai/modals-chat/bing.d.ts +0 -7
- package/dist/ai/modals-chat/bing.d.ts.map +0 -1
- package/dist/ai/modals-chat/bing.js.map +0 -1
- package/dist/ai/modals-chat/codellama.d.ts +0 -7
- package/dist/ai/modals-chat/codellama.d.ts.map +0 -1
- package/dist/ai/modals-chat/codellama.js +0 -48
- package/dist/ai/modals-chat/codellama.js.map +0 -1
- package/dist/ai/modals-chat/config.d.ts.map +0 -1
- package/dist/ai/modals-chat/config.js.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.d.ts +0 -2
- package/dist/ai/modals-chat/facebook-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.js +0 -20
- package/dist/ai/modals-chat/facebook-ai.js.map +0 -1
- package/dist/ai/modals-chat/geminiFast.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiFast.js.map +0 -1
- package/dist/ai/modals-chat/geminiV2.d.ts +0 -7
- package/dist/ai/modals-chat/geminiV2.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiV2.js +0 -48
- package/dist/ai/modals-chat/geminiV2.js.map +0 -1
- package/dist/ai/modals-chat/gemma.d.ts +0 -2
- package/dist/ai/modals-chat/gemma.d.ts.map +0 -1
- package/dist/ai/modals-chat/gemma.js +0 -43
- package/dist/ai/modals-chat/gemma.js.map +0 -1
- package/dist/ai/modals-chat/llama.d.ts +0 -7
- package/dist/ai/modals-chat/llama.d.ts.map +0 -1
- package/dist/ai/modals-chat/llama.js +0 -48
- package/dist/ai/modals-chat/llama.js.map +0 -1
- package/dist/ai/modals-chat/llamav2.d.ts +0 -2
- package/dist/ai/modals-chat/llamav2.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav2.js +0 -43
- package/dist/ai/modals-chat/llamav2.js.map +0 -1
- package/dist/ai/modals-chat/llamav3.d.ts +0 -2
- package/dist/ai/modals-chat/llamav3.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav3.js +0 -43
- package/dist/ai/modals-chat/llamav3.js.map +0 -1
- package/dist/ai/modals-chat/mixtral.d.ts +0 -7
- package/dist/ai/modals-chat/mixtral.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtral.js +0 -48
- package/dist/ai/modals-chat/mixtral.js.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.d.ts +0 -2
- package/dist/ai/modals-chat/mixtralv2.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.js +0 -43
- package/dist/ai/modals-chat/mixtralv2.js.map +0 -1
- package/dist/ai/modals-chat/modals.d.ts +0 -8
- package/dist/ai/modals-chat/modals.d.ts.map +0 -1
- package/dist/ai/modals-chat/modals.js +0 -16
- package/dist/ai/modals-chat/modals.js.map +0 -1
- package/dist/ai/modals-chat/openChat.d.ts +0 -7
- package/dist/ai/modals-chat/openChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/openChat.js +0 -48
- package/dist/ai/modals-chat/openChat.js.map +0 -1
- package/dist/ai/modals-chat/starChat.d.ts +0 -2
- package/dist/ai/modals-chat/starChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/starChat.js +0 -31
- package/dist/ai/modals-chat/starChat.js.map +0 -1
- package/dist/ai/modals-chat/v4.d.ts +0 -7
- package/dist/ai/modals-chat/v4.d.ts.map +0 -1
- package/dist/ai/modals-chat/v4.js +0 -48
- package/dist/ai/modals-chat/v4.js.map +0 -1
- package/dist/ai/modals-chat/whisper.d.ts.map +0 -1
- package/dist/ai/modals-chat/whisper.js.map +0 -1
- package/dist/ai/modals-chat/yi-ai.d.ts +0 -2
- package/dist/ai/modals-chat/yi-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/yi-ai.js +0 -40
- package/dist/ai/modals-chat/yi-ai.js.map +0 -1
- package/lib/ai/functions/imageAnalysis.ts +0 -41
- package/lib/ai/modals-chat/apexChat.ts +0 -31
- package/lib/ai/modals-chat/bard.ts +0 -44
- package/lib/ai/modals-chat/bing.ts +0 -44
- package/lib/ai/modals-chat/codellama.ts +0 -44
- package/lib/ai/modals-chat/facebook-ai.ts +0 -14
- package/lib/ai/modals-chat/geminiV2.ts +0 -44
- package/lib/ai/modals-chat/gemma.ts +0 -35
- package/lib/ai/modals-chat/llama.ts +0 -44
- package/lib/ai/modals-chat/llamav2.ts +0 -35
- package/lib/ai/modals-chat/llamav3.ts +0 -35
- package/lib/ai/modals-chat/mixtralv2.ts +0 -35
- package/lib/ai/modals-chat/modals.ts +0 -8
- package/lib/ai/modals-chat/openChat.ts +0 -44
- package/lib/ai/modals-chat/starChat.ts +0 -31
- package/lib/ai/modals-chat/v4.ts +0 -44
- package/lib/ai/modals-chat/yi-ai.ts +0 -40
- /package/dist/ai/modals-chat/{Gemini-flash.d.ts → gemini/Gemini-flash.d.ts} +0 -0
- /package/dist/ai/modals-chat/{Gemini-pro.d.ts → gemini/Gemini-pro.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.d.ts → gemini/config.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.js → gemini/config.js} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.d.ts → gemini/geminiFast.d.ts} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.js → gemini/geminiFast.js} +0 -0
- /package/dist/ai/modals-chat/{whisper.d.ts → groq/whisper.d.ts} +0 -0
- /package/lib/ai/modals-chat/{config.ts → gemini/config.ts} +0 -0
- /package/lib/ai/modals-chat/{geminiFast.ts → gemini/geminiFast.ts} +0 -0
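The `Options` interface for `ApexAI` gains per-provider API keys and renamed model fields in this release (see the `ApexAI.ts` diff below). The following TypeScript sketch shows how a 4.5.0-style call could be wired up; the `apexify.js` import path, the Discord client setup, and the environment-variable names are illustrative assumptions, while the option names (`chat.chatModel`, `chat.instruction`, `chat.Api_Keys`, `imagine.imageModel`, `imagine.ApiKeys`, `voice.textVoice.voiceModel`) and the `ApexAI(message, options)` signature are taken from the diff.

```ts
// Sketch only: option shapes come from the Options interface in the ApexAI.ts diff below.
// The "apexify.js" import path and the messageCreate wiring are assumptions for illustration.
import { Client, GatewayIntentBits, Message } from "discord.js";
import { ApexAI } from "apexify.js"; // assumed re-export via package/lib/index.ts

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
  ],
});

client.on("messageCreate", async (message: Message) => {
  if (message.author.bot) return;

  await ApexAI(message, {
    chat: {
      enable: true,
      chatModel: "v3",                        // default shown in the diff
      instruction: "",                        // new in 4.5.0
      Api_Keys: {                             // new per-provider keys for chat
        groq_API: process.env.GROQ_API_KEY,
        geminiAPI: process.env.GEMINI_API_KEY,
      },
    },
    imagine: {
      enable: true,
      imageModel: "prodia",                   // default shown in the diff
      ApiKeys: {                              // new per-provider keys for image generation
        prodiaAPI: process.env.PRODIA_API_KEY,
      },
    },
    voice: {
      textVoice: {
        enable: false,
        voiceModel: "google",                 // new field, default shown in the diff
      },
    },
  });
});

client.login(process.env.DISCORD_TOKEN);
```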
package/lib/ai/ApexAI.ts
CHANGED
@@ -1,18 +1,14 @@
 import { Hercai } from "hercai";
-import fs from "fs";
-import path from "path";
 import {
   imageReader,
   toDraw,
   aiImagine,
   aiVoice,
   typeWriter,
-
-  v4,
-  openChat,
-  llamaChat,
-  mixtral,
+  groqAnalyzer,
   readFile,
+  geminiPro,
+  geminiFlash
 } from "./utils";
 import {
   ModalBuilder,
@@ -20,28 +16,32 @@ import {
   TextInputStyle,
   ActionRowBuilder,
   Message,
-  PermissionResolvable
-  TextChannel
+  PermissionResolvable
 } from "discord.js";
 import { filters } from "./buttons/tools";
 import { imageTools } from "./buttons/drawMenu";
-import {
-import {
+import { whisper } from "./modals-chat/groq/whisper";
+import { groqChatModels, hercChatModels, otherChatModel, rsnChatModels } from "./functions/validOptions";
+import { chatHercai } from "./modals-chat/hercai/chatModels";
+import { rsnAPI } from "./modals-chat/rsn/rsnChat";
+import { otherChatModels } from "./modals-chat/others/otherModels";
+import { chatGroq } from "./modals-chat/groq/chatgroq";
+import { gpt4o } from "./modals-chat/freesedgpt/chat";
 
 export interface Options {
   /**
    * Configuration options related to voice functionality.
    * @param voice.textVoice Configuration options for text-to-voice functionality.
    * @param voice.textVoice.enable Whether text-to-voice functionality is enabled.
-   * @param voice.textVoice.
-   * @param voice.textVoice.voice_code The voice code only for (apexAI and zenithAI
-   * @param voice.textVoice.apiKey The API key for accessing the voice service only for (apexAI and zenithAI
-   * @param voice.textVoice.type The type of voice only for (apexAI and zenithAI
+   * @param voice.textVoice.voiceModel The voice model to be used.
+   * @param voice.textVoice.voice_code The voice code only for (apexAI and zenithAI model).
+   * @param voice.textVoice.apiKey The API key for accessing the voice service only for (apexAI and zenithAI model).
+   * @param voice.textVoice.type The type of voice only for (apexAI and zenithAI model).
    */
   voice?: {
     textVoice?: {
       enable?: boolean;
-
+      voiceModel?: string;
       voice_code?: string;
       apiKey?: string;
       type?: string;
@@ -51,26 +51,32 @@ export interface Options {
    * Configuration options related to image generation.
    * @param imagine.enable Whether image generation is enabled.
    * @param imagine.drawTrigger The trigger phrases for initiating image generation.
-   * @param imagine.imageModal The
+   * @param imagine.imageModal The model for the image generation.
    * @param imagine.numOfImages The number of images to generate.
    * @param imagine.nsfw Configuration options for NSFW filtering.
    * @param imagine.nsfw.enable Whether NSFW filtering is enabled.
    * @param imagine.nsfw.keywords Keywords for NSFW filtering.
    * @param imagine.enhancer Configuration options for image enhancement.
    * @param imagine.enhancer.enable Whether image enhancement is enabled (rewrites your prompt in more descriptive way).
-   * @param imagine.enhancer.enhancerModal The
-   * @param imagine.enhancer.cfg_scale The scale for image enhancement only for (Prodia
-   * @param imagine.enhancer.steps The number of enhancement steps only for (Prodia
-   * @param imagine.enhancer.seed The seed for image enhancement only for (Prodia
-   * @param imagine.enhancer.imgStyle The style of the image only for (Prodia
-   * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia
-   * @param imagine.enhancer.sampler The sampler for image enhancement only for (Prodia
+   * @param imagine.enhancer.enhancerModal The model for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.cfg_scale The scale for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.steps The number of enhancement steps only for (Prodia models).
+   * @param imagine.enhancer.seed The seed for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.imgStyle The style of the image only for (Prodia models).
+   * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.sampler The sampler for image enhancement only for (Prodia models).
    */
   imagine?: {
     enable?: boolean;
     drawTrigger?: string[];
-
+    imageModel?: string;
     numOfImages?: number;
+    ApiKeys?: {
+      groqAPI?: string;
+      rsnAPIkey?: string;
+      prodiaAPI?: string;
+      freesedGPTApi?: string;
+    };
     nsfw?: {
       enable?: boolean;
       keywords?: string[];
@@ -92,7 +98,7 @@ export interface Options {
   };
   /**
    * Configuration options related to chat functionality.
-   * @param chat.
+   * @param chat.chatModel The chat model to be used.
    * @param chat.readFiles Whether to read files.
    * @param chat.readImages Whether to read images.
    * @param chat.personality The personality for the chat.
@@ -109,11 +115,18 @@ export interface Options {
   chat?: {
     enable?: boolean;
     returnChat?: Boolean
-
+    chatModel?: string;
     readFiles?: boolean;
     readImages?: boolean;
     personality?: string | any;
-
+    instruction?: string;
+    Api_Keys?: {
+      groq_API?: string;
+      rsn_API?: string;
+      prodia_API?: string;
+      geminiAPI?: string;
+      freesedGPT_Api?: string;
+    };
     lang?: string;
     memory?: {
       memoryOn?: boolean;
@@ -196,16 +209,22 @@ export async function ApexAI (message: Message, aiOptions: Options) {
    voice: {
      textVoice: {
        enable: textVoiceEnable = false,
-
+       voiceModel: textVoiceModel = "google",
        voice_code: textVoiceCode = "en-US-3",
        apiKey: textVoiceApiKey = "",
        type: textVoiceType = "b"
      } = {}
    } = {},
    imagine: {
+     ApiKeys: {
+       groqAPI = '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',
+       rsnAPIkey = 'rsnai_SbLbFcwdT2h2KoYet2LS0F34',
+       prodiaAPI = 'eaebff6e-c7b2-477c-8edd-9aa91becf1e3',
+       freesedGPTApi = null
+     } = {},
      enable: imagineEnable = false,
      drawTrigger = ["create", "رسم"],
-
+     imageModel = "prodia",
      numOfImages = 2,
      nsfw: {
        enable: nsfwEnabled = false,
@@ -228,11 +247,17 @@ export async function ApexAI (message: Message, aiOptions: Options) {
    } = {},
    chat: {
      enable: chatEnable = true,
-
+     chatModel = "v3",
      readFiles = false,
      readImages = false,
      returnChat = true,
-
+     instruction = '',
+     Api_Keys: {
+       groq_API = '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',
+       rsn_API = 'rsnai_SbLbFcwdT2h2KoYet2LS0F34',
+       geminiAPI = null,
+       freesedGPT_Api = null,
+     } = {},
      personality = null,
      lang = 'en',
      memory: {
@@ -318,7 +343,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
       if (contentType.startsWith('audio/')) {
         if (voiceSize > (25 * 1024 * 1024)) return 'Voice/Audio file uploaded has is bigger than 25MB please decrease its size and resend it.'
-        usermsg
+        usermsg += await whisper(usermsg, url, lang)
       }
     } catch (e: any) {}
   });
@@ -331,9 +356,11 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
   if (attachment && validExtensions.test(attachment?.name)) {
     if (imgURL && !readImages) {
+      usermsg += `-# This is the explanation/analyzing of image provided.\n\n`;
       usermsg += await imageReader(imgURL);
     } else if (imgURL && readImages) {
-      usermsg +=
+      usermsg += `-# This is the explanation/analyzing of image provided.\n\n`;
+      usermsg += await groqAnalyzer({ imgURL, ApiKey: groqAPI as string, prompt: usermsg });
     }
   }
 
@@ -353,7 +380,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
     }
   }
 
-  if (aiOptions.chat && readFiles &&
+  if (aiOptions.chat && readFiles && chatModel !== ('gemini-pro' || 'gemini-flash')) {
     if (message.attachments?.size > 0) {
       if (attachment?.name.endsWith('.pdf')) {
         const pdfContent = await readFile(attachment.url, 'pdf');
@@ -403,9 +430,11 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
     if (fetchedMessage.attachments && validExtensions.test(fetchedMessage.attachments.name)) {
       if (imgURL && !readImages) {
+        usermsg += `-# This is the explanation/analyzing of previously image provided.\n\n`;
        replied += await imageReader(fetchedMessage.attachments?.first().url);
       } else if (imgURL && readImages) {
-        usermsg +=
+        usermsg += `-# This is the explanation/analyzing of previously image provided.\n\n`;
+        usermsg += await groqAnalyzer({ imgURL, ApiKey: groqAPI as string, prompt: usermsg });
       }
     }
     usermsg = `${usermsg}\n\n Read previous message: ${replied}`;
@@ -425,7 +454,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
   const drawValid: any = aiOptions.imagine && imagineEnable && toDraw(usermsg, drawTrigger);
   const number = numOfImages;
-  const
+  const model = imageModel;
 
   if (drawValid) {
     const matchingTrigger = drawTrigger.find(trigger => usermsg.startsWith(trigger));
@@ -445,13 +474,14 @@ export async function ApexAI (message: Message, aiOptions: Options) {
       number,
       usermsg,
       hercai,
-
+      model,
      nsfwEnabled,
      nsfwKeyWords,
      deepCheck,
      aiOptions.imagine?.enhancer,
      buttons,
-      RespondMessage
+      RespondMessage,
+      { rsnAPI: rsnAPIkey, groqAPI, prodiaAPI, freesedGPTApi: freesedGPTApi as string }
     );
 
   } else if (aiOptions.voice?.textVoice?.enable) {
@@ -463,9 +493,9 @@ export async function ApexAI (message: Message, aiOptions: Options) {
       usermsg,
       hercai,
       drawValid,
-
-
-
+      model,
+      chatModel,
+      textVoiceModel,
       textVoiceCode,
       textVoiceApiKey,
       textVoiceType,
@@ -474,7 +504,8 @@ export async function ApexAI (message: Message, aiOptions: Options) {
       deepCheck,
       aiOptions.imagine?.enhancer,
       buttons,
-      RespondMessage
+      RespondMessage,
+      { rsnAPI: rsnAPIkey, groqAPI, prodiaAPI, freesedGPTApi: freesedGPTApi as string}
     );
   }
 
@@ -494,24 +525,16 @@ export async function ApexAI (message: Message, aiOptions: Options) {
     }
   } else if (chatEnable) {
     try {
-      if (
-
-      } else if (
-
-      } else if (
-
-      } else if (
-
-
-
-      } else if (chatModal === 'openChat') {
-        response = await openChat({ API_KEY, prompt: usermsg });
-      } else if (chatModal === 'llamaChat') {
-        response = await llamaChat({ API_KEY, prompt: usermsg });
-      } else if (chatModal === 'mixtral') {
-        response = await mixtral({ API_KEY, prompt: usermsg });
-      } else if (chatModal === 'gemini-flash') {
-        response = await geminiFlash(
+      if (otherChatModel.includes(chatModel)) {
+        response = await otherChatModels({ modelName: chatModel as any, prompt: usermsg });
+      } else if (rsnChatModels.includes(chatModel)) {
+        response = await rsnAPI({ API_KEY: rsn_API as string, prompt: usermsg, apiName: chatModel });
+      } else if (groqChatModels.includes(chatModel)) {
+        response = await chatGroq({ API_KEY: groqAPI as string | undefined, prompt: usermsg, apiName: chatModel, instruction})
+      } else if (chatModel === 'gemini-flash' || chatModel === 'gemini-pro') {
+        const geminiFunction = chatModel === 'gemini-flash' ? geminiFlash : geminiPro;
+
+        response = await geminiFunction(
           {
             userId: message.author.id,
             serverName: message.guild?.name as string,
@@ -523,137 +546,17 @@ export async function ApexAI (message: Message, aiOptions: Options) {
          },
          {
            userMsg: usermsg,
-            API_KEY:
+            API_KEY: geminiAPI,
            AiPersonality: personality
          }
        );
-      } else if (
-        response = await
-
-
-
-
-
-            channelName: message.channel.name,
-            attachment: attachment,
-            db: memoryOn
-          },
-          {
-            userMsg: usermsg,
-            API_KEY: API_KEY,
-            AiPersonality: personality
-          }
-        );
-      } else if (chatModal === 'v3' || chatModal === 'v3-32k' || chatModal === 'turbo' || chatModal === 'turbo-16k' || chatModal === 'gemini') {
-        if (!memoryOn) {
-
-          let personalityString = `no specific personality you are just an ai chat bot in discord server called ${message.guild.name} and speaking at channel ${message.channel.id} responding to users response who is ${message.author.username}`
-          if (personality) {
-            const personalityFilePath = path.join(process.cwd(), personality);
-            const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-            personalityString = personalityContent.split('\n').join(' ');
-          }
-          response = await hercai.question({
-            model: chatModal,
-            content: usermsg,
-            personality: personalityString
-          });
-          response = response.reply;
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'llama') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'llama3-70b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'llama-v2') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'llama3-8b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'gemma') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'gemma-7b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'gemma-v2') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'gemma2-9b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'mixtral-v2') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'mixtral-8x7b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else {
-        throw new Error('Invalid chat modal. Check documentation for valid chat modals.')
-      }
+      } else if (hercChatModels.includes(chatModel)) {
+        response = await chatHercai(usermsg, chatModel as any, instruction, { enable: memoryOn, id: memoryId });
+      } else if (chatModel === 'gpt-4o') {
+        response = await gpt4o({ ApiKey: freesedGPT_Api as string, prompt: usermsg})
+      } else {
+        throw new Error('Invalid chat model. Check documentation for valid chat models.')
+      }
     } catch (error: any) {
       if (msgType === 'reply') {
        if (error.response && error.response.status === 429) {