apexify.js 4.4.36 → 4.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -199
- package/dist/ai/ApexAI.d.ts +18 -6
- package/dist/ai/ApexAI.d.ts.map +1 -1
- package/dist/ai/ApexAI.js +33 -170
- package/dist/ai/ApexAI.js.map +1 -1
- package/dist/ai/ApexModules.d.ts.map +1 -1
- package/dist/ai/ApexModules.js +37 -125
- package/dist/ai/ApexModules.js.map +1 -1
- package/dist/ai/functions/draw.d.ts +6 -1
- package/dist/ai/functions/draw.d.ts.map +1 -1
- package/dist/ai/functions/draw.js +18 -59
- package/dist/ai/functions/draw.js.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts +6 -1
- package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
- package/dist/ai/functions/generateVoiceResponse.js +2 -2
- package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
- package/dist/ai/functions/validOptions.d.ts +11 -4
- package/dist/ai/functions/validOptions.d.ts.map +1 -1
- package/dist/ai/functions/validOptions.js +15 -14
- package/dist/ai/functions/validOptions.js.map +1 -1
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts +7 -0
- package/dist/ai/modals-chat/electronHub/imageModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js +28 -0
- package/dist/ai/modals-chat/electronHub/imageModels.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/cartoon.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts +5 -0
- package/dist/ai/modals-chat/freesedgpt/chat.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js +30 -0
- package/dist/ai/modals-chat/freesedgpt/chat.js.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts +2 -0
- package/dist/ai/modals-chat/freesedgpt/flux.d.ts.map +1 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js +26 -0
- package/dist/ai/modals-chat/freesedgpt/flux.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-flash.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-flash.js → gemini/Gemini-flash.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-flash.js.map +1 -0
- package/dist/ai/modals-chat/gemini/Gemini-pro.d.ts.map +1 -0
- package/dist/ai/modals-chat/{Gemini-pro.js → gemini/Gemini-pro.js} +2 -2
- package/dist/ai/modals-chat/gemini/Gemini-pro.js.map +1 -0
- package/dist/ai/modals-chat/gemini/config.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/config.js.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.d.ts.map +1 -0
- package/dist/ai/modals-chat/gemini/geminiFast.js.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts +9 -0
- package/dist/ai/modals-chat/groq/chatgroq.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/chatgroq.js +58 -0
- package/dist/ai/modals-chat/groq/chatgroq.js.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts +8 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts.map +1 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js +64 -0
- package/dist/ai/modals-chat/groq/imageAnalyzer.js.map +1 -0
- package/dist/ai/modals-chat/groq/whisper.d.ts.map +1 -0
- package/dist/ai/modals-chat/{whisper.js → groq/whisper.js} +2 -2
- package/dist/ai/modals-chat/groq/whisper.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModals.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModals.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModals.js.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts +7 -0
- package/dist/ai/modals-chat/hercai/chatModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/hercai/chatModels.js +23 -0
- package/dist/ai/modals-chat/hercai/chatModels.js.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts +7 -0
- package/dist/ai/modals-chat/others/otherModels.d.ts.map +1 -0
- package/dist/ai/modals-chat/others/otherModels.js +88 -0
- package/dist/ai/modals-chat/others/otherModels.js.map +1 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts +8 -0
- package/dist/ai/modals-chat/rsn/rsnChat.d.ts.map +1 -0
- package/dist/ai/modals-chat/{bing.js → rsn/rsnChat.js} +22 -8
- package/dist/ai/modals-chat/rsn/rsnChat.js.map +1 -0
- package/dist/ai/modals-images/cartoon.js +1 -1
- package/dist/ai/modals-images/cartoon.js.map +1 -1
- package/dist/ai/modals-images/flux.js +1 -1
- package/dist/ai/modals-images/flux.js.map +1 -1
- package/dist/ai/utils.d.ts +4 -7
- package/dist/ai/utils.d.ts.map +1 -1
- package/dist/ai/utils.js +6 -12
- package/dist/ai/utils.js.map +1 -1
- package/dist/index.d.ts +11 -6
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +6 -3
- package/dist/index.js.map +1 -1
- package/lib/ai/ApexAI.ts +90 -191
- package/lib/ai/ApexModules.ts +43 -135
- package/lib/ai/functions/draw.ts +23 -68
- package/lib/ai/functions/generateVoiceResponse.ts +2 -2
- package/lib/ai/functions/validOptions.ts +23 -22
- package/lib/ai/modals-chat/electronHub/imageModels.ts +26 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/cartoon.ts +3 -3
- package/lib/ai/modals-chat/freesedgpt/chat.ts +31 -0
- package/lib/ai/{modals-images → modals-chat/freesedgpt}/flux.ts +3 -3
- package/lib/ai/modals-chat/{Gemini-flash.ts → gemini/Gemini-flash.ts} +2 -2
- package/lib/ai/modals-chat/{Gemini-pro.ts → gemini/Gemini-pro.ts} +2 -2
- package/lib/ai/modals-chat/groq/chatgroq.ts +68 -0
- package/lib/ai/modals-chat/groq/imageAnalyzer.ts +68 -0
- package/lib/ai/modals-chat/{whisper.ts → groq/whisper.ts} +2 -2
- package/lib/ai/modals-chat/hercai/chatModels.ts +20 -0
- package/lib/ai/modals-chat/others/otherModels.ts +99 -0
- package/lib/ai/modals-chat/{mixtral.ts → rsn/rsnChat.ts} +26 -8
- package/lib/ai/utils.ts +7 -12
- package/lib/index.ts +5 -3
- package/package.json +1 -1
- package/dist/ai/functions/imageAnalysis.d.ts +0 -2
- package/dist/ai/functions/imageAnalysis.d.ts.map +0 -1
- package/dist/ai/functions/imageAnalysis.js +0 -45
- package/dist/ai/functions/imageAnalysis.js.map +0 -1
- package/dist/ai/functions/readImagess.d.ts +0 -2
- package/dist/ai/functions/readImagess.d.ts.map +0 -1
- package/dist/ai/functions/readImagess.js +0 -45
- package/dist/ai/functions/readImagess.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-flash.js.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.d.ts.map +0 -1
- package/dist/ai/modals-chat/Gemini-pro.js.map +0 -1
- package/dist/ai/modals-chat/apexChat.d.ts +0 -2
- package/dist/ai/modals-chat/apexChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/apexChat.js +0 -32
- package/dist/ai/modals-chat/apexChat.js.map +0 -1
- package/dist/ai/modals-chat/bard.d.ts +0 -7
- package/dist/ai/modals-chat/bard.d.ts.map +0 -1
- package/dist/ai/modals-chat/bard.js +0 -48
- package/dist/ai/modals-chat/bard.js.map +0 -1
- package/dist/ai/modals-chat/bing.d.ts +0 -7
- package/dist/ai/modals-chat/bing.d.ts.map +0 -1
- package/dist/ai/modals-chat/bing.js.map +0 -1
- package/dist/ai/modals-chat/codellama.d.ts +0 -7
- package/dist/ai/modals-chat/codellama.d.ts.map +0 -1
- package/dist/ai/modals-chat/codellama.js +0 -48
- package/dist/ai/modals-chat/codellama.js.map +0 -1
- package/dist/ai/modals-chat/config.d.ts.map +0 -1
- package/dist/ai/modals-chat/config.js.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.d.ts +0 -2
- package/dist/ai/modals-chat/facebook-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/facebook-ai.js +0 -20
- package/dist/ai/modals-chat/facebook-ai.js.map +0 -1
- package/dist/ai/modals-chat/geminiFast.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiFast.js.map +0 -1
- package/dist/ai/modals-chat/geminiV2.d.ts +0 -7
- package/dist/ai/modals-chat/geminiV2.d.ts.map +0 -1
- package/dist/ai/modals-chat/geminiV2.js +0 -48
- package/dist/ai/modals-chat/geminiV2.js.map +0 -1
- package/dist/ai/modals-chat/gemma.d.ts +0 -2
- package/dist/ai/modals-chat/gemma.d.ts.map +0 -1
- package/dist/ai/modals-chat/gemma.js +0 -43
- package/dist/ai/modals-chat/gemma.js.map +0 -1
- package/dist/ai/modals-chat/llama.d.ts +0 -7
- package/dist/ai/modals-chat/llama.d.ts.map +0 -1
- package/dist/ai/modals-chat/llama.js +0 -48
- package/dist/ai/modals-chat/llama.js.map +0 -1
- package/dist/ai/modals-chat/llamav2.d.ts +0 -2
- package/dist/ai/modals-chat/llamav2.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav2.js +0 -43
- package/dist/ai/modals-chat/llamav2.js.map +0 -1
- package/dist/ai/modals-chat/llamav3.d.ts +0 -2
- package/dist/ai/modals-chat/llamav3.d.ts.map +0 -1
- package/dist/ai/modals-chat/llamav3.js +0 -43
- package/dist/ai/modals-chat/llamav3.js.map +0 -1
- package/dist/ai/modals-chat/mixtral.d.ts +0 -7
- package/dist/ai/modals-chat/mixtral.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtral.js +0 -48
- package/dist/ai/modals-chat/mixtral.js.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.d.ts +0 -2
- package/dist/ai/modals-chat/mixtralv2.d.ts.map +0 -1
- package/dist/ai/modals-chat/mixtralv2.js +0 -43
- package/dist/ai/modals-chat/mixtralv2.js.map +0 -1
- package/dist/ai/modals-chat/modals.d.ts +0 -8
- package/dist/ai/modals-chat/modals.d.ts.map +0 -1
- package/dist/ai/modals-chat/modals.js +0 -16
- package/dist/ai/modals-chat/modals.js.map +0 -1
- package/dist/ai/modals-chat/openChat.d.ts +0 -7
- package/dist/ai/modals-chat/openChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/openChat.js +0 -48
- package/dist/ai/modals-chat/openChat.js.map +0 -1
- package/dist/ai/modals-chat/starChat.d.ts +0 -2
- package/dist/ai/modals-chat/starChat.d.ts.map +0 -1
- package/dist/ai/modals-chat/starChat.js +0 -31
- package/dist/ai/modals-chat/starChat.js.map +0 -1
- package/dist/ai/modals-chat/v4.d.ts +0 -7
- package/dist/ai/modals-chat/v4.d.ts.map +0 -1
- package/dist/ai/modals-chat/v4.js +0 -48
- package/dist/ai/modals-chat/v4.js.map +0 -1
- package/dist/ai/modals-chat/whisper.d.ts.map +0 -1
- package/dist/ai/modals-chat/whisper.js.map +0 -1
- package/dist/ai/modals-chat/yi-ai.d.ts +0 -2
- package/dist/ai/modals-chat/yi-ai.d.ts.map +0 -1
- package/dist/ai/modals-chat/yi-ai.js +0 -40
- package/dist/ai/modals-chat/yi-ai.js.map +0 -1
- package/lib/ai/functions/imageAnalysis.ts +0 -41
- package/lib/ai/modals-chat/apexChat.ts +0 -31
- package/lib/ai/modals-chat/bard.ts +0 -44
- package/lib/ai/modals-chat/bing.ts +0 -44
- package/lib/ai/modals-chat/codellama.ts +0 -44
- package/lib/ai/modals-chat/facebook-ai.ts +0 -14
- package/lib/ai/modals-chat/geminiV2.ts +0 -44
- package/lib/ai/modals-chat/gemma.ts +0 -35
- package/lib/ai/modals-chat/llama.ts +0 -44
- package/lib/ai/modals-chat/llamav2.ts +0 -35
- package/lib/ai/modals-chat/llamav3.ts +0 -35
- package/lib/ai/modals-chat/mixtralv2.ts +0 -35
- package/lib/ai/modals-chat/modals.ts +0 -8
- package/lib/ai/modals-chat/openChat.ts +0 -44
- package/lib/ai/modals-chat/starChat.ts +0 -31
- package/lib/ai/modals-chat/v4.ts +0 -44
- package/lib/ai/modals-chat/yi-ai.ts +0 -40
- /package/dist/ai/modals-chat/{Gemini-flash.d.ts → gemini/Gemini-flash.d.ts} +0 -0
- /package/dist/ai/modals-chat/{Gemini-pro.d.ts → gemini/Gemini-pro.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.d.ts → gemini/config.d.ts} +0 -0
- /package/dist/ai/modals-chat/{config.js → gemini/config.js} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.d.ts → gemini/geminiFast.d.ts} +0 -0
- /package/dist/ai/modals-chat/{geminiFast.js → gemini/geminiFast.js} +0 -0
- /package/dist/ai/modals-chat/{whisper.d.ts → groq/whisper.d.ts} +0 -0
- /package/lib/ai/modals-chat/{config.ts → gemini/config.ts} +0 -0
- /package/lib/ai/modals-chat/{geminiFast.ts → gemini/geminiFast.ts} +0 -0
package/lib/ai/ApexAI.ts
CHANGED
@@ -1,18 +1,14 @@
 import { Hercai } from "hercai";
-import fs from "fs";
-import path from "path";
 import {
   imageReader,
   toDraw,
   aiImagine,
   aiVoice,
   typeWriter,
-
-  v4,
-  openChat,
-  llamaChat,
-  mixtral,
+  groqAnalyzer,
   readFile,
+  geminiPro,
+  geminiFlash
 } from "./utils";
 import {
   ModalBuilder,
@@ -20,28 +16,32 @@ import {
   TextInputStyle,
   ActionRowBuilder,
   Message,
-  PermissionResolvable
-  TextChannel
+  PermissionResolvable
 } from "discord.js";
 import { filters } from "./buttons/tools";
 import { imageTools } from "./buttons/drawMenu";
-import {
-import {
+import { whisper } from "./modals-chat/groq/whisper";
+import { groqChatModels, hercChatModels, otherChatModel, rsnChatModels } from "./functions/validOptions";
+import { chatHercai } from "./modals-chat/hercai/chatModels";
+import { rsnAPI } from "./modals-chat/rsn/rsnChat";
+import { otherChatModels } from "./modals-chat/others/otherModels";
+import { chatGroq } from "./modals-chat/groq/chatgroq";
+import { gpt4o } from "./modals-chat/freesedgpt/chat";
 
 export interface Options {
   /**
    * Configuration options related to voice functionality.
    * @param voice.textVoice Configuration options for text-to-voice functionality.
    * @param voice.textVoice.enable Whether text-to-voice functionality is enabled.
-   * @param voice.textVoice.
-   * @param voice.textVoice.voice_code The voice code only for (apexAI and zenithAI
-   * @param voice.textVoice.apiKey The API key for accessing the voice service only for (apexAI and zenithAI
-   * @param voice.textVoice.type The type of voice only for (apexAI and zenithAI
+   * @param voice.textVoice.voiceModel The voice model to be used.
+   * @param voice.textVoice.voice_code The voice code only for (apexAI and zenithAI model).
+   * @param voice.textVoice.apiKey The API key for accessing the voice service only for (apexAI and zenithAI model).
+   * @param voice.textVoice.type The type of voice only for (apexAI and zenithAI model).
    */
   voice?: {
     textVoice?: {
       enable?: boolean;
-
+      voiceModel?: string;
       voice_code?: string;
       apiKey?: string;
       type?: string;
@@ -51,27 +51,32 @@ export interface Options {
    * Configuration options related to image generation.
    * @param imagine.enable Whether image generation is enabled.
    * @param imagine.drawTrigger The trigger phrases for initiating image generation.
-   * @param imagine.imageModal The
+   * @param imagine.imageModal The model for the image generation.
    * @param imagine.numOfImages The number of images to generate.
    * @param imagine.nsfw Configuration options for NSFW filtering.
    * @param imagine.nsfw.enable Whether NSFW filtering is enabled.
    * @param imagine.nsfw.keywords Keywords for NSFW filtering.
    * @param imagine.enhancer Configuration options for image enhancement.
    * @param imagine.enhancer.enable Whether image enhancement is enabled (rewrites your prompt in more descriptive way).
-   * @param imagine.enhancer.enhancerModal The
-   * @param imagine.enhancer.cfg_scale The scale for image enhancement only for (Prodia
-   * @param imagine.enhancer.steps The number of enhancement steps only for (Prodia
-   * @param imagine.enhancer.seed The seed for image enhancement only for (Prodia
-   * @param imagine.enhancer.imgStyle The style of the image only for (Prodia
-   * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia
-   * @param imagine.enhancer.sampler The sampler for image enhancement only for (Prodia
+   * @param imagine.enhancer.enhancerModal The model for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.cfg_scale The scale for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.steps The number of enhancement steps only for (Prodia models).
+   * @param imagine.enhancer.seed The seed for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.imgStyle The style of the image only for (Prodia models).
+   * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia models).
+   * @param imagine.enhancer.sampler The sampler for image enhancement only for (Prodia models).
    */
   imagine?: {
     enable?: boolean;
     drawTrigger?: string[];
-
+    imageModel?: string;
     numOfImages?: number;
-
+    ApiKeys?: {
+      groqAPI?: string;
+      rsnAPIkey?: string;
+      prodiaAPI?: string;
+      freesedGPTApi?: string;
+    };
     nsfw?: {
       enable?: boolean;
       keywords?: string[];
@@ -93,7 +98,7 @@ export interface Options {
   };
   /**
    * Configuration options related to chat functionality.
-   * @param chat.
+   * @param chat.chatModel The chat model to be used.
   * @param chat.readFiles Whether to read files.
   * @param chat.readImages Whether to read images.
   * @param chat.personality The personality for the chat.
@@ -110,11 +115,18 @@ export interface Options {
   chat?: {
     enable?: boolean;
     returnChat?: Boolean
-
+    chatModel?: string;
     readFiles?: boolean;
     readImages?: boolean;
     personality?: string | any;
-
+    instruction?: string;
+    Api_Keys?: {
+      groq_API?: string;
+      rsn_API?: string;
+      prodia_API?: string;
+      geminiAPI?: string;
+      freesedGPT_Api?: string;
+    };
     lang?: string;
     memory?: {
       memoryOn?: boolean;
@@ -197,17 +209,22 @@ export async function ApexAI (message: Message, aiOptions: Options) {
     voice: {
       textVoice: {
         enable: textVoiceEnable = false,
-
+        voiceModel: textVoiceModel = "google",
         voice_code: textVoiceCode = "en-US-3",
         apiKey: textVoiceApiKey = "",
         type: textVoiceType = "b"
       } = {}
     } = {},
     imagine: {
-
+      ApiKeys: {
+        groqAPI = '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',
+        rsnAPIkey = 'rsnai_SbLbFcwdT2h2KoYet2LS0F34',
+        prodiaAPI = 'eaebff6e-c7b2-477c-8edd-9aa91becf1e3',
+        freesedGPTApi = null
+      } = {},
       enable: imagineEnable = false,
       drawTrigger = ["create", "رسم"],
-
+      imageModel = "prodia",
       numOfImages = 2,
       nsfw: {
         enable: nsfwEnabled = false,
@@ -230,11 +247,17 @@ export async function ApexAI (message: Message, aiOptions: Options) {
     } = {},
     chat: {
       enable: chatEnable = true,
-
+      chatModel = "v3",
       readFiles = false,
       readImages = false,
       returnChat = true,
-
+      instruction = '',
+      Api_Keys: {
+        groq_API = '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',
+        rsn_API = 'rsnai_SbLbFcwdT2h2KoYet2LS0F34',
+        geminiAPI = null,
+        freesedGPT_Api = null,
+      } = {},
       personality = null,
       lang = 'en',
       memory: {
@@ -320,7 +343,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
       if (contentType.startsWith('audio/')) {
         if (voiceSize > (25 * 1024 * 1024)) return 'Voice/Audio file uploaded has is bigger than 25MB please decrease its size and resend it.'
-        usermsg
+        usermsg += await whisper(usermsg, url, lang)
       }
     } catch (e: any) {}
   });
@@ -333,9 +356,11 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
   if (attachment && validExtensions.test(attachment?.name)) {
     if (imgURL && !readImages) {
+      usermsg += `-# This is the explanation/analyzing of image provided.\n\n`;
       usermsg += await imageReader(imgURL);
     } else if (imgURL && readImages) {
-      usermsg +=
+      usermsg += `-# This is the explanation/analyzing of image provided.\n\n`;
+      usermsg += await groqAnalyzer({ imgURL, ApiKey: groqAPI as string, prompt: usermsg });
     }
   }
 
@@ -355,7 +380,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
     }
   }
 
-  if (aiOptions.chat && readFiles &&
+  if (aiOptions.chat && readFiles && chatModel !== ('gemini-pro' || 'gemini-flash')) {
     if (message.attachments?.size > 0) {
       if (attachment?.name.endsWith('.pdf')) {
         const pdfContent = await readFile(attachment.url, 'pdf');
@@ -405,9 +430,11 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
       if (fetchedMessage.attachments && validExtensions.test(fetchedMessage.attachments.name)) {
         if (imgURL && !readImages) {
+          usermsg += `-# This is the explanation/analyzing of previously image provided.\n\n`;
           replied += await imageReader(fetchedMessage.attachments?.first().url);
         } else if (imgURL && readImages) {
-          usermsg +=
+          usermsg += `-# This is the explanation/analyzing of previously image provided.\n\n`;
+          usermsg += await groqAnalyzer({ imgURL, ApiKey: groqAPI as string, prompt: usermsg });
         }
       }
       usermsg = `${usermsg}\n\n Read previous message: ${replied}`;
@@ -427,7 +454,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
 
   const drawValid: any = aiOptions.imagine && imagineEnable && toDraw(usermsg, drawTrigger);
   const number = numOfImages;
-  const
+  const model = imageModel;
 
   if (drawValid) {
     const matchingTrigger = drawTrigger.find(trigger => usermsg.startsWith(trigger));
@@ -447,14 +474,14 @@ export async function ApexAI (message: Message, aiOptions: Options) {
       number,
       usermsg,
       hercai,
-
+      model,
       nsfwEnabled,
       nsfwKeyWords,
       deepCheck,
       aiOptions.imagine?.enhancer,
       buttons,
       RespondMessage,
-
+      { rsnAPI: rsnAPIkey, groqAPI, prodiaAPI, freesedGPTApi: freesedGPTApi as string }
     );
 
   } else if (aiOptions.voice?.textVoice?.enable) {
@@ -466,9 +493,9 @@ export async function ApexAI (message: Message, aiOptions: Options) {
       usermsg,
       hercai,
       drawValid,
-
-
-
+      model,
+      chatModel,
+      textVoiceModel,
       textVoiceCode,
       textVoiceApiKey,
       textVoiceType,
@@ -478,7 +505,7 @@ export async function ApexAI (message: Message, aiOptions: Options) {
       aiOptions.imagine?.enhancer,
       buttons,
       RespondMessage,
-
+      { rsnAPI: rsnAPIkey, groqAPI, prodiaAPI, freesedGPTApi: freesedGPTApi as string}
     );
   }
 
@@ -498,24 +525,16 @@ export async function ApexAI (message: Message, aiOptions: Options) {
     }
   } else if (chatEnable) {
     try {
-      if (
-
-      } else if (
-
-      } else if (
-
-      } else if (
-
-
-
-      } else if (chatModal === 'openChat') {
-        response = await openChat({ API_KEY, prompt: usermsg });
-      } else if (chatModal === 'llamaChat') {
-        response = await llamaChat({ API_KEY, prompt: usermsg });
-      } else if (chatModal === 'mixtral') {
-        response = await mixtral({ API_KEY, prompt: usermsg });
-      } else if (chatModal === 'gemini-flash') {
-        response = await geminiFlash(
+      if (otherChatModel.includes(chatModel)) {
+        response = await otherChatModels({ modelName: chatModel as any, prompt: usermsg });
+      } else if (rsnChatModels.includes(chatModel)) {
+        response = await rsnAPI({ API_KEY: rsn_API as string, prompt: usermsg, apiName: chatModel });
+      } else if (groqChatModels.includes(chatModel)) {
+        response = await chatGroq({ API_KEY: groqAPI as string | undefined, prompt: usermsg, apiName: chatModel, instruction})
+      } else if (chatModel === 'gemini-flash' || chatModel === 'gemini-pro') {
+        const geminiFunction = chatModel === 'gemini-flash' ? geminiFlash : geminiPro;
+
+        response = await geminiFunction(
         {
           userId: message.author.id,
           serverName: message.guild?.name as string,
@@ -527,137 +546,17 @@ export async function ApexAI (message: Message, aiOptions: Options) {
         },
         {
           userMsg: usermsg,
-          API_KEY:
+          API_KEY: geminiAPI,
           AiPersonality: personality
         }
         );
-      } else if (
-        response = await
-
-
-
-
-
-          channelName: message.channel.name,
-          attachment: attachment,
-          db: memoryOn
-        },
-        {
-          userMsg: usermsg,
-          API_KEY: API_KEY,
-          AiPersonality: personality
-        }
-        );
-      } else if (chatModal === 'v3' || chatModal === 'v3-32k' || chatModal === 'turbo' || chatModal === 'turbo-16k' || chatModal === 'gemini') {
-        if (!memoryOn) {
-
-          let personalityString = `no specific personality you are just an ai chat bot in discord server called ${message.guild.name} and speaking at channel ${message.channel.id} responding to users response who is ${message.author.username}`
-          if (personality) {
-            const personalityFilePath = path.join(process.cwd(), personality);
-            const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-            personalityString = personalityContent.split('\n').join(' ');
-          }
-          response = await hercai.question({
-            model: chatModal,
-            content: usermsg,
-            personality: personalityString
-          });
-          response = response.reply;
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'llama') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'llama3-70b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'llama-v2') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'llama3-8b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'gemma') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'gemma-7b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'gemma-v2') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'gemma2-9b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else if (chatModal === 'mixtral-v2') {
-        if (!memoryOn) {
-          const personalityFilePath = path.join(process.cwd(), personality);
-          const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
-          const personalityString = personalityContent.split('\n').join(' ');
-          response = await hercai.question({
-            model: 'mixtral-8x7b',
-            content: usermsg,
-            personality: personalityString
-          });
-        } else {
-          response = await hercai.betaQuestion({
-            content: usermsg,
-            user: memoryId
-          });
-          response = response.reply
-        }
-      } else {
-        throw new Error('Invalid chat modal. Check documentation for valid chat modals.')
-      }
+      } else if (hercChatModels.includes(chatModel)) {
+        response = await chatHercai(usermsg, chatModel as any, instruction, { enable: memoryOn, id: memoryId });
+      } else if (chatModel === 'gpt-4o') {
+        response = await gpt4o({ ApiKey: freesedGPT_Api as string, prompt: usermsg})
+      } else {
+        throw new Error('Invalid chat model. Check documentation for valid chat models.')
+      }
     } catch (error: any) {
       if (msgType === 'reply') {
         if (error.response && error.response.status === 429) {
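For orientation, here is a minimal usage sketch of the reshaped configuration surface shown in the ApexAI.ts hunks above. It is not part of the diff: the discord.js client wiring, the root-level exports (`ApexAI`, `Options`), and the placeholder key values are assumptions; only option names that appear in the 4.5.0 interface are used.

```ts
import { Client, GatewayIntentBits, Message } from "discord.js";
// Assumption: ApexAI and Options are re-exported from the package root.
import { ApexAI, Options } from "apexify.js";

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent,
  ],
});

// Option names mirror the 4.5.0 interface above; all key values are placeholders.
const aiOptions: Options = {
  chat: {
    enable: true,
    chatModel: "v3",             // default shown in the diff
    instruction: "Keep answers short.",
    readImages: true,
    Api_Keys: {
      groq_API: "YOUR_GROQ_KEY",
      geminiAPI: "YOUR_GEMINI_KEY",
    },
  },
  imagine: {
    enable: true,
    imageModel: "prodia",        // default shown in the diff
    numOfImages: 2,
    ApiKeys: {
      prodiaAPI: "YOUR_PRODIA_KEY",
    },
  },
  voice: {
    textVoice: {
      enable: false,
      voiceModel: "google",      // default shown in the diff
    },
  },
};

client.on("messageCreate", async (message: Message) => {
  if (message.author.bot) return;
  await ApexAI(message, aiOptions);
});

client.login("YOUR_BOT_TOKEN");
```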