apexify.js 4.8.2 → 4.8.3
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/dist/tsconfig.cjs.tsbuildinfo +1 -1
- package/dist/tsconfig.esm.tsbuildinfo +1 -1
- package/lib/ai/ApexAI.ts +4 -4
- package/lib/ai/ApexModules.ts +8 -8
- package/lib/ai/modals/electronHub/chatmodels.ts +1 -1
- package/lib/ai/modals/electronHub/imageModels.ts +2 -2
- package/lib/ai/modals/electronHub/speechModels.ts +1 -1
- package/lib/ai/utils.ts +3 -3
- package/lib/canvas/ApexPainter.ts +1 -1
- package/lib/canvas/utils/Background/bg.ts +1 -1
- package/lib/canvas/utils/Charts/charts.ts +1 -1
- package/lib/canvas/utils/Custom/customLines.ts +2 -2
- package/lib/canvas/utils/General/general functions.ts +1 -1
- package/lib/canvas/utils/Image/imageProperties.ts +1 -1
- package/lib/canvas/utils/Texts/textProperties.ts +1 -1
- package/lib/canvas/utils/utils.ts +8 -8
- package/lib/index.ts +5 -5
- package/lib/utils.ts +3 -3
- package/package.json +5 -5
- package/dist/cjs/ai/ApexAI.d.ts +0 -144
- package/dist/cjs/ai/ApexAI.js +0 -486
- package/dist/cjs/ai/ApexModules.d.ts +0 -52
- package/dist/cjs/ai/ApexModules.js +0 -811
- package/dist/cjs/ai/functions/readFiles.d.ts +0 -1
- package/dist/cjs/ai/functions/readFiles.js +0 -56
- package/dist/cjs/ai/functions/tokenizer.d.ts +0 -9
- package/dist/cjs/ai/functions/tokenizer.js +0 -60
- package/dist/cjs/ai/functions/validOptions.d.ts +0 -21
- package/dist/cjs/ai/functions/validOptions.js +0 -93
- package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts +0 -6
- package/dist/cjs/ai/modals/electronHub/chatmodels.js +0 -44
- package/dist/cjs/ai/modals/electronHub/imageModels.d.ts +0 -11
- package/dist/cjs/ai/modals/electronHub/imageModels.js +0 -85
- package/dist/cjs/ai/modals/electronHub/songModels.d.ts +0 -1
- package/dist/cjs/ai/modals/electronHub/songModels.js +0 -1
- package/dist/cjs/ai/modals/electronHub/speechModels.d.ts +0 -6
- package/dist/cjs/ai/modals/electronHub/speechModels.js +0 -56
- package/dist/cjs/ai/modals/electronHub/videoModels.d.ts +0 -5
- package/dist/cjs/ai/modals/electronHub/videoModels.js +0 -56
- package/dist/cjs/ai/modals/groq/chatgroq.d.ts +0 -8
- package/dist/cjs/ai/modals/groq/chatgroq.js +0 -57
- package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts +0 -7
- package/dist/cjs/ai/modals/groq/imageAnalyzer.js +0 -75
- package/dist/cjs/ai/modals/groq/whisper.d.ts +0 -4
- package/dist/cjs/ai/modals/groq/whisper.js +0 -101
- package/dist/cjs/ai/modals/hercai/chatModels.d.ts +0 -6
- package/dist/cjs/ai/modals/hercai/chatModels.js +0 -19
- package/dist/cjs/ai/utils.d.ts +0 -4
- package/dist/cjs/ai/utils.js +0 -4
- package/dist/cjs/canvas/ApexPainter.d.ts +0 -144
- package/dist/cjs/canvas/ApexPainter.js +0 -900
- package/dist/cjs/canvas/utils/Background/bg.d.ts +0 -30
- package/dist/cjs/canvas/utils/Background/bg.js +0 -151
- package/dist/cjs/canvas/utils/Charts/charts.d.ts +0 -7
- package/dist/cjs/canvas/utils/Charts/charts.js +0 -455
- package/dist/cjs/canvas/utils/Custom/customLines.d.ts +0 -2
- package/dist/cjs/canvas/utils/Custom/customLines.js +0 -105
- package/dist/cjs/canvas/utils/General/conversion.d.ts +0 -5
- package/dist/cjs/canvas/utils/General/conversion.js +0 -26
- package/dist/cjs/canvas/utils/General/general functions.d.ts +0 -38
- package/dist/cjs/canvas/utils/General/general functions.js +0 -590
- package/dist/cjs/canvas/utils/Image/imageProperties.d.ts +0 -114
- package/dist/cjs/canvas/utils/Image/imageProperties.js +0 -590
- package/dist/cjs/canvas/utils/Texts/textProperties.d.ts +0 -16
- package/dist/cjs/canvas/utils/Texts/textProperties.js +0 -154
- package/dist/cjs/canvas/utils/types.d.ts +0 -621
- package/dist/cjs/canvas/utils/types.js +0 -5
- package/dist/cjs/canvas/utils/utils.d.ts +0 -18
- package/dist/cjs/canvas/utils/utils.js +0 -17
- package/dist/cjs/index.d.ts +0 -28
- package/dist/cjs/index.js +0 -67
- package/dist/cjs/utils.d.ts +0 -4
- package/dist/cjs/utils.js +0 -4
- package/dist/esm/ai/ApexAI.d.ts +0 -144
- package/dist/esm/ai/ApexAI.js +0 -486
- package/dist/esm/ai/ApexModules.d.ts +0 -52
- package/dist/esm/ai/ApexModules.js +0 -811
- package/dist/esm/ai/functions/readFiles.d.ts +0 -1
- package/dist/esm/ai/functions/readFiles.js +0 -56
- package/dist/esm/ai/functions/tokenizer.d.ts +0 -9
- package/dist/esm/ai/functions/tokenizer.js +0 -60
- package/dist/esm/ai/functions/validOptions.d.ts +0 -21
- package/dist/esm/ai/functions/validOptions.js +0 -93
- package/dist/esm/ai/modals/electronHub/chatmodels.d.ts +0 -6
- package/dist/esm/ai/modals/electronHub/chatmodels.js +0 -44
- package/dist/esm/ai/modals/electronHub/imageModels.d.ts +0 -11
- package/dist/esm/ai/modals/electronHub/imageModels.js +0 -85
- package/dist/esm/ai/modals/electronHub/songModels.d.ts +0 -1
- package/dist/esm/ai/modals/electronHub/songModels.js +0 -1
- package/dist/esm/ai/modals/electronHub/speechModels.d.ts +0 -6
- package/dist/esm/ai/modals/electronHub/speechModels.js +0 -56
- package/dist/esm/ai/modals/electronHub/videoModels.d.ts +0 -5
- package/dist/esm/ai/modals/electronHub/videoModels.js +0 -56
- package/dist/esm/ai/modals/groq/chatgroq.d.ts +0 -8
- package/dist/esm/ai/modals/groq/chatgroq.js +0 -57
- package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts +0 -7
- package/dist/esm/ai/modals/groq/imageAnalyzer.js +0 -75
- package/dist/esm/ai/modals/groq/whisper.d.ts +0 -4
- package/dist/esm/ai/modals/groq/whisper.js +0 -101
- package/dist/esm/ai/modals/hercai/chatModels.d.ts +0 -6
- package/dist/esm/ai/modals/hercai/chatModels.js +0 -19
- package/dist/esm/ai/utils.d.ts +0 -4
- package/dist/esm/ai/utils.js +0 -4
- package/dist/esm/canvas/ApexPainter.d.ts +0 -144
- package/dist/esm/canvas/ApexPainter.js +0 -900
- package/dist/esm/canvas/utils/Background/bg.d.ts +0 -30
- package/dist/esm/canvas/utils/Background/bg.js +0 -151
- package/dist/esm/canvas/utils/Charts/charts.d.ts +0 -7
- package/dist/esm/canvas/utils/Charts/charts.js +0 -455
- package/dist/esm/canvas/utils/Custom/customLines.d.ts +0 -2
- package/dist/esm/canvas/utils/Custom/customLines.js +0 -105
- package/dist/esm/canvas/utils/General/conversion.d.ts +0 -5
- package/dist/esm/canvas/utils/General/conversion.js +0 -26
- package/dist/esm/canvas/utils/General/general functions.d.ts +0 -38
- package/dist/esm/canvas/utils/General/general functions.js +0 -590
- package/dist/esm/canvas/utils/Image/imageProperties.d.ts +0 -114
- package/dist/esm/canvas/utils/Image/imageProperties.js +0 -590
- package/dist/esm/canvas/utils/Texts/textProperties.d.ts +0 -16
- package/dist/esm/canvas/utils/Texts/textProperties.js +0 -154
- package/dist/esm/canvas/utils/types.d.ts +0 -621
- package/dist/esm/canvas/utils/types.js +0 -5
- package/dist/esm/canvas/utils/utils.d.ts +0 -18
- package/dist/esm/canvas/utils/utils.js +0 -17
- package/dist/esm/index.d.ts +0 -28
- package/dist/esm/index.js +0 -67
- package/dist/esm/utils.d.ts +0 -4
- package/dist/esm/utils.js +0 -4
- package/lib/ai/modals/electronHub/songModels.ts +0 -0
package/dist/esm/ai/ApexAI.js
DELETED
@@ -1,486 +0,0 @@
-import { groqAnalyzer, readFile, ApexImagine, ApexText2Speech, ApexListener, ApexChat } from "./utils.js";
-import { ActionRowBuilder, EmbedBuilder, AttachmentBuilder, ButtonStyle, ButtonBuilder, PermissionFlagsBits } from "discord.js";
-import { joinVoiceChannel, createAudioPlayer, createAudioResource, EndBehaviorType, AudioPlayerStatus } from "@discordjs/voice";
-import { whisper } from "./modals/groq/whisper.js";
-import { pipeline, Readable } from "stream";
-import prism from "prism-media";
-import path from "path";
-import fs from "fs";
-import { execSync } from "child_process";
-function createButtonRows(buttons) {
-    if (!Array.isArray(buttons) || buttons.length === 0)
-        return [];
-    const buttonRows = [];
-    let currentRow = new ActionRowBuilder();
-    for (const button of buttons) {
-        if (!button.label || !button.style || !button.custom_id)
-            continue;
-        const buttonComponent = new ButtonBuilder()
-            .setLabel(button.label)
-            .setStyle(button.style)
-            .setCustomId(button.custom_id);
-        if (button.emoji)
-            buttonComponent.setEmoji(button.emoji);
-        if (button.url && button.style === ButtonStyle.Link)
-            buttonComponent.setURL(button.url);
-        currentRow.addComponents(buttonComponent);
-        if (currentRow.components.length === 5) {
-            buttonRows.push(currentRow);
-            currentRow = new ActionRowBuilder();
-        }
-    }
-    if (currentRow.components.length > 0) {
-        buttonRows.push(currentRow);
-    }
-    return buttonRows.slice(0, 5);
-}
-export async function ApexAI(message, ApexOptions) {
-    if (!message.guild || message.author.bot)
-        return;
-    let prompt = message.content || "";
-    const { others, chat, voice, imagine, voiceChannel } = ApexOptions;
-    const { permissions, channel, messageType, onMention, loader, buttons } = others ?? {};
-    const { Api_Keys, typeWriting } = chat ?? {};
-    const { nsfw } = imagine ?? {};
-    if (permissions?.enable) {
-        const userRoles = message.member?.roles.cache.map((role) => role.id);
-        const hasPermission = permissions.role?.some((roleId) => userRoles?.includes(roleId)) ?? false;
-        const hasRequiredPerms = permissions.permission?.some((perm) => message.member?.permissions.has(perm)) ?? false;
-        const isBlocked = permissions.blockedUsers?.includes(message.author.id) ?? false;
-        if (!hasPermission || !hasRequiredPerms || isBlocked)
-            return;
-    }
-    if (channel?.enable && !channel.id?.includes(message.channel.id))
-        return;
-    if (onMention) {
-        const botMention = `<@${message.client.user.id}>`;
-        if (!prompt.startsWith(botMention))
-            return;
-        prompt = prompt.replace(botMention, "").trim();
-    }
-    const mainPrompt = prompt;
-    if (message.channel.isTextBased() && "sendTyping" in message.channel)
-        await message.channel.sendTyping();
-    if (loader?.enable) {
-        const loadingMessage = await message.reply({
-            content: loader.loadingMessage,
-            allowedMentions: { repliedUser: false },
-        });
-        setTimeout(() => loadingMessage.delete().catch(console.error), loader.loadingTimer || 3000);
-    }
-    if (message.attachments.size > 0) {
-        for (const attachment of message.attachments.values()) {
-            try {
-                const url = attachment.url;
-                const contentType = attachment.contentType || "";
-                const fileSize = attachment.size;
-                const maxSize = 25 * 1024 * 1024;
-                if (contentType.startsWith("audio/")) {
-                    if (fileSize > maxSize) {
-                        console.error("Audio file exceeds 25MB.");
-                        continue;
-                    }
-                    prompt += `\n> Transcribed from audio:\n${await whisper(prompt, url)}`;
-                }
-                else if (["image/png", "image/jpeg", "image/jpg", "image/webp"].includes(contentType)) {
-                    if (!Api_Keys?.groq_API) {
-                        console.error("Missing Groq API Key.");
-                        continue;
-                    }
-                    prompt += `\n> Image analysis:\n${await groqAnalyzer({ img: url, ApiKey: Api_Keys.groq_API })}`;
-                }
-                else if (attachment.name.endsWith(".pdf") || attachment.name.endsWith(".txt")) {
-                    const fileType = attachment.name.endsWith(".pdf") ? "pdf" : "txt";
-                    const fileContent = await readFile(url, fileType);
-                    prompt += `\n\n- 📜 ${attachment.name} content:\n${fileContent}`;
-                }
-                else {
-                    prompt += `\n> ❌ Unsupported file type uploaded: ${contentType}`;
-                }
-            }
-            catch (e) {
-                console.error("Error processing attachment:", e);
-            }
-        }
-    }
-    let prevReply = "";
-    if (message.reference?.messageId) {
-        const channel = message?.guild?.channels?.cache?.get(message.channel.id);
-        const messageId = message?.reference?.messageId;
-        // @ts-ignore
-        const fetchedMessage = await channel?.messages?.fetch(messageId);
-        if (fetchedMessage) {
-            prevReply += fetchedMessage.content || "";
-            const firstAttachment = fetchedMessage.attachments?.first();
-            if (firstAttachment && /\.(png|jpg|jpeg|webp)$/i.test(firstAttachment.url)) {
-                prevReply += await groqAnalyzer({
-                    img: firstAttachment.url,
-                    ApiKey: Api_Keys?.groq_API,
-                    prompt,
-                });
-                prompt += `\n- 📷 Image analysis:\n${prevReply}`;
-            }
-        }
-    }
-    if (others?.keywordResponses) {
-        for (const [keyword, response] of Object.entries(others.keywordResponses)) {
-            if (prompt.toLowerCase().includes(keyword.toLowerCase())) {
-                return await message.reply({
-                    content: response,
-                    allowedMentions: { repliedUser: false },
-                });
-            }
-        }
-    }
-    if (voiceChannel?.enable && voiceChannel.channelId && voiceChannel.joinOn?.triggeredWords) {
-        const { channelId, joinOn, personality, voiceModel } = voiceChannel;
-        const triggerDetected = joinOn.triggeredWords?.some(word => prompt.toLowerCase().startsWith(word.toLowerCase()));
-        if (triggerDetected) {
-            return handleVoiceAI(message, channelId, chat, voiceModel, personality);
-        }
-    }
-    let aiResponse = "";
-    const chatEnabled = chat?.enable ?? false;
-    const voiceEnabled = voice?.enable ?? false;
-    const imageEnabled = imagine?.enable ?? false;
-    let genImage = [];
-    const drawTrigger = imagine?.drawTrigger?.some(trigger => mainPrompt.toLowerCase().startsWith(trigger.toLowerCase()));
-    if (message.channel.isTextBased() && "sendTyping" in message.channel)
-        await message.channel.sendTyping();
-    if (chatEnabled && voiceEnabled) {
-        return await message.reply({
-            content: "❌ Error: Both Chat and Voice cannot be enabled at the same time.",
-            allowedMentions: { repliedUser: false },
-        });
-    }
-    if (chatEnabled) {
-        let instruction = chat?.instruction;
-        if (imageEnabled && drawTrigger) {
-            const enhancer = imagine?.enhancer?.enable ? imagine.enhancer : undefined;
-            let tempEnhancer;
-            if (enhancer) {
-                const enhancementRequest = `Rewrite this text below in a more descriptive way, making it clearer to be visualized correctly by an AI image generator. Use stronger words and return only the enhanced prompt—nothing more, nothing less.\n\n`;
-                tempEnhancer = await ApexChat("gpt-4-turbo", prompt, { instruction: enhancementRequest });
-            }
-            genImage = await ApexImagine(imagine?.imageModel, tempEnhancer || prompt, { width: imagine?.enhancer?.width, height: imagine?.enhancer?.height, format: imagine?.enhancer?.format,
-                quality: imagine?.enhancer?.quality, negative_prompt: imagine?.enhancer?.negative_prompt, nsfw: imagine?.nsfw?.enable,
-                nsfwWords: imagine?.nsfw?.keywords, deepCheck: imagine?.nsfw?.deepCheck, count: imagine?.numOfImages });
-            instruction += `\n\nThe image has already been generated. Do not attempt to generate or describe another image. Instead, provide feedback, thoughts, or a critique based on the given prompt:\n\n`;
-        }
-        aiResponse = await ApexChat(chat?.chatModel, prompt, { instruction, memory: chat?.memory?.memoryOn, userId: chat?.memory?.id, limit: chat?.memory?.limit, threshold: chat?.memory?.threshold });
-    }
-    else if (voiceEnabled) {
-        let instruction = chat?.instruction;
-        if (imageEnabled && drawTrigger) {
-            const enhancer = imagine?.enhancer?.enable ? imagine.enhancer : undefined;
-            let tempEnhancer;
-            if (enhancer) {
-                const enhancementRequest = `Rewrite this text below in a more descriptive way, making it clearer to be visualized correctly by an AI image generator. Use stronger words and return only the enhanced prompt—nothing more, nothing less.\n\n`;
-                tempEnhancer = await ApexChat("gpt-4-turbo", prompt, { instruction: enhancementRequest });
-            }
-            genImage = await ApexImagine(imagine?.imageModel, tempEnhancer || prompt, { width: enhancer?.width, height: enhancer?.height, format: enhancer?.format,
-                quality: enhancer?.quality, negative_prompt: enhancer?.negative_prompt, nsfw: nsfw?.enable,
-                nsfwWords: nsfw?.keywords, deepCheck: nsfw?.deepCheck, count: imagine?.numOfImages });
-            instruction += `\n\nThe image has already been generated. Do not attempt to generate or describe another image. Instead, provide feedback, thoughts, or a critique based on the given prompt:\n\n`;
-        }
-        aiResponse = await ApexChat(chat?.chatModel, prompt, { instruction, memory: chat?.memory?.memoryOn, userId: chat?.memory?.id, limit: chat?.memory?.limit, threshold: chat?.memory?.threshold });
-        aiResponse = await ApexText2Speech({ modelName: voice?.voiceModel, inputText: aiResponse, personality: voice?.characterName });
-    }
-    else if (imageEnabled && drawTrigger) {
-        const enhancer = imagine?.enhancer?.enable ? imagine.enhancer : undefined;
-        let tempEnhancer;
-        if (enhancer) {
-            const enhancementRequest = `Rewrite this text below in a more descriptive way, making it clearer to be visualized correctly by an AI image generator. Use stronger words and return only the enhanced prompt—nothing more, nothing less.\n\n${prompt}`;
-            tempEnhancer = await ApexChat("gpt-4-turbo", prompt, { instruction: enhancementRequest });
-        }
-        genImage = await ApexImagine(imagine?.imageModel, tempEnhancer || prompt, { width: enhancer?.width, height: enhancer?.height, format: enhancer?.format,
-            quality: enhancer?.quality, negative_prompt: enhancer?.negative_prompt, nsfw: nsfw?.enable,
-            nsfwWords: nsfw?.keywords, deepCheck: nsfw?.deepCheck, count: imagine?.numOfImages });
-    }
-    else {
-        return await message.reply({
-            content: "❌ Error: No valid AI features are enabled.",
-            allowedMentions: { repliedUser: false },
-        });
-    }
-    const buttonRows = createButtonRows(buttons || []);
-    const imageAttachments = [];
-    if (genImage.length > 0) {
-        for (const imageUrl of genImage) {
-            imageAttachments.push(new AttachmentBuilder(imageUrl, { name: `image-${Date.now()}.png` }));
-        }
-    }
-    let voiceAttachment = null;
-    if (voiceEnabled && aiResponse instanceof Buffer) {
-        voiceAttachment = new AttachmentBuilder(aiResponse, { name: "ai_voice.mp3" });
-    }
-    const chunkMessage = (text, maxLength = 2000) => {
-        const chunks = [];
-        while (text.length > maxLength) {
-            let sliceIndex = text.lastIndexOf("\n", maxLength);
-            if (sliceIndex === -1)
-                sliceIndex = maxLength;
-            chunks.push(text.slice(0, sliceIndex));
-            text = text.slice(sliceIndex);
-        }
-        if (text.length > 0)
-            chunks.push(text);
-        return chunks;
-    };
-    const initialContent = messageType?.intialContent ? `${messageType.intialContent} ` : "";
-    const finalText = typeof aiResponse === "string" ? `${initialContent}${aiResponse}` : "";
-    const messageChunks = chunkMessage(finalText);
-    const sendMessage = async (content, components) => {
-        if (!content.trim())
-            return;
-        if (messageType?.sendAs === "embed") {
-            const embed = new EmbedBuilder()
-                .setColor(0x0099ff)
-                .setDescription(content)
-                .setFooter({ text: "AI Response" });
-            if (messageType?.type === "send") {
-                return message.channel.send({ embeds: [embed], components });
-            }
-            else {
-                return message.reply({ embeds: [embed], allowedMentions: { repliedUser: false }, components });
-            }
-        }
-        else {
-            if (messageType?.type === "send") {
-                return message.channel.send({ content, components });
-            }
-            else {
-                return message.reply({ content, allowedMentions: { repliedUser: false }, components });
-            }
-        }
-    };
-    const sendTypeWritingMessage = async (content, components) => {
-        if (!typeWriting?.enable) {
-            return sendMessage(content, components);
-        }
-        content = content.trimStart();
-        const typingMessage = await sendMessage(content[0]);
-        let typedSentence = typingMessage?.content;
-        let i = 1;
-        while (i < content.length) {
-            typedSentence += content.slice(i, i + getStepCount(typeWriting.speed ?? 50));
-            await sleep(typeWriting.delay ?? 500);
-            await typingMessage?.edit({ content: typedSentence });
-            i += getStepCount(typeWriting.speed ?? 50);
-        }
-        if (components && components.length > 0) {
-            await typingMessage?.edit({ components });
-        }
-        return typingMessage;
-    };
-    function getStepCount(speed) {
-        const maxSteps = 120;
-        const steps = Math.min(Math.ceil(speed), maxSteps);
-        return steps > 0 ? steps : 1;
-    }
-    function sleep(ms) {
-        return new Promise((resolve) => setTimeout(resolve, ms));
-    }
-    (async () => {
-        if (imageAttachments.length > 0) {
-            await message.channel.send({ files: imageAttachments });
-        }
-        let lastSentMessage = null;
-        for (let i = 0; i < messageChunks.length; i++) {
-            const chunk = messageChunks[i].trim();
-            if (!chunk)
-                continue;
-            const isLastChunk = i === messageChunks.length - 1;
-            if (isLastChunk) {
-                const response = await sendTypeWritingMessage(chunk, buttonRows.length > 0 ? buttonRows : undefined);
-                if (response)
-                    lastSentMessage = response;
-            }
-            else {
-                await sendTypeWritingMessage(chunk);
-            }
-            if (!isLastChunk) {
-                await new Promise((resolve) => setTimeout(resolve, chat?.typeWriting?.delay ?? 500));
-            }
-        }
-        if (voiceAttachment) {
-            await message.channel.send({ files: [voiceAttachment] });
-        }
-        if (lastSentMessage && buttonRows.length > 0) {
-            await lastSentMessage.edit({ components: buttonRows }).catch(() => null);
-        }
-    })();
-}
-;
-let voiceQueue = [];
-let isProcessing = false;
-let voiceConnection = null;
-let activeUser = null;
-let isRecording = false;
-let silenceTimer = null;
-const recordingsDir = path.join(process.cwd(), "recordings");
-if (!fs.existsSync(recordingsDir)) {
-    fs.mkdirSync(recordingsDir, { recursive: true });
-}
-try {
-    execSync("ffmpeg -version > nul 2>&1");
-}
-catch (err) {
-    console.error("🚨 FFmpeg is NOT installed or not in PATH! Install it first.");
-}
-function resetSilenceTimer() {
-    if (silenceTimer)
-        clearTimeout(silenceTimer);
-}
-export async function handleVoiceAI(message, voiceChannelId, chat, modelName, personality) {
-    const guild = message.guild;
-    if (!guild)
-        return;
-    const channel = guild.channels.cache.get(voiceChannelId);
-    if (!channel || channel.type !== 2) {
-        return await message.reply(`🚫 Invalid voice channel ID: ${voiceChannelId}`);
-    }
-    const botMember = guild.members.me;
-    if (!botMember)
-        return;
-    const permissions = channel.permissionsFor(botMember);
-    if (!permissions?.has(PermissionFlagsBits.Connect) ||
-        !permissions.has(PermissionFlagsBits.Speak) ||
-        !permissions.has(PermissionFlagsBits.Stream) ||
-        !permissions.has(PermissionFlagsBits.UseVAD)) {
-        return await message.reply("🚫 I don't have the required permissions to join and speak in this voice channel.");
-    }
-    if (voiceConnection) {
-        return await message.reply("⚠️ AI is already in a voice channel.");
-    }
-    voiceConnection = joinVoiceChannel({
-        channelId: channel.id,
-        guildId: guild.id,
-        adapterCreator: guild.voiceAdapterCreator,
-        selfMute: false,
-        selfDeaf: false
-    });
-    activeUser = message.author.id;
-    captureAudio(voiceConnection, chat, modelName, personality);
-}
-function captureAudio(connection, chat, modelName, personality) {
-    const receiver = connection.receiver;
-    receiver.speaking.on("start", async (userId) => {
-        if (userId !== activeUser) {
-            activeUser = userId;
-            isRecording = false;
-        }
-        resetSilenceTimer();
-        const rawFilePath = path.join(recordingsDir, `${userId}.pcm`);
-        const wavFilePath = path.join(recordingsDir, `${userId}.wav`);
-        const opusStream = receiver.subscribe(userId, {
-            end: { behavior: EndBehaviorType.AfterSilence, duration: 2000 }
-        });
-        const pcmStream = new prism.opus.Decoder({
-            frameSize: 960,
-            channels: 1,
-            rate: 48000
-        });
-        const writeStream = fs.createWriteStream(rawFilePath);
-        pipeline(opusStream, pcmStream, writeStream, (err) => {
-            if (err) {
-                console.error("❌ Error writing PCM file:", err);
-                return;
-            }
-            convertPCMtoWAV(rawFilePath, wavFilePath, chat, modelName, personality);
-        });
-    });
-    receiver.speaking.on("end", async (userId) => {
-        if (userId === activeUser) {
-            startSilenceTimer(chat, modelName, personality);
-        }
-    });
-}
-function startSilenceTimer(chat, modelName, personality) {
-    resetSilenceTimer();
-    silenceTimer = setTimeout(() => {
-        if (voiceQueue.length > 0) {
-            const nextUser = voiceQueue.shift();
-            if (nextUser) {
-                activeUser = nextUser.userId;
-                processQueue(chat, modelName, personality);
-            }
-        }
-        else {
-            leaveVoiceChannel();
-        }
-    }, 5000);
-}
-function convertPCMtoWAV(inputPCM, outputWAV, chat, modelName, personality) {
-    if (!fs.existsSync(inputPCM) || fs.statSync(inputPCM).size === 0) {
-        return;
-    }
-    try {
-        execSync(`ffmpeg -y -f s16le -ar 48000 -ac 1 -i "${inputPCM}" -acodec pcm_s16le "${outputWAV}" > nul 2>&1`);
-        if (fs.existsSync(outputWAV)) {
-            transcribeAudio(outputWAV, chat, modelName, personality);
-        }
-    }
-    catch (error) {
-        console.error("❌ FFmpeg failed:", error);
-    }
-}
-async function transcribeAudio(filePath, chat, modelName, personality) {
-    try {
-        const transcribedText = await ApexListener({
-            filepath: filePath,
-            prompt: "Transcribe what the user said in English.",
-            lang: "en"
-        });
-        if (transcribedText.transcribe) {
-            voiceQueue.push({ userId: activeUser || "unknown", text: transcribedText.transcribe });
-            processQueue(chat, modelName, personality);
-        }
-        fs.unlinkSync(filePath);
-    }
-    catch (error) {
-        console.error("❌ Error in transcription:", error);
-    }
-}
-async function processQueue(chat, modelName, personality) {
-    if (isProcessing || voiceQueue.length === 0)
-        return;
-    isProcessing = true;
-    const { userId, text } = voiceQueue.shift();
-    activeUser = userId;
-    resetSilenceTimer();
-    try {
-        const aiResponse = await ApexChat(chat?.chatModel || "gpt-4o", text, {
-            instruction: chat.instruction,
-            memory: chat?.memory?.memoryOn,
-            userId,
-            limit: chat?.memory?.limit,
-            threshold: chat?.memory?.threshold
-        });
-        const audioBuffer = await ApexText2Speech({ inputText: aiResponse, modelName, personality });
-        if (voiceConnection) {
-            const player = createAudioPlayer();
-            const resource = createAudioResource(Readable.from(audioBuffer));
-            voiceConnection.subscribe(player);
-            player.play(resource);
-            player.on(AudioPlayerStatus.Idle, () => {
-                isProcessing = false;
-                if (voiceQueue.length > 0) {
-                    processQueue(chat, modelName, personality);
-                }
-            });
-        }
-    }
-    catch (error) {
-        console.error("❌ Error processing AI response:", error);
-        isProcessing = false;
-    }
-}
-function leaveVoiceChannel() {
-    if (voiceConnection) {
-        voiceConnection.destroy();
-        voiceConnection = null;
-        activeUser = null;
-        resetSilenceTimer();
-    }
-}
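
For context: the deleted entry point above is driven entirely by the ApexOptions object destructured at the top of ApexAI (others, chat, voice, imagine, voiceChannel). A minimal sketch of how a bot might have wired it into a discord.js messageCreate listener follows; the client setup and the concrete option values are illustrative assumptions, not documented defaults.

import { Client, GatewayIntentBits } from "discord.js";
import { ApexAI } from "apexify.js"; // assumes ApexAI is re-exported from the package root

const client = new Client({
    intents: [
        GatewayIntentBits.Guilds,
        GatewayIntentBits.GuildMessages,
        GatewayIntentBits.MessageContent,   // ApexAI reads message.content
        GatewayIntentBits.GuildVoiceStates, // needed by the voice-channel path
    ],
});

client.on("messageCreate", async (message) => {
    await ApexAI(message, {
        // Option groups mirror the destructuring in the deleted source;
        // the concrete values are assumptions for illustration.
        chat: { enable: true, chatModel: "gpt-4-turbo" },
        imagine: { enable: false },
        voice: { enable: false },
        others: { onMention: true },
    });
});

client.login(process.env.DISCORD_TOKEN);
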
@@ -1,52 +0,0 @@
-export interface ApexImagineOptions {
-    nsfw?: boolean | undefined;
-    deepCheck?: boolean;
-    nsfwWords?: string[];
-    count?: number;
-    negative_prompt?: string;
-    sampler?: any;
-    image_style?: any;
-    width?: number;
-    height?: number;
-    format?: "jpeg" | "png";
-    quality?: number;
-    steps?: number;
-    seed?: number;
-    cfg_scale?: number;
-    Api_Key?: string;
-}
-declare function ApexImagine(model: string, prompt: string, options?: ApexImagineOptions): Promise<string[] | undefined>;
-declare function ApexChat(model: string, prompt: string, { userId, memory, limit, instruction, Api_key, threshold }: {
-    userId?: string;
-    memory?: boolean;
-    limit?: number;
-    instruction?: string;
-    Api_key?: string;
-    threshold?: number;
-}): Promise<string>;
-declare function resetHistory({ id, last }: {
-    id: string;
-    last: number | string;
-}): Promise<boolean>;
-declare function ApexListener(options: {
-    filepath: string;
-    model?: string;
-    prompt?: string;
-    lang?: string;
-    apiKey?: string;
-}): Promise<{
-    response: string;
-    transcribe: string;
-}>;
-declare function ApexText2Speech({ ApiKey, inputText, modelName, personality }: {
-    ApiKey?: string;
-    inputText: string;
-    modelName?: "elevenlabs" | "myshell-tts" | "deepinfra-tts" | "whisper-large-v3" | "distil-large-v3" | string;
-    personality?: string;
-}): Promise<Buffer>;
-declare function ApexVideo({ ApiKey, prompt, modelName }: {
-    ApiKey?: string;
-    prompt: string;
-    modelName?: string;
-}): Promise<string>;
-export { ApexImagine, ApexChat, ApexListener, ApexText2Speech, ApexVideo, resetHistory };
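
The deleted declarations above still document the call shapes of the module's exports. A short sketch against those signatures; the model names, ids, and option values are placeholders, not recommendations.

import { ApexChat, ApexImagine, resetHistory } from "apexify.js";

(async () => {
    // ApexChat(model, prompt, options) resolves to a string reply.
    // "gpt-4-turbo" appears in the deleted source; the id and limits are placeholders.
    const reply = await ApexChat("gpt-4-turbo", "Summarize apexify.js in one line.", {
        userId: "user-123",
        memory: true,
        limit: 10,
    });
    console.log(reply);

    // ApexImagine(model, prompt, options?) resolves to an array of image URLs (or undefined).
    const images = await ApexImagine("image-model-name", "a lighthouse at dawn", {
        width: 1024,
        height: 1024,
        format: "png",
        count: 2,
    });
    console.log(images);

    // resetHistory({ id, last }) accepts a number or string for last, per the declaration;
    // its exact semantics are not documented here.
    await resetHistory({ id: "user-123", last: 1 });
})();
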