apexify.js 4.9.24 → 4.9.25
This diff shows the published contents of package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/cjs/canvas/ApexPainter.js +1 -1
- package/dist/cjs/canvas/ApexPainter.js.map +1 -1
- package/dist/cjs/index.d.ts +2 -22
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +2 -17
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/tsconfig.cjs.tsbuildinfo +1 -1
- package/dist/cjs/utils.d.ts +1 -3
- package/dist/cjs/utils.d.ts.map +1 -1
- package/dist/cjs/utils.js +1 -11
- package/dist/cjs/utils.js.map +1 -1
- package/dist/esm/canvas/ApexPainter.js +1 -1
- package/dist/esm/canvas/ApexPainter.js.map +1 -1
- package/dist/esm/index.d.ts +2 -22
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +2 -17
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
- package/dist/esm/utils.d.ts +1 -3
- package/dist/esm/utils.d.ts.map +1 -1
- package/dist/esm/utils.js +1 -11
- package/dist/esm/utils.js.map +1 -1
- package/lib/canvas/ApexPainter.ts +3 -3
- package/lib/index.ts +4 -11
- package/lib/utils.ts +1 -3
- package/package.json +3 -12
- package/dist/cjs/ai/ApexAI.d.ts +0 -145
- package/dist/cjs/ai/ApexAI.d.ts.map +0 -1
- package/dist/cjs/ai/ApexAI.js +0 -494
- package/dist/cjs/ai/ApexAI.js.map +0 -1
- package/dist/cjs/ai/ApexModules.d.ts +0 -53
- package/dist/cjs/ai/ApexModules.d.ts.map +0 -1
- package/dist/cjs/ai/ApexModules.js +0 -824
- package/dist/cjs/ai/ApexModules.js.map +0 -1
- package/dist/cjs/ai/functions/readFiles.d.ts +0 -2
- package/dist/cjs/ai/functions/readFiles.d.ts.map +0 -1
- package/dist/cjs/ai/functions/readFiles.js +0 -96
- package/dist/cjs/ai/functions/readFiles.js.map +0 -1
- package/dist/cjs/ai/functions/tokenizer.d.ts +0 -10
- package/dist/cjs/ai/functions/tokenizer.d.ts.map +0 -1
- package/dist/cjs/ai/functions/tokenizer.js +0 -64
- package/dist/cjs/ai/functions/tokenizer.js.map +0 -1
- package/dist/cjs/ai/functions/validOptions.d.ts +0 -22
- package/dist/cjs/ai/functions/validOptions.d.ts.map +0 -1
- package/dist/cjs/ai/functions/validOptions.js +0 -103
- package/dist/cjs/ai/functions/validOptions.js.map +0 -1
- package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts +0 -7
- package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts.map +0 -1
- package/dist/cjs/ai/modals/electronHub/chatmodels.js +0 -51
- package/dist/cjs/ai/modals/electronHub/chatmodels.js.map +0 -1
- package/dist/cjs/ai/modals/electronHub/imageModels.d.ts +0 -12
- package/dist/cjs/ai/modals/electronHub/imageModels.d.ts.map +0 -1
- package/dist/cjs/ai/modals/electronHub/imageModels.js +0 -92
- package/dist/cjs/ai/modals/electronHub/imageModels.js.map +0 -1
- package/dist/cjs/ai/modals/electronHub/speechModels.d.ts +0 -7
- package/dist/cjs/ai/modals/electronHub/speechModels.d.ts.map +0 -1
- package/dist/cjs/ai/modals/electronHub/speechModels.js +0 -63
- package/dist/cjs/ai/modals/electronHub/speechModels.js.map +0 -1
- package/dist/cjs/ai/modals/electronHub/videoModels.d.ts +0 -6
- package/dist/cjs/ai/modals/electronHub/videoModels.d.ts.map +0 -1
- package/dist/cjs/ai/modals/electronHub/videoModels.js +0 -63
- package/dist/cjs/ai/modals/electronHub/videoModels.js.map +0 -1
- package/dist/cjs/ai/modals/groq/chatgroq.d.ts +0 -9
- package/dist/cjs/ai/modals/groq/chatgroq.d.ts.map +0 -1
- package/dist/cjs/ai/modals/groq/chatgroq.js +0 -64
- package/dist/cjs/ai/modals/groq/chatgroq.js.map +0 -1
- package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts +0 -8
- package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts.map +0 -1
- package/dist/cjs/ai/modals/groq/imageAnalyzer.js +0 -82
- package/dist/cjs/ai/modals/groq/imageAnalyzer.js.map +0 -1
- package/dist/cjs/ai/modals/groq/whisper.d.ts +0 -5
- package/dist/cjs/ai/modals/groq/whisper.d.ts.map +0 -1
- package/dist/cjs/ai/modals/groq/whisper.js +0 -108
- package/dist/cjs/ai/modals/groq/whisper.js.map +0 -1
- package/dist/cjs/ai/modals/hercai/chatModels.d.ts +0 -7
- package/dist/cjs/ai/modals/hercai/chatModels.d.ts.map +0 -1
- package/dist/cjs/ai/modals/hercai/chatModels.js +0 -23
- package/dist/cjs/ai/modals/hercai/chatModels.js.map +0 -1
- package/dist/cjs/ai/utils.d.ts +0 -5
- package/dist/cjs/ai/utils.d.ts.map +0 -1
- package/dist/cjs/ai/utils.js +0 -15
- package/dist/cjs/ai/utils.js.map +0 -1
- package/dist/esm/ai/ApexAI.d.ts +0 -145
- package/dist/esm/ai/ApexAI.d.ts.map +0 -1
- package/dist/esm/ai/ApexAI.js +0 -494
- package/dist/esm/ai/ApexAI.js.map +0 -1
- package/dist/esm/ai/ApexModules.d.ts +0 -53
- package/dist/esm/ai/ApexModules.d.ts.map +0 -1
- package/dist/esm/ai/ApexModules.js +0 -824
- package/dist/esm/ai/ApexModules.js.map +0 -1
- package/dist/esm/ai/functions/readFiles.d.ts +0 -2
- package/dist/esm/ai/functions/readFiles.d.ts.map +0 -1
- package/dist/esm/ai/functions/readFiles.js +0 -96
- package/dist/esm/ai/functions/readFiles.js.map +0 -1
- package/dist/esm/ai/functions/tokenizer.d.ts +0 -10
- package/dist/esm/ai/functions/tokenizer.d.ts.map +0 -1
- package/dist/esm/ai/functions/tokenizer.js +0 -64
- package/dist/esm/ai/functions/tokenizer.js.map +0 -1
- package/dist/esm/ai/functions/validOptions.d.ts +0 -22
- package/dist/esm/ai/functions/validOptions.d.ts.map +0 -1
- package/dist/esm/ai/functions/validOptions.js +0 -103
- package/dist/esm/ai/functions/validOptions.js.map +0 -1
- package/dist/esm/ai/modals/electronHub/chatmodels.d.ts +0 -7
- package/dist/esm/ai/modals/electronHub/chatmodels.d.ts.map +0 -1
- package/dist/esm/ai/modals/electronHub/chatmodels.js +0 -51
- package/dist/esm/ai/modals/electronHub/chatmodels.js.map +0 -1
- package/dist/esm/ai/modals/electronHub/imageModels.d.ts +0 -12
- package/dist/esm/ai/modals/electronHub/imageModels.d.ts.map +0 -1
- package/dist/esm/ai/modals/electronHub/imageModels.js +0 -92
- package/dist/esm/ai/modals/electronHub/imageModels.js.map +0 -1
- package/dist/esm/ai/modals/electronHub/speechModels.d.ts +0 -7
- package/dist/esm/ai/modals/electronHub/speechModels.d.ts.map +0 -1
- package/dist/esm/ai/modals/electronHub/speechModels.js +0 -63
- package/dist/esm/ai/modals/electronHub/speechModels.js.map +0 -1
- package/dist/esm/ai/modals/electronHub/videoModels.d.ts +0 -6
- package/dist/esm/ai/modals/electronHub/videoModels.d.ts.map +0 -1
- package/dist/esm/ai/modals/electronHub/videoModels.js +0 -63
- package/dist/esm/ai/modals/electronHub/videoModels.js.map +0 -1
- package/dist/esm/ai/modals/groq/chatgroq.d.ts +0 -9
- package/dist/esm/ai/modals/groq/chatgroq.d.ts.map +0 -1
- package/dist/esm/ai/modals/groq/chatgroq.js +0 -64
- package/dist/esm/ai/modals/groq/chatgroq.js.map +0 -1
- package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts +0 -8
- package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts.map +0 -1
- package/dist/esm/ai/modals/groq/imageAnalyzer.js +0 -82
- package/dist/esm/ai/modals/groq/imageAnalyzer.js.map +0 -1
- package/dist/esm/ai/modals/groq/whisper.d.ts +0 -5
- package/dist/esm/ai/modals/groq/whisper.d.ts.map +0 -1
- package/dist/esm/ai/modals/groq/whisper.js +0 -108
- package/dist/esm/ai/modals/groq/whisper.js.map +0 -1
- package/dist/esm/ai/modals/hercai/chatModels.d.ts +0 -7
- package/dist/esm/ai/modals/hercai/chatModels.d.ts.map +0 -1
- package/dist/esm/ai/modals/hercai/chatModels.js +0 -23
- package/dist/esm/ai/modals/hercai/chatModels.js.map +0 -1
- package/dist/esm/ai/utils.d.ts +0 -5
- package/dist/esm/ai/utils.d.ts.map +0 -1
- package/dist/esm/ai/utils.js +0 -15
- package/dist/esm/ai/utils.js.map +0 -1
- package/lib/ai/ApexAI.ts +0 -758
- package/lib/ai/ApexModules.ts +0 -916
- package/lib/ai/functions/readFiles.ts +0 -66
- package/lib/ai/functions/tokenizer.ts +0 -69
- package/lib/ai/functions/validOptions.ts +0 -116
- package/lib/ai/modals/electronHub/chatmodels.ts +0 -57
- package/lib/ai/modals/electronHub/imageModels.ts +0 -116
- package/lib/ai/modals/electronHub/speechModels.ts +0 -75
- package/lib/ai/modals/electronHub/videoModels.ts +0 -75
- package/lib/ai/modals/groq/chatgroq.ts +0 -78
- package/lib/ai/modals/groq/imageAnalyzer.ts +0 -83
- package/lib/ai/modals/groq/whisper.ts +0 -114
- package/lib/ai/modals/hercai/chatModels.ts +0 -20
- package/lib/ai/utils.ts +0 -15
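The picture from the file list: the entire `ai/` subtree (ApexAI, ApexModules, the readFiles/tokenizer/validOptions helpers, and the electronHub/groq/hercai model adapters) is deleted from both `lib/` and the CJS/ESM builds, while the entry points (`package/lib/index.ts`, `dist/*/index.*`), `utils`, and `package.json` shrink accordingly, so 4.9.25 ships the canvas features without the AI subsystem. A minimal consumer-side sketch of the break, assuming 4.9.24 re-exported `ApexAI` from the package root (the import below is illustrative, not taken from the package docs):

    // Compiled against apexify.js 4.9.24, where the ai/ subtree still existed.
    // In 4.9.25 the export is removed, so this import no longer resolves.
    import { ApexAI } from "apexify.js";

    // Canvas exports such as ApexPainter are untouched by this diff;
    // code that needs the AI features would have to stay pinned:
    //   "dependencies": { "apexify.js": "4.9.24" }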
package/lib/ai/ApexAI.ts
DELETED
@@ -1,758 +0,0 @@
-import {
-    groqAnalyzer, readFile, ApexImagine, ApexText2Speech, ApexListener, ApexChat
-} from "./utils";
-import {
-    ActionRowBuilder, Message, PermissionResolvable, TextChannel,
-    EmbedBuilder, AttachmentBuilder, MessageActionRowComponentBuilder, ButtonStyle, ButtonBuilder,
-    PermissionFlagsBits
-} from "discord.js";
-import {
-    joinVoiceChannel, createAudioPlayer, createAudioResource, EndBehaviorType,
-    VoiceConnection, AudioPlayerStatus
-} from "@discordjs/voice";
-
-import { whisper } from "./modals/groq/whisper";
-import { pipeline, Readable } from "stream";
-import prism from "prism-media"
-import path from "path";
-import fs from "fs";
-import { execSync } from "child_process";
-
-
-export interface Options {
-    /**
-     * Configuration options related to voice functionality.
-     * @param voice.textVoice Configuration options for text-to-voice functionality.
-     * @param voice.textVoice.enable Whether text-to-voice functionality is enabled.
-     * @param voice.textVoice.voiceModel The voice model to be used.
-     * @param voice.textVoice.character The character name.
-     */
-    voice?: {
-        enable?: boolean;
-        voiceModel?: "elevenlabs" | "myshell-tts" | "deepinfra-tts" | "whisper-large-v3" | "distil-large-v3" | string;
-        characterName?: string;
-    };
-    /**
-     * Configuration options related to image generation.
-     * @param imagine.enable Whether image generation is enabled.
-     * @param imagine.drawTrigger The trigger phrases for initiating image generation.
-     * @param imagine.imageModal The model for the image generation.
-     * @param imagine.numOfImages The number of images to generate.
-     * @param imagine.nsfw Configuration options for NSFW filtering.
-     * @param imagine.nsfw.enable Whether NSFW filtering is enabled.
-     * @param imagine.nsfw.keywords Keywords for NSFW filtering.
-     * @param imagine.enhancer.enable Whether image enhancement is enabled (rewrites your prompt in more descriptive way).
-     * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia models).
-     */
-    imagine?: {
-        enable: boolean;
-        drawTrigger: string[];
-        imageModel: string;
-        numOfImages: number;
-        ApiKeys?: {
-            electronHubKey?: string
-        };
-        nsfw?: {
-            enable?: boolean;
-            keywords?: string[];
-            deepCheck?: boolean;
-        };
-        enhancer?: {
-            enable: boolean;
-            negative_prompt?: string;
-            width?: number;
-            height?: number;
-            format?: "jpeg" | "png";
-            quality?: number;
-        };
-    };
-    /**
-     * Configuration options related to chat functionality.
-     * @param chat.chatModel The chat model to be used.
-     * @param chat.readFiles Whether to read files.
-     * @param chat.readImages Whether to read images.
-     * @param chat.personality The personality for the chat.
-     * @param chat.API_KEY The API key for accessing the chat service.
-     * @param chat.lang The Language of the voice message sent.
-     * @param chat.memory Configuration options for memory.
-     * @param chat.memory.memoryOn Whether memory is enabled.
-     * @param chat.memory.id The ID for memory.
-     * @param chat.typeWriting Configuration options for typing effect.
-     * @param chat.typeWriting.enable Whether the typing effect is enabled.
-     * @param chat.typeWriting.speed The speed of typing.
-     * @param chat.typeWriting.delay The delay for typing.
-     */
-    chat?: {
-        enable?: boolean;
-        chatModel?: string;
-        readFiles?: boolean;
-        readImages?: boolean;
-        instruction?: string;
-        Api_Keys?: {
-            groq_API?: string;
-            electronHub_Key?: string
-        };
-        memory?: {
-            memoryOn?: boolean;
-            id?: string;
-            threshold?: number;
-            limit?: number;
-        };
-        typeWriting?: {
-            enable?: boolean;
-            speed?: number;
-            delay?: number;
-        };
-    };
-
-
-    voiceChannel?: {
-        enable?: boolean;
-        channelId?: string;
-        personality?: string;
-        voiceModel?: "elevenlabs" | "myshell-tts" | "deepinfra-tts" | "whisper-large-v3" | "distil-large-v3" | string;
-        joinOn?: {
-            triggeredWords?: string[];
-        }
-    };
-    /**
-     * Additional configuration options.
-     * @param others.messageType Configuration options for message types.
-     * @param others.messageType.type The type of message.
-     * @param others.messageType.intialContent The initial content of the message.
-     * @param others.buttons Buttons configuration.
-     * @param others.keywords Keywords for response.
-     * @param others.keywordResponses Responses for keywords.
-     * @param others.loader Configuration options for loader.
-     * @param others.loader.enable Whether the loader is enabled.
-     * @param others.loader.loadingMessage The loading message.
-     * @param others.loader.loadingTimer The loading timer.
-     * @param others.channel Configuration options for channels.
-     * @param others.channel.enable Whether channels are enabled.
-     * @param others.channel.id The ID of the channels.
-     * @param others.permissions Configuration options for permissions.
-     * @param others.permissions.enable Whether permissions are enabled.
-     * @param others.permissions.role The role for permissions.
-     * @param others.permissions.permission The permission.
-     * @param others.permissions.blockedUsers Blocked users.
-     */
-    others?: {
-        messageType?: {
-            type?: string;
-            intialContent?: string;
-            sendAs?: string | "embed" | "content";
-        };
-        onMention?: boolean;
-        buttons?: any[];
-        keywordResponses?: Record<string, string>;
-        loader?: {
-            enable?: boolean;
-            loadingMessage?: string;
-            loadingTimer?: number;
-        };
-        channel?: {
-            enable?: boolean;
-            id?: string[];
-        };
-        permissions?: {
-            enable?: boolean;
-            role?: string[];
-            permission?: string[];
-            blockedUsers?: string[];
-        };
-    };
-}
-
-
-function createButtonRows(buttons: any[]): ActionRowBuilder<MessageActionRowComponentBuilder>[] {
-    if (!Array.isArray(buttons) || buttons.length === 0) return [];
-
-    const buttonRows: ActionRowBuilder<MessageActionRowComponentBuilder>[] = [];
-    let currentRow = new ActionRowBuilder<MessageActionRowComponentBuilder>();
-
-    for (const button of buttons) {
-        if (!button.label || !button.style || !button.custom_id) continue;
-
-        const buttonComponent = new ButtonBuilder()
-            .setLabel(button.label)
-            .setStyle(button.style as ButtonStyle)
-            .setCustomId(button.custom_id);
-
-        if (button.emoji) buttonComponent.setEmoji(button.emoji);
-        if (button.url && button.style === ButtonStyle.Link) buttonComponent.setURL(button.url);
-
-        currentRow.addComponents(buttonComponent);
-
-        if (currentRow.components.length === 5) {
-            buttonRows.push(currentRow);
-            currentRow = new ActionRowBuilder<MessageActionRowComponentBuilder>();
-        }
-    }
-
-    if (currentRow.components.length > 0) {
-        buttonRows.push(currentRow);
-    }
-
-    return buttonRows.slice(0, 5);
-}
-
-
-export async function ApexAI(message: Message, ApexOptions: Options) {
-    if (!message.guild || message.author.bot) return;
-
-    let prompt = message.content || "";
-    const { others, chat, voice, imagine, voiceChannel } = ApexOptions;
-    const { permissions, channel, messageType, onMention, loader, buttons } = others ?? {};
-    const { Api_Keys, typeWriting } = chat ?? {};
-    const { nsfw } = imagine ?? {};
-
-    if (permissions?.enable) {
-        const userRoles = message.member?.roles.cache.map((role: any) => role.id);
-        const hasPermission =
-            permissions.role?.some((roleId) => userRoles?.includes(roleId)) ?? false;
-        const hasRequiredPerms =
-            permissions.permission?.some((perm) =>
-                message.member?.permissions.has(perm as PermissionResolvable)
-            ) ?? false;
-        const isBlocked = permissions.blockedUsers?.includes(message.author.id) ?? false;
-
-        if (!hasPermission || !hasRequiredPerms || isBlocked) return;
-    }
-
-
-    if (channel?.enable && !channel.id?.includes(message.channel.id)) return;
-
-
-    if (onMention) {
-        const botMention = `<@${message.client.user.id}>`;
-        if (!prompt.startsWith(botMention)) return;
-        prompt = prompt.replace(botMention, "").trim();
-    }
-
-
-    const mainPrompt = prompt;
-
-
-    if (message.channel.isTextBased() && "sendTyping" in message.channel) await message.channel.sendTyping();
-
-
-    if (loader?.enable) {
-        const loadingMessage = await message.reply({
-            content: loader.loadingMessage,
-            allowedMentions: { repliedUser: false },
-        });
-        setTimeout(() => loadingMessage.delete().catch(console.error), loader.loadingTimer || 3000);
-    }
-
-
-    if (message.attachments.size > 0) {
-        for (const attachment of message.attachments.values()) {
-            try {
-                const url = attachment.url;
-                const contentType = attachment.contentType || "";
-                const fileSize = attachment.size;
-                const maxSize = 25 * 1024 * 1024;
-
-
-                if (contentType.startsWith("audio/")) {
-                    if (fileSize > maxSize) {
-                        console.error("Audio file exceeds 25MB.");
-                        continue;
-                    }
-                    prompt += `\n> Transcribed from audio:\n${await whisper(prompt, url)}`;
-                }
-
-
-                else if (["image/png", "image/jpeg", "image/jpg", "image/webp"].includes(contentType)) {
-                    if (!Api_Keys?.groq_API) {
-                        console.error("Missing Groq API Key.");
-                        continue;
-                    }
-                    prompt += `\n> Image analysis:\n${await groqAnalyzer({ img: url, ApiKey: Api_Keys.groq_API })}`;
-                }
-
-
-                else if (attachment.name.endsWith(".pdf") || attachment.name.endsWith(".txt")) {
-                    const fileType = attachment.name.endsWith(".pdf") ? "pdf" : "txt";
-                    const fileContent = await readFile(url, fileType);
-                    prompt += `\n\n- 📜 ${attachment.name} content:\n${fileContent}`;
-                }
-
-
-                else {
-                    prompt += `\n> ❌ Unsupported file type uploaded: ${contentType}`;
-                }
-            } catch (e) {
-                console.error("Error processing attachment:", e);
-            }
-        }
-    }
-
-
-    let prevReply: string = "";
-    if (message.reference?.messageId) {
-        const channel = message?.guild?.channels?.cache?.get(message.channel.id);
-        const messageId = message?.reference?.messageId;
-        // @ts-ignore
-        const fetchedMessage = await channel?.messages?.fetch(messageId);
-
-        if (fetchedMessage) {
-            prevReply += fetchedMessage.content || "";
-
-
-            const firstAttachment = fetchedMessage.attachments?.first();
-            if (firstAttachment && /\.(png|jpg|jpeg|webp)$/i.test(firstAttachment.url)) {
-                prevReply += await groqAnalyzer({
-                    img: firstAttachment.url,
-                    ApiKey: Api_Keys?.groq_API,
-                    prompt,
-                });
-                prompt += `\n- 📷 Image analysis:\n${prevReply}`;
-            }
-        }
-    }
-
-
-    if (others?.keywordResponses) {
-        for (const [keyword, response] of Object.entries(others.keywordResponses)) {
-            if (prompt.toLowerCase().includes(keyword.toLowerCase())) {
-                return await message.reply({
-                    content: response,
-                    allowedMentions: { repliedUser: false },
-                });
-            }
-        }
-    }
-
-    if (voiceChannel?.enable && voiceChannel.channelId && voiceChannel.joinOn?.triggeredWords) {
-        const { channelId, joinOn, personality, voiceModel } = voiceChannel;
-
-        const triggerDetected = joinOn.triggeredWords?.some(word =>
-            prompt.toLowerCase().startsWith(word.toLowerCase())
-        );
-
-        if (triggerDetected) {
-            return handleVoiceAI(message, channelId, chat, voiceModel, personality);
-        }
-    }
-
-
-    let aiResponse: string | Buffer = "";
-    const chatEnabled = chat?.enable ?? false;
-    const voiceEnabled = voice?.enable ?? false;
-    const imageEnabled = imagine?.enable ?? false;
-    let genImage: string[] = [];
-    const drawTrigger = imagine?.drawTrigger?.some(trigger =>
-        mainPrompt.toLowerCase().startsWith(trigger.toLowerCase())
-    );
-
-
-    if (message.channel.isTextBased() && "sendTyping" in message.channel) await message.channel.sendTyping();
-
-    if (chatEnabled && voiceEnabled) {
-        return await message.reply({
-            content: "❌ Error: Both Chat and Voice cannot be enabled at the same time.",
-            allowedMentions: { repliedUser: false },
-        });
-    }
-
-
-    if (chatEnabled) {
-        let instruction: string | undefined = chat?.instruction;
-
-        if (imageEnabled && drawTrigger) {
-            const enhancer = imagine?.enhancer?.enable ? imagine.enhancer : undefined;
-            let tempEnhancer: string | undefined;
-
-            if (enhancer) {
-                const enhancementRequest = `Rewrite this text below in a more descriptive way, making it clearer to be visualized correctly by an AI image generator. Use stronger words and return only the enhanced prompt—nothing more, nothing less.\n\n`;
-                tempEnhancer = await ApexChat("llama-3.1-tulu-3-405b", prompt, { instruction: enhancementRequest });
-            }
-
-            genImage = await ApexImagine(imagine?.imageModel as string, tempEnhancer || prompt,
-                { width: imagine?.enhancer?.width, height: imagine?.enhancer?.height, format: imagine?.enhancer?.format,
-                quality: imagine?.enhancer?.quality, negative_prompt: imagine?.enhancer?.negative_prompt, nsfw: imagine?.nsfw?.enable,
-                nsfwWords: imagine?.nsfw?.keywords, deepCheck: imagine?.nsfw?.deepCheck, count: imagine?.numOfImages }) as string[];
-
-            instruction += `\n\nThe image has already been generated. Do not attempt to generate or describe another image. Instead, provide feedback, thoughts, or a critique based on the given prompt:\n\n`;
-        }
-
-        aiResponse = await ApexChat(chat?.chatModel as string, prompt, { instruction, memory: chat?.memory?.memoryOn, userId: chat?.memory?.id, limit: chat?.memory?.limit, threshold: chat?.memory?.threshold });
-    }
-
-    else if (voiceEnabled) {
-        let instruction: string | undefined = chat?.instruction;
-
-        if (imageEnabled && drawTrigger) {
-            const enhancer = imagine?.enhancer?.enable ? imagine.enhancer : undefined;
-            let tempEnhancer: string | undefined;
-
-            if (enhancer) {
-                const enhancementRequest = `Rewrite this text below in a more descriptive way, making it clearer to be visualized correctly by an AI image generator. Use stronger words and return only the enhanced prompt—nothing more, nothing less.\n\n`;
-                tempEnhancer = await ApexChat("llama-3.1-tulu-3-405b", prompt, { instruction: enhancementRequest });
-            }
-
-            genImage = await ApexImagine(imagine?.imageModel as string, tempEnhancer || prompt,
-                { width: enhancer?.width, height: enhancer?.height, format: enhancer?.format,
-                quality: enhancer?.quality, negative_prompt: enhancer?.negative_prompt, nsfw: nsfw?.enable,
-                nsfwWords: nsfw?.keywords, deepCheck: nsfw?.deepCheck, count: imagine?.numOfImages }) as string[];
-
-            instruction += `\n\nThe image has already been generated. Do not attempt to generate or describe another image. Instead, provide feedback, thoughts, or a critique based on the given prompt:\n\n`;
-        }
-
-        aiResponse = await ApexChat(chat?.chatModel as string, prompt, { instruction, memory: chat?.memory?.memoryOn, userId: chat?.memory?.id, limit: chat?.memory?.limit, threshold: chat?.memory?.threshold });
-
-        aiResponse = await ApexText2Speech({ modelName: voice?.voiceModel, inputText: aiResponse, personality: voice?.characterName });
-    }
-
-    else if (imageEnabled && drawTrigger) {
-        const enhancer = imagine?.enhancer?.enable ? imagine.enhancer : undefined;
-        let tempEnhancer: string | undefined;
-
-        if (enhancer) {
-            const enhancementRequest = `Rewrite this text below in a more descriptive way, making it clearer to be visualized correctly by an AI image generator. Use stronger words and return only the enhanced prompt—nothing more, nothing less.\n\n${prompt}`;
-            tempEnhancer = await ApexChat("llama-3.1-tulu-3-405b", prompt, { instruction: enhancementRequest });
-        }
-
-        genImage = await ApexImagine(imagine?.imageModel as string, tempEnhancer || prompt,
-            { width: enhancer?.width, height: enhancer?.height, format: enhancer?.format,
-            quality: enhancer?.quality, negative_prompt: enhancer?.negative_prompt, nsfw: nsfw?.enable,
-            nsfwWords: nsfw?.keywords, deepCheck: nsfw?.deepCheck, count: imagine?.numOfImages }) as string[];
-    }
-
-    else {
-        return await message.reply({
-            content: "❌ Error: No valid AI features are enabled.",
-            allowedMentions: { repliedUser: false },
-        });
-    }
-
-
-    const buttonRows = createButtonRows(buttons || []);
-
-    const imageAttachments: AttachmentBuilder[] = [];
-    if (genImage.length > 0) {
-        for (const imageUrl of genImage) {
-            imageAttachments.push(new AttachmentBuilder(imageUrl, { name: `image-${Date.now()}.png` }));
-        }
-    }
-
-    let voiceAttachment: AttachmentBuilder | null = null;
-    if (voiceEnabled && aiResponse instanceof Buffer) {
-        voiceAttachment = new AttachmentBuilder(aiResponse, { name: "ai_voice.mp3" });
-    }
-
-    const chunkMessage = (text: string, maxLength = 2000): string[] => {
-        const chunks: string[] = [];
-        while (text.length > maxLength) {
-            let sliceIndex = text.lastIndexOf("\n", maxLength);
-            if (sliceIndex === -1) sliceIndex = maxLength;
-            chunks.push(text.slice(0, sliceIndex));
-            text = text.slice(sliceIndex);
-        }
-        if (text.length > 0) chunks.push(text);
-        return chunks;
-    };
-
-    const initialContent = messageType?.intialContent ? `${messageType.intialContent} ` : "";
-
-    const finalText = typeof aiResponse === "string" ? `${initialContent}${aiResponse}` : "";
-    const messageChunks = chunkMessage(finalText);
-
-    const sendMessage = async (content: string, components?: ActionRowBuilder<MessageActionRowComponentBuilder>[]) => {
-        if (!content.trim()) return;
-
-        if (messageType?.sendAs === "embed") {
-            const embed = new EmbedBuilder()
-                .setColor(0x0099ff)
-                .setDescription(content)
-                .setFooter({ text: "AI Response" });
-
-            if (messageType?.type === "send") {
-                return (message.channel as TextChannel).send({ embeds: [embed], components });
-            } else {
-                return message.reply({ embeds: [embed], allowedMentions: { repliedUser: false }, components });
-            }
-        } else {
-            if (messageType?.type === "send") {
-                return (message.channel as TextChannel).send({ content, components });
-            } else {
-                return message.reply({ content, allowedMentions: { repliedUser: false }, components });
-            }
-        }
-    };
-
-    const sendTypeWritingMessage = async (content: string, components?: ActionRowBuilder<MessageActionRowComponentBuilder>[]) => {
-        if (!typeWriting?.enable) {
-            return sendMessage(content, components);
-        }
-
-        content = content.trimStart();
-        const typingMessage = await sendMessage(content[0]);
-        let typedSentence = typingMessage?.content;
-        let i = 1;
-
-        while (i < content.length) {
-            typedSentence += content.slice(i, i + getStepCount(typeWriting.speed ?? 50));
-            await sleep(typeWriting.delay ?? 500);
-            await typingMessage?.edit({ content: typedSentence });
-            i += getStepCount(typeWriting.speed ?? 50);
-        }
-
-        if (components && components.length > 0) {
-            await typingMessage?.edit({ components });
-        }
-
-        return typingMessage;
-    };
-
-
-    function getStepCount(speed: number): number {
-        const maxSteps = 120;
-        const steps = Math.min(Math.ceil(speed), maxSteps);
-        return steps > 0 ? steps : 1;
-    }
-
-    function sleep(ms: number): Promise<void> {
-        return new Promise<void>((resolve) => setTimeout(resolve, ms));
-    }
-
-
-    (async () => {
-        if (imageAttachments.length > 0) {
-            await (message.channel as TextChannel).send({ files: imageAttachments });
-        }
-
-        let lastSentMessage: Message<boolean> | null = null;
-
-        for (let i = 0; i < messageChunks.length; i++) {
-            const chunk = messageChunks[i].trim();
-            if (!chunk) continue;
-
-            const isLastChunk = i === messageChunks.length - 1;
-
-            if (isLastChunk) {
-                const response = await sendTypeWritingMessage(chunk, buttonRows.length > 0 ? buttonRows : undefined);
-                if (response) lastSentMessage = response as Message<boolean>;
-            } else {
-                await sendTypeWritingMessage(chunk);
-            }
-
-            if (!isLastChunk) {
-                await new Promise((resolve) => setTimeout(resolve, chat?.typeWriting?.delay ?? 500));
-            }
-        }
-
-        if (voiceAttachment) {
-            await (message.channel as TextChannel).send({ files: [voiceAttachment] });
-        }
-
-
-        if (lastSentMessage && buttonRows.length > 0) {
-            await lastSentMessage.edit({ components: buttonRows }).catch(() => null);
-        }
-    })();
-
-
-};
-
-let voiceQueue: { userId: string; text: string }[] = [];
-let isProcessing = false;
-let voiceConnection: VoiceConnection | null = null;
-let activeUser: string | null = null;
-let isRecording = false;
-let silenceTimer: NodeJS.Timeout | null = null;
-
-const recordingsDir = path.join(process.cwd(), "recordings");
-if (!fs.existsSync(recordingsDir)) {
-    fs.mkdirSync(recordingsDir, { recursive: true });
-}
-
-try {
-    execSync("ffmpeg -version > nul 2>&1");
-} catch (err) {
-    console.error("🚨 FFmpeg is NOT installed or not in PATH! Install it first.");
-}
-
-function resetSilenceTimer() {
-    if (silenceTimer) clearTimeout(silenceTimer);
-}
-
-export async function handleVoiceAI(message: any, voiceChannelId: string, chat: any, modelName?: string, personality?: string) {
-    const guild = message.guild;
-    if (!guild) return;
-
-    const channel = guild.channels.cache.get(voiceChannelId);
-    if (!channel || channel.type !== 2) {
-        return await message.reply(`🚫 Invalid voice channel ID: ${voiceChannelId}`);
-    }
-
-    const botMember = guild.members.me;
-    if (!botMember) return;
-    const permissions = channel.permissionsFor(botMember);
-
-    if (
-        !permissions?.has(PermissionFlagsBits.Connect) ||
-        !permissions.has(PermissionFlagsBits.Speak) ||
-        !permissions.has(PermissionFlagsBits.Stream) ||
-        !permissions.has(PermissionFlagsBits.UseVAD)
-    ) {
-        return await message.reply("🚫 I don't have the required permissions to join and speak in this voice channel.");
-    }
-
-    if (voiceConnection) {
-        return await message.reply("⚠️ AI is already in a voice channel.");
-    }
-
-    voiceConnection = joinVoiceChannel({
-        channelId: channel.id,
-        guildId: guild.id,
-        adapterCreator: guild.voiceAdapterCreator as any,
-        selfMute: false,
-        selfDeaf: false
-    });
-
-    activeUser = message.author.id;
-    captureAudio(voiceConnection, chat, modelName, personality);
-}
-
-function captureAudio(connection: VoiceConnection, chat: any, modelName?: string, personality?: string) {
-    const receiver = connection.receiver;
-
-    receiver.speaking.on("start", async (userId) => {
-        if (userId !== activeUser) {
-            activeUser = userId;
-            isRecording = false;
-        }
-
-        resetSilenceTimer();
-
-        const rawFilePath = path.join(recordingsDir, `${userId}.pcm`);
-        const wavFilePath = path.join(recordingsDir, `${userId}.wav`);
-
-        const opusStream = receiver.subscribe(userId, {
-            end: { behavior: EndBehaviorType.AfterSilence, duration: 2000 }
-        });
-
-        const pcmStream = new prism.opus.Decoder({
-            frameSize: 960,
-            channels: 1,
-            rate: 48000
-        });
-
-        const writeStream = fs.createWriteStream(rawFilePath);
-        pipeline(opusStream, pcmStream, writeStream, (err) => {
-            if (err) {
-                console.error("❌ Error writing PCM file:", err);
-                return;
-            }
-            convertPCMtoWAV(rawFilePath, wavFilePath, chat, modelName, personality);
-        });
-    });
-
-    receiver.speaking.on("end", async (userId) => {
-        if (userId === activeUser) {
-            startSilenceTimer(chat, modelName, personality);
-        }
-    });
-}
-
-function startSilenceTimer(chat: any, modelName?: string, personality?: string) {
-    resetSilenceTimer();
-    silenceTimer = setTimeout(() => {
-        if (voiceQueue.length > 0) {
-            const nextUser = voiceQueue.shift();
-            if (nextUser) {
-                activeUser = nextUser.userId;
-                processQueue(chat, modelName, personality);
-            }
-        } else {
-            leaveVoiceChannel();
-        }
-    }, 5000);
-}
-
-
-function convertPCMtoWAV(inputPCM: string, outputWAV: string, chat: any, modelName?: string, personality?: string) {
-    if (!fs.existsSync(inputPCM) || fs.statSync(inputPCM).size === 0) {
-        return;
-    }
-
-    try {
-        execSync(`ffmpeg -y -f s16le -ar 48000 -ac 1 -i "${inputPCM}" -acodec pcm_s16le "${outputWAV}" > nul 2>&1`);
-
-        if (fs.existsSync(outputWAV)) {
-            transcribeAudio(outputWAV, chat, modelName, personality);
-        }
-
-    } catch (error) {
-        console.error("❌ FFmpeg failed:", error);
-    }
-}
-
-
-async function transcribeAudio(filePath: string, chat: any, modelName?: string, personality?: string) {
-    try {
-        const transcribedText = await ApexListener({
-            filepath: filePath,
-            prompt: "Transcribe what the user said in English.",
-            lang: "en"
-        });
-
-        if (transcribedText.transcribe) {
-            voiceQueue.push({ userId: activeUser || "unknown", text: transcribedText.transcribe });
-            processQueue(chat, modelName, personality);
-        }
-
-        fs.unlinkSync(filePath);
-    } catch (error) {
-        console.error("❌ Error in transcription:", error);
-    }
-}
-
-async function processQueue(chat: any, modelName?: string, personality?: string) {
-    if (isProcessing || voiceQueue.length === 0) return;
-
-    isProcessing = true;
-    const { userId, text } = voiceQueue.shift()!;
-    activeUser = userId;
-    resetSilenceTimer();
-
-    try {
-        const aiResponse = await ApexChat(chat?.chatModel || "llama-3.1-tulu-3-405b", text, {
-            instruction: chat.instruction,
-            memory: chat?.memory?.memoryOn,
-            userId,
-            limit: chat?.memory?.limit,
-            threshold: chat?.memory?.threshold
-        });
-
-        const audioBuffer = await ApexText2Speech({ inputText: aiResponse, modelName, personality });
-
-        if (voiceConnection) {
-            const player = createAudioPlayer();
-            const resource = createAudioResource(Readable.from(audioBuffer));
-            voiceConnection.subscribe(player);
-            player.play(resource);
-
-            player.on(AudioPlayerStatus.Idle, () => {
-                isProcessing = false;
-                if (voiceQueue.length > 0) {
-                    processQueue(chat, modelName, personality);
-                }
-            });
-        }
-    } catch (error) {
-        console.error("❌ Error processing AI response:", error);
-        isProcessing = false;
-    }
-}
-
-function leaveVoiceChannel() {
-    if (voiceConnection) {
-        voiceConnection.destroy();
-        voiceConnection = null;
-        activeUser = null;
-        resetSilenceTimer();
-    }
-}