apexify.js 4.7.95 → 4.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (187)
  1. package/dist/cjs/ai/ApexAI.d.ts.map +1 -1
  2. package/dist/cjs/ai/ApexAI.js +42 -23
  3. package/dist/cjs/ai/ApexAI.js.map +1 -1
  4. package/dist/cjs/ai/ApexModules.d.ts.map +1 -1
  5. package/dist/cjs/ai/ApexModules.js +35 -21
  6. package/dist/cjs/ai/ApexModules.js.map +1 -1
  7. package/dist/cjs/ai/buttons/tools.js +8 -8
  8. package/dist/cjs/ai/buttons/tools.js.map +1 -1
  9. package/dist/cjs/ai/functions/tokenizer.d.ts +10 -0
  10. package/dist/cjs/ai/functions/tokenizer.d.ts.map +1 -0
  11. package/dist/cjs/ai/functions/tokenizer.js +64 -0
  12. package/dist/cjs/ai/functions/tokenizer.js.map +1 -0
  13. package/dist/cjs/ai/modals/electronHub/chatmodels.d.ts.map +1 -0
  14. package/dist/cjs/ai/{modals-chat → modals}/electronHub/chatmodels.js +8 -11
  15. package/dist/cjs/ai/modals/electronHub/chatmodels.js.map +1 -0
  16. package/dist/cjs/ai/modals/electronHub/imageModels.d.ts.map +1 -0
  17. package/dist/cjs/ai/{modals-chat → modals}/electronHub/imageModels.js +9 -5
  18. package/dist/cjs/ai/modals/electronHub/imageModels.js.map +1 -0
  19. package/dist/cjs/ai/modals/electronHub/songModels.d.ts.map +1 -0
  20. package/dist/cjs/ai/modals/electronHub/songModels.js.map +1 -0
  21. package/dist/cjs/ai/modals/electronHub/speechModels.d.ts.map +1 -0
  22. package/dist/cjs/ai/{modals-chat → modals}/electronHub/speechModels.js +4 -1
  23. package/dist/cjs/ai/modals/electronHub/speechModels.js.map +1 -0
  24. package/dist/cjs/ai/modals/electronHub/videoModels.d.ts.map +1 -0
  25. package/dist/cjs/ai/modals/electronHub/videoModels.js.map +1 -0
  26. package/dist/cjs/ai/modals/groq/chatgroq.d.ts.map +1 -0
  27. package/dist/cjs/ai/modals/groq/chatgroq.js.map +1 -0
  28. package/dist/cjs/ai/modals/groq/imageAnalyzer.d.ts.map +1 -0
  29. package/dist/cjs/ai/modals/groq/imageAnalyzer.js.map +1 -0
  30. package/dist/cjs/ai/modals/groq/whisper.d.ts.map +1 -0
  31. package/dist/cjs/ai/{modals-chat → modals}/groq/whisper.js +34 -33
  32. package/dist/cjs/ai/modals/groq/whisper.js.map +1 -0
  33. package/dist/cjs/ai/modals/hercai/chatModels.d.ts.map +1 -0
  34. package/dist/cjs/ai/modals/hercai/chatModels.js.map +1 -0
  35. package/dist/cjs/ai/utils.d.ts +1 -1
  36. package/dist/cjs/ai/utils.d.ts.map +1 -1
  37. package/dist/cjs/ai/utils.js +1 -1
  38. package/dist/cjs/ai/utils.js.map +1 -1
  39. package/dist/cjs/canvas/ApexPainter.d.ts +20 -15
  40. package/dist/cjs/canvas/ApexPainter.d.ts.map +1 -1
  41. package/dist/cjs/canvas/ApexPainter.js +143 -23
  42. package/dist/cjs/canvas/ApexPainter.js.map +1 -1
  43. package/dist/cjs/canvas/utils/Image/imageProperties.d.ts.map +1 -1
  44. package/dist/cjs/canvas/utils/Image/imageProperties.js +47 -24
  45. package/dist/cjs/canvas/utils/Image/imageProperties.js.map +1 -1
  46. package/dist/cjs/canvas/utils/types.d.ts +21 -12
  47. package/dist/cjs/canvas/utils/types.d.ts.map +1 -1
  48. package/dist/cjs/canvas/utils/types.js.map +1 -1
  49. package/dist/cjs/canvas/utils/utils.d.ts +2 -2
  50. package/dist/cjs/canvas/utils/utils.d.ts.map +1 -1
  51. package/dist/cjs/tsconfig.cjs.tsbuildinfo +1 -1
  52. package/dist/esm/ai/ApexAI.d.ts.map +1 -1
  53. package/dist/esm/ai/ApexAI.js +42 -23
  54. package/dist/esm/ai/ApexAI.js.map +1 -1
  55. package/dist/esm/ai/ApexModules.d.ts.map +1 -1
  56. package/dist/esm/ai/ApexModules.js +35 -21
  57. package/dist/esm/ai/ApexModules.js.map +1 -1
  58. package/dist/esm/ai/buttons/tools.js +8 -8
  59. package/dist/esm/ai/buttons/tools.js.map +1 -1
  60. package/dist/esm/ai/functions/tokenizer.d.ts +10 -0
  61. package/dist/esm/ai/functions/tokenizer.d.ts.map +1 -0
  62. package/dist/esm/ai/functions/tokenizer.js +64 -0
  63. package/dist/esm/ai/functions/tokenizer.js.map +1 -0
  64. package/dist/esm/ai/modals/electronHub/chatmodels.d.ts.map +1 -0
  65. package/dist/esm/ai/{modals-chat → modals}/electronHub/chatmodels.js +8 -11
  66. package/dist/esm/ai/modals/electronHub/chatmodels.js.map +1 -0
  67. package/dist/esm/ai/modals/electronHub/imageModels.d.ts.map +1 -0
  68. package/dist/esm/ai/{modals-chat → modals}/electronHub/imageModels.js +9 -5
  69. package/dist/esm/ai/modals/electronHub/imageModels.js.map +1 -0
  70. package/dist/esm/ai/modals/electronHub/songModels.d.ts.map +1 -0
  71. package/dist/esm/ai/modals/electronHub/songModels.js.map +1 -0
  72. package/dist/esm/ai/modals/electronHub/speechModels.d.ts.map +1 -0
  73. package/dist/esm/ai/{modals-chat → modals}/electronHub/speechModels.js +4 -1
  74. package/dist/esm/ai/modals/electronHub/speechModels.js.map +1 -0
  75. package/dist/esm/ai/modals/electronHub/videoModels.d.ts.map +1 -0
  76. package/dist/esm/ai/modals/electronHub/videoModels.js.map +1 -0
  77. package/dist/esm/ai/modals/groq/chatgroq.d.ts.map +1 -0
  78. package/dist/esm/ai/modals/groq/chatgroq.js.map +1 -0
  79. package/dist/esm/ai/modals/groq/imageAnalyzer.d.ts.map +1 -0
  80. package/dist/esm/ai/modals/groq/imageAnalyzer.js.map +1 -0
  81. package/dist/esm/ai/modals/groq/whisper.d.ts.map +1 -0
  82. package/dist/esm/ai/{modals-chat → modals}/groq/whisper.js +34 -33
  83. package/dist/esm/ai/modals/groq/whisper.js.map +1 -0
  84. package/dist/esm/ai/modals/hercai/chatModels.d.ts.map +1 -0
  85. package/dist/esm/ai/modals/hercai/chatModels.js.map +1 -0
  86. package/dist/esm/ai/utils.d.ts +1 -1
  87. package/dist/esm/ai/utils.d.ts.map +1 -1
  88. package/dist/esm/ai/utils.js +1 -1
  89. package/dist/esm/ai/utils.js.map +1 -1
  90. package/dist/esm/canvas/ApexPainter.d.ts +20 -15
  91. package/dist/esm/canvas/ApexPainter.d.ts.map +1 -1
  92. package/dist/esm/canvas/ApexPainter.js +143 -23
  93. package/dist/esm/canvas/ApexPainter.js.map +1 -1
  94. package/dist/esm/canvas/utils/Image/imageProperties.d.ts.map +1 -1
  95. package/dist/esm/canvas/utils/Image/imageProperties.js +47 -24
  96. package/dist/esm/canvas/utils/Image/imageProperties.js.map +1 -1
  97. package/dist/esm/canvas/utils/types.d.ts +21 -12
  98. package/dist/esm/canvas/utils/types.d.ts.map +1 -1
  99. package/dist/esm/canvas/utils/types.js.map +1 -1
  100. package/dist/esm/canvas/utils/utils.d.ts +2 -2
  101. package/dist/esm/canvas/utils/utils.d.ts.map +1 -1
  102. package/dist/esm/tsconfig.esm.tsbuildinfo +1 -1
  103. package/lib/ai/ApexAI.ts +69 -48
  104. package/lib/ai/ApexModules.ts +45 -33
  105. package/lib/ai/buttons/tools.ts +8 -8
  106. package/lib/ai/functions/tokenizer.ts +69 -0
  107. package/lib/ai/modals/electronHub/chatmodels.ts +57 -0
  108. package/lib/ai/{modals-chat → modals}/electronHub/imageModels.ts +17 -13
  109. package/lib/ai/{modals-chat → modals}/electronHub/speechModels.ts +5 -1
  110. package/lib/ai/modals/groq/whisper.ts +114 -0
  111. package/lib/ai/utils.ts +1 -1
  112. package/lib/canvas/ApexPainter.ts +214 -45
  113. package/lib/canvas/utils/Image/imageProperties.ts +67 -24
  114. package/lib/canvas/utils/types.ts +22 -14
  115. package/lib/canvas/utils/utils.ts +4 -2
  116. package/package.json +2 -2
  117. package/dist/cjs/ai/modals-chat/electronHub/chatmodels.d.ts.map +0 -1
  118. package/dist/cjs/ai/modals-chat/electronHub/chatmodels.js.map +0 -1
  119. package/dist/cjs/ai/modals-chat/electronHub/imageModels.d.ts.map +0 -1
  120. package/dist/cjs/ai/modals-chat/electronHub/imageModels.js.map +0 -1
  121. package/dist/cjs/ai/modals-chat/electronHub/songModels.d.ts.map +0 -1
  122. package/dist/cjs/ai/modals-chat/electronHub/songModels.js.map +0 -1
  123. package/dist/cjs/ai/modals-chat/electronHub/speechModels.d.ts.map +0 -1
  124. package/dist/cjs/ai/modals-chat/electronHub/speechModels.js.map +0 -1
  125. package/dist/cjs/ai/modals-chat/electronHub/videoModels.d.ts.map +0 -1
  126. package/dist/cjs/ai/modals-chat/electronHub/videoModels.js.map +0 -1
  127. package/dist/cjs/ai/modals-chat/groq/chatgroq.d.ts.map +0 -1
  128. package/dist/cjs/ai/modals-chat/groq/chatgroq.js.map +0 -1
  129. package/dist/cjs/ai/modals-chat/groq/imageAnalyzer.d.ts.map +0 -1
  130. package/dist/cjs/ai/modals-chat/groq/imageAnalyzer.js.map +0 -1
  131. package/dist/cjs/ai/modals-chat/groq/whisper.d.ts.map +0 -1
  132. package/dist/cjs/ai/modals-chat/groq/whisper.js.map +0 -1
  133. package/dist/cjs/ai/modals-chat/hercai/chatModels.d.ts.map +0 -1
  134. package/dist/cjs/ai/modals-chat/hercai/chatModels.js.map +0 -1
  135. package/dist/esm/ai/modals-chat/electronHub/chatmodels.d.ts.map +0 -1
  136. package/dist/esm/ai/modals-chat/electronHub/chatmodels.js.map +0 -1
  137. package/dist/esm/ai/modals-chat/electronHub/imageModels.d.ts.map +0 -1
  138. package/dist/esm/ai/modals-chat/electronHub/imageModels.js.map +0 -1
  139. package/dist/esm/ai/modals-chat/electronHub/songModels.d.ts.map +0 -1
  140. package/dist/esm/ai/modals-chat/electronHub/songModels.js.map +0 -1
  141. package/dist/esm/ai/modals-chat/electronHub/speechModels.d.ts.map +0 -1
  142. package/dist/esm/ai/modals-chat/electronHub/speechModels.js.map +0 -1
  143. package/dist/esm/ai/modals-chat/electronHub/videoModels.d.ts.map +0 -1
  144. package/dist/esm/ai/modals-chat/electronHub/videoModels.js.map +0 -1
  145. package/dist/esm/ai/modals-chat/groq/chatgroq.d.ts.map +0 -1
  146. package/dist/esm/ai/modals-chat/groq/chatgroq.js.map +0 -1
  147. package/dist/esm/ai/modals-chat/groq/imageAnalyzer.d.ts.map +0 -1
  148. package/dist/esm/ai/modals-chat/groq/imageAnalyzer.js.map +0 -1
  149. package/dist/esm/ai/modals-chat/groq/whisper.d.ts.map +0 -1
  150. package/dist/esm/ai/modals-chat/groq/whisper.js.map +0 -1
  151. package/dist/esm/ai/modals-chat/hercai/chatModels.d.ts.map +0 -1
  152. package/dist/esm/ai/modals-chat/hercai/chatModels.js.map +0 -1
  153. package/lib/ai/modals-chat/electronHub/chatmodels.ts +0 -64
  154. package/lib/ai/modals-chat/groq/whisper.ts +0 -113
  155. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/chatmodels.d.ts +0 -0
  156. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/imageModels.d.ts +0 -0
  157. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/songModels.d.ts +0 -0
  158. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/songModels.js +0 -0
  159. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/speechModels.d.ts +0 -0
  160. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/videoModels.d.ts +0 -0
  161. /package/dist/cjs/ai/{modals-chat → modals}/electronHub/videoModels.js +0 -0
  162. /package/dist/cjs/ai/{modals-chat → modals}/groq/chatgroq.d.ts +0 -0
  163. /package/dist/cjs/ai/{modals-chat → modals}/groq/chatgroq.js +0 -0
  164. /package/dist/cjs/ai/{modals-chat → modals}/groq/imageAnalyzer.d.ts +0 -0
  165. /package/dist/cjs/ai/{modals-chat → modals}/groq/imageAnalyzer.js +0 -0
  166. /package/dist/cjs/ai/{modals-chat → modals}/groq/whisper.d.ts +0 -0
  167. /package/dist/cjs/ai/{modals-chat → modals}/hercai/chatModels.d.ts +0 -0
  168. /package/dist/cjs/ai/{modals-chat → modals}/hercai/chatModels.js +0 -0
  169. /package/dist/esm/ai/{modals-chat → modals}/electronHub/chatmodels.d.ts +0 -0
  170. /package/dist/esm/ai/{modals-chat → modals}/electronHub/imageModels.d.ts +0 -0
  171. /package/dist/esm/ai/{modals-chat → modals}/electronHub/songModels.d.ts +0 -0
  172. /package/dist/esm/ai/{modals-chat → modals}/electronHub/songModels.js +0 -0
  173. /package/dist/esm/ai/{modals-chat → modals}/electronHub/speechModels.d.ts +0 -0
  174. /package/dist/esm/ai/{modals-chat → modals}/electronHub/videoModels.d.ts +0 -0
  175. /package/dist/esm/ai/{modals-chat → modals}/electronHub/videoModels.js +0 -0
  176. /package/dist/esm/ai/{modals-chat → modals}/groq/chatgroq.d.ts +0 -0
  177. /package/dist/esm/ai/{modals-chat → modals}/groq/chatgroq.js +0 -0
  178. /package/dist/esm/ai/{modals-chat → modals}/groq/imageAnalyzer.d.ts +0 -0
  179. /package/dist/esm/ai/{modals-chat → modals}/groq/imageAnalyzer.js +0 -0
  180. /package/dist/esm/ai/{modals-chat → modals}/groq/whisper.d.ts +0 -0
  181. /package/dist/esm/ai/{modals-chat → modals}/hercai/chatModels.d.ts +0 -0
  182. /package/dist/esm/ai/{modals-chat → modals}/hercai/chatModels.js +0 -0
  183. /package/lib/ai/{modals-chat → modals}/electronHub/songModels.ts +0 -0
  184. /package/lib/ai/{modals-chat → modals}/electronHub/videoModels.ts +0 -0
  185. /package/lib/ai/{modals-chat → modals}/groq/chatgroq.ts +0 -0
  186. /package/lib/ai/{modals-chat → modals}/groq/imageAnalyzer.ts +0 -0
  187. /package/lib/ai/{modals-chat → modals}/hercai/chatModels.ts +0 -0
@@ -1 +1 @@
- {"root":["../../lib/index.ts","../../lib/utils.ts","../../lib/ai/apexai.ts","../../lib/ai/apexmodules.ts","../../lib/ai/utils.ts","../../lib/ai/buttons/drawmenu.ts","../../lib/ai/buttons/tools.ts","../../lib/ai/functions/readfiles.ts","../../lib/ai/functions/typewriter.ts","../../lib/ai/functions/validoptions.ts","../../lib/ai/modals-chat/electronhub/chatmodels.ts","../../lib/ai/modals-chat/electronhub/imagemodels.ts","../../lib/ai/modals-chat/electronhub/songmodels.ts","../../lib/ai/modals-chat/electronhub/speechmodels.ts","../../lib/ai/modals-chat/electronhub/videomodels.ts","../../lib/ai/modals-chat/groq/chatgroq.ts","../../lib/ai/modals-chat/groq/imageanalyzer.ts","../../lib/ai/modals-chat/groq/whisper.ts","../../lib/ai/modals-chat/hercai/chatmodels.ts","../../lib/canvas/apexpainter.ts","../../lib/canvas/themes/level-up/levelup.ts","../../lib/canvas/utils/types.ts","../../lib/canvas/utils/utils.ts","../../lib/canvas/utils/background/bg.ts","../../lib/canvas/utils/charts/charts.ts","../../lib/canvas/utils/custom/customlines.ts","../../lib/canvas/utils/general/conversion.ts","../../lib/canvas/utils/general/general functions.ts","../../lib/canvas/utils/image/imageproperties.ts","../../lib/canvas/utils/texts/textproperties.ts"],"version":"5.7.3"}
+ {"root":["../../lib/index.ts","../../lib/utils.ts","../../lib/ai/apexai.ts","../../lib/ai/apexmodules.ts","../../lib/ai/utils.ts","../../lib/ai/buttons/drawmenu.ts","../../lib/ai/buttons/tools.ts","../../lib/ai/functions/readfiles.ts","../../lib/ai/functions/tokenizer.ts","../../lib/ai/functions/typewriter.ts","../../lib/ai/functions/validoptions.ts","../../lib/ai/modals/electronhub/chatmodels.ts","../../lib/ai/modals/electronhub/imagemodels.ts","../../lib/ai/modals/electronhub/songmodels.ts","../../lib/ai/modals/electronhub/speechmodels.ts","../../lib/ai/modals/electronhub/videomodels.ts","../../lib/ai/modals/groq/chatgroq.ts","../../lib/ai/modals/groq/imageanalyzer.ts","../../lib/ai/modals/groq/whisper.ts","../../lib/ai/modals/hercai/chatmodels.ts","../../lib/canvas/apexpainter.ts","../../lib/canvas/themes/level-up/levelup.ts","../../lib/canvas/utils/types.ts","../../lib/canvas/utils/utils.ts","../../lib/canvas/utils/background/bg.ts","../../lib/canvas/utils/charts/charts.ts","../../lib/canvas/utils/custom/customlines.ts","../../lib/canvas/utils/general/conversion.ts","../../lib/canvas/utils/general/general functions.ts","../../lib/canvas/utils/image/imageproperties.ts","../../lib/canvas/utils/texts/textproperties.ts"],"version":"5.7.3"}
package/lib/ai/ApexAI.ts CHANGED
@@ -8,12 +8,12 @@ import {
  } from "discord.js";
  import {
  joinVoiceChannel, createAudioPlayer, createAudioResource, EndBehaviorType,
- VoiceConnection, DiscordGatewayAdapterCreator, AudioPlayerStatus
+ VoiceConnection, AudioPlayerStatus
  } from "@discordjs/voice";

  import { filters } from "./buttons/tools";
  import { imageTools } from "./buttons/drawMenu";
- import { whisper } from "./modals-chat/groq/whisper";
+ import { whisper } from "./modals/groq/whisper";
  import { pipeline, Readable } from "stream";
  import prism from "prism-media"
  import path from "path";
@@ -568,11 +568,12 @@ export async function ApexAI(message: Message, ApexOptions: Options) {

  };

- const voiceQueue: { userId: string; text: string }[] = [];
+ let voiceQueue: { userId: string; text: string }[] = [];
  let isProcessing = false;
  let voiceConnection: VoiceConnection | null = null;
  let activeUser: string | null = null;
  let isRecording = false;
+ let silenceTimer: NodeJS.Timeout | null = null;

  const recordingsDir = path.join(process.cwd(), "recordings");
  if (!fs.existsSync(recordingsDir)) {
@@ -581,17 +582,21 @@ if (!fs.existsSync(recordingsDir)) {

  try {
  execSync("ffmpeg -version > nul 2>&1");
- } catch (err) {
+ } catch (err) {
  console.error("🚨 FFmpeg is NOT installed or not in PATH! Install it first.");
  }

+ function resetSilenceTimer() {
+ if (silenceTimer) clearTimeout(silenceTimer);
+ }
+
  export async function handleVoiceAI(message: any, voiceChannelId: string, chat: any, modelName?: string, personality?: string) {
  const guild = message.guild;
  if (!guild) return;

  const channel = guild.channels.cache.get(voiceChannelId);
  if (!channel || channel.type !== 2) {
- return await message.reply(`🚫 Invalid voice channel ID: ${voiceChannelId}`);
+ return await message.reply(`🚫 Invalid voice channel ID: ${voiceChannelId}`);
  }

  const botMember = guild.members.me;
@@ -608,7 +613,7 @@ export async function handleVoiceAI(message: any, voiceChannelId: string, chat:
  }

  if (voiceConnection) {
- return await message.reply("⚠️ AI is already in a voice channel.");
+ return await message.reply("⚠️ AI is already in a voice channel.");
  }

  voiceConnection = joinVoiceChannel({
@@ -620,7 +625,6 @@ export async function handleVoiceAI(message: any, voiceChannelId: string, chat:
  });

  activeUser = message.author.id;
-
  captureAudio(voiceConnection, chat, modelName, personality);
  }

@@ -628,15 +632,18 @@ function captureAudio(connection: VoiceConnection, chat: any, modelName?: string
  const receiver = connection.receiver;

  receiver.speaking.on("start", async (userId) => {
- if (userId !== activeUser || isRecording) return;
- isRecording = true;
+ if (userId !== activeUser) {
+ activeUser = userId;
+ isRecording = false;
+ }

+ resetSilenceTimer();

  const rawFilePath = path.join(recordingsDir, `${userId}.pcm`);
  const wavFilePath = path.join(recordingsDir, `${userId}.wav`);

  const opusStream = receiver.subscribe(userId, {
- end: { behavior: EndBehaviorType.AfterSilence, duration: 2000 }
+ end: { behavior: EndBehaviorType.AfterSilence, duration: 2000 }
  });

  const pcmStream = new prism.opus.Decoder({
@@ -647,35 +654,55 @@ function captureAudio(connection: VoiceConnection, chat: any, modelName?: string

  const writeStream = fs.createWriteStream(rawFilePath);
  pipeline(opusStream, pcmStream, writeStream, (err) => {
- isRecording = false;
  if (err) {
  console.error("❌ Error writing PCM file:", err);
  return;
  }
-
  convertPCMtoWAV(rawFilePath, wavFilePath, chat, modelName, personality);
  });
  });
+
+ receiver.speaking.on("end", async (userId) => {
+ if (userId === activeUser) {
+ startSilenceTimer(chat, modelName, personality);
+ }
+ });
+ }
+
+ function startSilenceTimer(chat: any, modelName?: string, personality?: string) {
+ resetSilenceTimer();
+ silenceTimer = setTimeout(() => {
+ if (voiceQueue.length > 0) {
+ const nextUser = voiceQueue.shift();
+ if (nextUser) {
+ activeUser = nextUser.userId;
+ processQueue(chat, modelName, personality);
+ }
+ } else {
+ leaveVoiceChannel();
+ }
+ }, 5000);
  }

+
  function convertPCMtoWAV(inputPCM: string, outputWAV: string, chat: any, modelName?: string, personality?: string) {
- if (!fs.existsSync(inputPCM) || fs.statSync(inputPCM).size === 0) {
- return;
- }
+ if (!fs.existsSync(inputPCM) || fs.statSync(inputPCM).size === 0) {
+ return;
+ }

- try {
- execSync(`ffmpeg -y -f s16le -ar 48000 -ac 1 -i "${inputPCM}" -acodec pcm_s16le "${outputWAV}" > nul 2>&1`);
+ try {
+ execSync(`ffmpeg -y -f s16le -ar 48000 -ac 1 -i "${inputPCM}" -acodec pcm_s16le "${outputWAV}" > nul 2>&1`);

- if (fs.existsSync(outputWAV)) {
- transcribeAudio(outputWAV, chat, modelName, personality);
- }
+ if (fs.existsSync(outputWAV)) {
+ transcribeAudio(outputWAV, chat, modelName, personality);
+ }

- } catch (error) {
- console.error("❌ FFmpeg failed:", error);
- }
+ } catch (error) {
+ console.error("❌ FFmpeg failed:", error);
+ }
  }

- // 🛠 **Transcribe Audio using ApexListener**
+
  async function transcribeAudio(filePath: string, chat: any, modelName?: string, personality?: string) {
  try {
  const transcribedText = await ApexListener({
@@ -689,47 +716,42 @@ async function transcribeAudio(filePath: string, chat: any, modelName?: string,
  processQueue(chat, modelName, personality);
  }

- const pcmFile = filePath.replace(".wav", ".pcm");
- if (fs.existsSync(pcmFile)) fs.unlinkSync(pcmFile);
- if (fs.existsSync(filePath)) fs.unlinkSync(filePath);
-
+ fs.unlinkSync(filePath);
  } catch (error) {
  console.error("❌ Error in transcription:", error);
  }
  }

  async function processQueue(chat: any, modelName?: string, personality?: string) {
- if (isProcessing || voiceQueue.length === 0) {
- if (voiceQueue.length === 0) {
- leaveVoiceChannel();
- }
- return;
- }
+ if (isProcessing || voiceQueue.length === 0) return;

  isProcessing = true;
  const { userId, text } = voiceQueue.shift()!;
+ activeUser = userId;
+ resetSilenceTimer();

  try {
+ const aiResponse = await ApexChat(chat?.chatModel || "gpt-4o", text, {
+ instruction: chat.instruction,
+ memory: chat?.memory?.memoryOn,
+ userId,
+ limit: chat?.memory?.limit,
+ threshold: chat?.memory?.threshold
+ });

- const aiResponse = await ApexChat(chat?.chatModel as string || "gpt-4o", text, {
- instruction: chat.instruction,
- memory: chat?.memory?.memoryOn,
- userId: userId,
- limit: chat?.memory?.limit,
- threshold: chat?.memory?.threshold
- });
- const audioBuffer = await ApexText2Speech({ inputText: aiResponse, modelName, personality });
+ const audioBuffer = await ApexText2Speech({ inputText: aiResponse, modelName, personality });

  if (voiceConnection) {
  const player = createAudioPlayer();
- const audioStream = Readable.from(audioBuffer);
- const resource = createAudioResource(audioStream);
+ const resource = createAudioResource(Readable.from(audioBuffer));
  voiceConnection.subscribe(player);
  player.play(resource);

  player.on(AudioPlayerStatus.Idle, () => {
  isProcessing = false;
- processQueue(chat);
+ if (voiceQueue.length > 0) {
+ processQueue(chat, modelName, personality);
+ }
  });
  }
  } catch (error) {
@@ -738,12 +760,11 @@ async function processQueue(chat: any, modelName?: string, personality?: string)
  }
  }

- // 🔄 **Leave Voice Channel When Done**
  function leaveVoiceChannel() {
  if (voiceConnection) {
- console.log("👋 AI is leaving the voice channel...");
  voiceConnection.destroy();
  voiceConnection = null;
  activeUser = null;
+ resetSilenceTimer();
  }
- }
+ }
package/lib/ai/ApexModules.ts CHANGED
@@ -1,13 +1,13 @@
  import { Hercai } from 'hercai';
  import { validateModels } from "./functions/validOptions";
- import { whisper } from './modals-chat/groq/whisper';
+ import { whisper } from './modals/groq/whisper';
  import { connect } from 'verse.db';
- import { chatGroq } from './modals-chat/groq/chatgroq';
+ import { chatGroq } from './modals/groq/chatgroq';
  import { groqAnalyzer } from './utils';
- import { electronImagine } from './modals-chat/electronHub/imageModels';
- import { electronChat } from './modals-chat/electronHub/chatmodels';
- import { electronSpeech } from './modals-chat/electronHub/speechModels';
- import { electronVideo } from './modals-chat/electronHub/videoModels';
+ import { electronImagine } from './modals/electronHub/imageModels';
+ import { electronChat } from './modals/electronHub/chatmodels';
+ import { electronSpeech } from './modals/electronHub/speechModels';
+ import { electronVideo } from './modals/electronHub/videoModels';
  import { GoogleGenerativeAI } from"@google/generative-ai";


@@ -601,22 +601,21 @@ async function ApexChat(
  let enhancedPrompt: string = prompt;
  let historyChat: string = `- This is Previous chat history between you (System) and the user (User). Don't use the history or reply back anything related to it unless being mentioned in user's new prompt\n`;

- let commandString = instruction ? `- Given Constant instruction for the System (AI) to abide and to always follow:\n${instruction}\n\n` : '';
-
  if (memory && userId) {
  const userHistoryResult = (await db.find(`${userId}`, { userId })).results?.history || [];
  const historyItems = Array.isArray(userHistoryResult) ? userHistoryResult : [];
-
+
+ historyItems.sort((a, b) => b.currentTime - a.currentTime);
+
  const relevantMessages = await getRelevantHistoryAI(prompt, historyItems, limit ?? 12, threshold);
-
-
+
  let formattedHistory = '';
  relevantMessages.forEach((item) => {
  formattedHistory += `User: ${item.user}\nSystem: ${item.model}\n\n`;
  });
-
+
  historyChat += `${formattedHistory}\n\n`;
- enhancedPrompt = `${commandString} - This is the User's New Prompt:\n ${prompt}\n\n${historyChat}`;
+ enhancedPrompt = `This is the User's New Prompt:\n ${prompt}\n\n${historyChat}`;
  }

  let response: string;
@@ -636,8 +635,8 @@
  response = await processChunk(model, enhancedPrompt, { ApiKey: Api_key, personality: instruction });
  }

- if (memory && userId && model !== 'gemini-pro' && model !== 'gemini-flash') {
- await db.update(`${userId}`, { userId }, { $push: { history: { user: prompt, model: response } } }, true);
+ if (memory && userId) {
+ await db.update(`${userId}`, { userId }, { $push: { history: { user: prompt, model: response } }, $set: { currentTime: Date.now() } }, true);
  }

  return response;
@@ -652,28 +651,41 @@ const genAI = new GoogleGenerativeAI('AIzaSyAdlBVg12yjqqGfBqxT5DLGMhP2jysG7Hk');
  const model = genAI.getGenerativeModel({ model: "text-embedding-004" });

  async function getRelevantHistoryAI(prompt: string, history: any[], maxResults: number, threshold: number = 0.7): Promise<any[]> {
+ const trimmedPrompt = prompt.length > 20000 ? prompt.slice(0, 10000) + "..." + prompt.slice(-10000) : prompt;
+
+ // Batch embeddings instead of one-by-one calls
+ const [promptEmbedding, historyEmbeddings] = await Promise.all([
+ embedding(trimmedPrompt),
+ embeddingBatch(history.map(item => item.user))
+ ]);
+
+ // Calculate cosine similarity in a single loop (faster)
+ const scores = historyEmbeddings.map((messageEmbedding, index) => ({
+ item: history[index],
+ score: cosineSimilarity(promptEmbedding, messageEmbedding)
+ }));
+
+ return scores
+ .filter(entry => entry.score >= threshold)
+ .sort((a, b) => b.score - a.score)
+ .slice(0, maxResults)
+ .map(entry => entry.item);
+ }

- const trimmedPrompt = prompt.length > 20000 ? prompt.slice(0, 20000) : prompt;
- const promptEmbedding = await generateEmbedding(trimmedPrompt);
-
-
- const scores = await Promise.all(
- history.map(async (item) => {
- const trimmedUser = item.user.length > 20000 ? item.user.slice(0, 20000) : item.user;
-
- const messageEmbedding = await generateEmbedding(trimmedUser);
- const score = cosineSimilarity(promptEmbedding, messageEmbedding);
- return { item, score };
- })
- );
+ async function embeddingBatch(texts: string[]): Promise<number[][]> {
+ let embeddings: number[][] = [];

- const filteredResults = scores.filter(entry => entry.score >= threshold);
- filteredResults.sort((a, b) => b.score - a.score);
+ for (const text of texts) {
+ const trimmedText = text.length > 20000 ? text.slice(0, 10000) + "..." + text.slice(-10000) : text;
+ const embedded = await embedding(trimmedText);
+ embeddings.push(embedded);
+ }

- return filteredResults.slice(0, maxResults).map(entry => entry.item);
+ return embeddings;
  }

- async function generateEmbedding(text: string): Promise<number[]> {
+
+ async function embedding(text: string): Promise<number[]> {
  let maxRetries = 5;
  let delay = 5000;
  let attempt = 0;
@@ -684,7 +696,7 @@ async function generateEmbedding(text: string): Promise<number[]> {
  const response = await model.embedContent(text);


- return response.embedding.values;
+ return response.embedding.values as number[];
  } catch (error: any) {
  if (error.response) {
  console.warn(`❌ API Error - Status: ${error.response.status}, Message: ${error.response.statusText}`);
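Note (illustrative, not part of the published diff): the reworked getRelevantHistoryAI above scores each stored message with cosineSimilarity, a helper that is referenced but not shown in this diff. A standard cosine-similarity routine over two equal-length embedding vectors looks roughly like this sketch; the package's actual helper may differ in name and guards:

    // Generic sketch of cosine similarity between two embedding vectors.
    function cosineSimilarity(a: number[], b: number[]): number {
      let dot = 0, normA = 0, normB = 0;
      for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];     // accumulate dot product
        normA += a[i] * a[i];   // squared magnitude of a
        normB += b[i] * b[i];   // squared magnitude of b
      }
      return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1); // zero-safe denominator
    }

Scores at or above the threshold (0.7 by default) are kept, sorted descending, and capped at maxResults, as the new code above shows.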
package/lib/ai/buttons/tools.ts CHANGED
@@ -175,7 +175,7 @@ async function filters(Apex: any) {
  }

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "brightness", value: parseFloat(brightnessDegree) },
  ]);

@@ -217,7 +217,7 @@ async function filters(Apex: any) {
  const imageURL = selectedAttachment.url;

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "sepia" },
  ]);
  await a.editReply({
@@ -283,7 +283,7 @@ async function filters(Apex: any) {
  const [x, y] = xyArray;
  const [width, height] = whArray;

- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  {
  type: "pixelate",
  size: parseInt(size, 10),
@@ -340,7 +340,7 @@ async function filters(Apex: any) {
  }

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "blur", radius: parseFloat(blurRadius) },
  ]);
  await a.editReply({
@@ -390,7 +390,7 @@ async function filters(Apex: any) {
  }

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "fade", factor: parseFloat(fadeFactor) },
  ]);
  await a.editReply({
@@ -431,7 +431,7 @@ async function filters(Apex: any) {
  const imageURL = selectedAttachment.url;

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "opaque" },
  ]);
  await a.editReply({
@@ -472,7 +472,7 @@ async function filters(Apex: any) {
  const imageURL = selectedAttachment.url;

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "greyscale" },
  ]);
  await a.editReply({
@@ -522,7 +522,7 @@ async function filters(Apex: any) {
  }

  try {
- const processedBuffer = await apexPainter.processImage(imageURL, [
+ const processedBuffer = await apexPainter.effects(imageURL, [
  { type: "contrast", value: parseFloat(contrastValue) },
  ]);
  await a.editReply({
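Note (illustrative, not part of the published diff): every filter button above now calls apexPainter.effects(...) instead of the old processImage(...); the argument shape is unchanged. Assuming an ApexPainter instance like the one used in tools.ts, a brightness filter would be applied as:

    // Same call shape as in the hunks above; the value here is a placeholder.
    const processedBuffer = await apexPainter.effects(imageURL, [
      { type: "brightness", value: 1.2 },
    ]);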
package/lib/ai/functions/tokenizer.ts ADDED
@@ -0,0 +1,69 @@
+ /**
+ * Optimized tokenization function that processes characters in a single pass.
+ * - Splits words & punctuation efficiently.
+ * - Handles contractions (e.g., "can't", "it's").
+ */
+ function optimizedTokenize(text: string): string[] {
+ const tokens: string[] = [];
+ let currentToken = '';
+
+ for (let i = 0; i < text.length; i++) {
+ const char = text[i];
+
+ if (char.match(/\w/)) {
+ currentToken += char;
+ } else {
+ if (currentToken) {
+ tokens.push(currentToken);
+ currentToken = '';
+ }
+ if (char.match(/[^\s]/)) {
+ tokens.push(char);
+ }
+ }
+ }
+
+ if (currentToken) {
+ tokens.push(currentToken);
+ }
+
+ return tokens;
+ }
+
+ /**
+ * Returns the token count using the optimized tokenizer.
+ */
+ function optimizedTokenCount(text: string): number {
+ return optimizedTokenize(text).length;
+ }
+
+ /**
+ * Trims the user’s prompt to ensure total tokens do not exceed 3964.
+ * - Keeps the full instruction intact.
+ * - Trims only the user’s prompt if needed.
+ */
+ export function tokenLimit(instruction: string, prompt: string, maxTokens = 3964): { instruction: string, prompt: string } {
+ const instructionTokens = optimizedTokenCount(instruction);
+
+ const remainingTokens = maxTokens - instructionTokens;
+
+ if (remainingTokens <= 0) {
+ throw new Error("Instruction alone exceeds max token limit! This should never happen.");
+ }
+
+ return {
+ instruction,
+ prompt: optimizedTrimText(prompt, remainingTokens)
+ };
+ }
+
+ /**
+ * Trims the input text to fit within the token limit.
+ */
+ function optimizedTrimText(text: string, maxTokens: number): string {
+ const tokens = optimizedTokenize(text);
+ if (tokens.length <= maxTokens) {
+ return text;
+ }
+ return tokens.slice(0, maxTokens).join(' ');
+ }
package/lib/ai/modals/electronHub/chatmodels.ts ADDED
@@ -0,0 +1,57 @@
+ import OpenAI from "openai";
+ import { tokenLimit } from "../../functions/tokenizer";
+ export async function electronChat({
+ ApiKey,
+ prompt,
+ modelName,
+ instruction,
+ }: {
+ ApiKey?: string;
+ prompt: string;
+ modelName: string;
+ instruction?: string;
+ }) {
+ try {
+ const apiKey = ApiKey || "ek-3gmOPmvuljmrl4NQrohpnp1ryNXQG5bNn08zNuzhX6bcxBrndR";
+ const openai = new OpenAI({
+ apiKey: apiKey,
+ baseURL: "https://api.electronhub.top/v1",
+ });
+
+ const models = await openai.models.list();
+ const modelExists = models.data.some((model: any) => model.id === modelName);
+ if (!modelExists) {
+ throw new Error('Invalid model name. Please check out Electron hub models for more info.');
+ }
+
+ const { instruction: finalInstruction, prompt: finalPrompt } = tokenLimit(instruction || "", prompt, 10000);
+
+ const messages: OpenAI.ChatCompletionMessageParam[] = [
+ { role: "system", content: finalInstruction },
+ { role: "user", content: finalPrompt }
+ ];
+
+ const completion = await openai.chat.completions.create({
+ model: modelName,
+ messages: messages
+ });
+
+ return completion.choices[0]?.message?.content;
+ } catch (e: any) {
+ if (e.response) {
+ if (e.response.status === 429) {
+ throw new Error(
+ "Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key."
+ );
+ } else if (e.response.status === 500) {
+ throw new Error("Server error. Please try again later.");
+ } else {
+ console.error("Error generating response:", e.response.data);
+ throw e;
+ }
+ } else {
+ console.error("Error generating response:", e.message);
+ throw e;
+ }
+ }
+ }
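Note (illustrative, not part of the published diff): electronChat validates the model name against the Electron Hub model list, caps the combined instruction and prompt at 10000 tokens via tokenLimit, and returns the first completion's content. A hypothetical call, with placeholder key and model name:

    const reply = await electronChat({
      ApiKey: process.env.ELECTRON_HUB_KEY,  // optional; a bundled default key is used if omitted
      modelName: "gpt-4o",                   // placeholder; must exist in the Electron Hub model list
      instruction: "You are a concise assistant.",
      prompt: "Summarize the 4.8.0 changes in one line.",
    });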
@@ -1,7 +1,9 @@
  import OpenAI from "openai";
  import sharp from "sharp";
  import { ApexPainter } from "../../../utils";
- const paint = new ApexPainter({ type: 'url' });
+ import { tokenLimit } from "../../functions/tokenizer";
+
+ const paint = new ApexPainter({ type: "url" });

  export async function electronImagine({
  ApiKey,
@@ -15,7 +17,7 @@ export async function electronImagine({
  resizeOptions?: {
  width?: number;
  height?: number;
- format?: "jpeg" | "png" ;
+ format?: "jpeg" | "png";
  quality?: number;
  };
  }): Promise<string | undefined> {
@@ -39,9 +41,11 @@ export async function electronImagine({
  }

  try {
+ const { prompt: trimmedPrompt } = tokenLimit("", prompt, 32000);
+
  const response = await openai.images.generate({
  model: modelName,
- prompt: prompt,
+ prompt: trimmedPrompt,
  n: 1,
  });

@@ -50,12 +54,12 @@ export async function electronImagine({

  if (
  !resizeOptions ||
- (
- (resizeOptions.width == null && resizeOptions.height == null && resizeOptions.format == null) ||
+ ((resizeOptions.width == null &&
+ resizeOptions.height == null &&
+ resizeOptions.format == null) ||
  (resizeOptions.width === 1024 &&
- resizeOptions.height === 1024 &&
- resizeOptions.format == null)
- )
+ resizeOptions.height === 1024 &&
+ resizeOptions.format == null))
  ) {
  return imageUrl;
  }
@@ -70,7 +74,7 @@ export async function electronImagine({
  imageProcessor = imageProcessor.resize({
  width: resizeOptions.width,
  height: resizeOptions.height,
- kernel: sharp.kernel.lanczos3,
+ kernel: sharp.kernel.lanczos3,
  withoutEnlargement: true,
  });
  }
@@ -85,13 +89,13 @@ export async function electronImagine({
  imageProcessor = imageProcessor.png({ quality });
  break;
  default:
- throw Error('We dont support this format only png and jpeg');
+ throw Error("We don't support this format, only png and jpeg.");
  }
  }

- const buffer = await imageProcessor.toBuffer();
- const output = await paint.outPut(buffer) as string;
- return output;
+ const buffer = await imageProcessor.toBuffer();
+ const output = (await paint.outPut(buffer)) as string;
+ return output;
  } catch (e: any) {
  if (e.response) {
  if (e.response.status === 429) {
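Note (illustrative, not part of the published diff): electronImagine now trims the prompt to 32000 tokens, generates one image, and only re-encodes it with sharp when resizeOptions actually changes something; otherwise the original URL is returned. A hypothetical call with resizing enabled:

    const imageUrl = await electronImagine({
      prompt: "a lighthouse at dusk, oil painting",
      modelName: "flux-schnell",  // placeholder; must be a valid Electron Hub image model
      resizeOptions: { width: 512, height: 512, format: "png", quality: 90 },
    });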
@@ -1,5 +1,6 @@
  import OpenAI from 'openai';
  import axios from 'axios';
+ import { tokenLimit } from '../../functions/tokenizer'; // ✅ Import token limiter

  export async function electronSpeech({
  ApiKey,
@@ -37,12 +38,15 @@ export async function electronSpeech({
  throw new Error('Invalid model name. Please check out Electron hub models for more info.');
  }

+ // ✅ Limit input text to max 3964 tokens
+ const { prompt: trimmedInputText } = tokenLimit("", inputText, 2500);
+
  const response = await axios.post(
  'https://api.electronhub.top/v1/audio/speech',
  {
  model: modelName,
  voice: personality || 'will',
- input: inputText,
+ input: trimmedInputText,
  },
  {
  headers: {