apexify.js 3.3.10 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/README.md +183 -0
  2. package/dist/ai/ApexAI.d.ts +88 -8
  3. package/dist/ai/ApexAI.d.ts.map +1 -1
  4. package/dist/ai/ApexAI.js +61 -161
  5. package/dist/ai/ApexAI.js.map +1 -1
  6. package/dist/ai/direct-use.d.ts +9 -0
  7. package/dist/ai/direct-use.d.ts.map +1 -0
  8. package/dist/ai/direct-use.js +572 -0
  9. package/dist/ai/direct-use.js.map +1 -0
  10. package/dist/ai/functions/Gemini.d.ts +6 -0
  11. package/dist/ai/functions/Gemini.d.ts.map +1 -0
  12. package/dist/ai/functions/Gemini.js +103 -0
  13. package/dist/ai/functions/Gemini.js.map +1 -0
  14. package/dist/ai/functions/draw.d.ts +2 -1
  15. package/dist/ai/functions/draw.d.ts.map +1 -1
  16. package/dist/ai/functions/draw.js +476 -102
  17. package/dist/ai/functions/draw.js.map +1 -1
  18. package/dist/ai/functions/generateVoiceResponse.d.ts +3 -1
  19. package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
  20. package/dist/ai/functions/generateVoiceResponse.js +29 -18
  21. package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
  22. package/dist/ai/functions/typeWriter.d.ts +1 -1
  23. package/dist/ai/functions/typeWriter.d.ts.map +1 -1
  24. package/dist/ai/functions/typeWriter.js +3 -2
  25. package/dist/ai/functions/typeWriter.js.map +1 -1
  26. package/dist/ai/functions/validOptions.d.ts +10 -0
  27. package/dist/ai/functions/validOptions.d.ts.map +1 -0
  28. package/dist/ai/functions/validOptions.js +130 -0
  29. package/dist/ai/functions/validOptions.js.map +1 -0
  30. package/dist/ai/modals-chat/Gemini-flash.d.ts +6 -0
  31. package/dist/ai/modals-chat/Gemini-flash.d.ts.map +1 -0
  32. package/dist/ai/modals-chat/Gemini-flash.js +111 -0
  33. package/dist/ai/modals-chat/Gemini-flash.js.map +1 -0
  34. package/dist/ai/modals-chat/Gemini-pro.d.ts +6 -0
  35. package/dist/ai/modals-chat/Gemini-pro.d.ts.map +1 -0
  36. package/dist/ai/modals-chat/Gemini-pro.js +96 -0
  37. package/dist/ai/modals-chat/Gemini-pro.js.map +1 -0
  38. package/dist/ai/modals-chat/apexChat.d.ts +2 -0
  39. package/dist/ai/modals-chat/apexChat.d.ts.map +1 -0
  40. package/dist/ai/modals-chat/apexChat.js +33 -0
  41. package/dist/ai/modals-chat/apexChat.js.map +1 -0
  42. package/dist/ai/modals-chat/config.d.ts +5 -0
  43. package/dist/ai/modals-chat/config.d.ts.map +1 -0
  44. package/dist/ai/modals-chat/config.js +13 -0
  45. package/dist/ai/modals-chat/config.js.map +1 -0
  46. package/dist/ai/modals-chat/facebook-ai.d.ts +2 -0
  47. package/dist/ai/modals-chat/facebook-ai.d.ts.map +1 -0
  48. package/dist/ai/modals-chat/facebook-ai.js +21 -0
  49. package/dist/ai/modals-chat/facebook-ai.js.map +1 -0
  50. package/dist/ai/modals-chat/gemma.d.ts +3 -0
  51. package/dist/ai/modals-chat/gemma.d.ts.map +1 -0
  52. package/dist/ai/modals-chat/gemma.js +54 -0
  53. package/dist/ai/modals-chat/gemma.js.map +1 -0
  54. package/dist/ai/modals-chat/modals.d.ts +8 -0
  55. package/dist/ai/modals-chat/modals.d.ts.map +1 -0
  56. package/dist/ai/modals-chat/modals.js +16 -0
  57. package/dist/ai/modals-chat/modals.js.map +1 -0
  58. package/dist/ai/modals-chat/starChat.d.ts +2 -0
  59. package/dist/ai/modals-chat/starChat.d.ts.map +1 -0
  60. package/dist/ai/modals-chat/starChat.js +32 -0
  61. package/dist/ai/modals-chat/starChat.js.map +1 -0
  62. package/dist/ai/modals-chat/yi-ai.d.ts +2 -0
  63. package/dist/ai/modals-chat/yi-ai.d.ts.map +1 -0
  64. package/dist/ai/modals-chat/yi-ai.js +41 -0
  65. package/dist/ai/modals-chat/yi-ai.js.map +1 -0
  66. package/dist/ai/utils.d.ts +3 -2
  67. package/dist/ai/utils.d.ts.map +1 -1
  68. package/dist/ai/utils.js +6 -4
  69. package/dist/ai/utils.js.map +1 -1
  70. package/dist/canvas/utils/general functions.js +1 -1
  71. package/dist/canvas/utils/general functions.js.map +1 -1
  72. package/lib/ai/ApexAI.ts +214 -224
  73. package/lib/ai/{models.ts → direct-use.ts} +9 -12
  74. package/lib/ai/functions/draw.ts +547 -149
  75. package/lib/ai/functions/generateVoiceResponse.ts +37 -22
  76. package/lib/ai/functions/typeWriter.ts +4 -2
  77. package/lib/ai/functions/validOptions.ts +210 -0
  78. package/lib/ai/modals-chat/Gemini-flash.ts +108 -0
  79. package/lib/ai/modals-chat/Gemini-pro.ts +93 -0
  80. package/lib/ai/modals-chat/apexChat.ts +31 -0
  81. package/lib/ai/modals-chat/config.ts +11 -0
  82. package/lib/ai/modals-chat/facebook-ai.ts +14 -0
  83. package/lib/ai/modals-chat/modals.ts +8 -0
  84. package/lib/ai/modals-chat/starChat.ts +31 -0
  85. package/lib/ai/modals-chat/yi-ai.ts +40 -0
  86. package/lib/ai/utils.ts +3 -1
  87. package/lib/canvas/utils/general functions.ts +1 -1
  88. package/package.json +5 -3
  89. package/lib/ai/functions/aivoice.ts +0 -0
package/lib/ai/ApexAI.ts CHANGED
@@ -1,5 +1,6 @@
1
1
  import { Hercai } from "hercai";
2
- const hercai = new Hercai()
2
+ import fs from "fs";
3
+ import path from "path";
3
4
  import {
4
5
  imageReader,
5
6
  toDraw,
@@ -18,70 +19,155 @@ import {
18
19
  } from "discord.js";
19
20
  import { filters } from "./buttons/tools";
20
21
  import { imageTools } from "./buttons/drawMenu";
22
+ import { ImageModals } from './functions/validOptions';
23
+ import { apexai, geminiFlash, geminiPro, facebook_ai, yi_34b, starChat } from "./modals-chat/modals";
21
24
 
22
-
25
+ /**
26
+ * Configuration options for the Gemini Flash functionality.
27
+ */
23
28
  export interface Options {
24
- voice?: {
25
- textVoice?:{
26
- enable?: boolean;
27
- voiceModal?: string;
28
- voice_code?: string;
29
- apiKey?: string;
30
- type?: string;
31
- };
32
- };
33
- imagine?: {
34
- enable?: boolean;
35
- drawTrigger?: string[];
36
- imageModal?: string;
37
- numOfImages?: number;
38
- nsfw?: {
29
+ /**
30
+ * Configuration options related to voice functionality.
31
+ * @param voice.textVoice Configuration options for text-to-voice functionality.
32
+ * @param voice.textVoice.enable Whether text-to-voice functionality is enabled.
33
+ * @param voice.textVoice.voiceModal The voice modal to be used.
34
+ * @param voice.textVoice.voice_code The voice code only for (apexAI and zenithAI modal).
35
+ * @param voice.textVoice.apiKey The API key for accessing the voice service only for (apexAI and zenithAI modal).
36
+ * @param voice.textVoice.type The type of voice only for (apexAI and zenithAI modal).
37
+ */
38
+ voice?: {
39
+ textVoice?: {
40
+ enable?: boolean;
41
+ voiceModal?: string;
42
+ voice_code?: string;
43
+ apiKey?: string;
44
+ type?: string;
45
+ };
46
+ };
47
+ /**
48
+ * Configuration options related to image generation.
49
+ * @param imagine.enable Whether image generation is enabled.
50
+ * @param imagine.drawTrigger The trigger phrases for initiating image generation.
51
+ * @param imagine.imageModal The modal for the image generation.
52
+ * @param imagine.numOfImages The number of images to generate.
53
+ * @param imagine.nsfw Configuration options for NSFW filtering.
54
+ * @param imagine.nsfw.enable Whether NSFW filtering is enabled.
55
+ * @param imagine.nsfw.keywords Keywords for NSFW filtering.
56
+ * @param imagine.enhancer Configuration options for image enhancement.
57
+ * @param imagine.enhancer.enable Whether image enhancement is enabled (rewrites your prompt in more descriptive way).
58
+ * @param imagine.enhancer.enhancerModal The modal for image enhancement only for (Prodia modals).
59
+ * @param imagine.enhancer.cfg_scale The scale for image enhancement only for (Prodia modals).
60
+ * @param imagine.enhancer.steps The number of enhancement steps only for (Prodia modals).
61
+ * @param imagine.enhancer.seed The seed for image enhancement only for (Prodia modals).
62
+ * @param imagine.enhancer.imgStyle The style of the image only for (Prodia modals).
63
+ * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia modals).
64
+ * @param imagine.enhancer.sampler The sampler for image enhancement only for (Prodia modals).
65
+ */
66
+ imagine?: {
67
+ enable?: boolean;
68
+ drawTrigger?: string[];
69
+ imageModal?: ImageModals;
70
+ numOfImages?: number;
71
+ nsfw?: {
39
72
  enable?: boolean;
40
73
  keywords?: string[];
41
- };
42
- enhancer?: boolean;
43
- };
44
- chat?: {
45
- chatModal?: string;
46
- readFiles?: boolean;
47
- readImages?: boolean;
48
- typeWriting?:{
74
+ deepCheck?: boolean;
75
+ };
76
+ enhancer?: {
77
+ enable: boolean;
78
+ enhancerModal?: string | "ESRGAN_4x" | "Lanczos" | "Nearest" | "LDSR" | "R-ESRGAN 4x+" | "R-ESRGAN 4x+ Anime6B" | "ScuNET GAN" | "ScuNET PSNR" | "SwinIR 4x" | undefined;
79
+ cfg_scale?: number;
80
+ steps?: number;
81
+ seed?: number;
82
+ imgStyle?: string | "3d-model" | "analog-film" | "anime" | "cinematic" | "comic-book" | "digital-art" | "enhance" | "isometric" | "fantasy-art" | "isometric" | "line-art" | "low-poly" | "neon-punk" | "origami" | "photographic" | "pixel-art" | "texture" | "craft-clay";
83
+ negative_prompt?: string;
84
+ sampler?: string | "DPM++ 2M Karras" | "DPM++ SDE Karras" | "DPM++ 2M SDE Exponential" | "DPM++ 2M SDE Karras" | "Euler a" | "Euler" | "LMS" | "Heun" | "DPM2" | "DPM2 a" | "DPM++ 2S a" | "DPM++ 2M" | "DPM++ SDE" | "DPM++ 2M SDE" | "DPM++ 2M SDE Heun" | "DPM++ 2M SDE Heun Karras" | "DPM++ 2M SDE Heun Exponential" | "DPM++ 3M SDE" | "DPM++ 3M SDE Karras" | "DPM++ 3M SDE Exponential" | "DPM fast" | "DPM adaptive" | "LMS Karras" | "DPM2 Karras" | "DPM2 a Karras" | "DPM++ 2S a Karras" | "Restart" | "DDIM" | "PLMS" | "UniPC";
85
+ };
86
+ };
87
+ /**
88
+ * Configuration options related to chat functionality.
89
+ * @param chat.chatModal The chat modal to be used.
90
+ * @param chat.readFiles Whether to read files.
91
+ * @param chat.readImages Whether to read images.
92
+ * @param chat.personality The personality for the chat.
93
+ * @param chat.API_KEY The API key for accessing the chat service.
94
+ * @param chat.memory Configuration options for memory.
95
+ * @param chat.memory.memoryOn Whether memory is enabled.
96
+ * @param chat.memory.id The ID for memory.
97
+ * @param chat.typeWriting Configuration options for typing effect.
98
+ * @param chat.typeWriting.enable Whether the typing effect is enabled.
99
+ * @param chat.typeWriting.speed The speed of typing.
100
+ * @param chat.typeWriting.delay The delay for typing.
101
+ */
102
+ chat?: {
103
+ chatModal?: string;
104
+ readFiles?: boolean;
105
+ readImages?: boolean;
106
+ personality?: string | any;
107
+ API_KEY?: string;
108
+ memory?: {
109
+ memoryOn: boolean;
110
+ id: string;
111
+ };
112
+ typeWriting?: {
49
113
  enable?: boolean;
50
114
  speed?: number;
51
115
  delay?: number;
52
- };
53
- };
54
- others?: {
55
- messageType: {
56
- type: string;
57
- intialContent: string;
58
- };
59
- keywords?: string[];
60
- keywordResponses?: Record<string, string>;
61
- loader?: {
62
- enable?: boolean;
63
- loadingMessage?: string;
64
- loadingTimer?: number;
65
- };
66
- channel?: {
116
+ };
117
+ };
118
+ /**
119
+ * Additional configuration options.
120
+ * @param others.messageType Configuration options for message types.
121
+ * @param others.messageType.type The type of message.
122
+ * @param others.messageType.intialContent The initial content of the message.
123
+ * @param others.buttons Buttons configuration.
124
+ * @param others.keywords Keywords for response.
125
+ * @param others.keywordResponses Responses for keywords.
126
+ * @param others.loader Configuration options for loader.
127
+ * @param others.loader.enable Whether the loader is enabled.
128
+ * @param others.loader.loadingMessage The loading message.
129
+ * @param others.loader.loadingTimer The loading timer.
130
+ * @param others.channel Configuration options for channels.
131
+ * @param others.channel.enable Whether channels are enabled.
132
+ * @param others.channel.id The ID of the channels.
133
+ * @param others.permissions Configuration options for permissions.
134
+ * @param others.permissions.enable Whether permissions are enabled.
135
+ * @param others.permissions.role The role for permissions.
136
+ * @param others.permissions.permission The permission.
137
+ * @param others.permissions.blockedUsers Blocked users.
138
+ */
139
+ others?: {
140
+ messageType?: {
141
+ type: string;
142
+ intialContent: string;
143
+ };
144
+ buttons?: any[];
145
+ keywords?: string[];
146
+ keywordResponses?: Record<string, string>;
147
+ loader?: {
148
+ enable?: boolean;
149
+ loadingMessage?: string;
150
+ loadingTimer?: number;
151
+ };
152
+ channel?: {
67
153
  enable?: boolean;
68
154
  id?: string[];
69
- };
70
- permissions?: {
155
+ };
156
+ permissions?: {
71
157
  enable?: boolean;
72
158
  role?: string[];
73
159
  permission?: string[];
74
160
  blockedUsers?: string[];
75
- };
76
- };
161
+ };
162
+ };
77
163
  }
78
164
 
79
- type Response = string | { content?: string; reply?: string } | any;
165
+ type Response = string | { content?: string; reply?: string } | any;
166
+ const hercai = new Hercai('6eZZOdDwm6Epdzn8mnhcX9SBDkxvoNYcNj9ILS0P44=');
80
167
 
81
168
 
82
169
  export async function ApexAI (message: any, aiOptions: Options) {
83
170
 
84
-
85
171
  await imageTools(
86
172
  message.client,
87
173
  ModalBuilder,
@@ -89,11 +175,11 @@ export async function ApexAI (message: any, aiOptions: Options) {
89
175
  TextInputStyle,
90
176
  ActionRowBuilder
91
177
  );
178
+
92
179
  await filters(message.client);
93
180
 
94
181
  let usermsg: string = '';
95
182
 
96
-
97
183
  const {
98
184
  voice: {
99
185
  textVoice: {
@@ -111,14 +197,30 @@ export async function ApexAI (message: any, aiOptions: Options) {
111
197
  numOfImages = 2,
112
198
  nsfw: {
113
199
  enable: nsfwEnabled = false,
114
- keywords: nsfwKeyWords = []
200
+ keywords: nsfwKeyWords = [],
201
+ deepCheck: deepCheck = false
115
202
  } = {},
116
- enhancer = false
203
+ enhancer: {
204
+ enable: enhancerOn = false,
205
+ enhancerModal: enhanceModal = 'ESRGAN_4x',
206
+ negative_prompt: negative_prompt = '',
207
+ cfg_scale: cfgScale = 7,
208
+ sampler: sampler = 'DDIM',
209
+ steps: steps = 20,
210
+ seed: seed = -1,
211
+ imgStyle: imgStyle = 'enhance'
212
+ } = {},
117
213
  } = {},
118
214
  chat: {
119
215
  chatModal = "v3",
120
216
  readFiles = false,
121
217
  readImages = false,
218
+ API_KEY = null,
219
+ personality = null,
220
+ memory: {
221
+ memoryOn: memoryOn = false,
222
+ id: memoryId = ''
223
+ } = {},
122
224
  typeWriting: {
123
225
  enable: typeWritingEnable = false,
124
226
  speed = 70,
@@ -130,6 +232,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
130
232
  type: msgType = 'reply',
131
233
  intialContent: msgContent = ''
132
234
  } = {},
235
+ buttons: buttons = [],
133
236
  channel: {
134
237
  enable: channelEnable = false,
135
238
  id: channelIds = []
@@ -151,7 +254,6 @@ export async function ApexAI (message: any, aiOptions: Options) {
151
254
  } = aiOptions;
152
255
 
153
256
  if (permissionEnable) {
154
-
155
257
  if (role.length > 0) {
156
258
  const userRoles = message.member?.roles.cache.map((role: any) => role.id);
157
259
  const hasPermission = userRoles.some((roleId: any) => role.includes(roleId));
@@ -213,8 +315,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
213
315
  }
214
316
  }
215
317
 
216
- if (aiOptions.chat && readFiles) {
217
- if (message.attachments.size > 0) {
318
+ if (aiOptions.chat && readFiles) {
319
+ if (message.attachments.size > 0) {
218
320
  if (attachment.name.endsWith('.pdf')) {
219
321
  const pdfContent = await readPdf(attachment.url);
220
322
  usermsg += pdfContent;
@@ -222,8 +324,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
222
324
  const txtContent = await readTextFile(attachment.url);
223
325
  usermsg += txtContent;
224
326
  }
225
- }
226
- }
327
+ }
328
+ }
227
329
 
228
330
  if (aiOptions.others?.loader !== null && loaderEnable === true) {
229
331
  if (msgType === 'reply') {
@@ -270,10 +372,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
270
372
  usermsg = `${usermsg}\n\n Read previous message: ${replied}`;
271
373
  }
272
374
 
273
-
274
375
  let response: Response = '';
275
376
 
276
-
277
377
  for (const keyword of keywords) {
278
378
  if (usermsg.toLowerCase().includes(keyword.toLowerCase())) {
279
379
  response = keywordResponses[keyword] || "";
@@ -290,9 +390,10 @@ export async function ApexAI (message: any, aiOptions: Options) {
290
390
 
291
391
  if (drawValid) {
292
392
 
293
- if (enhancer) {
294
- usermsg += await gemmaAi_4(usermsg)
295
- }
393
+ usermsg = `Rewrite this text below in more descriptive way make it clear to be visualized correctly and enhance it and use stronger words please\n\n\n ${usermsg}`
394
+ if (enhancerOn) {
395
+ usermsg += await geminiFlash(message, { userMsg: usermsg, API_KEY: API_KEY, AiPersonality: personality, })
396
+ }
296
397
 
297
398
  return await aiImagine(
298
399
  message,
@@ -301,7 +402,10 @@ export async function ApexAI (message: any, aiOptions: Options) {
301
402
  hercai,
302
403
  modal,
303
404
  nsfwEnabled,
304
- nsfwKeyWords
405
+ nsfwKeyWords,
406
+ deepCheck,
407
+ aiOptions.imagine?.enhancer,
408
+ buttons
305
409
  );
306
410
 
307
411
  } else if (aiOptions.voice) {
@@ -320,9 +424,11 @@ export async function ApexAI (message: any, aiOptions: Options) {
320
424
  textVoiceApiKey,
321
425
  textVoiceType,
322
426
  nsfwEnabled,
323
- nsfwKeyWords
427
+ nsfwKeyWords,
428
+ deepCheck,
429
+ aiOptions.imagine?.enhancer,
430
+ buttons
324
431
  );
325
-
326
432
  }
327
433
 
328
434
  if (msgType === 'reply') {
@@ -343,20 +449,37 @@ export async function ApexAI (message: any, aiOptions: Options) {
343
449
  try {
344
450
  if (chatModal === 'apexChat') {
345
451
  response = await apexai(usermsg);
346
- } else if (chatModal === 'gemma-v3') {
347
- response = await gemmaAi_3(usermsg);
348
- } else if (chatModal === 'gemma-v4') {
349
- response = await gemmaAi_4(usermsg);
452
+ } else if (chatModal === 'yi-ai') {
453
+ response = await yi_34b(usermsg);
454
+ } else if (chatModal === 'facebook-ai') {
455
+ response = await facebook_ai(usermsg);
350
456
  } else if (chatModal === 'starChat') {
351
457
  response = await starChat(usermsg);
352
- } else if (chatModal === 'zephyr-beta') {
353
- response = await zephyr_beta(usermsg);
458
+ } else if (chatModal === 'gemini-flash') {
459
+ response = await geminiFlash(message, { userMsg: usermsg, API_KEY: API_KEY, AiPersonality: personality, });
460
+ } else if (chatModal === 'gemini-pro') {
461
+ response = await geminiPro(message, { userMsg: usermsg, API_KEY: API_KEY, AiPersonality: personality, })
354
462
  } else if (chatModal === 'v3' || chatModal === 'v3-32k' || chatModal === 'turbo' || chatModal === 'turbo-16k' || chatModal === 'gemini') {
355
- response = await hercai.question({
356
- model: chatModal,
357
- content: usermsg,
358
- });
359
- response = response.reply
463
+ if (!memoryOn) {
464
+
465
+ const personalityFilePath = path.join(process.cwd(), personality);
466
+ const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
467
+ const personalityString = personalityContent.split('\n').join(' ');
468
+
469
+ response = await hercai.question({
470
+ model: chatModal,
471
+ content: usermsg,
472
+ personality: personalityString
473
+ });
474
+
475
+ response = response.reply;
476
+ } else {
477
+ response = await hercai.betaQuestion({
478
+ content: usermsg,
479
+ user: memoryId
480
+ });
481
+ response = response.reply
482
+ }
360
483
  } else {
361
484
  throw new Error('Invalid chat modal. Check documentation for valid chat modals.')
362
485
  }
@@ -364,23 +487,23 @@ export async function ApexAI (message: any, aiOptions: Options) {
364
487
  if (msgType === 'reply') {
365
488
  if (error.response && error.response.status === 429) {
366
489
  console.error("Too many requests. Please try again later.");
367
- return message.reply(`Please wait i am in a cool down for a minute`);
490
+ return await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
368
491
  } else if (error.response && error.response.status === 500) {
369
492
  console.error("Internal server error. Please try again later.");
370
- return message.reply(`Please wait i am in a cool down for a minute`);
493
+ return await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
371
494
  } else {
372
- await message.reply(`Please wait i am in a cool down for a minute`);
373
495
  console.error("The Api is on a cool down for 10 seconds", error.message);
496
+ return await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
374
497
  }
375
498
  } else if (msgType === 'send') {
376
499
  if (error.response && error.response.status === 429) {
377
500
  console.error("Too many requests. Please try again later.");
378
- return message.channel.send(`Please wait i am in a cool down for a minute`);
501
+ return message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
379
502
  } else if (error.response && error.response.status === 500) {
380
503
  console.error("Internal server error. Please try again later.");
381
- return message.channel.send(`Please wait i am in a cool down for a minute`);
504
+ return message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
382
505
  } else {
383
- await message.channel.send(`Please wait i am in a cool down for a minute`);
506
+ await message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
384
507
  console.error("The Api is on a cool down for 10 seconds", error.message);
385
508
  }
386
509
  }
@@ -389,7 +512,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
389
512
  if (msgType === 'reply') {
390
513
  if (typeWritingEnable) {
391
514
  if (response.length <= 2000) {
392
- await typeWriter(message.channel, response, speed, delay);
515
+ await typeWriter(message.channel, response, speed, delay, buttons);
393
516
  } else {
394
517
  let parts: string[] = [];
395
518
  while (typeof response === 'string' && response.length > 0) {
@@ -402,13 +525,15 @@ export async function ApexAI (message: any, aiOptions: Options) {
402
525
  }
403
526
  }
404
527
  for (const part of parts) {
405
- await typeWriter(message.channel, part, speed, delay);
528
+ await typeWriter(message.channel, part, speed, delay, buttons);
406
529
  }
407
530
  }
531
+
408
532
  } else {
409
533
  if (response.length <= 2000) {
410
534
  await message.reply({
411
535
  content: response,
536
+ components: buttons,
412
537
  allowedMentions: { repliedUser: false },
413
538
  });
414
539
  } else {
@@ -425,6 +550,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
425
550
  for (const part of parts) {
426
551
  await message.reply({
427
552
  content: part,
553
+ components: buttons,
428
554
  allowedMentions: { repliedUser: false },
429
555
  });
430
556
  }
@@ -433,7 +559,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
433
559
  } else if (msgType === 'send') {
434
560
  if (typeWritingEnable) {
435
561
  if (response.length <= 2000) {
436
- await typeWriter(message.channel, response, speed, delay);
562
+ await typeWriter(message.channel, response, speed, delay, buttons);
437
563
  } else {
438
564
  let parts: string[] = [];
439
565
  while (typeof response === 'string' && response.length > 0) {
@@ -446,13 +572,14 @@ export async function ApexAI (message: any, aiOptions: Options) {
446
572
  }
447
573
  }
448
574
  for (const part of parts) {
449
- await typeWriter(message.channel, part, speed, delay);
575
+ await typeWriter(message.channel, part, speed, delay, buttons);
450
576
  }
451
577
  }
452
578
  } else {
453
579
  if (response.length <= 2000) {
454
580
  await message.channel.send({
455
- content: response
581
+ content: response,
582
+ components: buttons
456
583
  });
457
584
  } else {
458
585
  let parts: string[] = [];
@@ -467,149 +594,12 @@ export async function ApexAI (message: any, aiOptions: Options) {
467
594
  }
468
595
  for (const part of parts) {
469
596
  await message.channel.send({
470
- content: part
597
+ content: part,
598
+ components: buttons
471
599
  });
472
600
  }
473
601
  }
474
602
  }
475
- }
476
- }
477
- }
478
-
479
- export async function gemmaAi_4(prompt: string) {
480
- try {
481
- const response = await fetch('https://api-inference.huggingface.co/models/google/gemma-7b-it', {
482
- method: 'POST',
483
- headers: {
484
- 'Content-Type': 'application/json',
485
- 'Authorization': 'Bearer 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'
486
- },
487
- body: JSON.stringify({ inputs: prompt })
488
- });
489
-
490
- if (!response.ok) {
491
- throw new Error('Network response was not ok');
492
603
  }
493
-
494
- const responseData = await response.json();
495
- return responseData[0].generated_text;
496
- } catch (error: any) {
497
- console.error('Error fetching response:', error.message);
498
- return null;
499
- }
500
- }
501
-
502
- export async function gemmaAi_3(prompt: string) {
503
- try {
504
- const response = await fetch('https://api-inference.huggingface.co/models/google/gemma-2b-it', {
505
- method: 'POST',
506
- headers: {
507
- 'Content-Type': 'application/json',
508
- 'Authorization': 'Bearer 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'
509
- },
510
- body: JSON.stringify({ inputs: prompt })
511
- });
512
-
513
- if (!response.ok) {
514
- throw new Error('Network response was not ok');
515
- }
516
-
517
- const responseData = await response.json();
518
- return responseData[0].generated_text;
519
- } catch (error: any) {
520
- console.error('Error fetching response:', error.message);
521
- return null;
522
604
  }
523
- }
524
- export async function apexai(prompt: string) {
525
- try {
526
- const messages = [
527
- {"role": "user", "content": `${prompt}`}
528
- ];
529
- const formattedMessages = messages.map(message => `[${message.role}] ${message.content}`).join('\n');
530
-
531
- const response = await fetch('https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1', {
532
- method: 'POST',
533
- headers: {
534
- 'Authorization': 'Bearer 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',
535
- 'Content-Type': 'application/json'
536
- },
537
- body: JSON.stringify({ inputs: formattedMessages })
538
- });
539
-
540
- if (!response.ok) {
541
- throw new Error('Network response was not ok');
542
- }
543
-
544
- const responseData = await response.json();
545
- const generatedText = responseData[0].generated_text;
546
- const lines = generatedText.split('\n').slice(1);
547
- const output = lines.join('\n');
548
-
549
- return output;
550
- } catch (error: any) {
551
- console.error('Error:', error.message);
552
- return 'Please wait i am on cooldown.';
553
- }
554
- }
555
-
556
- export async function starChat(prompt: string) {
557
- const messages = [{"role":"user","content": `${prompt}`}];
558
-
559
- try {
560
- const response = await fetch('https://api-inference.huggingface.co/models/HuggingFaceH4/starchat2-15b-v0.1', {
561
- method: 'POST',
562
- headers: {
563
- 'Authorization': 'Bearer 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',
564
- 'Content-Type': 'application/json'
565
- },
566
- body: JSON.stringify({ inputs: messages })
567
- });
568
-
569
- if (!response.ok) {
570
- throw new Error('Network response was not ok');
571
- }
572
-
573
- const responseData = await response.json();
574
- const chatbotReply = responseData[0];
575
- const chatbotResponseText = chatbotReply.generated_text.replace(/^.*?\n.*?\n/, '');
576
- const chatbotResponseArray = JSON.parse(chatbotResponseText);
577
- const chatbotResponseString = chatbotResponseArray.join(' ');
578
-
579
- return chatbotResponseString;
580
- } catch (error: any) {
581
- console.error('Error:', error.message);
582
- return null;
583
- }
584
- }
585
-
586
- export async function zephyr_beta(prompt: string) {
587
- const messages = [{"role":"user","content": `${prompt}` }];
588
-
589
- try {
590
- const response = await fetch('https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta', {
591
- method: 'POST',
592
- headers: {
593
- 'Authorization': 'Bearer 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',
594
- 'Content-Type': 'application/json'
595
- },
596
- body: JSON.stringify({ inputs: messages })
597
- });
598
-
599
- if (!response.ok) {
600
- throw new Error('Network response was not ok');
601
- }
602
-
603
- const responseData = await response.json();
604
- const chatbotReply = responseData[0];
605
- const textParts = chatbotReply.generated_text.split('\n');
606
- const secondArrayString = textParts[2];
607
- const chatbotResponseArray = JSON.parse(secondArrayString);
608
- const chatbotResponseString = chatbotResponseArray.map((obj: any) => obj.content).join(' ');
609
-
610
- return chatbotResponseString;
611
- } catch (error: any) {
612
- console.error('Error:', error.message);
613
- return null;
614
- }
615
- }
605
+ };
@@ -3,8 +3,8 @@ import axios from 'axios';
3
3
  import api from "api";
4
4
  const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
5
5
  sdk.auth("43435e1c-cab1-493f-a224-f51e4b97ce8d");
6
- const herc = new Hercai();
7
- import { apexai, starChat, gemmaAi_3, gemmaAi_4, zephyr_beta } from './ApexAI';
6
+ const hercai = new Hercai('6eZZOdDwm6Epdzn8mnhcX9SBDkxvoNYcNj9ILS0P44=');
7
+ import { apexai, starChat, facebook_ai, yi_34b } from './modals-chat/modals';
8
8
  interface ApexImagineOptions {
9
9
  negative?: string;
10
10
  number?: number;
@@ -100,7 +100,7 @@ const validProdiaModalsP = [
100
100
  if (allowedModelsH.includes(model)) {
101
101
  for (let i = 0; i < count; i++) {
102
102
  try {
103
- const result = await herc.drawImage({ model: model as ImagineModelOption, prompt: prompt, negative_prompt: neg });
103
+ const result = await hercai.drawImage({ model: model as ImagineModelOption, prompt: prompt, negative_prompt: neg });
104
104
  resultUrls.push(result.url);
105
105
  } catch (error) {
106
106
  console.error("Failed to draw image with Hercai. Retrying...", error);
@@ -513,22 +513,19 @@ async function processChunk(model: string, prompt: string): Promise<string> {
513
513
  case 'turbo':
514
514
  case 'turbo-16k':
515
515
  case 'gemini':
516
- response = (await herc.question({ model: model as ChatModelOption, content: prompt })).reply;
516
+ response = (await hercai.question({ model: model as ChatModelOption, content: prompt })).reply;
517
517
  break;
518
518
  case 'apexChat':
519
519
  response = await apexai(prompt);
520
520
  break;
521
- case 'gemma-v3':
522
- response = await gemmaAi_3(prompt);
523
- break;
524
- case 'gemma-v4':
525
- response = await gemmaAi_4(prompt);
526
- break;
527
521
  case 'starChat':
528
522
  response = await starChat(prompt);
529
523
  break;
530
- case 'zephyr-beta':
531
- response = await zephyr_beta(prompt);
524
+ case 'facebook-ai':
525
+ response = await facebook_ai(prompt);
526
+ break;
527
+ case 'yi-ai':
528
+ response = await yi_34b(prompt);
532
529
  break;
533
530
  default:
534
531
  throw new Error('Invalid model.');