apexify.js 3.3.11 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/README.md +183 -0
  2. package/dist/ai/ApexAI.d.ts +88 -8
  3. package/dist/ai/ApexAI.d.ts.map +1 -1
  4. package/dist/ai/ApexAI.js +61 -161
  5. package/dist/ai/ApexAI.js.map +1 -1
  6. package/dist/ai/direct-use.d.ts +9 -0
  7. package/dist/ai/direct-use.d.ts.map +1 -0
  8. package/dist/ai/direct-use.js +572 -0
  9. package/dist/ai/direct-use.js.map +1 -0
  10. package/dist/ai/functions/Gemini.d.ts +6 -0
  11. package/dist/ai/functions/Gemini.d.ts.map +1 -0
  12. package/dist/ai/functions/Gemini.js +103 -0
  13. package/dist/ai/functions/Gemini.js.map +1 -0
  14. package/dist/ai/functions/draw.d.ts +2 -1
  15. package/dist/ai/functions/draw.d.ts.map +1 -1
  16. package/dist/ai/functions/draw.js +476 -102
  17. package/dist/ai/functions/draw.js.map +1 -1
  18. package/dist/ai/functions/generateVoiceResponse.d.ts +3 -1
  19. package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
  20. package/dist/ai/functions/generateVoiceResponse.js +29 -18
  21. package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
  22. package/dist/ai/functions/typeWriter.d.ts +1 -1
  23. package/dist/ai/functions/typeWriter.d.ts.map +1 -1
  24. package/dist/ai/functions/typeWriter.js +3 -2
  25. package/dist/ai/functions/typeWriter.js.map +1 -1
  26. package/dist/ai/functions/validOptions.d.ts +10 -0
  27. package/dist/ai/functions/validOptions.d.ts.map +1 -0
  28. package/dist/ai/functions/validOptions.js +130 -0
  29. package/dist/ai/functions/validOptions.js.map +1 -0
  30. package/dist/ai/modals-chat/Gemini-flash.d.ts +6 -0
  31. package/dist/ai/modals-chat/Gemini-flash.d.ts.map +1 -0
  32. package/dist/ai/modals-chat/Gemini-flash.js +111 -0
  33. package/dist/ai/modals-chat/Gemini-flash.js.map +1 -0
  34. package/dist/ai/modals-chat/Gemini-pro.d.ts +6 -0
  35. package/dist/ai/modals-chat/Gemini-pro.d.ts.map +1 -0
  36. package/dist/ai/modals-chat/Gemini-pro.js +96 -0
  37. package/dist/ai/modals-chat/Gemini-pro.js.map +1 -0
  38. package/dist/ai/modals-chat/apexChat.d.ts +2 -0
  39. package/dist/ai/modals-chat/apexChat.d.ts.map +1 -0
  40. package/dist/ai/modals-chat/apexChat.js +33 -0
  41. package/dist/ai/modals-chat/apexChat.js.map +1 -0
  42. package/dist/ai/modals-chat/config.d.ts +5 -0
  43. package/dist/ai/modals-chat/config.d.ts.map +1 -0
  44. package/dist/ai/modals-chat/config.js +13 -0
  45. package/dist/ai/modals-chat/config.js.map +1 -0
  46. package/dist/ai/modals-chat/facebook-ai.d.ts +2 -0
  47. package/dist/ai/modals-chat/facebook-ai.d.ts.map +1 -0
  48. package/dist/ai/modals-chat/facebook-ai.js +21 -0
  49. package/dist/ai/modals-chat/facebook-ai.js.map +1 -0
  50. package/dist/ai/modals-chat/gemma.d.ts +3 -0
  51. package/dist/ai/modals-chat/gemma.d.ts.map +1 -0
  52. package/dist/ai/modals-chat/gemma.js +54 -0
  53. package/dist/ai/modals-chat/gemma.js.map +1 -0
  54. package/dist/ai/modals-chat/modals.d.ts +8 -0
  55. package/dist/ai/modals-chat/modals.d.ts.map +1 -0
  56. package/dist/ai/modals-chat/modals.js +16 -0
  57. package/dist/ai/modals-chat/modals.js.map +1 -0
  58. package/dist/ai/modals-chat/starChat.d.ts +2 -0
  59. package/dist/ai/modals-chat/starChat.d.ts.map +1 -0
  60. package/dist/ai/modals-chat/starChat.js +32 -0
  61. package/dist/ai/modals-chat/starChat.js.map +1 -0
  62. package/dist/ai/modals-chat/yi-ai.d.ts +2 -0
  63. package/dist/ai/modals-chat/yi-ai.d.ts.map +1 -0
  64. package/dist/ai/modals-chat/yi-ai.js +41 -0
  65. package/dist/ai/modals-chat/yi-ai.js.map +1 -0
  66. package/dist/ai/utils.d.ts +3 -2
  67. package/dist/ai/utils.d.ts.map +1 -1
  68. package/dist/ai/utils.js +6 -4
  69. package/dist/ai/utils.js.map +1 -1
  70. package/dist/canvas/utils/general functions.js +1 -1
  71. package/dist/canvas/utils/general functions.js.map +1 -1
  72. package/lib/ai/ApexAI.ts +215 -193
  73. package/lib/ai/{models.ts → direct-use.ts} +9 -12
  74. package/lib/ai/functions/draw.ts +479 -83
  75. package/lib/ai/functions/generateVoiceResponse.ts +16 -8
  76. package/lib/ai/functions/typeWriter.ts +4 -2
  77. package/lib/ai/functions/validOptions.ts +210 -0
  78. package/lib/ai/modals-chat/Gemini-flash.ts +108 -0
  79. package/lib/ai/modals-chat/Gemini-pro.ts +93 -0
  80. package/lib/ai/modals-chat/apexChat.ts +31 -0
  81. package/lib/ai/modals-chat/config.ts +11 -0
  82. package/lib/ai/modals-chat/facebook-ai.ts +14 -0
  83. package/lib/ai/modals-chat/modals.ts +8 -0
  84. package/lib/ai/modals-chat/starChat.ts +31 -0
  85. package/lib/ai/modals-chat/yi-ai.ts +40 -0
  86. package/lib/ai/utils.ts +3 -1
  87. package/lib/canvas/utils/general functions.ts +1 -1
  88. package/package.json +5 -3
package/lib/ai/ApexAI.ts CHANGED
@@ -1,5 +1,6 @@
  import { Hercai } from "hercai";
- const hercai = new Hercai()
+ import fs from "fs";
+ import path from "path";
  import {
  imageReader,
  toDraw,
@@ -10,78 +11,163 @@ import {
  typeWriter,
  readImage,
  } from "./utils";
- import axios from "axios";
  import {
  ModalBuilder,
  TextInputBuilder,
  TextInputStyle,
- ActionRowBuilder
+ ActionRowBuilder
  } from "discord.js";
  import { filters } from "./buttons/tools";
  import { imageTools } from "./buttons/drawMenu";
+ import { ImageModals } from './functions/validOptions';
+ import { apexai, geminiFlash, geminiPro, facebook_ai, yi_34b, starChat } from "./modals-chat/modals";

+ /**
+ * Configuration options for the Gemini Flash functionality.
+ */
  export interface Options {
- voice?: {
- textVoice?:{
- enable?: boolean;
- voiceModal?: string;
- voice_code?: string;
- apiKey?: string;
- type?: string;
- };
- };
- imagine?: {
- enable?: boolean;
- drawTrigger?: string[];
- imageModal?: string;
- numOfImages?: number;
- nsfw?: {
+ /**
+ * Configuration options related to voice functionality.
+ * @param voice.textVoice Configuration options for text-to-voice functionality.
+ * @param voice.textVoice.enable Whether text-to-voice functionality is enabled.
+ * @param voice.textVoice.voiceModal The voice modal to be used.
+ * @param voice.textVoice.voice_code The voice code only for (apexAI and zenithAI modal).
+ * @param voice.textVoice.apiKey The API key for accessing the voice service only for (apexAI and zenithAI modal).
+ * @param voice.textVoice.type The type of voice only for (apexAI and zenithAI modal).
+ */
+ voice?: {
+ textVoice?: {
+ enable?: boolean;
+ voiceModal?: string;
+ voice_code?: string;
+ apiKey?: string;
+ type?: string;
+ };
+ };
+ /**
+ * Configuration options related to image generation.
+ * @param imagine.enable Whether image generation is enabled.
+ * @param imagine.drawTrigger The trigger phrases for initiating image generation.
+ * @param imagine.imageModal The modal for the image generation.
+ * @param imagine.numOfImages The number of images to generate.
+ * @param imagine.nsfw Configuration options for NSFW filtering.
+ * @param imagine.nsfw.enable Whether NSFW filtering is enabled.
+ * @param imagine.nsfw.keywords Keywords for NSFW filtering.
+ * @param imagine.enhancer Configuration options for image enhancement.
+ * @param imagine.enhancer.enable Whether image enhancement is enabled (rewrites your prompt in more descriptive way).
+ * @param imagine.enhancer.enhancerModal The modal for image enhancement only for (Prodia modals).
+ * @param imagine.enhancer.cfg_scale The scale for image enhancement only for (Prodia modals).
+ * @param imagine.enhancer.steps The number of enhancement steps only for (Prodia modals).
+ * @param imagine.enhancer.seed The seed for image enhancement only for (Prodia modals).
+ * @param imagine.enhancer.imgStyle The style of the image only for (Prodia modals).
+ * @param imagine.enhancer.negative_prompt The negative prompt for image enhancement only for (Prodia modals).
+ * @param imagine.enhancer.sampler The sampler for image enhancement only for (Prodia modals).
+ */
+ imagine?: {
+ enable?: boolean;
+ drawTrigger?: string[];
+ imageModal?: ImageModals;
+ numOfImages?: number;
+ nsfw?: {
  enable?: boolean;
  keywords?: string[];
- };
- enhancer?: boolean;
- };
- chat?: {
- chatModal?: string;
- readFiles?: boolean;
- readImages?: boolean;
- typeWriting?:{
+ deepCheck?: boolean;
+ };
+ enhancer?: {
+ enable: boolean;
+ enhancerModal?: string | "ESRGAN_4x" | "Lanczos" | "Nearest" | "LDSR" | "R-ESRGAN 4x+" | "R-ESRGAN 4x+ Anime6B" | "ScuNET GAN" | "ScuNET PSNR" | "SwinIR 4x" | undefined;
+ cfg_scale?: number;
+ steps?: number;
+ seed?: number;
+ imgStyle?: string | "3d-model" | "analog-film" | "anime" | "cinematic" | "comic-book" | "digital-art" | "enhance" | "isometric" | "fantasy-art" | "isometric" | "line-art" | "low-poly" | "neon-punk" | "origami" | "photographic" | "pixel-art" | "texture" | "craft-clay";
+ negative_prompt?: string;
+ sampler?: string | "DPM++ 2M Karras" | "DPM++ SDE Karras" | "DPM++ 2M SDE Exponential" | "DPM++ 2M SDE Karras" | "Euler a" | "Euler" | "LMS" | "Heun" | "DPM2" | "DPM2 a" | "DPM++ 2S a" | "DPM++ 2M" | "DPM++ SDE" | "DPM++ 2M SDE" | "DPM++ 2M SDE Heun" | "DPM++ 2M SDE Heun Karras" | "DPM++ 2M SDE Heun Exponential" | "DPM++ 3M SDE" | "DPM++ 3M SDE Karras" | "DPM++ 3M SDE Exponential" | "DPM fast" | "DPM adaptive" | "LMS Karras" | "DPM2 Karras" | "DPM2 a Karras" | "DPM++ 2S a Karras" | "Restart" | "DDIM" | "PLMS" | "UniPC";
+ };
+ };
+ /**
+ * Configuration options related to chat functionality.
+ * @param chat.chatModal The chat modal to be used.
+ * @param chat.readFiles Whether to read files.
+ * @param chat.readImages Whether to read images.
+ * @param chat.personality The personality for the chat.
+ * @param chat.API_KEY The API key for accessing the chat service.
+ * @param chat.memory Configuration options for memory.
+ * @param chat.memory.memoryOn Whether memory is enabled.
+ * @param chat.memory.id The ID for memory.
+ * @param chat.typeWriting Configuration options for typing effect.
+ * @param chat.typeWriting.enable Whether the typing effect is enabled.
+ * @param chat.typeWriting.speed The speed of typing.
+ * @param chat.typeWriting.delay The delay for typing.
+ */
+ chat?: {
+ chatModal?: string;
+ readFiles?: boolean;
+ readImages?: boolean;
+ personality?: string | any;
+ API_KEY?: string;
+ memory?: {
+ memoryOn: boolean;
+ id: string;
+ };
+ typeWriting?: {
  enable?: boolean;
  speed?: number;
  delay?: number;
- };
- };
- others?: {
- messageType?: {
- type: string;
- intialContent: string;
- };
- keywords?: string[];
- keywordResponses?: Record<string, string>;
- loader?: {
- enable?: boolean;
- loadingMessage?: string;
- loadingTimer?: number;
- };
- channel?: {
+ };
+ };
+ /**
+ * Additional configuration options.
+ * @param others.messageType Configuration options for message types.
+ * @param others.messageType.type The type of message.
+ * @param others.messageType.intialContent The initial content of the message.
+ * @param others.buttons Buttons configuration.
+ * @param others.keywords Keywords for response.
+ * @param others.keywordResponses Responses for keywords.
+ * @param others.loader Configuration options for loader.
+ * @param others.loader.enable Whether the loader is enabled.
+ * @param others.loader.loadingMessage The loading message.
+ * @param others.loader.loadingTimer The loading timer.
+ * @param others.channel Configuration options for channels.
+ * @param others.channel.enable Whether channels are enabled.
+ * @param others.channel.id The ID of the channels.
+ * @param others.permissions Configuration options for permissions.
+ * @param others.permissions.enable Whether permissions are enabled.
+ * @param others.permissions.role The role for permissions.
+ * @param others.permissions.permission The permission.
+ * @param others.permissions.blockedUsers Blocked users.
+ */
+ others?: {
+ messageType?: {
+ type: string;
+ intialContent: string;
+ };
+ buttons?: any[];
+ keywords?: string[];
+ keywordResponses?: Record<string, string>;
+ loader?: {
+ enable?: boolean;
+ loadingMessage?: string;
+ loadingTimer?: number;
+ };
+ channel?: {
  enable?: boolean;
  id?: string[];
- };
- permissions?: {
+ };
+ permissions?: {
  enable?: boolean;
  role?: string[];
  permission?: string[];
  blockedUsers?: string[];
- };
- };
+ };
+ };
  }

- type Response = string | { content?: string; reply?: string } | any;
+ type Response = string | { content?: string; reply?: string } | any;
+ const hercai = new Hercai('6eZZOdDwm6Epdzn8mnhcX9SBDkxvoNYcNj9ILS0P44=');


  export async function ApexAI (message: any, aiOptions: Options) {

-
  await imageTools(
  message.client,
  ModalBuilder,
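
For orientation, the new Options shape added in the hunk above can be exercised along these lines. This is a minimal sketch, not an excerpt from the package documentation: it assumes the package root re-exports ApexAI and Options, and every concrete value (modal names, the personality path, environment variables) is illustrative.

import { Client, GatewayIntentBits, Message } from "discord.js";
import { ApexAI, Options } from "apexify.js"; // assumes a root re-export of lib/ai/ApexAI

const aiOptions: Options = {
  chat: {
    chatModal: "gemini-flash",               // one of the modals dispatched later in this file
    API_KEY: process.env.GEMINI_API_KEY,     // used by the gemini-flash / gemini-pro branches
    personality: "./personality.txt",        // read from disk for the hercai modals (illustrative path)
    memory: { memoryOn: false, id: "" },
    typeWriting: { enable: true, speed: 70, delay: 1000 }, // delay value is illustrative
  },
  imagine: {
    enable: true,
    drawTrigger: ["draw", "imagine"],
    numOfImages: 2,
    nsfw: { enable: true, keywords: [], deepCheck: true },
    enhancer: { enable: true, enhancerModal: "ESRGAN_4x", steps: 20 },
    // imageModal must satisfy the ImageModals union from functions/validOptions; omitted here
  },
  others: {
    messageType: { type: "reply", intialContent: "" },
    buttons: [], // discord.js action rows, threaded through to every reply in 4.0.0
  },
};

const client = new Client({
  intents: [GatewayIntentBits.Guilds, GatewayIntentBits.GuildMessages, GatewayIntentBits.MessageContent],
});

client.on("messageCreate", async (message: Message) => {
  if (message.author.bot) return;
  await ApexAI(message, aiOptions);
});

client.login(process.env.DISCORD_TOKEN);

The defaults shown in the destructuring hunks further down apply whenever a nested block is omitted.
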
@@ -89,11 +175,11 @@ export async function ApexAI (message: any, aiOptions: Options) {
  TextInputStyle,
  ActionRowBuilder
  );
+
  await filters(message.client);

  let usermsg: string = '';

-
  const {
  voice: {
  textVoice: {
@@ -111,14 +197,30 @@ export async function ApexAI (message: any, aiOptions: Options) {
  numOfImages = 2,
  nsfw: {
  enable: nsfwEnabled = false,
- keywords: nsfwKeyWords = []
+ keywords: nsfwKeyWords = [],
+ deepCheck: deepCheck = false
  } = {},
- enhancer = false
+ enhancer: {
+ enable: enhancerOn = false,
+ enhancerModal: enhanceModal = 'ESRGAN_4x',
+ negative_prompt: negative_prompt = '',
+ cfg_scale: cfgScale = 7,
+ sampler: sampler = 'DDIM',
+ steps: steps = 20,
+ seed: seed = -1,
+ imgStyle: imgStyle = 'enhance'
+ } = {},
  } = {},
  chat: {
  chatModal = "v3",
  readFiles = false,
  readImages = false,
+ API_KEY = null,
+ personality = null,
+ memory: {
+ memoryOn: memoryOn = false,
+ id: memoryId = ''
+ } = {},
  typeWriting: {
  enable: typeWritingEnable = false,
  speed = 70,
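
The destructuring above also documents the 4.0.0 defaults. As a reading aid (not package output), this is roughly what the enhancer and memory blocks resolve to when a caller sets only imagine.enhancer.enable and omits chat.memory:

// Defaults taken from the destructuring in the hunk above; purely a reference sketch.
const resolvedEnhancer = {
  enable: true,            // caller-supplied
  enhancerModal: "ESRGAN_4x",
  negative_prompt: "",
  cfg_scale: 7,
  sampler: "DDIM",
  steps: 20,
  seed: -1,
  imgStyle: "enhance",
};

const resolvedMemory = { memoryOn: false, id: "" }; // chat.API_KEY and chat.personality default to null
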
@@ -130,6 +232,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
  type: msgType = 'reply',
  intialContent: msgContent = ''
  } = {},
+ buttons: buttons = [],
  channel: {
  enable: channelEnable = false,
  id: channelIds = []
@@ -151,7 +254,6 @@ export async function ApexAI (message: any, aiOptions: Options) {
  } = aiOptions;

  if (permissionEnable) {
-
  if (role.length > 0) {
  const userRoles = message.member?.roles.cache.map((role: any) => role.id);
  const hasPermission = userRoles.some((roleId: any) => role.includes(roleId));
@@ -213,8 +315,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
  }
  }

- if (aiOptions.chat && readFiles) {
- if (message.attachments.size > 0) {
+ if (aiOptions.chat && readFiles) {
+ if (message.attachments.size > 0) {
  if (attachment.name.endsWith('.pdf')) {
  const pdfContent = await readPdf(attachment.url);
  usermsg += pdfContent;
@@ -222,8 +324,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
  const txtContent = await readTextFile(attachment.url);
  usermsg += txtContent;
  }
- }
- }
+ }
+ }

  if (aiOptions.others?.loader !== null && loaderEnable === true) {
  if (msgType === 'reply') {
@@ -270,10 +372,8 @@ export async function ApexAI (message: any, aiOptions: Options) {
  usermsg = `${usermsg}\n\n Read previous message: ${replied}`;
  }

-
  let response: Response = '';

-
  for (const keyword of keywords) {
  if (usermsg.toLowerCase().includes(keyword.toLowerCase())) {
  response = keywordResponses[keyword] || "";
@@ -290,9 +390,10 @@ export async function ApexAI (message: any, aiOptions: Options) {

  if (drawValid) {

- if (enhancer) {
- usermsg += await gemmaAi_4(usermsg)
- }
+ usermsg = `Rewrite this text below in more descriptive way make it clear to be visualized correctly and enhance it and use stronger words please\n\n\n ${usermsg}`
+ if (enhancerOn) {
+ usermsg += await geminiFlash(message, { userMsg: usermsg, API_KEY: API_KEY, AiPersonality: personality, })
+ }

  return await aiImagine(
  message,
@@ -301,7 +402,10 @@ export async function ApexAI (message: any, aiOptions: Options) {
  hercai,
  modal,
  nsfwEnabled,
- nsfwKeyWords
+ nsfwKeyWords,
+ deepCheck,
+ aiOptions.imagine?.enhancer,
+ buttons
  );

  } else if (aiOptions.voice) {
@@ -320,9 +424,11 @@ export async function ApexAI (message: any, aiOptions: Options) {
  textVoiceApiKey,
  textVoiceType,
  nsfwEnabled,
- nsfwKeyWords
+ nsfwKeyWords,
+ deepCheck,
+ aiOptions.imagine?.enhancer,
+ buttons
  );
-
  }

  if (msgType === 'reply') {
@@ -343,20 +449,37 @@ export async function ApexAI (message: any, aiOptions: Options) {
  try {
  if (chatModal === 'apexChat') {
  response = await apexai(usermsg);
- } else if (chatModal === 'gemma-v3') {
- response = await gemmaAi_3(usermsg);
- } else if (chatModal === 'gemma-v4') {
- response = await gemmaAi_4(usermsg);
+ } else if (chatModal === 'yi-ai') {
+ response = await yi_34b(usermsg);
+ } else if (chatModal === 'facebook-ai') {
+ response = await facebook_ai(usermsg);
  } else if (chatModal === 'starChat') {
  response = await starChat(usermsg);
- } else if (chatModal === 'zephyr-beta') {
- response = await zephyr_beta(usermsg);
+ } else if (chatModal === 'gemini-flash') {
+ response = await geminiFlash(message, { userMsg: usermsg, API_KEY: API_KEY, AiPersonality: personality, });
+ } else if (chatModal === 'gemini-pro') {
+ response = await geminiPro(message, { userMsg: usermsg, API_KEY: API_KEY, AiPersonality: personality, })
  } else if (chatModal === 'v3' || chatModal === 'v3-32k' || chatModal === 'turbo' || chatModal === 'turbo-16k' || chatModal === 'gemini') {
- response = await hercai.question({
- model: chatModal,
- content: usermsg,
- });
- response = response.reply
+ if (!memoryOn) {
+
+ const personalityFilePath = path.join(process.cwd(), personality);
+ const personalityContent = fs.readFileSync(personalityFilePath, 'utf-8');
+ const personalityString = personalityContent.split('\n').join(' ');
+
+ response = await hercai.question({
+ model: chatModal,
+ content: usermsg,
+ personality: personalityString
+ });
+
+ response = response.reply;
+ } else {
+ response = await hercai.betaQuestion({
+ content: usermsg,
+ user: memoryId
+ });
+ response = response.reply
+ }
  } else {
  throw new Error('Invalid chat modal. Check documentation for valid chat modals.')
  }
@@ -364,23 +487,23 @@ export async function ApexAI (message: any, aiOptions: Options) {
  if (msgType === 'reply') {
  if (error.response && error.response.status === 429) {
  console.error("Too many requests. Please try again later.");
- return message.reply(`Please wait i am in a cool down for a minute`);
+ return await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
  } else if (error.response && error.response.status === 500) {
  console.error("Internal server error. Please try again later.");
- return message.reply(`Please wait i am in a cool down for a minute`);
+ return await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
  } else {
- await message.reply(`Please wait i am in a cool down for a minute`);
  console.error("The Api is on a cool down for 10 seconds", error.message);
+ return await message.reply({ content: `Please wait i am in a cool down for a minute`, components: buttons });
  }
  } else if (msgType === 'send') {
  if (error.response && error.response.status === 429) {
  console.error("Too many requests. Please try again later.");
- return message.channel.send(`Please wait i am in a cool down for a minute`);
+ return message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
  } else if (error.response && error.response.status === 500) {
  console.error("Internal server error. Please try again later.");
- return message.channel.send(`Please wait i am in a cool down for a minute`);
+ return message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
  } else {
- await message.channel.send(`Please wait i am in a cool down for a minute`);
+ await message.channel.send({ content: `Please wait i am in a cool down for a minute`, components: buttons });
  console.error("The Api is on a cool down for 10 seconds", error.message);
  }
  }
@@ -389,7 +512,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
  if (msgType === 'reply') {
  if (typeWritingEnable) {
  if (response.length <= 2000) {
- await typeWriter(message.channel, response, speed, delay);
+ await typeWriter(message.channel, response, speed, delay, buttons);
  } else {
  let parts: string[] = [];
  while (typeof response === 'string' && response.length > 0) {
@@ -402,7 +525,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
  }
  }
  for (const part of parts) {
- await typeWriter(message.channel, part, speed, delay);
+ await typeWriter(message.channel, part, speed, delay, buttons);
  }
  }

@@ -410,6 +533,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
  if (response.length <= 2000) {
  await message.reply({
  content: response,
+ components: buttons,
  allowedMentions: { repliedUser: false },
  });
  } else {
@@ -426,6 +550,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
  for (const part of parts) {
  await message.reply({
  content: part,
+ components: buttons,
  allowedMentions: { repliedUser: false },
  });
  }
@@ -434,7 +559,7 @@ export async function ApexAI (message: any, aiOptions: Options) {
  } else if (msgType === 'send') {
  if (typeWritingEnable) {
  if (response.length <= 2000) {
- await typeWriter(message.channel, response, speed, delay);
+ await typeWriter(message.channel, response, speed, delay, buttons);
  } else {
  let parts: string[] = [];
  while (typeof response === 'string' && response.length > 0) {
@@ -447,13 +572,14 @@ export async function ApexAI (message: any, aiOptions: Options) {
  }
  }
  for (const part of parts) {
- await typeWriter(message.channel, part, speed, delay);
+ await typeWriter(message.channel, part, speed, delay, buttons);
  }
  }
  } else {
  if (response.length <= 2000) {
  await message.channel.send({
- content: response
+ content: response,
+ components: buttons
  });
  } else {
  let parts: string[] = [];
@@ -468,116 +594,12 @@ export async function ApexAI (message: any, aiOptions: Options) {
  }
  for (const part of parts) {
  await message.channel.send({
- content: part
+ content: part,
+ components: buttons
  });
  }
  }
  }
- }
- }
- }
-
- export async function gemmaAi_4(prompt: string) {
- try {
- const response = await axios.post('https://api-inference.huggingface.co/models/google/gemma-7b-it', { inputs: prompt }, {
- headers: { 'Authorization': `Bearer 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` }
- });
- return response.data[0].generated_text;
- } catch (error: any) {
- console.error('Error fetching response:', error.message);
- return null;
- }
- }
-
- export async function gemmaAi_3(prompt: string) {
- try {
- const response = await axios.post('https://api-inference.huggingface.co/models/google/gemma-2b-it', { inputs: prompt }, {
- headers: { 'Authorization': `Bearer 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` }
- });
- return response.data[0].generated_text;
- } catch (error: any) {
- console.error('Error fetching response:', error.message);
- return null;
- }
- }
-
- export async function apexai(prompt: string) {
- try {
- const messages = [
- {"role": "user", "content": `${prompt}`}
- ];
- const formattedMessages = messages.map(message => `[${message.role}] ${message.content}`).join('\n');
-
- const response = await axios.post(`https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1`, {
- inputs: formattedMessages
- }, {
- headers: {
- 'Authorization': `Bearer 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`,
- 'Content-Type': 'application/json'
- }
- });
-
- const generatedText = response.data[0].generated_text;
-
- const lines = generatedText.split('\n').slice(1);
-
- const output = lines.join('\n');
-
- return output
- } catch (error: any) {
- console.error('Error:', error.response.data);
- return 'Please wait i am on cooldown.'
  }
- }
-
- export async function starChat(prompt: string) {
- const messages = [{"role":"user","content": `${prompt}`}]
-
- try {
- const response = await axios.post('https://api-inference.huggingface.co/models/HuggingFaceH4/starchat2-15b-v0.1', {
- inputs: JSON.stringify(messages),
- }, {
- headers: {
- 'Authorization': `Bearer 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`,
- },
- });
-
- const chatbotReply = response.data[0];
- const chatbotResponseText = chatbotReply.generated_text.replace(/^.*?\n.*?\n/, '');
- const chatbotResponseArray = JSON.parse(chatbotResponseText);
- const chatbotResponseString = chatbotResponseArray.join(' ');
-
- return chatbotResponseString;
- } catch (error: any) {
- console.error('Error fetching response:', error.message);
- return null;
- }
- }
-
- export async function zephyr_beta(prompt: string) {
-
- const messages = [{"role":"user","content": `${prompt}` }]
- try {
- const response = await axios.post('https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta', {
- inputs: JSON.stringify(messages),
- }, {
- headers: {
- 'Authorization': `Bearer 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`,
- },
- });
-
- const chatbotReply = response.data[0];
-
- const textParts = chatbotReply.generated_text.split('\n');
-
- const secondArrayString = textParts[2];
- const chatbotResponseArray = JSON.parse(secondArrayString);
-
- const chatbotResponseString = chatbotResponseArray.map((obj: any) => obj.content).join(' ');
-
- return chatbotResponseString;
- } catch (error: any) {
- console.error('Error fetching response:', error.message);
- return null;
- }
- }
+ }
+ };
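
Worth noting in the chat branch above: for the hercai-backed modals, 4.0.0 reads the personality file from disk and calls hercai.question when memory is off, and switches to hercai.betaQuestion keyed by chat.memory.id when it is on. A hedged configuration sketch; keying memory on the Discord author id is an assumption, any stable per-user string should work:

import type { Message } from "discord.js";
import { ApexAI, Options } from "apexify.js"; // assumes a root re-export, as in the earlier sketch

export async function handleChat(message: Message): Promise<void> {
  const aiOptions: Options = {
    chat: {
      chatModal: "v3",                                   // hercai-backed, so the memory branch applies
      memory: { memoryOn: true, id: message.author.id }, // per-user conversation memory
    },
  };
  await ApexAI(message, aiOptions);
}

With memoryOn set, the personality file is not read in this code path, so chat.personality can be left unset.
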
package/lib/ai/{models.ts → direct-use.ts} RENAMED
@@ -3,8 +3,8 @@ import axios from 'axios';
  import api from "api";
  const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
  sdk.auth("43435e1c-cab1-493f-a224-f51e4b97ce8d");
- const herc = new Hercai();
- import { apexai, starChat, gemmaAi_3, gemmaAi_4, zephyr_beta } from './ApexAI';
+ const hercai = new Hercai('6eZZOdDwm6Epdzn8mnhcX9SBDkxvoNYcNj9ILS0P44=');
+ import { apexai, starChat, facebook_ai, yi_34b } from './modals-chat/modals';
  interface ApexImagineOptions {
  negative?: string;
  number?: number;
@@ -100,7 +100,7 @@ const validProdiaModalsP = [
  if (allowedModelsH.includes(model)) {
  for (let i = 0; i < count; i++) {
  try {
- const result = await herc.drawImage({ model: model as ImagineModelOption, prompt: prompt, negative_prompt: neg });
+ const result = await hercai.drawImage({ model: model as ImagineModelOption, prompt: prompt, negative_prompt: neg });
  resultUrls.push(result.url);
  } catch (error) {
  console.error("Failed to draw image with Hercai. Retrying...", error);
@@ -513,22 +513,19 @@ async function processChunk(model: string, prompt: string): Promise<string> {
  case 'turbo':
  case 'turbo-16k':
  case 'gemini':
- response = (await herc.question({ model: model as ChatModelOption, content: prompt })).reply;
+ response = (await hercai.question({ model: model as ChatModelOption, content: prompt })).reply;
  break;
  case 'apexChat':
  response = await apexai(prompt);
  break;
- case 'gemma-v3':
- response = await gemmaAi_3(prompt);
- break;
- case 'gemma-v4':
- response = await gemmaAi_4(prompt);
- break;
  case 'starChat':
  response = await starChat(prompt);
  break;
- case 'zephyr-beta':
- response = await zephyr_beta(prompt);
+ case 'facebook-ai':
+ response = await facebook_ai(prompt);
+ break;
+ case 'yi-ai':
+ response = await yi_34b(prompt);
  break;
  default:
  throw new Error('Invalid model.');
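
Taken together, the dispatch in ApexAI.ts and the processChunk switch above define which chat modal identifiers 4.0.0 accepts; the 3.x values gemma-v3, gemma-v4 and zephyr-beta are gone. A type-level summary for migration purposes (the alias is illustrative; apexify.js does not necessarily export such a type):

// Chat modal identifiers handled in the 4.0.0 diffs above.
type ChatModal =
  | "apexChat"
  | "yi-ai"          // new in 4.0.0
  | "facebook-ai"    // new in 4.0.0
  | "starChat"
  | "gemini-flash"   // handled in ApexAI.ts; not part of the processChunk switch shown here
  | "gemini-pro"     // handled in ApexAI.ts; not part of the processChunk switch shown here
  | "v3" | "v3-32k" | "turbo" | "turbo-16k" | "gemini"; // hercai-backed

// Removed in 4.0.0: "gemma-v3", "gemma-v4", "zephyr-beta".
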