apexify.js 3.2.5 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/.tsbuildinfo +1 -1
  2. package/README.md +4 -802
  3. package/change logs.md +19 -0
  4. package/dist/ai/ApexAI.d.ts +15 -6
  5. package/dist/ai/ApexAI.d.ts.map +1 -1
  6. package/dist/ai/ApexAI.js +102 -29
  7. package/dist/ai/ApexAI.js.map +1 -1
  8. package/dist/ai/buttons/tools.d.ts.map +1 -1
  9. package/dist/ai/buttons/tools.js +1 -1
  10. package/dist/ai/buttons/tools.js.map +1 -1
  11. package/dist/ai/functions/aivoice.d.ts +1 -0
  12. package/dist/ai/functions/aivoice.d.ts.map +1 -0
  13. package/dist/ai/functions/aivoice.js +2 -0
  14. package/dist/ai/functions/aivoice.js.map +1 -0
  15. package/dist/ai/functions/draw.d.ts +1 -1
  16. package/dist/ai/functions/draw.d.ts.map +1 -1
  17. package/dist/ai/functions/draw.js +12 -1
  18. package/dist/ai/functions/draw.js.map +1 -1
  19. package/dist/ai/functions/generateVoiceResponse.d.ts +1 -1
  20. package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
  21. package/dist/ai/functions/generateVoiceResponse.js +3 -3
  22. package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
  23. package/dist/ai/models.d.ts +1 -1
  24. package/dist/ai/models.d.ts.map +1 -1
  25. package/dist/ai/models.js +46 -28
  26. package/dist/ai/models.js.map +1 -1
  27. package/dist/ai/utils.d.ts.map +1 -1
  28. package/dist/ai/utils.js.map +1 -1
  29. package/dist/canvas/ApexPainter.d.ts +10 -10
  30. package/dist/canvas/ApexPainter.d.ts.map +1 -1
  31. package/dist/canvas/ApexPainter.js +21 -26
  32. package/dist/canvas/ApexPainter.js.map +1 -1
  33. package/dist/canvas/utils/bg.d.ts +1 -2
  34. package/dist/canvas/utils/bg.d.ts.map +1 -1
  35. package/dist/canvas/utils/bg.js +2 -5
  36. package/dist/canvas/utils/bg.js.map +1 -1
  37. package/dist/canvas/utils/charts.js +26 -26
  38. package/dist/canvas/utils/charts.js.map +1 -1
  39. package/dist/canvas/utils/general functions.d.ts +7 -7
  40. package/dist/canvas/utils/general functions.d.ts.map +1 -1
  41. package/dist/canvas/utils/general functions.js +47 -52
  42. package/dist/canvas/utils/general functions.js.map +1 -1
  43. package/dist/canvas/utils/types.d.ts +1 -3
  44. package/dist/canvas/utils/types.d.ts.map +1 -1
  45. package/dist/index.d.ts.map +1 -1
  46. package/dist/index.js +45 -1
  47. package/dist/index.js.map +1 -1
  48. package/lib/ai/ApexAI.ts +550 -0
  49. package/lib/ai/buttons/drawMenu.ts +361 -0
  50. package/lib/ai/buttons/tools.ts +550 -0
  51. package/lib/ai/functions/chunkString.ts +3 -0
  52. package/lib/ai/functions/draw.ts +440 -0
  53. package/lib/ai/functions/generateVoiceResponse.ts +177 -0
  54. package/lib/ai/functions/imageReader.ts +24 -0
  55. package/lib/ai/functions/readFiles.ts +34 -0
  56. package/lib/ai/functions/readImagess.ts +41 -0
  57. package/lib/ai/functions/shouldDrawImage.ts +7 -0
  58. package/lib/ai/functions/typeWriter.ts +24 -0
  59. package/lib/ai/models.ts +589 -0
  60. package/lib/ai/utils.ts +23 -0
  61. package/lib/canvas/ApexPainter.ts +572 -0
  62. package/lib/canvas/utils/bg.ts +79 -0
  63. package/lib/canvas/utils/charts.ts +524 -0
  64. package/lib/canvas/utils/circular.ts +17 -0
  65. package/lib/canvas/utils/customLines.ts +49 -0
  66. package/lib/canvas/utils/general functions.ts +434 -0
  67. package/lib/canvas/utils/imageProperties.ts +403 -0
  68. package/lib/canvas/utils/radius.ts +26 -0
  69. package/lib/canvas/utils/textProperties.ts +68 -0
  70. package/lib/canvas/utils/types.ts +417 -0
  71. package/lib/canvas/utils/utils.ts +59 -0
  72. package/lib/index.ts +88 -0
  73. package/lib/utils.ts +8 -0
  74. package/package.json +15 -2
  75. package/tsconfig.json +21 -0
package/lib/ai/ApexAI.ts
@@ -0,0 +1,550 @@
+ import { Hercai } from "hercai";
+ const hercai = new Hercai()
+ import {
+   imageReader,
+   toDraw,
+   aiImagine,
+   aiVoice,
+   readPdf,
+   readTextFile,
+   typeWriter,
+   readImage,
+ } from "./utils";
+ import axios from "axios";
+ import {
+   ModalBuilder,
+   TextInputBuilder,
+   TextInputStyle,
+   ActionRowBuilder
+ } from "discord.js";
+ import { filters } from "./buttons/tools";
+ import { imageTools } from "./buttons/drawMenu";
+
+
+ export interface Options {
+   voice?: {
+     textVoice?:{
+       enable?: boolean;
+       voiceModal?: string;
+       voice_code?: string;
+       apiKey?: string;
+       type?: string;
+     };
+   };
+   imagine?: {
+     enable?: boolean;
+     drawTrigger?: string[];
+     imageModel?: string;
+     numOfImages?: number;
+     nsfw?: {
+       enable?: boolean;
+       keywords?: string[];
+     };
+     enhancer?: boolean;
+   };
+   chat?: {
+     chatModel?: string;
+     readFiles?: boolean;
+     readImages?: boolean;
+     typeWriting?:{
+       enable?: boolean;
+       speed?: number;
+       delay?: number;
+     };
+   };
+   others?: {
+     messageType: {
+       type: string;
+       intialContent: string;
+     };
+     keywords?: string[];
+     keywordResponses?: Record<string, string>;
+     loader?: {
+       enable?: boolean;
+       loadingMessage?: string;
+       loadingTimer?: number;
+     };
+     channel?: {
+       enable?: boolean;
+       id?: string[];
+     };
+     permissions?: {
+       enable?: boolean;
+       role?: string[];
+       permission?: string[];
+       blockedUsers?: string[];
+     };
+   };
+ }
+
+ type Response = string | { content?: string; reply?: string } | any;
+
+
+ export async function ApexAI (message: any, aiOptions: Options) {
+
+
+   await imageTools(
+     message.client,
+     ModalBuilder,
+     TextInputBuilder,
+     TextInputStyle,
+     ActionRowBuilder
+   );
+   await filters(message.client);
+
+   let usermsg: string = '';
+
+
+   const {
+     voice: {
+       textVoice: {
+         enable: textVoiceEnable = false,
+         voiceModal: textVoiceModal = "google",
+         voice_code: textVoiceCode = "en-US-3",
+         apiKey: textVoiceApiKey = "",
+         type: textVoiceType = "b"
+       } = {}
+     } = {},
+     imagine: {
+       enable: imagineEnable = false,
+       drawTrigger = ["create", "رسم"],
+       imageModel = "prodia",
+       numOfImages = 2,
+       nsfw: {
+         enable: nsfwEnabled = false,
+         keywords: nsfwKeyWords = []
+       } = {},
+       enhancer = false
+     } = {},
+     chat: {
+       chatModel = "v3",
+       readFiles = false,
+       readImages = false,
+       typeWriting: {
+         enable: typeWritingEnable = false,
+         speed = 70,
+         delay = 2000
+       } = {}
+     } = {},
+     others: {
+       messageType: {
+         type: msgType = 'reply',
+         intialContent: msgContent = ''
+       } = {},
+       channel: {
+         enable: channelEnable = false,
+         id: channelIds = []
+       } = {},
+       keywords = [],
+       keywordResponses = {},
+       loader: {
+         loadingMessage = 'loading...',
+         loadingTimer = 3000,
+         enable: loaderEnable = false
+       } = {},
+       permissions: {
+         enable: permissionEnable = false,
+         role = [],
+         permission = [],
+         blockedUsers = []
+       } = {}
+     } = {}
+   } = aiOptions;
+
+   if (permissionEnable) {
+
+     if (role.length > 0) {
+       const userRoles = message.member?.roles.cache.map((role: any) => role.id);
+       const hasPermission = userRoles.some((roleId: any) => role.includes(roleId));
+       if (!hasPermission) return;
+     }
+
+     if (permission.length > 0) {
+       const hasPermission = permission.some(perm => message.member?.permissions.has(perm));
+       if (!hasPermission) return;
+     }
+
+     if (blockedUsers.length > 0) {
+       const userId = message.author.id;
+       if (blockedUsers.includes(userId)) return;
+     }
+   }
+
+   if (channelEnable && !channelIds.includes(message.channel.id)) return;
+
+   await message.channel?.sendTyping();
+
+   usermsg = message.content;
+
+   if (
+     message.attachments.some((attachment: any) =>
+       attachment.contentType.startsWith("audio/")
+     )
+   ) {
+     return await message.reply({
+       content: "Voice messages are not supported at the moment. Stay tuned for future updates!"
+     });
+   }
+   const attachment = message.attachments?.first();
+   const imgURL = attachment?.url || null;
+
+   const validExtensions = /\.(png|jpg|jpeg|webp)$/i;
+
+   if (attachment && validExtensions.test(attachment.name)) {
+     if (imgURL && !readImages) {
+       usermsg += await imageReader(imgURL);
+     } else if (imgURL && readImages) {
+       usermsg += await readImage(imgURL);
+     }
+   }
+
+   if (imgURL === null && usermsg === '') {
+     return await message.reply({
+       content: "You need to provide a message or an attachment at least.",
+       allowedMentions: { repliedUser: false },
+     });
+   }
+
+
+   if (aiOptions.chat && readFiles) {
+     if (message.attachments.size > 0) {
+       if (attachment.name.endsWith('.pdf')) {
+         const pdfContent = await readPdf(attachment.url);
+         usermsg += pdfContent;
+       } else {
+         const txtContent = await readTextFile(attachment.url);
+         usermsg += txtContent;
+       }
+     }
+   }
+
+
+   if (aiOptions.others?.loader !== null && loaderEnable === true) {
+     await message.reply({
+       content: loadingMessage,
+       allowedMentions: { repliedUser: false },
+     }).then((replyMessage: any) => {
+       setTimeout(() => {
+         replyMessage.delete().catch(console.error);
+       }, loadingTimer || 3000);
+     });
+   }
+
+   await message.channel?.sendTyping();
+
+   let replied: string = "";
+
+
+   if (message.reference?.messageId) {
+     const fetchedMessage = await message.guild.channels.cache
+       .get(message.channel.id)
+       .messages.fetch(message.reference?.messageId);
+
+     if (fetchedMessage.content) {
+       replied += fetchedMessage.content;
+     }
+
+     if (fetchedMessage.attachments && validExtensions.test(attachment.name)) {
+       if (imgURL && !readImages) {
+         replied += await imageReader(fetchedMessage.attachments?.first().url);
+       } else if (imgURL && readImages) {
+         usermsg += await readImage(fetchedMessage.attachments?.first().url);
+       }
+     }
+     usermsg = `${usermsg}\n\n Read previous message: ${replied}`;
+   }
+
+
+   let response: Response = '';
+
+
+   for (const keyword of keywords) {
+     if (usermsg.toLowerCase().includes(keyword.toLowerCase())) {
+       response = keywordResponses[keyword] || "";
+       return await message.reply({
+         content: response,
+         allowedMentions: { repliedUser: false },
+       });
+     }
+   }
+
+   const drawValid: any = aiOptions.imagine && imagineEnable && toDraw(usermsg, drawTrigger);
+   const number = numOfImages;
+   const modal = imageModel;
+
+   if (drawValid) {
+
+     if (enhancer) {
+       usermsg += await gemmaAi_4(usermsg)
+     }
+
+     return await aiImagine(
+       message,
+       number,
+       usermsg,
+       hercai,
+       modal,
+       nsfwEnabled,
+       nsfwKeyWords
+     );
+
+   } else if (aiOptions.voice) {
+
+     if (aiOptions.voice.textVoice && textVoiceEnable) {
+       return await aiVoice(
+         message,
+         numOfImages,
+         usermsg,
+         hercai,
+         drawValid,
+         modal,
+         chatModel,
+         textVoiceModal,
+         textVoiceCode,
+         textVoiceApiKey,
+         textVoiceType,
+         nsfwEnabled,
+         nsfwKeyWords
+       );
+
+     }
+
+     if (usermsg.length >= 2000) {
+       return await message.reply({
+         content: 'Your message is too long for me to process. Please try sending a shorter message.',
+         allowedMentions: { repliedUser: false },
+       });
+     }
+
+   } else {
+     try {
+       if (chatModel === 'apexChat') {
+         response = await apexai(usermsg);
+       } else if (chatModel === 'gemma-v3') {
+         response = await gemmaAi_3(usermsg);
+       } else if (chatModel === 'gemma-v4') {
+         response = await gemmaAi_4(usermsg);
+       } else if (chatModel === 'starChat') {
+         response = await starChat(usermsg);
+       } else if (chatModel === 'zephyr-beta') {
+         response = await zephyr_beta(usermsg);
+       } else if (chatModel === 'v3' || chatModel === 'v3-32k' || chatModel === 'turbo' || chatModel === 'turbo-16k' || chatModel === 'gemini') {
+         response = await hercai.question({
+           model: chatModel,
+           content: usermsg,
+         });
+         response = response.reply
+       } else {
+         throw new Error('Invalid chat modal. Check documentation for valid chat modals.')
+       }
+     } catch (error: any) {
+       if (error.response && error.response.status === 429) {
+         console.error("Too many requests. Please try again later.");
+         return message.reply(`Please wait i am in a cool down for a minute`);
+       } else if (error.response && error.response.status === 500) {
+         console.error("Internal server error. Please try again later.");
+         return message.reply(`Please wait i am in a cool down for a minute`);
+       } else {
+         await message.reply(`Please wait i am in a cool down for a minute`);
+         console.error("The Api is on a cool down for 10 seconds", error.message);
+       }
+     }
+     response = `${msgContent}, ${response}`
+     if (msgType === 'reply') {
+       if (typeWritingEnable) {
+         if (response.length <= 2000) {
+           await typeWriter(message.channel, response, speed, delay);
+         } else {
+           let parts: string[] = [];
+           while (typeof response === 'string' && response.length > 0) {
+             const substring = response.substring(0, 1999);
+             parts.push(substring);
+             if (response.length > 1999) {
+               response = response.substring(1999);
+             } else {
+               break;
+             }
+           }
+           for (const part of parts) {
+             await typeWriter(message.channel, part, speed, delay);
+           }
+         }
+
+       } else {
+         if (response.length <= 2000) {
+           await message.reply({
+             content: response,
+             allowedMentions: { repliedUser: false },
+           });
+         } else {
+           let parts: string[] = [];
+           while (typeof response === 'string' && response.length > 0) {
+             const substring = response.substring(0, 1999);
+             parts.push(substring);
+             if (response.length > 1999) {
+               response = response.substring(1999);
+             } else {
+               break;
+             }
+           }
+           for (const part of parts) {
+             await message.reply({
+               content: part,
+               allowedMentions: { repliedUser: false },
+             });
+           }
+         }
+       }
+     } else if (msgType === 'send') {
+       if (typeWritingEnable) {
+         if (response.length <= 2000) {
+           await typeWriter(message.channel, response, speed, delay);
+         } else {
+           let parts: string[] = [];
+           while (typeof response === 'string' && response.length > 0) {
+             const substring = response.substring(0, 1999);
+             parts.push(substring);
+             if (response.length > 1999) {
+               response = response.substring(1999);
+             } else {
+               break;
+             }
+           }
+           for (const part of parts) {
+             await typeWriter(message.channel, part, speed, delay);
+           }
+         }
+
+       } else {
+         if (response.length <= 2000) {
+           await message.send({
+             content: response,
+             allowedMentions: { repliedUser: false },
+           });
+         } else {
+           let parts: string[] = [];
+           while (typeof response === 'string' && response.length > 0) {
+             const substring = response.substring(0, 1999);
+             parts.push(substring);
+             if (response.length > 1999) {
+               response = response.substring(1999);
+             } else {
+               break;
+             }
+           }
+           for (const part of parts) {
+             await message.send({
+               content: part,
+               allowedMentions: { repliedUser: false },
+             });
+           }
+         }
+       }
+     }
+   }
+ }
+
+ export async function gemmaAi_4(prompt: string) {
+   try {
+     const response = await axios.post('https://api-inference.huggingface.co/models/google/gemma-7b-it', { inputs: prompt }, {
+       headers: { 'Authorization': `Bearer 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` }
+     });
+     return response.data[0].generated_text;
+   } catch (error: any) {
+     console.error('Error fetching response:', error.message);
+     return null;
+   }
+ }
+
+ export async function gemmaAi_3(prompt: string) {
+   try {
+     const response = await axios.post('https://api-inference.huggingface.co/models/google/gemma-2b-it', { inputs: prompt }, {
+       headers: { 'Authorization': `Bearer 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` }
+     });
+     return response.data[0].generated_text;
+   } catch (error: any) {
+     console.error('Error fetching response:', error.message);
+     return null;
+   }
+ }
+
+ export async function apexai(prompt: string) {
+   try {
+     const messages = [
+       {"role": "user", "content": `${prompt}`}
+     ];
+     const formattedMessages = messages.map(message => `[${message.role}] ${message.content}`).join('\n');
+
+     const response = await axios.post(`https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1`, {
+       inputs: formattedMessages
+     }, {
+       headers: {
+         'Authorization': `Bearer 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`,
+         'Content-Type': 'application/json'
+       }
+     });
+
+     const generatedText = response.data[0].generated_text;
+
+     const lines = generatedText.split('\n').slice(1);
+
+     const output = lines.join('\n');
+
+     return output
+   } catch (error: any) {
+     console.error('Error:', error.response.data);
+     return 'Please wait i am on cooldown.'
+   }
+ }
+
+ export async function starChat(prompt: string) {
+   const messages = [{"role":"user","content": `${prompt}`}]
+
+   try {
+     const response = await axios.post('https://api-inference.huggingface.co/models/HuggingFaceH4/starchat2-15b-v0.1', {
+       inputs: JSON.stringify(messages),
+     }, {
+       headers: {
+         'Authorization': `Bearer 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`,
+       },
+     });
+
+     const chatbotReply = response.data[0];
+     const chatbotResponseText = chatbotReply.generated_text.replace(/^.*?\n.*?\n/, '');
+     const chatbotResponseArray = JSON.parse(chatbotResponseText);
+     const chatbotResponseString = chatbotResponseArray.join(' ');
+
+     return chatbotResponseString;
+   } catch (error: any) {
+     console.error('Error fetching response:', error.message);
+     return null;
+   }
+ }
+
+ export async function zephyr_beta(prompt: string) {
+
+   const messages = [{"role":"user","content": `${prompt}` }]
+   try {
+     const response = await axios.post('https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta', {
+       inputs: JSON.stringify(messages),
+     }, {
+       headers: {
+         'Authorization': `Bearer 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`,
+       },
+     });
+
+     const chatbotReply = response.data[0];
+
+     const textParts = chatbotReply.generated_text.split('\n');
+
+     const secondArrayString = textParts[2];
+     const chatbotResponseArray = JSON.parse(secondArrayString);
+
+     const chatbotResponseString = chatbotResponseArray.map((obj: any) => obj.content).join(' ');
+
+     return chatbotResponseString;
+   } catch (error: any) {
+     console.error('Error fetching response:', error.message);
+     return null;
+   }
+ }
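
For context, a minimal sketch of how the ApexAI entry point added in this release might be wired into a discord.js v14 bot. The client setup, intents, token handling, and option values below are illustrative assumptions based on the Options interface in the diff above, not code shipped by the package; the root-level re-export of ApexAI from "apexify.js" is also an assumption.

import { Client, GatewayIntentBits } from "discord.js";
import { ApexAI } from "apexify.js"; // assumed root export

const client = new Client({
  intents: [
    GatewayIntentBits.Guilds,
    GatewayIntentBits.GuildMessages,
    GatewayIntentBits.MessageContent, // needed to read message.content
  ],
});

client.on("messageCreate", async (message) => {
  if (message.author.bot) return;

  // Option names follow the Options interface above,
  // including the `intialContent` spelling it declares.
  await ApexAI(message, {
    chat: { chatModel: "v3" },
    others: {
      messageType: { type: "reply", intialContent: "" },
    },
  });
});

client.login(process.env.DISCORD_TOKEN); // hypothetical env variable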