apexify.js 4.4.35 → 4.5.0

This diff shows the changes between publicly available package versions as they were published to their respective public registries. It is provided for informational purposes only.
Files changed (216)
  1. package/README.md +26 -199
  2. package/dist/ai/ApexAI.d.ts +18 -5
  3. package/dist/ai/ApexAI.d.ts.map +1 -1
  4. package/dist/ai/ApexAI.js +33 -170
  5. package/dist/ai/ApexAI.js.map +1 -1
  6. package/dist/ai/ApexModules.d.ts.map +1 -1
  7. package/dist/ai/ApexModules.js +37 -125
  8. package/dist/ai/ApexModules.js.map +1 -1
  9. package/dist/ai/functions/draw.d.ts +6 -1
  10. package/dist/ai/functions/draw.d.ts.map +1 -1
  11. package/dist/ai/functions/draw.js +18 -59
  12. package/dist/ai/functions/draw.js.map +1 -1
  13. package/dist/ai/functions/generateVoiceResponse.d.ts +6 -1
  14. package/dist/ai/functions/generateVoiceResponse.d.ts.map +1 -1
  15. package/dist/ai/functions/generateVoiceResponse.js +2 -2
  16. package/dist/ai/functions/generateVoiceResponse.js.map +1 -1
  17. package/dist/ai/functions/validOptions.d.ts +11 -4
  18. package/dist/ai/functions/validOptions.d.ts.map +1 -1
  19. package/dist/ai/functions/validOptions.js +15 -14
  20. package/dist/ai/functions/validOptions.js.map +1 -1
  21. package/dist/ai/modals-chat/electronHub/imageModels.d.ts +7 -0
  22. package/dist/ai/modals-chat/electronHub/imageModels.d.ts.map +1 -0
  23. package/dist/ai/modals-chat/electronHub/imageModels.js +28 -0
  24. package/dist/ai/modals-chat/electronHub/imageModels.js.map +1 -0
  25. package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts +2 -0
  26. package/dist/ai/modals-chat/freesedgpt/cartoon.d.ts.map +1 -0
  27. package/dist/ai/modals-chat/freesedgpt/cartoon.js +26 -0
  28. package/dist/ai/modals-chat/freesedgpt/cartoon.js.map +1 -0
  29. package/dist/ai/modals-chat/freesedgpt/chat.d.ts +5 -0
  30. package/dist/ai/modals-chat/freesedgpt/chat.d.ts.map +1 -0
  31. package/dist/ai/modals-chat/freesedgpt/chat.js +30 -0
  32. package/dist/ai/modals-chat/freesedgpt/chat.js.map +1 -0
  33. package/dist/ai/modals-chat/freesedgpt/flux.d.ts +2 -0
  34. package/dist/ai/modals-chat/freesedgpt/flux.d.ts.map +1 -0
  35. package/dist/ai/modals-chat/freesedgpt/flux.js +26 -0
  36. package/dist/ai/modals-chat/freesedgpt/flux.js.map +1 -0
  37. package/dist/ai/modals-chat/gemini/Gemini-flash.d.ts.map +1 -0
  38. package/dist/ai/modals-chat/{Gemini-flash.js → gemini/Gemini-flash.js} +2 -2
  39. package/dist/ai/modals-chat/gemini/Gemini-flash.js.map +1 -0
  40. package/dist/ai/modals-chat/gemini/Gemini-pro.d.ts.map +1 -0
  41. package/dist/ai/modals-chat/{Gemini-pro.js → gemini/Gemini-pro.js} +2 -2
  42. package/dist/ai/modals-chat/gemini/Gemini-pro.js.map +1 -0
  43. package/dist/ai/modals-chat/gemini/config.d.ts.map +1 -0
  44. package/dist/ai/modals-chat/gemini/config.js.map +1 -0
  45. package/dist/ai/modals-chat/gemini/geminiFast.d.ts.map +1 -0
  46. package/dist/ai/modals-chat/gemini/geminiFast.js.map +1 -0
  47. package/dist/ai/modals-chat/groq/chatgroq.d.ts +9 -0
  48. package/dist/ai/modals-chat/groq/chatgroq.d.ts.map +1 -0
  49. package/dist/ai/modals-chat/groq/chatgroq.js +58 -0
  50. package/dist/ai/modals-chat/groq/chatgroq.js.map +1 -0
  51. package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts +8 -0
  52. package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts.map +1 -0
  53. package/dist/ai/modals-chat/groq/imageAnalyzer.js +64 -0
  54. package/dist/ai/modals-chat/groq/imageAnalyzer.js.map +1 -0
  55. package/dist/ai/modals-chat/groq/whisper.d.ts.map +1 -0
  56. package/dist/ai/modals-chat/{whisper.js → groq/whisper.js} +2 -2
  57. package/dist/ai/modals-chat/groq/whisper.js.map +1 -0
  58. package/dist/ai/modals-chat/hercai/chatModals.d.ts +7 -0
  59. package/dist/ai/modals-chat/hercai/chatModals.d.ts.map +1 -0
  60. package/dist/ai/modals-chat/hercai/chatModals.js +23 -0
  61. package/dist/ai/modals-chat/hercai/chatModals.js.map +1 -0
  62. package/dist/ai/modals-chat/hercai/chatModels.d.ts +7 -0
  63. package/dist/ai/modals-chat/hercai/chatModels.d.ts.map +1 -0
  64. package/dist/ai/modals-chat/hercai/chatModels.js +23 -0
  65. package/dist/ai/modals-chat/hercai/chatModels.js.map +1 -0
  66. package/dist/ai/modals-chat/others/otherModels.d.ts +7 -0
  67. package/dist/ai/modals-chat/others/otherModels.d.ts.map +1 -0
  68. package/dist/ai/modals-chat/others/otherModels.js +88 -0
  69. package/dist/ai/modals-chat/others/otherModels.js.map +1 -0
  70. package/dist/ai/modals-chat/rsn/rsnChat.d.ts +8 -0
  71. package/dist/ai/modals-chat/rsn/rsnChat.d.ts.map +1 -0
  72. package/dist/ai/modals-chat/{bing.js → rsn/rsnChat.js} +22 -8
  73. package/dist/ai/modals-chat/rsn/rsnChat.js.map +1 -0
  74. package/dist/ai/modals-images/cartoon.js +1 -1
  75. package/dist/ai/modals-images/cartoon.js.map +1 -1
  76. package/dist/ai/modals-images/flux.js +1 -1
  77. package/dist/ai/modals-images/flux.js.map +1 -1
  78. package/dist/ai/utils.d.ts +4 -7
  79. package/dist/ai/utils.d.ts.map +1 -1
  80. package/dist/ai/utils.js +6 -12
  81. package/dist/ai/utils.js.map +1 -1
  82. package/dist/index.d.ts +11 -6
  83. package/dist/index.d.ts.map +1 -1
  84. package/dist/index.js +6 -3
  85. package/dist/index.js.map +1 -1
  86. package/lib/ai/ApexAI.ts +92 -189
  87. package/lib/ai/ApexModules.ts +43 -135
  88. package/lib/ai/functions/draw.ts +24 -68
  89. package/lib/ai/functions/generateVoiceResponse.ts +3 -2
  90. package/lib/ai/functions/validOptions.ts +23 -22
  91. package/lib/ai/modals-chat/electronHub/imageModels.ts +26 -0
  92. package/lib/ai/{modals-images → modals-chat/freesedgpt}/cartoon.ts +3 -3
  93. package/lib/ai/modals-chat/freesedgpt/chat.ts +31 -0
  94. package/lib/ai/{modals-images → modals-chat/freesedgpt}/flux.ts +3 -3
  95. package/lib/ai/modals-chat/{Gemini-flash.ts → gemini/Gemini-flash.ts} +2 -2
  96. package/lib/ai/modals-chat/{Gemini-pro.ts → gemini/Gemini-pro.ts} +2 -2
  97. package/lib/ai/modals-chat/groq/chatgroq.ts +68 -0
  98. package/lib/ai/modals-chat/groq/imageAnalyzer.ts +68 -0
  99. package/lib/ai/modals-chat/{whisper.ts → groq/whisper.ts} +2 -2
  100. package/lib/ai/modals-chat/hercai/chatModels.ts +20 -0
  101. package/lib/ai/modals-chat/others/otherModels.ts +99 -0
  102. package/lib/ai/modals-chat/{mixtral.ts → rsn/rsnChat.ts} +26 -8
  103. package/lib/ai/utils.ts +7 -12
  104. package/lib/index.ts +5 -3
  105. package/package.json +1 -1
  106. package/dist/ai/functions/imageAnalysis.d.ts +0 -2
  107. package/dist/ai/functions/imageAnalysis.d.ts.map +0 -1
  108. package/dist/ai/functions/imageAnalysis.js +0 -45
  109. package/dist/ai/functions/imageAnalysis.js.map +0 -1
  110. package/dist/ai/functions/readImagess.d.ts +0 -2
  111. package/dist/ai/functions/readImagess.d.ts.map +0 -1
  112. package/dist/ai/functions/readImagess.js +0 -45
  113. package/dist/ai/functions/readImagess.js.map +0 -1
  114. package/dist/ai/modals-chat/Gemini-flash.d.ts.map +0 -1
  115. package/dist/ai/modals-chat/Gemini-flash.js.map +0 -1
  116. package/dist/ai/modals-chat/Gemini-pro.d.ts.map +0 -1
  117. package/dist/ai/modals-chat/Gemini-pro.js.map +0 -1
  118. package/dist/ai/modals-chat/apexChat.d.ts +0 -2
  119. package/dist/ai/modals-chat/apexChat.d.ts.map +0 -1
  120. package/dist/ai/modals-chat/apexChat.js +0 -32
  121. package/dist/ai/modals-chat/apexChat.js.map +0 -1
  122. package/dist/ai/modals-chat/bard.d.ts +0 -7
  123. package/dist/ai/modals-chat/bard.d.ts.map +0 -1
  124. package/dist/ai/modals-chat/bard.js +0 -48
  125. package/dist/ai/modals-chat/bard.js.map +0 -1
  126. package/dist/ai/modals-chat/bing.d.ts +0 -7
  127. package/dist/ai/modals-chat/bing.d.ts.map +0 -1
  128. package/dist/ai/modals-chat/bing.js.map +0 -1
  129. package/dist/ai/modals-chat/codellama.d.ts +0 -7
  130. package/dist/ai/modals-chat/codellama.d.ts.map +0 -1
  131. package/dist/ai/modals-chat/codellama.js +0 -48
  132. package/dist/ai/modals-chat/codellama.js.map +0 -1
  133. package/dist/ai/modals-chat/config.d.ts.map +0 -1
  134. package/dist/ai/modals-chat/config.js.map +0 -1
  135. package/dist/ai/modals-chat/facebook-ai.d.ts +0 -2
  136. package/dist/ai/modals-chat/facebook-ai.d.ts.map +0 -1
  137. package/dist/ai/modals-chat/facebook-ai.js +0 -20
  138. package/dist/ai/modals-chat/facebook-ai.js.map +0 -1
  139. package/dist/ai/modals-chat/geminiFast.d.ts.map +0 -1
  140. package/dist/ai/modals-chat/geminiFast.js.map +0 -1
  141. package/dist/ai/modals-chat/geminiV2.d.ts +0 -7
  142. package/dist/ai/modals-chat/geminiV2.d.ts.map +0 -1
  143. package/dist/ai/modals-chat/geminiV2.js +0 -48
  144. package/dist/ai/modals-chat/geminiV2.js.map +0 -1
  145. package/dist/ai/modals-chat/gemma.d.ts +0 -2
  146. package/dist/ai/modals-chat/gemma.d.ts.map +0 -1
  147. package/dist/ai/modals-chat/gemma.js +0 -43
  148. package/dist/ai/modals-chat/gemma.js.map +0 -1
  149. package/dist/ai/modals-chat/llama.d.ts +0 -7
  150. package/dist/ai/modals-chat/llama.d.ts.map +0 -1
  151. package/dist/ai/modals-chat/llama.js +0 -48
  152. package/dist/ai/modals-chat/llama.js.map +0 -1
  153. package/dist/ai/modals-chat/llamav2.d.ts +0 -2
  154. package/dist/ai/modals-chat/llamav2.d.ts.map +0 -1
  155. package/dist/ai/modals-chat/llamav2.js +0 -43
  156. package/dist/ai/modals-chat/llamav2.js.map +0 -1
  157. package/dist/ai/modals-chat/llamav3.d.ts +0 -2
  158. package/dist/ai/modals-chat/llamav3.d.ts.map +0 -1
  159. package/dist/ai/modals-chat/llamav3.js +0 -43
  160. package/dist/ai/modals-chat/llamav3.js.map +0 -1
  161. package/dist/ai/modals-chat/mixtral.d.ts +0 -7
  162. package/dist/ai/modals-chat/mixtral.d.ts.map +0 -1
  163. package/dist/ai/modals-chat/mixtral.js +0 -48
  164. package/dist/ai/modals-chat/mixtral.js.map +0 -1
  165. package/dist/ai/modals-chat/mixtralv2.d.ts +0 -2
  166. package/dist/ai/modals-chat/mixtralv2.d.ts.map +0 -1
  167. package/dist/ai/modals-chat/mixtralv2.js +0 -43
  168. package/dist/ai/modals-chat/mixtralv2.js.map +0 -1
  169. package/dist/ai/modals-chat/modals.d.ts +0 -8
  170. package/dist/ai/modals-chat/modals.d.ts.map +0 -1
  171. package/dist/ai/modals-chat/modals.js +0 -16
  172. package/dist/ai/modals-chat/modals.js.map +0 -1
  173. package/dist/ai/modals-chat/openChat.d.ts +0 -7
  174. package/dist/ai/modals-chat/openChat.d.ts.map +0 -1
  175. package/dist/ai/modals-chat/openChat.js +0 -48
  176. package/dist/ai/modals-chat/openChat.js.map +0 -1
  177. package/dist/ai/modals-chat/starChat.d.ts +0 -2
  178. package/dist/ai/modals-chat/starChat.d.ts.map +0 -1
  179. package/dist/ai/modals-chat/starChat.js +0 -31
  180. package/dist/ai/modals-chat/starChat.js.map +0 -1
  181. package/dist/ai/modals-chat/v4.d.ts +0 -7
  182. package/dist/ai/modals-chat/v4.d.ts.map +0 -1
  183. package/dist/ai/modals-chat/v4.js +0 -48
  184. package/dist/ai/modals-chat/v4.js.map +0 -1
  185. package/dist/ai/modals-chat/whisper.d.ts.map +0 -1
  186. package/dist/ai/modals-chat/whisper.js.map +0 -1
  187. package/dist/ai/modals-chat/yi-ai.d.ts +0 -2
  188. package/dist/ai/modals-chat/yi-ai.d.ts.map +0 -1
  189. package/dist/ai/modals-chat/yi-ai.js +0 -40
  190. package/dist/ai/modals-chat/yi-ai.js.map +0 -1
  191. package/lib/ai/functions/imageAnalysis.ts +0 -41
  192. package/lib/ai/modals-chat/apexChat.ts +0 -31
  193. package/lib/ai/modals-chat/bard.ts +0 -44
  194. package/lib/ai/modals-chat/bing.ts +0 -44
  195. package/lib/ai/modals-chat/codellama.ts +0 -44
  196. package/lib/ai/modals-chat/facebook-ai.ts +0 -14
  197. package/lib/ai/modals-chat/geminiV2.ts +0 -44
  198. package/lib/ai/modals-chat/gemma.ts +0 -35
  199. package/lib/ai/modals-chat/llama.ts +0 -44
  200. package/lib/ai/modals-chat/llamav2.ts +0 -35
  201. package/lib/ai/modals-chat/llamav3.ts +0 -35
  202. package/lib/ai/modals-chat/mixtralv2.ts +0 -35
  203. package/lib/ai/modals-chat/modals.ts +0 -8
  204. package/lib/ai/modals-chat/openChat.ts +0 -44
  205. package/lib/ai/modals-chat/starChat.ts +0 -31
  206. package/lib/ai/modals-chat/v4.ts +0 -44
  207. package/lib/ai/modals-chat/yi-ai.ts +0 -40
  208. /package/dist/ai/modals-chat/{Gemini-flash.d.ts → gemini/Gemini-flash.d.ts} +0 -0
  209. /package/dist/ai/modals-chat/{Gemini-pro.d.ts → gemini/Gemini-pro.d.ts} +0 -0
  210. /package/dist/ai/modals-chat/{config.d.ts → gemini/config.d.ts} +0 -0
  211. /package/dist/ai/modals-chat/{config.js → gemini/config.js} +0 -0
  212. /package/dist/ai/modals-chat/{geminiFast.d.ts → gemini/geminiFast.d.ts} +0 -0
  213. /package/dist/ai/modals-chat/{geminiFast.js → gemini/geminiFast.js} +0 -0
  214. /package/dist/ai/modals-chat/{whisper.d.ts → groq/whisper.d.ts} +0 -0
  215. /package/lib/ai/modals-chat/{config.ts → gemini/config.ts} +0 -0
  216. /package/lib/ai/modals-chat/{geminiFast.ts → gemini/geminiFast.ts} +0 -0
@@ -3,22 +3,18 @@ import axios from 'axios';
  import api from "api";
  import translate from "@iamtraction/google-translate";
  const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
- import { apexai, starChat, facebook_ai, yi_34b } from './modals-chat/modals';
- import { validOptions } from "./functions/validOptions";
- import { llamaChat, mixtral, openChat, v4 } from './utils';
- import { codellama } from './modals-chat/codellama';
- import { bard } from './modals-chat/bard';
- import { bing } from './modals-chat/bing';
- import { gemini_v2 } from './modals-chat/geminiV2';
- import { whisper } from './modals-chat/whisper';
- import { llama2 } from './modals-chat/llamav2';
- import { llama3 } from './modals-chat/llamav3';
- import { gemma } from './modals-chat/gemma';
- import { mixtral2 } from './modals-chat/mixtralv2';
+ import { groqChatModels, hercChatModels, otherChatModel, rsnChatModels, validateModels } from "./functions/validOptions";
+ import { whisper } from './modals-chat/groq/whisper';
  import { connect } from 'verse.db';
- import { geminiFast } from './modals-chat/geminiFast';
- import { flux } from './modals-images/flux';
- import { cartoon } from './modals-images/cartoon';
+ import { otherChatModels } from './modals-chat/others/otherModels';
+ import { rsnAPI } from './modals-chat/rsn/rsnChat';
+ import { chatGroq } from './modals-chat/groq/chatgroq';
+ import { geminiFast } from './modals-chat/gemini/geminiFast';
+ import { cartoon } from './modals-chat/freesedgpt/cartoon';
+ import { flux } from './modals-chat/freesedgpt/flux';
+ import { gpt4o } from './modals-chat/freesedgpt/chat';
+ import { groqAnalyzer } from './utils';
+
 
  const dbConfig = {
  adapter: 'json',
@@ -50,21 +46,19 @@ type hercmodals = "v1" | "v2" | "v2-beta" | "v3" | "lexica" | "prodia" | "animef
  async function ApexImagine(model: string, prompt: string, options: ApexImagineOptions): Promise<string[] | undefined> {
  let imageURL: string | null = '';
  let response: string[] = [];
- const imageType = await validOptions;
- const { nsfw = false, deepCheck = false, nsfwWords = [], count = 2, negative_prompt = "", sampler = "DPM++ 2M Karras", height = 512, width = 512, cfg_scale = 9, steps = 20, seed = -1, image_style = "cinematic" } = options;
-
- sdk.auth(options.Api_Key || 'eaebff6e-c7b2-477c-8edd-9aa91becf1e3');
+ const imageType = await validateModels;
+ const { Api_Key = 'eaebff6e-c7b2-477c-8edd-9aa91becf1e3', nsfw = false, deepCheck = false, nsfwWords = [], count = 2, negative_prompt = "", sampler = "DPM++ 2M Karras", height = 512, width = 512, cfg_scale = 9, steps = 20, seed = -1, image_style = "cinematic" } = options;
 
  const translatedText = await translate(prompt, {
  from: "auto",
- to: "en",
- });
+ to: "en"
+ });
 
  if (count > 4 || count <= 0) throw new Error("Inavlid usage. Count can't be less than 0 or more than 4.");
- if ((width || height) > 1024 || (width || height) <= 0) throw new Error("Inavlid usage. Image width/height can't be less than 0 or more than 1024.");
+ if ((width || height) <= 0) throw new Error("Inavlid usage. Image width/height can't be less than 0 or more than 1024.");
 
  for (let i = 0; i < count; i++) {
- if (imageType.validHercaiModals.includes(model)) {
+ if (imageType.validHercaiModels.includes(model)) {
  if (model === 'prodia-v2') {
  imageURL = (await hercai.betaDrawImage({
  prompt: translatedText.text,
@@ -83,7 +77,8 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
  negative_prompt: negative_prompt
  })).url;
  }
- } else if (imageType.validProdiaModals.includes(model)) {
+ } else if (imageType.validProdiaModels.includes(model)) {
+ sdk.auth(Api_Key);
  const generating = await sdk.generate({
  model: model,
  prompt: translatedText.text,
@@ -100,6 +95,7 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
  const generatedJobId = generating.data.job;
  imageURL = await checkJobStatus(generatedJobId);
  } else if (imageType.validSXDL.includes(model)) {
+ sdk.auth(Api_Key);
  const generating = await sdk.sdxlGenerate({
  model: model,
  prompt: translatedText.text,
@@ -115,15 +111,16 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
 
  const generatedJobId = generating.data.job;
  imageURL = await checkJobStatus(generatedJobId);
- } else if (model === 'flux') {
- const response = await flux(prompt);
- if (response !== null) imageURL = response;
- if (response === null) throw new Error('This modal got broke please inform the owner directly!')
- } else if (model === 'cartoon') {
- const response = await cartoon(prompt);
- if (response !== null) imageURL = response;
- if (response === null) throw new Error('This modal got broke please inform the owner directly!')
- } else {
+ } else if (model === 'flux-schnell') {
+ const gen = await flux(translatedText.text, options.Api_Key as string);
+ if (gen === null) throw new Error('this model reached rate limit. Provide your own api key from: "https://discord.gg/94qUZWhwFE"');
+ response = gen
+
+ } else if (model === 'real-cartoon-xl-v6') {
+ const gen = await cartoon(translatedText.text, options.Api_Key as string);
+ if (gen === null) throw new Error('this model reached rate limit. Provide your own api key from: "https://discord.gg/94qUZWhwFE"');
+ response = gen
+ } else {
  throw new Error("Invalid model provided. Please check docs/npm page for valid models.");
  }
 
@@ -618,7 +615,7 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
  let shouldExclude = false;
 
  if (nsfw === true) {
- const caption = await apexChecker(imageURL);
+ const caption = await groqAnalyzer({ imgURL: imageURL, prompt });
  if (!caption) return;
  shouldExclude = NSFWWORDS.some(word => caption.includes(word));
  shouldExclude = nsfwWords.some(word => caption.includes(word));
@@ -678,7 +675,7 @@ async function ApexChat(model: string, prompt: string, { userId, memory, limit,
  }
 
  const responses = await Promise.all(chunks.map(async (chunk) => {
- return await processChunk(model, chunk, {});
+ return await processChunk(model, chunk, { ApiKey: Api_key, personality: instruction });
  }));
 
  response = responses.join('');
@@ -699,116 +696,27 @@ async function ApexImagine(model: string, prompt: string, options: ApexImagineOp
 
  async function processChunk(model: string, prompt: string, { ApiKey, personality }: { ApiKey?: string, personality?: string }): Promise<string> {
  let response: string;
- switch (model) {
- case 'v3':
- case 'v3-32k':
- case 'turbo':
- case 'turbo-16k':
- case 'gemini':
+ switch (true) {
+ case hercChatModels.includes(model):
  response = (await hercai.question({ model: model as ChatModelOption, content: prompt, personality })).reply;
  break;
- case 'apexChat':
- response = await apexai(prompt);
- break;
- case 'starChat':
- response = await starChat(prompt);
- break;
- case 'facebook-ai':
- response = await facebook_ai(prompt);
- break;
- case 'yi-ai':
- response = await yi_34b(prompt);
- break;
- case 'v4':
- response = await v4({ API_KEY: ApiKey, prompt });
- break;
- case 'openChat':
- response = await openChat({ API_KEY: ApiKey, prompt });
- break;
- case 'mixtral':
- response = await mixtral({ API_KEY: ApiKey, prompt });
- break;
- case 'llama':
- response = await llamaChat({ API_KEY: ApiKey, prompt });
- break;
- case 'llama-v2':
- response = await llama2(prompt, ApiKey);
- break;
- case 'llama-v3':
- response = await llama3(prompt, ApiKey);
- break;
- case 'gemma':
- response = await gemma(prompt, ApiKey);
- break;
- case 'mixtral-v2':
- response = await mixtral2(prompt, ApiKey);
+ case otherChatModel.includes(model):
+ response = await otherChatModels({ modelName: model as any, prompt });
  break;
- case 'codellama':
- response = await codellama({ API_KEY: ApiKey, prompt });
+ case rsnChatModels.includes(model):
+ response = await rsnAPI({ API_KEY: ApiKey, apiName: model, prompt });
  break;
- case 'bard':
- response = await bard({ API_KEY: ApiKey, prompt });
- break;
- case 'bing':
- response = await bing({ API_KEY: ApiKey, prompt });
- break;
- case 'gemini-v2':
- response = await gemini_v2({ API_KEY: ApiKey, prompt });
+ case groqChatModels.includes(model):
+ response = await chatGroq({ API_KEY: ApiKey, apiName: model, prompt, instruction: personality });
  break;
+ case model === 'gpt-4o':
+ response = await gpt4o({ ApiKey, prompt });
+ break;
  default:
  throw new Error('Invalid model.');
  }
  return response;
  }
- async function apexChecker(urls: any) {
- try {
- let retryCount = 0;
- const maxRetries = 3;
-
- const fetchData = async () => {
- try {
- const response = await axios.post(
- `https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base`,
- { image: urls },
- {
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer 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`,
- },
- },
- );
-
- if (response.status === 200) {
- return response.data[0].generated_text;
- } else {
- console.error(
- `Failed to fetch image captioning API: ${response.statusText}`,
- );
- return null;
- }
- } catch (e: any) {
- console.error(`Error fetching data: ${e.message}`);
- throw e;
- }
- };
-
- while (retryCount < maxRetries) {
- try {
- return await fetchData();
- } catch (e: any) {
- console.error(
- `Error fetching data (Retry ${retryCount + 1}): ${e.message}`,
- );
- retryCount++;
- }
- }
-
- return null;
- } catch (e: any) {
- console.error(`Error in attemptImageCaptioning: ${e.message}`);
- return null;
- }
- }
 
  async function checkJobStatus(jobId: string): Promise<string | null> {
  try {
@@ -2,13 +2,14 @@ import translate from "@iamtraction/google-translate";
  import sharp from "sharp";
  import { ButtonBuilder, StringSelectMenuBuilder, StringSelectMenuOptionBuilder, ButtonStyle, ActionRowBuilder, AttachmentBuilder, AttachmentData } from "discord.js";
  import axios from "axios";
- import { validOptions } from './validOptions';
+ import { validateModels } from './validOptions';
  import api from "api";
- import { cartoon } from "../modals-images/cartoon";
- import { flux } from "../modals-images/flux";
+ import { groqAnalyzer } from "../utils";
+ import { flux } from "../modals-chat/freesedgpt/flux";
+ import { cartoon } from "../modals-chat/freesedgpt/cartoon";
+
 
  const sdk: any = api("@prodia/v1.3.0#be019b2kls0gqss3");
- sdk.auth('eaebff6e-c7b2-477c-8edd-9aa91becf1e3');
  async function aiImagine(
  message: any,
  numOfImages: number,
@@ -20,13 +21,13 @@ async function aiImagine(
  deepCheck: boolean,
  enhancer: any,
  buttons: any[],
- RespondMessage: any
+ RespondMessage: any,
+ imageAPIS?: { groqAPI?: string, rsnAPI?: string, prodiaAPI?: string, freesedGPTApi?: string }
  ) {
-
  const maxRetryAttempts = 4;
  const retryInterval = 5000;
  let response: any;
- const imageType = await validOptions;
+ const imageType = await validateModels;
 
  async function retry(fn: any, retriesLeft = maxRetryAttempts) {
 
@@ -79,14 +80,16 @@ async function aiImagine(
  );
  if (response.url === 'This Modal is Currently Under Maintenance.') throw new Error('this model is under Maintenance for a while.')
 
- } else if (imageType.validHercaiModals.includes(imageModal)) {
+ } else if (imageType.validHercaiModels.includes(imageModal)) {
  response = await retry(() =>
  hercai.drawImage({
  model: imageModal,
  prompt: translatedText.text,
  }),
  );
- } else if (imageType.validProdiaModals.includes(imageModal)) {
+ } else if (imageType.validProdiaModels.includes(imageModal)) {
+ sdk.auth(imageAPIS?.prodiaAPI);
+
  const generateResponse = await sdk.generate({
  model: imageModal,
  prompt: translatedText.text,
@@ -105,6 +108,7 @@ async function aiImagine(
  const generatedJobId = generateResponse.data.job;
  response = await checkJobStatus(generatedJobId);
  } else if (imageType.validSXDL.includes(imageModal)) {
+ sdk.auth(imageAPIS?.prodiaAPI);
 
  const generateResponse = await sdk.sdxlGenerate({
  model: imageModal,
@@ -123,15 +127,16 @@ async function aiImagine(
 
  const generatedJobId = generateResponse.data.job;
  response = await checkJobStatus(generatedJobId);
- } else if (imageModal === 'flux') {
- const re = await flux(translatedText.text);
- if (response !== null) response = re;
- if (response === null) throw new Error('This modal got broke please inform the owner directly!')
- } else if (imageModal === 'cartoon') {
- const re = await cartoon(translatedText.text);
- if (response !== null) response = re;
- if (response === null) throw new Error('This modal got broke please inform the owner directly!')
- } else {
+ } else if (imageModal === 'flux-schnell') {
+ const gen = await flux(translatedText.text, imageAPIS?.freesedGPTApi as string);
+ if (gen === null) return message.reply(`Please wait i am in a cool down for a minute`);
+ response = gen
+
+ } else if (imageModal === 'real-cartoon-xl-v6') {
+ const gen = await cartoon(translatedText.text, imageAPIS?.freesedGPTApi as string);
+ if (gen === null) return message.reply(`Please wait i am in a cool down for a minute`);
+ response = gen
+ } else {
  throw new Error("Invalid modal name.");
  }
  } catch (error: any) {
@@ -690,7 +695,7 @@ async function aiImagine(
  }
 
  if (nsfw) {
- const textToCheck = await attemptImageCaptioning(imageUrl);
+ const textToCheck = await groqAnalyzer({ imgURL: imageUrl, prompt: textToDraw });
 
  if (textToCheck && nsfwWords.some(word => textToCheck.includes(word))) {
  return message.reply("Warning ⚠️. Your prompt contains **`NSFW/Illegal/Prohibited`** words. Please refrain from doing this.");
@@ -831,54 +836,5 @@ async function checkJobStatus(jobId: number | string | any) {
  }
  }
 
- async function attemptImageCaptioning(imageUrl: string) {
- try {
- let retryCount = 0;
- const maxRetries = 3;
-
- const fetchData = async () => {
- try {
- const response = await axios.post(
- `https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base`,
- { image: imageUrl },
- {
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer 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`,
- },
- },
- );
-
- if (response.status === 200) {
- return response.data[0].generated_text;
- } else {
- console.error(
- `Failed to fetch image captioning API: ${response.statusText}`,
- );
- return null;
- }
- } catch (error: any) {
- console.error(`Error fetching data: ${error.message}`);
- throw error;
- }
- };
-
- while (retryCount < maxRetries) {
- try {
- return await fetchData();
- } catch (error: any) {
- console.error(
- `Error fetching data (Retry ${retryCount + 1}): ${error.message}`,
- );
- retryCount++;
- }
- }
-
- return null;
- } catch (error: any) {
- console.error(`Error in attemptImageCaptioning: ${error.message}`);
- return null;
- }
- }
 
  export { aiImagine };
@@ -27,7 +27,8 @@ async function aiVoice(
  deepCheck: boolean,
  enhancer: any,
  buttons: any[],
- RespondMessage: any
+ RespondMessage: any,
+ imageAPIS?: { groqAPI?: string, rsnAPI?: string, prodiaAPI?: string, freesedGPTApi?: string }
  ) {
  if (message.author.bot || isProcessing || !message.guild) {
  return;
@@ -39,7 +40,7 @@ async function aiVoice(
  let msg = message.content;
 
  if (drawValid) {
- return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage);
+ return await aiImagine(message, numOfImages, finalText, hercai, imageModal, nsfw, nsfwKeyWords, deepCheck, enhancer, buttons, RespondMessage, { rsnAPI: imageAPIS?.rsnAPI, groqAPI: imageAPIS?.groqAPI, prodiaAPI: imageAPIS?.prodiaAPI, freesedGPTApi: imageAPIS?.freesedGPTApi as string});
  }
 
  if (message.attachments.size > 0) {
@@ -3,20 +3,20 @@ import api from "api";
  const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
  sdk.auth('43435e1c-cab1-493f-a224-f51e4b97ce8d');
 
- async function initializeValidOptions() {
- const [SDModals, SDXLModals, samplers] = await Promise.all([
- sdModals(),
- sdxlModals(),
+ export const hercChatModels = ["v3" , "v3-32k" , "turbo" , "turbo-16k" , "gemini" , "llama3-70b" , "llama3-8b" , "mixtral-8x7b" , "gemma-7b" , "gemma2-9b"];
+ export const groqChatModels = ['gemma-7b-it', 'gemma2-9b-it', 'llama3-groq-70b-8192-tool-use-preview', 'llama3-groq-8b-8192-tool-use-preview', 'llama-3.1-70b-versatile', 'llama-3.1-8b-instant', 'llama-guard-3-8b', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768'];
+ export const rsnChatModels = ['bard', 'bing', 'codellama', 'gemini', 'llama', 'mixtral', 'openchat', 'gpt4'];
+ export const otherChatModel = ['apexai', 'facebook_ai', 'yi_34b', 'starChat'];
+ export const fresedgptModels = ['real-cartoon-xl-v6', 'flux-schnell', 'gpt-4o'];
+
+ async function initializeValidOptions() {
+ const [SDModels, SDXLModels, samplers] = await Promise.all([
+ sdModels(),
+ sdxlModels(),
  sampler()
  ]);
 
- const ApexAIModals = ['v4', 'gemmini-pro', 'gemini-flash', 'gemini', 'v3', 'v3-32k', 'turbo', 'turbo-16k', 'llama', 'apexChat', 'openChat', 'yi-ai', 'facebook-ai', 'starChat', 'mixtral'];
- const ApexChatModals = ['v4', 'gemini-v2', 'gemini', 'v3', 'v3-32k', 'turbo', 'turbo-16k', 'llama', 'apexChat', 'openChat', 'yi-ai', 'facebook-ai', 'starChat', 'mixtral', 'codellama', 'bing', 'bard'];
-
- const HercModals = [
- "v1",
- "v2",
- "v2-beta",
+ const HercImageModels = [
  "v3",
  "lexica",
  "prodia",
@@ -27,38 +27,39 @@ async function initializeValidOptions() {
  "shonin"
  ];
 
- const others = ['cartoon', 'flux'];
 
  return {
- validApexAI: ApexAIModals,
- validApexChat: ApexChatModals,
- validHercaiModals: HercModals,
- validProdiaModals: SDModals,
- otherImageModals: others,
+ validHercChatModels: hercChatModels,
+ validgroqChatModels: groqChatModels,
+ validRSNChatModels: rsnChatModels,
+ validHercaiModels: HercImageModels,
+ validProdiaModels: SDModels,
+ validotherChatModels: otherChatModel,
+ validfresedgptModels: fresedgptModels,
  validEnhancers: [
  "ESRGAN_4x", "Lanczos", "Nearest", "LDSR", "R-ESRGAN 4x+",
  "R-ESRGAN 4x+ Anime6B", "ScuNET GAN", "ScuNET PSNR", "SwinIR 4x"
  ],
  validSamplers: samplers,
- validSXDL: SDXLModals,
+ validSXDL: SDXLModels,
  validImgStyle: [
  "3d-model", "analog-film", "anime", "cinematic", "comic-book",
  "digital-art", "enhance", "isometric", "fantasy-art", "isometric",
  "line-art", "low-poly", "neon-punk", "origami", "photographic",
  "pixel-art", "texture", "craft-clay"
  ],
- allModals: [...SDModals, ...SDXLModals, ...HercModals, ...ApexAIModals, ...ApexChatModals, ...others]
+ allModels: [...SDModels, ...SDXLModels, ...HercImageModels, ...fresedgptModels, ...hercChatModels, ...otherChatModel, ...groqChatModels, ...rsnChatModels]
  };
  }
 
- export const validOptions = initializeValidOptions();
+ export const validateModels = initializeValidOptions();
 
- async function sdModals(): Promise<string[]> {
+ async function sdModels(): Promise<string[]> {
  const SDModals = await sdk.listModels();
  return SDModals.data;
  }
 
- async function sdxlModals(): Promise<string[]> {
+ async function sdxlModels(): Promise<string[]> {
  const SDXLModals = await sdk.listSdxlModels();
  return SDXLModals.data;
  }
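
A minimal sketch of how a consumer might use the renamed `validateModels` export and the new model-name arrays shown in the validOptions.ts hunk above. The import path from the package root is an assumption (lib/index.ts changes are not shown in this diff); the property names come from the returned object above.

```ts
// Sketch only, assuming these symbols are re-exported from the package root.
import { validateModels, groqChatModels, rsnChatModels } from "apexify.js";

async function describeModel(model: string): Promise<string> {
  // validateModels is a promise created once at module load (initializeValidOptions()).
  const options = await validateModels;

  if (groqChatModels.includes(model)) return "groq chat model";
  if (rsnChatModels.includes(model)) return "rsn chat model";
  if (options.validProdiaModels.includes(model)) return "prodia image model";
  if (options.allModels.includes(model)) return "other supported model";
  return "unknown model";
}
```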
@@ -0,0 +1,26 @@
+ import axios from 'axios';
+
+ export async function electronImagine({ ApiKey, prompt, modelName, count = 1 }: { ApiKey?: string, prompt: string, modelName: string, count?: number }) {
+ try {
+ const response = await axios.post(
+ 'https://api.electronhub.top/v1/images/generate',
+ {
+ model: modelName,
+ prompt: prompt,
+ n: count
+ },
+ {
+ headers: {
+ 'Authorization': `Bearer ${ApiKey || 'ek-nFO8tz6qiu5cJ31lwCfPZNNrxFZLsJYou6yx4X1FS2Jyr2dm0a'}`,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+
+ const imagesUrl = response.data;
+ return imagesUrl;
+ } catch (e: any) {
+ console.error('Error generating images:', e.response ? e.response.data : e.message);
+ throw e;
+ }
+ }
@@ -1,6 +1,6 @@
  import axios from 'axios';
 
- export async function cartoon(prompt: string) {
+ export async function cartoon(prompt: string, apiKey: string) {
  try {
  const response = await axios.post(
  'https://fresedgpt.space/v1/images/generations',
@@ -11,13 +11,13 @@ export async function cartoon(prompt: string) {
  },
  {
  headers: {
- 'Authorization': 'Bearer fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ',
+ 'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
  'Content-Type': 'application/json'
  }
  }
  );
 
- return response.data.data[0].url;
+ return response.data.data[0].url || null;
  } catch (e: any) {
  return null
  }
@@ -0,0 +1,31 @@
+ import axios from 'axios';
+ const API_URL = 'https://fresedgpt.space/v1/chat/completions';
+
+ export async function gpt4o({ prompt, ApiKey}: { prompt: string, ApiKey?: string}) {
+ try {
+ const API_KEY = ApiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ';
+ const response = await axios.post(
+ API_URL,
+ {
+ messages: [{ role: 'user', content: `${prompt}` }],
+ model: 'chatgpt-4o-latest',
+ stream: false
+ },
+ {
+ headers: {
+ 'Authorization': `Bearer ${API_KEY}`,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+
+ const responseData = response.data;
+ const assistantContent = responseData.choices[0]?.message?.content || 'No response content available';
+
+ return assistantContent || null;
+
+ } catch (error) {
+ console.error('Error creating chat completion:', error);
+ }
+ }
+
@@ -1,6 +1,6 @@
  import axios from 'axios';
 
- export async function flux(prompt: string) {
+ export async function flux(prompt: string, apiKey: string) {
  try {
  const response = await axios.post(
  'https://fresedgpt.space/v1/images/generations',
@@ -11,13 +11,13 @@ export async function flux(prompt: string) {
  },
  {
  headers: {
- 'Authorization': 'Bearer fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ',
+ 'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
  'Content-Type': 'application/json'
  }
  }
  );
 
- return response.data.data[0].url;
+ return response.data.data[0].url || null;
  } catch (e: any) {
  return null
  }
@@ -1,11 +1,11 @@
  import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
- import { converter } from "../../canvas/utils/general functions";
+ import { converter } from "../../../canvas/utils/general functions";
  import { connect } from "verse.db";
  import config from './config';
  import axios from "axios";
  import path from 'path';
  import fs from 'fs';
- import { readFile } from "../utils";
+ import { readFile } from "../../utils";
 
  export async function geminiFlash(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {
 
@@ -1,5 +1,5 @@
  import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
- import { converter } from "../../canvas/utils/general functions";
+ import { converter } from "../../../canvas/utils/general functions";
  import { GoogleAIFileManager } from "@google/generative-ai/server";
 
  import { connect } from "verse.db";
@@ -7,7 +7,7 @@ import config from './config';
  import axios from "axios";
  import path from 'path';
  import fs from 'fs';
- import { readFile } from "../utils";
+ import { readFile } from "../../utils";
 
  export async function geminiPro(message: { userId: string, serverName: string, serverId: string, channelName: string, attachment: any, db: boolean }, AI: { AiPersonality: string | null, userMsg: string, API_KEY: string | null }): Promise<any> {