apexify.js 4.5.31 → 4.5.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/dist/ai/ApexAI.d.ts.map +1 -1
  2. package/dist/ai/ApexAI.js +5 -12
  3. package/dist/ai/ApexAI.js.map +1 -1
  4. package/dist/ai/ApexModules.d.ts.map +1 -1
  5. package/dist/ai/ApexModules.js +26 -29
  6. package/dist/ai/ApexModules.js.map +1 -1
  7. package/dist/ai/functions/draw.d.ts.map +1 -1
  8. package/dist/ai/functions/draw.js +2 -10
  9. package/dist/ai/functions/draw.js.map +1 -1
  10. package/dist/ai/functions/validOptions.d.ts +6 -12
  11. package/dist/ai/functions/validOptions.d.ts.map +1 -1
  12. package/dist/ai/functions/validOptions.js +54 -32
  13. package/dist/ai/functions/validOptions.js.map +1 -1
  14. package/dist/ai/modals-chat/electronHub/chatmodels.d.ts +2 -2
  15. package/dist/ai/modals-chat/electronHub/chatmodels.d.ts.map +1 -1
  16. package/dist/ai/modals-chat/electronHub/chatmodels.js +26 -29
  17. package/dist/ai/modals-chat/electronHub/chatmodels.js.map +1 -1
  18. package/dist/ai/modals-chat/electronHub/imageModels.d.ts +1 -1
  19. package/dist/ai/modals-chat/electronHub/imageModels.d.ts.map +1 -1
  20. package/dist/ai/modals-chat/electronHub/imageModels.js +29 -24
  21. package/dist/ai/modals-chat/electronHub/imageModels.js.map +1 -1
  22. package/dist/ai/modals-chat/groq/chatgroq.d.ts.map +1 -1
  23. package/dist/ai/modals-chat/groq/chatgroq.js +10 -4
  24. package/dist/ai/modals-chat/groq/chatgroq.js.map +1 -1
  25. package/dist/ai/modals-chat/groq/imageAnalyzer.d.ts.map +1 -1
  26. package/dist/ai/modals-chat/groq/imageAnalyzer.js +0 -1
  27. package/dist/ai/modals-chat/groq/imageAnalyzer.js.map +1 -1
  28. package/dist/ai/modals-chat/groq/whisper.d.ts.map +1 -1
  29. package/dist/ai/modals-chat/groq/whisper.js +0 -3
  30. package/dist/ai/modals-chat/groq/whisper.js.map +1 -1
  31. package/dist/index.d.ts +4 -7
  32. package/dist/index.d.ts.map +1 -1
  33. package/lib/ai/ApexAI.ts +8 -11
  34. package/lib/ai/ApexModules.ts +42 -33
  35. package/lib/ai/functions/draw.ts +2 -8
  36. package/lib/ai/functions/validOptions.ts +83 -58
  37. package/lib/ai/modals-chat/electronHub/chatmodels.ts +40 -43
  38. package/lib/ai/modals-chat/electronHub/imageModels.ts +34 -32
  39. package/lib/ai/modals-chat/groq/chatgroq.ts +17 -7
  40. package/lib/ai/modals-chat/groq/imageAnalyzer.ts +0 -2
  41. package/lib/ai/modals-chat/groq/whisper.ts +0 -3
  42. package/package.json +6 -4
  43. package/lib/ai/modals-chat/freesedgpt/chat.ts +0 -31
  44. package/lib/ai/modals-chat/freesedgpt/fresedImagine.ts +0 -24
  45. package/lib/ai/modals-chat/rsn/rsnChat.ts +0 -74
package/lib/ai/functions/validOptions.ts CHANGED
@@ -1,76 +1,101 @@
  import api from "api";
+ import Groq from 'groq-sdk';
+ import OpenAI from "openai";

  const sdk = api("@prodia/v1.3.0#be019b2kls0gqss3");
- sdk.auth('43435e1c-cab1-493f-a224-f51e4b97ce8d');
+ sdk.auth("43435e1c-cab1-493f-a224-f51e4b97ce8d");

- export const hercChatModels = ["v3" , "v3-32k" , "turbo" , "turbo-16k" , "gemini" , "llama3-70b" , "llama3-8b" , "mixtral-8x7b" , "gemma-7b" , "gemma2-9b"];
- export const groqChatModels = ['gemma-7b-it', 'gemma2-9b-it', 'llama3-groq-70b-8192-tool-use-preview', 'llama3-groq-8b-8192-tool-use-preview', 'llama-3.1-70b-versatile', 'llama-3.1-8b-instant', 'llama-guard-3-8b', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768'];
- export const rsnChatModels = ['bard', 'bing', 'codellama', 'llama', 'mixtral', 'openchat', 'gpt4', 'dalle'];
- export const electronImagineModels = ['sdxl-lightning', 'kandinsky-3', 'dall-e-3', 'niji-v6', 'niji-v5', 'midjourney-v6.1', 'midjourney-v5', 'flux-dev', 'flux-pro'];
- export const electronChatModels = ['chatgpt-4o-latest', 'o1-mini', 'o1-preview', 'claude-3.5-sonnet-200k', 'claude-3-sonnet-200k', 'gpt-4-turbo', 'command-r-plus', 'command-r', 'gpt-4'];
- export const otherChatModel = ['apexai', 'facebook_ai', 'yi_34b', 'starChat'];
- export const fresedgptModels = ['real-cartoon-xl-v6', 'flux-schnell', 'gpt-4o'];
+ export const hercChatModels = [
+   "v3", "v3-32k", "turbo", "turbo-16k", "gemini",
+   "llama3-70b", "llama3-8b", "mixtral-8x7b",
+   "gemma-7b", "gemma2-9b"
+ ];

- async function initializeValidOptions() {
-   const [SDModels, SDXLModels, samplers] = await Promise.all([
-     sdModels(),
-     sdxlModels(),
-     sampler()
-   ]);
+ export const otherChatModel = ["apexai", "facebook_ai", "yi_34b", "starChat"];

-   const HercImageModels = [
-     "v3",
-     "lexica",
-     "prodia",
-     "prodia-v2",
-     "simurg",
-     "animefy",
-     "raava",
-     "shonin",
-   ];
+ const initializeGroqInstance = () => {
+   return new Groq({ apiKey: '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' });
+ };
+ export async function electronHubModels(apiKey?: string): Promise<string[]> {
+   const key = apiKey || "ek-am4V5cXBlrCvRx7Ts3TXmvki5CTP3HZst4bNAbyH0XSkzmOqG1";
+   const openai = new OpenAI({
+     apiKey: key,
+     baseURL: "https://api.electronhub.top/v1",
+   });
+
+   const models = await openai.models.list();
+   return models.data.map((model: any) => model.id);
+ }
+
+ export const groqChatModels = async (groqInstance: any) => {
+   const allModels = await groqInstance.models.list();
+
+   const filteredModels = allModels.data.filter((model: any) =>
+     !model.id.startsWith("whisper") &&
+     !model.id.startsWith("llava") &&
+     !model.id.startsWith("distil-whisper")
+   );
+
+   return filteredModels.map((model: any) => model.id);
+ };
+
+ async function initializeValidOptions() {
+   const groq = initializeGroqInstance();

+   const [SDModels, SDXLModels, samplers, groqModels, electronModels] = await Promise.all([
+     sdModels(),
+     sdxlModels(),
+     sampler(),
+     groqChatModels(groq),
+     electronHubModels()
+   ]);

-   return {
-     validHercChatModels: hercChatModels,
-     validgroqChatModels: groqChatModels,
-     validRSNChatModels: rsnChatModels,
-     validHercaiModels: HercImageModels,
-     validProdiaModels: SDModels,
-     validotherChatModels: otherChatModel,
-     validfresedgptModels: fresedgptModels,
-     valideElectronChatModels: electronChatModels,
-     valideElectronImagineModels: electronImagineModels,
-     validEnhancers: [
-       "ESRGAN_4x", "Lanczos", "Nearest", "LDSR", "R-ESRGAN 4x+",
-       "R-ESRGAN 4x+ Anime6B", "ScuNET GAN", "ScuNET PSNR", "SwinIR 4x"
-     ],
-     validSamplers: samplers,
-     validSXDL: SDXLModels,
-     validImgStyle: [
-       "3d-model", "analog-film", "anime", "cinematic", "comic-book",
-       "digital-art", "enhance", "isometric", "fantasy-art", "isometric",
-       "line-art", "low-poly", "neon-punk", "origami", "photographic",
-       "pixel-art", "texture", "craft-clay"
-     ],
-     allModels: [...SDModels, ...SDXLModels, ...HercImageModels, ...electronImagineModels, ...electronChatModels, ...fresedgptModels, ...hercChatModels, ...otherChatModel, ...groqChatModels, ...rsnChatModels]
-   };
+   const HercImageModels = [
+     "v3", "lexica", "prodia", "prodia-v2", "simurg",
+     "animefy", "raava", "shonin"
+   ];
+
+   return {
+     validHercChatModels: hercChatModels,
+     validgroqChatModels: groqModels,
+     validElectronModels: electronModels,
+     validHercaiModels: HercImageModels,
+     validProdiaModels: SDModels,
+     validotherChatModels: otherChatModel,
+     validEnhancers: [
+       "ESRGAN_4x", "Lanczos", "Nearest", "LDSR",
+       "R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B",
+       "ScuNET GAN", "ScuNET PSNR", "SwinIR 4x"
+     ],
+     validSamplers: samplers,
+     validSXDL: SDXLModels,
+     validImgStyle: [
+       "3d-model", "analog-film", "anime", "cinematic",
+       "comic-book", "digital-art", "enhance", "isometric",
+       "fantasy-art", "line-art", "low-poly", "neon-punk",
+       "origami", "photographic", "pixel-art", "texture",
+       "craft-clay"
+     ],
+     allModels: [
+       ...SDModels, ...SDXLModels, ...HercImageModels,
+       ...hercChatModels, ...otherChatModel, ...groqModels, ...electronModels
+     ]
+   };
  }

- export const validateModels = initializeValidOptions();
+ export const validateModels = async () => await initializeValidOptions();

  async function sdModels(): Promise<string[]> {
-   const SDModals = await sdk.listModels();
-   return SDModals.data;
+   const SDModals = await sdk.listModels();
+   return SDModals.data;
  }

  async function sdxlModels(): Promise<string[]> {
-   const SDXLModals = await sdk.listSdxlModels();
-   return SDXLModals.data;
+   const SDXLModals = await sdk.listSdxlModels();
+   return SDXLModals.data;
  }

-
  async function sampler(): Promise<string[]> {
-   const Samplers = await sdk.listSamplers();
-
-   return Samplers.data;
- }
+   const Samplers = await sdk.listSamplers();
+   return Samplers.data;
+ }
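
Note on the validateModels change above: 4.5.31 exported the already-invoked promise, while 4.5.40 exports an async function, so consumers now have to call it before awaiting. A minimal consumer-side sketch under that assumption (the import path and the listAllModels helper are illustrative, not part of the package):

    import { validateModels } from "./lib/ai/functions/validOptions"; // assumed path

    async function listAllModels(): Promise<string[]> {
      // 4.5.31: `const options = await validateModels;` (module-level promise)
      // 4.5.40: the export is a function and must be invoked
      const options = await validateModels();
      return options.allModels; // flattened list of every supported model id
    }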
package/lib/ai/modals-chat/electronHub/chatmodels.ts CHANGED
@@ -1,66 +1,63 @@
- import axios from 'axios';
- import { electronChatModels } from '../../functions/validOptions';
-
+ import OpenAI from "openai";
  export async function electronChat({
    ApiKey,
    prompt,
    modelName,
-   instruction
+   instruction,
  }: {
-   ApiKey?: string,
-   prompt: string,
-   modelName: string,
-   instruction?: string
+   ApiKey?: string;
+   prompt: string;
+   modelName: string;
+   instruction?: string;
  }) {
+

-   if (!electronChatModels.includes(modelName)) {
-     throw new Error(`Invalid model name. Valid models are: ${electronChatModels.join(', ')}`);
+   if (modelName === "claude-3-sonnet-200k") {
+     modelName = "claude-3-sonnet-20240229";
+   } else if (modelName === "claude-3.5-sonnet-200k") {
+     modelName = "claude-3.5-sonnet-20240620";
    }

-   const messages: { role: string; content: string }[] = [
-     { role: 'user', content: prompt }
-   ];
+   try {
+     const apiKey = ApiKey || "ek-am4V5cXBlrCvRx7Ts3TXmvki5CTP3HZst4bNAbyH0XSkzmOqG1";
+     const openai = new OpenAI({
+       apiKey: apiKey,
+       baseURL: "https://api.electronhub.top/v1",
+     });

-   if (instruction) {
-     messages.unshift({ role: 'system', content: instruction });
-   }
-
-   if (modelName === 'claude-3-sonnet-200k') {
-     modelName = 'claude-3-sonnet-20240229';
-   } else if (modelName === 'claude-3.5-sonnet-200k') {
-     modelName = 'claude-3.5-sonnet-20240620';
-   }
+     const models = await openai.models.list();
+
+     const modelExists = models.data.some((model: any) => model.id === modelName);

-   try {
-     const response = await axios.post(
-       'https://api.electronhub.top/v1/chat/completions',
-       {
-         model: modelName,
-         messages: messages
-       },
-       {
-         headers: {
-           'Authorization': `Bearer ${ApiKey || 'ek-nFO8tz6qiu5cJ31lwCfPZNNrxFZLsJYou6yx4X1FS2Jyr2dm0a'}`,
-           'Content-Type': 'application/json'
-         }
-       }
-     );
+     if (!modelExists) {
+       throw new Error('Invalid model name please check out Electron hub models for more info.')
+     }
+
+     const completion = await openai.chat.completions.create({
+       model: modelName,
+       messages: [
+         {"role": "system", "content": `${instruction}`},
+         {"role": "user", "content": `${prompt}`}
+       ]
+     });

-     console.log(response.data.choices[0].message.content);
-     return response.data.choices[0].message.content;
+     return completion.choices[0]?.message?.content;
    } catch (e: any) {
      if (e.response) {
        if (e.response.status === 429) {
-         throw new Error('Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key.');
+         throw new Error(
+           "Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key."
+         );
        } else if (e.response.status === 500) {
-         throw new Error('Server error. Please try again later.');
+         throw new Error("Server error. Please try again later.");
        } else {
-         console.error('Error generating response:', e.response.data);
+         console.error("Error generating response:", e.response.data);
          throw e;
        }
      } else {
-       console.error('Error generating response:', e.message);
+       console.error("Error generating response:", e.message);
        throw e;
      }
    }
- }
+ }
+
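
The rewritten electronChat above drops the hard-coded electronChatModels list and instead checks the requested model against openai.models.list() on Electron Hub's OpenAI-compatible endpoint. A rough usage sketch (prompt, model id, and instruction are placeholders; any id returned by electronHubModels() should pass the check):

    // hypothetical call; pass your own ApiKey rather than relying on the bundled default
    const reply = await electronChat({
      prompt: "Summarise the 4.5.40 changes in one sentence.",
      modelName: "gpt-4-turbo",      // must appear in the live model list
      instruction: "Answer briefly.",
    });
    console.log(reply);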
package/lib/ai/modals-chat/electronHub/imageModels.ts CHANGED
@@ -1,16 +1,4 @@
- import axios from 'axios';
-
- const validModels = [
-   'sdxl-lightning',
-   'kandinsky-3',
-   'dall-e-3',
-   'niji-v6',
-   'niji-v5',
-   'midjourney-v6.1',
-   'midjourney-v5',
-   'flux-dev',
-   'flux-pro'
- ];
+ import OpenAI from "openai";

  export async function electronImagine({
    ApiKey,
@@ -22,30 +10,44 @@ export async function electronImagine({
    modelName: string
  }) {

-   if (!validModels.includes(modelName)) {
-     throw new Error(`Invalid model name. Valid models are: ${validModels.join(', ')}`);
+   const apiKey = ApiKey || "ek-am4V5cXBlrCvRx7Ts3TXmvki5CTP3HZst4bNAbyH0XSkzmOqG1";
+   const openai = new OpenAI({
+     apiKey: apiKey,
+     baseURL: "https://api.electronhub.top/v1",
+   });
+
+   const models = await openai.models.list();
+
+   const modelExists = models.data.some((model: any) => model.id === modelName);
+
+   if (!modelExists) {
+     throw new Error('Invalid model name please check out Electron hub models for more info.')
    }

    try {
-     const response = await axios.post(
-       'https://api.electronhub.top/v1/images/generate',
-       {
-         model: modelName,
-         prompt: prompt,
-         n: 1
-       },
-       {
-         headers: {
-           'Authorization': `Bearer ${ApiKey || 'ek-nFO8tz6qiu5cJ31lwCfPZNNrxFZLsJYou6yx4X1FS2Jyr2dm0a'}`,
-           'Content-Type': 'application/json'
-         }
-       }
-     );
+     const response = await openai.images.generate({
+       model: modelName,
+       prompt: prompt,
+       n: 1,
+     });

      const imagesUrl = response.data;
-     return imagesUrl.data[0].url;
+     return imagesUrl[0].url;
    } catch (e: any) {
-     console.error('Error generating images:', e.response ? e.response.data : e.message);
-     throw e;
+     if (e.response) {
+       if (e.response.status === 429) {
+         throw new Error(
+           "Rate limit exceeded. Please join the server at https://discord.gg/83XcjD8vgW for an API key."
+         );
+       } else if (e.response.status === 500) {
+         throw new Error("Server error. Please try again later.");
+       } else {
+         console.error("Error generating response:", e.response.data);
+         throw e;
+       }
+     } else {
+       console.error("Error generating response:", e.message);
+       throw e;
+     }
    }
  }
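
electronImagine now follows the same pattern: the static validModels array is gone and the model id is validated against the live list before openai.images.generate is called. A rough usage sketch (model id and prompt are placeholders):

    // hypothetical call; resolves to the URL of the first generated image
    const imageUrl = await electronImagine({
      prompt: "a low-poly mountain landscape at sunset",
      modelName: "dall-e-3",          // must appear in the live model list
    });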
package/lib/ai/modals-chat/groq/chatgroq.ts CHANGED
@@ -7,24 +7,34 @@ interface groqOptions {
    instruction?: string;
  }

- // Define your own type for message parameters
  interface ChatCompletionMessageParam {
    role: 'system' | 'user' | 'assistant';
    content: string;
  }

- const validGroqApiNames = ['gemma-7b-it', 'llama3-70b-8192', 'llama3-8b-8192', 'mixtral-8x7b-32768'];
+ const getModels = async (groqInstance: any) => {
+   const allModels = await groqInstance.models.list();
+
+   const filteredModels = allModels.data.filter((model: any) =>
+     !model.id.startsWith('whisper') && !model.id.startsWith('llava') && !model.id.startsWith('distil-whisper')
+   );

- export async function chatGroq({ API_KEY, prompt, apiName, instruction }: groqOptions): Promise<string> {
-   if (!validGroqApiNames.includes(apiName)) {
-     return `Invalid API name: ${apiName}. Please provide one of the following: ${validGroqApiNames.join(', ')}`;
-   }
+   return filteredModels;
+ };

+ export async function chatGroq({ API_KEY, prompt, apiName, instruction }: groqOptions): Promise<string> {
    try {
      const groq = new Groq({
        apiKey: API_KEY || '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',
      });

+     const models = await getModels(groq);
+     const modelExists = models.some((model: any) => model.id === apiName);
+
+     if (!modelExists) {
+       throw new Error('Invalid model name provided. Please check the available models on Groq.');
+     }
+
      const messages: ChatCompletionMessageParam[] = [];

      if (instruction) {
@@ -65,4 +75,4 @@ export async function chatGroq({ API_KEY, prompt, apiName, instruction }: groqOp
      return 'Unknown error occurred.';
    }
  }
- }
+ }
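
chatGroq above likewise replaces the validGroqApiNames array with a live lookup: getModels lists the account's Groq models, filters out the whisper/llava/distil-whisper ids, and the requested apiName must be one of the remaining chat models. A rough usage sketch (model id and prompts are placeholders; pass API_KEY rather than relying on the bundled default):

    // hypothetical call
    const answer = await chatGroq({
      API_KEY: process.env.GROQ_API_KEY,
      prompt: "What does validateModels return?",
      apiName: "llama3-70b-8192",     // must survive the whisper/llava filter
      instruction: "Answer in two sentences.",
    });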
package/lib/ai/modals-chat/groq/imageAnalyzer.ts CHANGED
@@ -19,7 +19,6 @@ async function convertUrlToDataUrl(url: string): Promise<string> {
    }
  }

-
  export async function groqAnalyzer({ imgURL, ApiKey, prompt }: GroqAnalyzerOptions): Promise<string> {
    try {
      const groq = new Groq({
@@ -28,7 +27,6 @@ export async function groqAnalyzer({ imgURL, ApiKey, prompt }: GroqAnalyzerOptio

      const imageDataUrl = await convertUrlToDataUrl(imgURL);

-     // @ts-ignore
      const chatCompletion = await groq.chat.completions.create({
        messages: [
          {
package/lib/ai/modals-chat/groq/whisper.ts CHANGED
@@ -33,13 +33,11 @@ async function createReadableStream(filepathOrUrl: string): Promise<NodeJS.Reada
        }
      });

-     // @ts-ignore: Ignore type checking for this line
      response.pipe(file);

      file.on('finish', () => {
        file.close(() => {
          if (fileSize <= maxFileSizeBytes) {
-           // @ts-ignore: Ignore type checking for this line
            resolve(fs.createReadStream(tempFilePath));
          }
        });
@@ -55,7 +53,6 @@ async function createReadableStream(filepathOrUrl: string): Promise<NodeJS.Reada
      if (fileSize > maxFileSizeBytes) {
        return 'File size exceeds the limit (25MB)';
      }
-     // @ts-ignore: Ignore type checking for this line
      return fs.createReadStream(filepathOrUrl);
    }
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "apexify.js",
-   "version": "4.5.31",
+   "version": "4.5.40",
    "description": "Unlimited AI models and Canvas library. Supports ts & js (supports front/back end).",
    "main": "./dist/index.js",
    "author": "zenith-79",
@@ -89,6 +89,7 @@
    "canvas crop",
    "canvas-crop",
    "canvas-cropper",
+   "canvas",
    "custom crop",
    "cropper",
    "custom cropper",
@@ -208,16 +209,16 @@
    "@google/generative-ai": "^0.14.1",
    "@iamtraction/google-translate": "^2.0.1",
    "@napi-rs/canvas": "^0.1.53",
-   "api": "^6.1.2",
-   "compromise": "^14.14.0",
+   "api": "^5.0.8",
    "csv-parse": "^5.5.6",
    "discord.js": "^14.15.3",
    "fluent-ffmpeg": "^2.1.3",
    "gifencoder": "^2.0.1",
    "groq-sdk": "^0.5.0",
-   "hercai": "^12.3.2",
+   "hercai": "^12.4.0",
    "imgur": "^1.0.2",
    "jimp": "^0.22.12",
+   "openai": "^4.71.1",
    "pdf-parse": "^1.1.1",
    "sharp": "^0.33.4",
    "tesseract.js": "^5.1.0",
@@ -228,6 +229,7 @@
    "@types/gifencoder": "^2.0.3",
    "@types/node": "^22.5.4",
    "@types/pdf-parse": "^1.1.4",
+   "axios": "^1.7.7",
    "ts-node": "^10.9.2",
    "typescript": "^5.6.2"
  }
package/lib/ai/modals-chat/freesedgpt/chat.ts DELETED
@@ -1,31 +0,0 @@
- import axios from 'axios';
- const API_URL = 'https://fresedgpt.space/v1/chat/completions';
-
- export async function gpt4o({ prompt, ApiKey}: { prompt: string, ApiKey?: string}) {
-   try {
-     const API_KEY = ApiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ';
-     const response = await axios.post(
-       API_URL,
-       {
-         messages: [{ role: 'user', content: `${prompt}` }],
-         model: 'chatgpt-4o-latest',
-         stream: false
-       },
-       {
-         headers: {
-           'Authorization': `Bearer ${API_KEY}`,
-           'Content-Type': 'application/json'
-         }
-       }
-     );
-
-     const responseData = response.data;
-     const assistantContent = responseData.choices[0]?.message?.content || 'No response content available';
-
-     return assistantContent || null;
-
-   } catch (error) {
-     console.error('Error creating chat completion:', error);
-   }
- }
-
package/lib/ai/modals-chat/freesedgpt/fresedImagine.ts DELETED
@@ -1,24 +0,0 @@
- import axios from 'axios';
-
- export async function fresedImagine(prompt: string, modelName: string, apiKey: string) {
-   try {
-     const response = await axios.post(
-       'https://fresedgpt.space/v1/images/generations',
-       {
-         model: modelName,
-         prompt: prompt,
-         size: '1024x1024'
-       },
-       {
-         headers: {
-           'Authorization': `Bearer ${ apiKey || 'fresed-Dtm2TBDA9vXcaHFdrDL1apbF2fnOIQ' }`,
-           'Content-Type': 'application/json'
-         }
-       }
-     );
-
-     return response.data.data[0].url || null;
-   } catch (e: any) {
-     return null
-   }
- }
package/lib/ai/modals-chat/rsn/rsnChat.ts DELETED
@@ -1,74 +0,0 @@
- import axios, { AxiosResponse } from 'axios';
-
- interface aiOptions {
-   API_KEY?: string | null;
-   prompt: string;
-   apiName: string;
- }
-
- const validApiNames = [
-   'bard',
-   'bing',
-   'codellama',
-   'gemini',
-   'llama',
-   'mixtral',
-   'openchat',
-   'gpt4',
-   'dalle'
- ];
-
- export async function rsnAPI({ API_KEY, prompt, apiName }: aiOptions): Promise<string> {
-
-   if (!validApiNames.includes(apiName)) {
-     return `Invalid API name: ${apiName}. Please provide one of the following: ${validApiNames.join(', ')}`;
-   }
-
-   const apiKey: string = API_KEY || 'rsnai_SbLbFcwdT2h2KoYet2LS0F34';
-   const apiUrl = `https://api.rnilaweera.lk/api/v1/user/${apiName}`;
-
-   try {
-     const payload = { prompt };
-     const response: AxiosResponse = await axios.post(apiUrl, payload, {
-       headers: { Authorization: `Bearer ${apiKey}` },
-     });
-
-     if (apiName === 'dalle') return response.data?.image.url;
-
-     return response.data.message;
-   } catch (e: any) {
-     if (e.response && e.response.data.message === 'Invalid API key.') {
-       try {
-         const defaultResponse = await axios.post(apiUrl, { prompt }, {
-           headers: { Authorization: `Bearer rsnai_SbLbFcwdT2h2KoYet2LS0F34` },
-         });
-
-         if (apiName === 'dalle') return defaultResponse.data?.image.url;
-
-         return defaultResponse.data.message;
-       } catch (err) {
-         console.log(err);
-         return 'An error occurred while using the default API key.';
-       }
-     }
-     else if (e.response && e.response.status === 500) {
-       try {
-         const backupResponse = await axios.post(apiUrl, { prompt }, {
-           headers: { Authorization: `Bearer rsnai_lvIch9Z7apBHqfXYqOiXwzm7` },
-         });
-
-         if (apiName === 'dalle') return backupResponse.data?.image.url;
-
-         return backupResponse.data.message;
-       } catch (err) {
-         console.log(err);
-         return 'An error occurred while using the backup API key.';
-       }
-     } else {
-       console.log(e);
-       return 'The API is on cooldown for 50 seconds.';
-     }
-   }
- }
-
-