@razzium/piece-aimwork-backend 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,215 @@
1
+ import { encoding_for_model } from 'tiktoken';
2
+
3
// Root URL of the Aimwork backend API that this piece talks to.
export const baseUrl = 'https://rekto.dashboard.aimw.ai/api';
4
+
5
// Language choices offered in dropdown props: `value` is the ISO 639-1 code
// sent to the backend, `label` is the human-readable name shown to the user.
export const Languages = [
  { value: 'es', label: 'Spanish' },
  { value: 'it', label: 'Italian' },
  { value: 'en', label: 'English' },
  { value: 'pt', label: 'Portuguese' },
  { value: 'de', label: 'German' },
  { value: 'ja', label: 'Japanese' },
  { value: 'pl', label: 'Polish' },
  { value: 'ar', label: 'Arabic' },
  { value: 'af', label: 'Afrikaans' },
  { value: 'az', label: 'Azerbaijani' },
  { value: 'bg', label: 'Bulgarian' },
  { value: 'bs', label: 'Bosnian' },
  { value: 'ca', label: 'Catalan' },
  { value: 'cs', label: 'Czech' },
  { value: 'da', label: 'Danish' },
  { value: 'el', label: 'Greek' },
  { value: 'et', label: 'Estonian' },
  { value: 'fa', label: 'Persian' },
  { value: 'fi', label: 'Finnish' },
  { value: 'tl', label: 'Tagalog' },
  { value: 'fr', label: 'French' },
  { value: 'gl', label: 'Galician' },
  { value: 'he', label: 'Hebrew' },
  { value: 'hi', label: 'Hindi' },
  { value: 'hr', label: 'Croatian' },
  { value: 'hu', label: 'Hungarian' },
  { value: 'hy', label: 'Armenian' },
  { value: 'id', label: 'Indonesian' },
  { value: 'is', label: 'Icelandic' },
  { value: 'kk', label: 'Kazakh' },
  { value: 'kn', label: 'Kannada' },
  { value: 'ko', label: 'Korean' },
  { value: 'lt', label: 'Lithuanian' },
  { value: 'lv', label: 'Latvian' },
  // NOTE(review): ISO 639-1 for Maori is 'mi', not 'ma' — confirm the backend
  // actually expects 'ma' before changing it.
  { value: 'ma', label: 'Maori' },
  { value: 'mk', label: 'Macedonian' },
  { value: 'mr', label: 'Marathi' },
  { value: 'ms', label: 'Malay' },
  { value: 'ne', label: 'Nepali' },
  { value: 'nl', label: 'Dutch' },
  { value: 'no', label: 'Norwegian' },
  { value: 'ro', label: 'Romanian' },
  { value: 'ru', label: 'Russian' },
  { value: 'sk', label: 'Slovak' },
  { value: 'sl', label: 'Slovenian' },
  { value: 'sr', label: 'Serbian' },
  { value: 'sv', label: 'Swedish' },
  { value: 'sw', label: 'Swahili' },
  { value: 'ta', label: 'Tamil' },
  { value: 'th', label: 'Thai' },
  { value: 'tr', label: 'Turkish' },
  { value: 'uk', label: 'Ukrainian' },
  { value: 'ur', label: 'Urdu' },
  { value: 'vi', label: 'Vietnamese' },
  { value: 'zh', label: 'Chinese (Simplified)' },
  { value: 'cy', label: 'Welsh' },
  { value: 'be', label: 'Belarusian' },
];
64
+
65
// User-facing guidance surfaced when the OpenAI API answers with HTTP 429;
// the wording below treats it as a billing/quota problem on the user's account.
export const billingIssueMessage = `Error Occurred: 429 \n
1. Ensure that billing is enabled on your OpenAI platform. \n
2. Generate a new API key. \n
3. Attempt the process again. \n
For guidance, visit: https://beta.openai.com/account/billing`;
70
+
71
// User-facing guidance surfaced when the API answers with HTTP 401
// (invalid or revoked API key).
export const unauthorizedMessage = `Error Occurred: 401 \n
Ensure that your API key is valid. \n`;
73
+
74
+ export const sleep = (ms: number) => {
75
+ return new Promise((resolve) => setTimeout(resolve, ms));
76
+ };
77
+
78
+ export const streamToBuffer = (stream: any) => {
79
+ const chunks: any[] = [];
80
+ return new Promise((resolve, reject) => {
81
+ stream.on('data', (chunk: any) => chunks.push(Buffer.from(chunk)));
82
+ stream.on('error', (err: any) => reject(err));
83
+ stream.on('end', () => resolve(Buffer.concat(chunks)));
84
+ });
85
+ };
86
+
87
+ export const calculateTokensFromString = (string: string, model: string) => {
88
+ try {
89
+ const encoder = encoding_for_model(model as any);
90
+ const tokens = encoder.encode(string);
91
+ encoder.free();
92
+
93
+ return tokens.length;
94
+ } catch (e) {
95
+ // Model not supported by tiktoken, every 4 chars is a token
96
+ return Math.round(string.length / 4);
97
+ }
98
+ };
99
+
100
+ export const calculateMessagesTokenSize = async (
101
+ messages: any[],
102
+ model: string
103
+ ) => {
104
+ let tokenLength = 0;
105
+ await Promise.all(
106
+ messages.map((message: any) => {
107
+ return new Promise((resolve) => {
108
+ tokenLength += calculateTokensFromString(message.content, model);
109
+ resolve(tokenLength);
110
+ });
111
+ })
112
+ );
113
+
114
+ return tokenLength;
115
+ };
116
+
117
+ export const reduceContextSize = async (
118
+ messages: any[],
119
+ model: string,
120
+ maxTokens: number
121
+ ) => {
122
+ // TODO: Summarize context instead of cutoff
123
+ const cutoffSize = Math.round(messages.length * 0.1);
124
+ const cutoffMessages = messages.splice(cutoffSize, messages.length - 1);
125
+
126
+ if (
127
+ (await calculateMessagesTokenSize(cutoffMessages, model)) >
128
+ maxTokens / 1.5
129
+ ) {
130
+ reduceContextSize(cutoffMessages, model, maxTokens);
131
+ }
132
+
133
+ return cutoffMessages;
134
+ };
135
+
136
+ export const exceedsHistoryLimit = (
137
+ tokenLength: number,
138
+ model: string,
139
+ maxTokens: number
140
+ ) => {
141
+ if (
142
+ tokenLength >= tokenLimit / 1.1 ||
143
+ tokenLength >= (modelTokenLimit(model) - maxTokens) / 1.1
144
+ ) {
145
+ return true;
146
+ }
147
+
148
+ return false;
149
+ };
150
+
151
// Global cap on history tokens, applied regardless of the model's own
// context window (used by exceedsHistoryLimit).
export const tokenLimit = 32000;
152
+
153
+ export const modelTokenLimit = (model: string) => {
154
+ switch (model) {
155
+ case 'gpt-4-1106-preview':
156
+ return 128000;
157
+ case 'gpt-4-vision-preview':
158
+ return 128000;
159
+ case 'gpt-4':
160
+ return 8192;
161
+ case 'gpt-4-32k':
162
+ return 32768;
163
+ case 'gpt-4-0613':
164
+ return 8192;
165
+ case 'gpt-4-32k-0613':
166
+ return 32768;
167
+ case 'gpt-4-0314':
168
+ return 8192;
169
+ case 'gpt-4-32k-0314':
170
+ return 32768;
171
+ case 'gpt-3.5-turbo-1106':
172
+ return 16385;
173
+ case 'gpt-3.5-turbo':
174
+ return 4096;
175
+ case 'gpt-3.5-turbo-16k':
176
+ return 16385;
177
+ case 'gpt-3.5-turbo-instruct':
178
+ return 4096;
179
+ case 'gpt-3.5-turbo-0613':
180
+ return 4096;
181
+ case 'gpt-3.5-turbo-16k-0613':
182
+ return 16385;
183
+ case 'gpt-3.5-turbo-0301':
184
+ return 4096;
185
+ case 'text-davinci-003':
186
+ return 4096;
187
+ case 'text-davinci-002':
188
+ return 4096;
189
+ case 'code-davinci-002':
190
+ return 8001;
191
+ case 'text-moderation-latest':
192
+ return 32768;
193
+ case 'text-moderation-stable':
194
+ return 32768;
195
+ default:
196
+ return 2048;
197
+ }
198
+ };
199
+
200
// Model IDs that are not text-completion LLMs (realtime, speech, image — per
// their names), filtered out of the model dropdown in the Ask GPT action.
export const notLLMs = [
  'gpt-4o-realtime-preview-2024-10-01',
  'gpt-4o-realtime-preview',
  'babbage-002',
  'davinci-002',
  'tts-1-hd-1106',
  'whisper-1',
  'canary-whisper',
  'canary-tts',
  'tts-1',
  'tts-1-hd',
  'tts-1-1106',
  'dall-e-3',
  'dall-e-2',
];
package/tsconfig.json ADDED
@@ -0,0 +1,19 @@
1
+ {
2
+ "extends": "../../../../tsconfig.base.json",
3
+ "compilerOptions": {
4
+ "module": "commonjs",
5
+ "forceConsistentCasingInFileNames": true,
6
+ "strict": true,
7
+ "noImplicitOverride": true,
8
+ "noImplicitReturns": true,
9
+ "noFallthroughCasesInSwitch": true,
10
+ "noPropertyAccessFromIndexSignature": true
11
+ },
12
+ "files": [],
13
+ "include": [],
14
+ "references": [
15
+ {
16
+ "path": "./tsconfig.lib.json"
17
+ }
18
+ ]
19
+ }
@@ -0,0 +1,11 @@
1
+ {
2
+ "extends": "./tsconfig.json",
3
+ "compilerOptions": {
4
+ "module": "commonjs",
5
+ "outDir": "../../../../dist/out-tsc",
6
+ "declaration": true,
7
+ "types": ["node"]
8
+ },
9
+ "exclude": ["jest.config.ts", "src/**/*.spec.ts", "src/**/*.test.ts"],
10
+ "include": ["src/**/*.ts"]
11
+ }