@botpress/zai 1.0.1 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/adapter.js +2 -0
- package/dist/adapters/botpress-table.js +168 -0
- package/dist/adapters/memory.js +12 -0
- package/dist/index.d.ts +99 -98
- package/dist/index.js +9 -1873
- package/dist/models.js +387 -0
- package/dist/operations/check.js +141 -0
- package/dist/operations/constants.js +2 -0
- package/dist/operations/errors.js +15 -0
- package/dist/operations/extract.js +212 -0
- package/dist/operations/filter.js +179 -0
- package/dist/operations/label.js +237 -0
- package/dist/operations/rewrite.js +111 -0
- package/dist/operations/summarize.js +132 -0
- package/dist/operations/text.js +46 -0
- package/dist/utils.js +43 -0
- package/dist/zai.js +140 -0
- package/package.json +21 -19
- package/src/adapters/adapter.ts +35 -0
- package/src/adapters/botpress-table.ts +210 -0
- package/src/adapters/memory.ts +13 -0
- package/src/index.ts +11 -0
- package/src/models.ts +394 -0
- package/src/operations/__tests/botpress_docs.txt +26040 -0
- package/src/operations/__tests/cache.jsonl +101 -0
- package/src/operations/__tests/index.ts +87 -0
- package/src/operations/check.ts +187 -0
- package/src/operations/constants.ts +2 -0
- package/src/operations/errors.ts +9 -0
- package/src/operations/extract.ts +291 -0
- package/src/operations/filter.ts +231 -0
- package/src/operations/label.ts +332 -0
- package/src/operations/rewrite.ts +148 -0
- package/src/operations/summarize.ts +193 -0
- package/src/operations/text.ts +63 -0
- package/src/sdk-interfaces/llm/generateContent.ts +127 -0
- package/src/sdk-interfaces/llm/listLanguageModels.ts +19 -0
- package/src/utils.ts +61 -0
- package/src/zai.ts +193 -0
- package/tsconfig.json +2 -2
- package/dist/index.cjs +0 -1903
- package/dist/index.cjs.map +0 -1
- package/dist/index.d.cts +0 -916
- package/dist/index.js.map +0 -1
- package/tsup.config.ts +0 -16
- package/vitest.config.ts +0 -9
- package/vitest.setup.ts +0 -24
package/dist/models.js
ADDED
|
@@ -0,0 +1,387 @@
|
|
|
1
|
+
// Catalogue of the LLMs known to zai, keyed as `<integration>__<provider-model-id>`.
// `input.maxTokens` / `output.maxTokens` are the provider-advertised context and
// completion limits captured at release time of this package version.
const model = (id, name, integration, inputMaxTokens, outputMaxTokens) => ({
  id,
  name,
  integration,
  input: { maxTokens: inputMaxTokens },
  output: { maxTokens: outputMaxTokens }
});

export const Models = [
  model("anthropic__claude-3-haiku-20240307", "Claude 3 Haiku", "anthropic", 200000, 4096),
  model("anthropic__claude-3-5-sonnet-20240620", "Claude 3.5 Sonnet", "anthropic", 200000, 4096),
  model("cerebras__llama3.1-70b", "Llama 3.1 70B", "cerebras", 8192, 8192),
  model("cerebras__llama3.1-8b", "Llama 3.1 8B", "cerebras", 8192, 8192),
  model("fireworks-ai__accounts/fireworks/models/deepseek-coder-v2-instruct", "DeepSeek Coder V2 Instruct", "fireworks-ai", 131072, 131072),
  model("fireworks-ai__accounts/fireworks/models/deepseek-coder-v2-lite-instruct", "DeepSeek Coder V2 Lite", "fireworks-ai", 163840, 163840),
  model("fireworks-ai__accounts/fireworks/models/firellava-13b", "FireLLaVA-13B", "fireworks-ai", 4096, 4096),
  model("fireworks-ai__accounts/fireworks/models/firefunction-v2", "Firefunction V2", "fireworks-ai", 8192, 8192),
  model("fireworks-ai__accounts/fireworks/models/gemma2-9b-it", "Gemma 2 9B Instruct", "fireworks-ai", 8192, 8192),
  model("fireworks-ai__accounts/fireworks/models/llama-v3p1-405b-instruct", "Llama 3.1 405B Instruct", "fireworks-ai", 131072, 131072),
  model("fireworks-ai__accounts/fireworks/models/llama-v3p1-70b-instruct", "Llama 3.1 70B Instruct", "fireworks-ai", 131072, 131072),
  model("fireworks-ai__accounts/fireworks/models/llama-v3p1-8b-instruct", "Llama 3.1 8B Instruct", "fireworks-ai", 131072, 131072),
  model("fireworks-ai__accounts/fireworks/models/mixtral-8x22b-instruct", "Mixtral MoE 8x22B Instruct", "fireworks-ai", 65536, 65536),
  model("fireworks-ai__accounts/fireworks/models/mixtral-8x7b-instruct", "Mixtral MoE 8x7B Instruct", "fireworks-ai", 32768, 32768),
  model("fireworks-ai__accounts/fireworks/models/mythomax-l2-13b", "MythoMax L2 13b", "fireworks-ai", 4096, 4096),
  model("fireworks-ai__accounts/fireworks/models/qwen2-72b-instruct", "Qwen2 72b Instruct", "fireworks-ai", 32768, 32768),
  model("groq__gemma2-9b-it", "Gemma2 9B", "groq", 8192, 8192),
  model("groq__llama3-70b-8192", "LLaMA 3 70B", "groq", 8192, 8192),
  model("groq__llama3-8b-8192", "LLaMA 3 8B", "groq", 8192, 8192),
  model("groq__llama-3.1-70b-versatile", "LLaMA 3.1 70B", "groq", 128000, 8192),
  model("groq__llama-3.1-8b-instant", "LLaMA 3.1 8B", "groq", 128000, 8192),
  model("groq__llama-3.2-11b-vision-preview", "LLaMA 3.2 11B Vision", "groq", 128000, 8192),
  model("groq__llama-3.2-1b-preview", "LLaMA 3.2 1B", "groq", 128000, 8192),
  model("groq__llama-3.2-3b-preview", "LLaMA 3.2 3B", "groq", 128000, 8192),
  model("groq__llama-3.2-90b-vision-preview", "LLaMA 3.2 90B Vision", "groq", 128000, 8192),
  model("groq__llama-3.3-70b-versatile", "LLaMA 3.3 70B", "groq", 128000, 32768),
  model("groq__mixtral-8x7b-32768", "Mixtral 8x7B", "groq", 32768, 32768),
  model("openai__o1-2024-12-17", "GPT o1", "openai", 200000, 100000),
  model("openai__o1-mini-2024-09-12", "GPT o1-mini", "openai", 128000, 65536),
  // NOTE(review): an input limit of 128000 for gpt-3.5-turbo-0125 looks high
  // (OpenAI documents a 16k context window for this model) — kept as-is to
  // preserve the published data; confirm upstream before changing.
  model("openai__gpt-3.5-turbo-0125", "GPT-3.5 Turbo", "openai", 128000, 4096),
  model("openai__gpt-4-turbo-2024-04-09", "GPT-4 Turbo", "openai", 128000, 4096),
  model("openai__gpt-4o-2024-08-06", "GPT-4o (August 2024)", "openai", 128000, 16384),
  model("openai__gpt-4o-2024-05-13", "GPT-4o (May 2024)", "openai", 128000, 4096),
  model("openai__gpt-4o-2024-11-20", "GPT-4o (November 2024)", "openai", 128000, 16384),
  model("openai__gpt-4o-mini-2024-07-18", "GPT-4o Mini", "openai", 128000, 16384)
];
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
import { z } from "@bpinternal/zui";
|
|
2
|
+
import { fastHash, stringify, takeUntilTokens } from "../utils";
|
|
3
|
+
import { Zai } from "../zai";
|
|
4
|
+
import { PROMPT_INPUT_BUFFER } from "./constants";
|
|
5
|
+
// Schema for a single few-shot example fed to `zai.check`: an arbitrary
// input value, the expected boolean verdict, and an optional justification.
const Example = z.object({
  input: z.any(),
  check: z.boolean(),
  reason: z.string().optional()
});

// Options accepted by `zai.check`; `examples` falls back to an empty list
// when the caller provides none.
const Options = z.object({
  examples: z.array(Example).describe("Examples to check the condition against").default([])
});
|
|
13
|
+
// Sentinel tokens the model must emit. The "\u25A0" (black square) delimiter
// makes them easy to find in the completion and unlikely to occur naturally.
const MARKER = "\u25A0";
const TRUE = `${MARKER}TRUE${MARKER}`;
const FALSE = `${MARKER}FALSE${MARKER}`;
const END = `${MARKER}END${MARKER}`;
|
|
16
|
+
/**
 * Checks whether `condition` holds for `input` by asking the configured LLM
 * for a justified TRUE/FALSE verdict.
 *
 * Few-shot examples come from previously saved expert examples (fetched via
 * the adapter when a `taskId` is set) and/or `_options.examples`. An exact
 * cache hit on (taskType, taskId, input, condition) short-circuits the model
 * call. When a `taskId` is set, the verdict and its explanation are persisted
 * as a new example.
 *
 * @param {unknown} input - value to evaluate; stringified and truncated to fit the prompt budget
 * @param {string} condition - natural-language condition to test
 * @param {{ examples?: Array<{input: unknown, check: boolean, reason?: string}> }} [_options]
 * @returns {Promise<boolean>} the model's verdict
 * @throws {Error} if the model response contains neither the TRUE nor the FALSE sentinel
 */
Zai.prototype.check = async function (input, condition, _options) {
  const options = Options.parse(_options ?? {});
  const tokenizer = await this.getTokenizer();

  // Prompt budget: model context minus a fixed buffer, never below 100 tokens.
  const PROMPT_COMPONENT = Math.max(this.Model.input.maxTokens - PROMPT_INPUT_BUFFER, 100);
  const taskId = this.taskId;
  const taskType = "zai.check";

  // At most 50% of the budget goes to the input and 20% to the condition;
  // whatever remains (EXAMPLES_TOKENS below) is spent on few-shot examples.
  const PROMPT_TOKENS = {
    INPUT: Math.floor(0.5 * PROMPT_COMPONENT),
    CONDITION: Math.floor(0.2 * PROMPT_COMPONENT)
  };

  const inputAsString = tokenizer.truncate(stringify(input), PROMPT_TOKENS.INPUT);
  condition = tokenizer.truncate(condition, PROMPT_TOKENS.CONDITION);

  const EXAMPLES_TOKENS = PROMPT_COMPONENT - tokenizer.count(inputAsString) - tokenizer.count(condition);

  // Cache key identifying this exact question for this task.
  const Key = fastHash(
    JSON.stringify({
      taskType,
      taskId,
      input: inputAsString,
      condition
    })
  );

  const examples = taskId
    ? await this.adapter.getExamples({
        input: inputAsString,
        taskType,
        taskId
      })
    : [];

  // Identical question already answered: reuse the stored verdict.
  const exactMatch = examples.find((x) => x.key === Key);
  if (exactMatch) {
    return exactMatch.output;
  }

  // Built-in examples, used only when no expert/user examples are available.
  const defaultExamples = [
    { input: "50 Cent", check: true, reason: "50 Cent is widely recognized as a public personality." },
    {
      input: ["apple", "banana", "carrot", "house"],
      check: false,
      reason: "The list contains a house, which is not a fruit. Also, the list contains a carrot, which is a vegetable."
    }
  ];

  // Stored expert examples first, then caller-provided ones.
  const userExamples = [
    ...examples.map((e) => ({ input: e.input, check: e.output, reason: e.explanation })),
    ...options.examples
  ];

  let exampleId = 1;

  // Renders an input section; numbers it as an "Expert Example" when user
  // examples exist (exampleId is advanced as a side effect).
  const formatInput = (input2, condition2) => {
    const header = userExamples.length ? `Expert Example #${exampleId++}` : `Example of condition: "${condition2}"`;
    return `
${header}
<|start_input|>
${input2.trim()}
<|end_input|>
`.trim();
  };

  // Renders the expected assistant answer for an example.
  const formatOutput = (answer2, justification) => {
    return `
Analysis: ${justification}
Final Answer: ${answer2 ? TRUE : FALSE}
${END}
`.trim();
  };

  // One example becomes a user/assistant message pair.
  const formatExample = (example) => [
    { type: "text", content: formatInput(stringify(example.input ?? null), condition), role: "user" },
    {
      type: "text",
      content: formatOutput(example.check, example.reason ?? ""),
      role: "assistant"
    }
  ];

  // Keep as many examples as the remaining token budget allows.
  const allExamples = takeUntilTokens(
    userExamples.length ? userExamples : defaultExamples,
    EXAMPLES_TOKENS,
    (el) => tokenizer.count(stringify(el.input)) + tokenizer.count(el.reason ?? "")
  ).flatMap(formatExample);

  const specialInstructions = userExamples.length ? `
- You have been provided with examples from previous experts. Make sure to read them carefully before making your decision.
- Make sure to refer to the examples provided by the experts to justify your decision (when applicable).
- When in doubt, ground your decision on the examples provided by the experts instead of your own intuition.
- When no example is similar to the input, make sure to provide a clear justification for your decision while inferring the decision-making process from the examples provided by the experts.
`.trim() : "";

  const output = await this.callModel({
    systemPrompt: `
Check if the following condition is true or false for the given input. Before answering, make sure to read the input and the condition carefully.
Justify your answer, then answer with either ${TRUE} or ${FALSE} at the very end, then add ${END} to finish the response.
IMPORTANT: Make sure to answer with either ${TRUE} or ${FALSE} at the end of your response, but NOT both.
---
Expert Examples (#1 to #${exampleId - 1}):
${specialInstructions}
`.trim(),
    stopSequences: [END],
    messages: [
      ...allExamples,
      {
        type: "text",
        content: `
Considering the below input and above examples, is the following condition true or false?
${formatInput(inputAsString, condition)}
In your "Analysis", please refer to the Expert Examples # to justify your decision.`.trim(),
        role: "user"
      }
    ]
  });

  // FIX: `choices` may be empty (or the first choice may lack `content`).
  // Fall back to an empty string so the sentinel checks below raise the
  // descriptive error instead of a TypeError on `undefined.includes(...)`.
  const answer = output.choices[0]?.content ?? "";
  const hasTrue = answer.includes(TRUE);
  const hasFalse = answer.includes(FALSE);

  if (!hasTrue && !hasFalse) {
    throw new Error(`The model did not return a valid answer. The response was: ${answer}`);
  }

  let finalAnswer;
  if (hasTrue && hasFalse) {
    // Both sentinels present despite instructions: trust the one emitted last.
    finalAnswer = answer.lastIndexOf(TRUE) > answer.lastIndexOf(FALSE);
  } else {
    finalAnswer = hasTrue;
  }

  if (taskId) {
    // Persist the verdict so future identical questions hit the cache and the
    // explanation can serve as an expert example for similar inputs.
    await this.adapter.saveExample({
      key: Key,
      taskType,
      taskId,
      input: inputAsString,
      instructions: condition,
      metadata: output.metadata,
      output: finalAnswer,
      explanation: answer.replace(TRUE, "").replace(FALSE, "").replace(END, "").replace("Final Answer:", "").trim()
    });
  }

  return finalAnswer;
};
|