@botpress/zai 1.0.1 → 1.1.0
This diff shows the published content of two publicly available package versions from a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that public registry.
- package/dist/adapters/adapter.js +2 -0
- package/dist/adapters/botpress-table.js +168 -0
- package/dist/adapters/memory.js +12 -0
- package/dist/index.d.ts +99 -98
- package/dist/index.js +9 -1873
- package/dist/models.js +387 -0
- package/dist/operations/check.js +141 -0
- package/dist/operations/constants.js +2 -0
- package/dist/operations/errors.js +15 -0
- package/dist/operations/extract.js +212 -0
- package/dist/operations/filter.js +179 -0
- package/dist/operations/label.js +237 -0
- package/dist/operations/rewrite.js +111 -0
- package/dist/operations/summarize.js +132 -0
- package/dist/operations/text.js +46 -0
- package/dist/utils.js +43 -0
- package/dist/zai.js +140 -0
- package/package.json +21 -19
- package/src/adapters/adapter.ts +35 -0
- package/src/adapters/botpress-table.ts +210 -0
- package/src/adapters/memory.ts +13 -0
- package/src/index.ts +11 -0
- package/src/models.ts +394 -0
- package/src/operations/__tests/botpress_docs.txt +26040 -0
- package/src/operations/__tests/cache.jsonl +101 -0
- package/src/operations/__tests/index.ts +87 -0
- package/src/operations/check.ts +187 -0
- package/src/operations/constants.ts +2 -0
- package/src/operations/errors.ts +9 -0
- package/src/operations/extract.ts +291 -0
- package/src/operations/filter.ts +231 -0
- package/src/operations/label.ts +332 -0
- package/src/operations/rewrite.ts +148 -0
- package/src/operations/summarize.ts +193 -0
- package/src/operations/text.ts +63 -0
- package/src/sdk-interfaces/llm/generateContent.ts +127 -0
- package/src/sdk-interfaces/llm/listLanguageModels.ts +19 -0
- package/src/utils.ts +61 -0
- package/src/zai.ts +193 -0
- package/tsconfig.json +2 -2
- package/dist/index.cjs +0 -1903
- package/dist/index.cjs.map +0 -1
- package/dist/index.d.cts +0 -916
- package/dist/index.js.map +0 -1
- package/tsup.config.ts +0 -16
- package/vitest.config.ts +0 -9
- package/vitest.setup.ts +0 -24
package/dist/operations/rewrite.js
ADDED
@@ -0,0 +1,111 @@
+import { z } from "@bpinternal/zui";
+import { fastHash, stringify, takeUntilTokens } from "../utils";
+import { Zai } from "../zai";
+import { PROMPT_INPUT_BUFFER } from "./constants";
+const Example = z.object({
+  input: z.string(),
+  output: z.string()
+});
+const Options = z.object({
+  examples: z.array(Example).default([]),
+  length: z.number().min(10).max(16e3).optional().describe("The maximum number of tokens to generate")
+});
+const START = "\u25A0START\u25A0";
+const END = "\u25A0END\u25A0";
+Zai.prototype.rewrite = async function(original, prompt, _options) {
+  const options = Options.parse(_options ?? {});
+  const tokenizer = await this.getTokenizer();
+  const taskId = this.taskId;
+  const taskType = "zai.rewrite";
+  const INPUT_COMPONENT_SIZE = Math.max(100, (this.Model.input.maxTokens - PROMPT_INPUT_BUFFER) / 2);
+  prompt = tokenizer.truncate(prompt, INPUT_COMPONENT_SIZE);
+  const inputSize = tokenizer.count(original) + tokenizer.count(prompt);
+  const maxInputSize = this.Model.input.maxTokens - tokenizer.count(prompt) - PROMPT_INPUT_BUFFER;
+  if (inputSize > maxInputSize) {
+    throw new Error(
+      `The input size is ${inputSize} tokens long, which is more than the maximum of ${maxInputSize} tokens for this model (${this.Model.name} = ${this.Model.input.maxTokens} tokens)`
+    );
+  }
+  const instructions = [];
+  const originalSize = tokenizer.count(original);
+  if (options.length && originalSize > options.length) {
+    instructions.push(`The original text is ${originalSize} tokens long \u2013 it should be less than ${options.length}`);
+    instructions.push(
+      `The text must be standalone and complete in less than ${options.length} tokens, so it has to be shortened to fit the length as well`
+    );
+  }
+  const format = (before, prompt2) => {
+    return `
+Prompt: ${prompt2}
+
+${START}
+${before}
+${END}
+`.trim();
+  };
+  const Key = fastHash(
+    stringify({
+      taskId,
+      taskType,
+      input: original,
+      prompt
+    })
+  );
+  const formatExample = ({ input, output: output2, instructions: instructions2 }) => {
+    return [
+      { type: "text", role: "user", content: format(input, instructions2 || prompt) },
+      { type: "text", role: "assistant", content: `${START}${output2}${END}` }
+    ];
+  };
+  const defaultExamples = [
+    { input: "Hello, how are you?", output: "Bonjour, comment \xE7a va?", instructions: "translate to French" },
+    { input: "1\n2\n3", output: "3\n2\n1", instructions: "reverse the order" }
+  ];
+  const tableExamples = taskId ? await this.adapter.getExamples({
+    input: original,
+    taskId,
+    taskType
+  }) : [];
+  const exactMatch = tableExamples.find((x) => x.key === Key);
+  if (exactMatch) {
+    return exactMatch.output;
+  }
+  const savedExamples = [
+    ...tableExamples.map((x) => ({ input: x.input, output: x.output })),
+    ...options.examples
+  ];
+  const REMAINING_TOKENS = this.Model.input.maxTokens - tokenizer.count(prompt) - PROMPT_INPUT_BUFFER;
+  const examples = takeUntilTokens(
+    savedExamples.length ? savedExamples : defaultExamples,
+    REMAINING_TOKENS,
+    (el) => tokenizer.count(stringify(el.input)) + tokenizer.count(stringify(el.output))
+  ).map(formatExample).flat();
+  const output = await this.callModel({
+    systemPrompt: `
+Rewrite the text between the ${START} and ${END} tags to match the user prompt.
+${instructions.map((x) => `\u2022 ${x}`).join("\n")}
+`.trim(),
+    messages: [...examples, { type: "text", content: format(original, prompt), role: "user" }],
+    maxTokens: options.length,
+    stopSequences: [END]
+  });
+  let result = output.choices[0]?.content;
+  if (result.includes(START)) {
+    result = result.slice(result.indexOf(START) + START.length);
+  }
+  if (result.includes(END)) {
+    result = result.slice(0, result.indexOf(END));
+  }
+  if (taskId) {
+    await this.adapter.saveExample({
+      key: Key,
+      metadata: output.metadata,
+      instructions: prompt,
+      input: original,
+      output: result,
+      taskType,
+      taskId
+    });
+  }
+  return result;
+};
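
Since rewrite is attached to Zai.prototype, it is called as a method on a configured instance (the class itself appears in package/dist/zai.js below). A minimal usage sketch in TypeScript, assuming a @botpress/client instance with placeholder credentials:

import { Client } from '@botpress/client'
import { Zai } from '@botpress/zai'

// Placeholder credentials: a real bot ID and token are required.
const zai = new Zai({ client: new Client({ botId: '...', token: '...' }) })

// `examples` and `length` follow the Options schema above;
// `length` caps the number of generated tokens.
const formal = await zai.rewrite('hey, can we sync tmrw?', 'rewrite as formal business English', {
  examples: [{ input: 'sup team', output: 'Greetings team,' }],
  length: 200
})
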
package/dist/operations/summarize.js
ADDED
@@ -0,0 +1,132 @@
+import { z } from "@bpinternal/zui";
+import { chunk } from "lodash-es";
+import { Zai } from "../zai";
+import { PROMPT_INPUT_BUFFER, PROMPT_OUTPUT_BUFFER } from "./constants";
+const Options = z.object({
+  prompt: z.string().describe("What should the text be summarized to?").default("New information, concepts and ideas that are deemed important"),
+  format: z.string().describe("How to format the example text").default(
+    "A normal text with multiple sentences and paragraphs. Use markdown to format the text into sections. Use headings, lists, and other markdown features to make the text more readable. Do not include links, images, or other non-text elements."
+  ),
+  length: z.number().min(10).max(1e5).describe("The length of the summary in tokens").default(250),
+  intermediateFactor: z.number().min(1).max(10).describe("How many times longer (than final length) are the intermediate summaries generated").default(4),
+  maxIterations: z.number().min(1).default(100),
+  sliding: z.object({
+    window: z.number().min(10).max(1e5),
+    overlap: z.number().min(0).max(1e5)
+  }).describe("Sliding window options").default({ window: 5e4, overlap: 250 })
+});
+const START = "\u25A0START\u25A0";
+const END = "\u25A0END\u25A0";
+Zai.prototype.summarize = async function(original, _options) {
+  const options = Options.parse(_options ?? {});
+  const tokenizer = await this.getTokenizer();
+  const INPUT_COMPONENT_SIZE = Math.max(100, (this.Model.input.maxTokens - PROMPT_INPUT_BUFFER) / 4);
+  options.prompt = tokenizer.truncate(options.prompt, INPUT_COMPONENT_SIZE);
+  options.format = tokenizer.truncate(options.format, INPUT_COMPONENT_SIZE);
+  const maxOutputSize = this.Model.output.maxTokens - PROMPT_OUTPUT_BUFFER;
+  if (options.length > maxOutputSize) {
+    throw new Error(
+      `The desired output length is ${maxOutputSize} tokens long, which is more than the maximum of ${this.Model.output.maxTokens} tokens for this model (${this.Model.name})`
+    );
+  }
+  options.sliding.window = Math.min(options.sliding.window, this.Model.input.maxTokens - PROMPT_INPUT_BUFFER);
+  options.sliding.overlap = Math.min(options.sliding.overlap, options.sliding.window - 3 * options.sliding.overlap);
+  const format = (summary, newText) => {
+    return `
+${START}
+${summary.length ? summary : "<summary still empty>"}
+${END}
+
+Please amend the summary between the ${START} and ${END} tags to accurately reflect the prompt and the additional text below.
+
+<|start_new_information|>
+${newText}
+<|new_information|>`.trim();
+  };
+  const tokens = tokenizer.split(original);
+  const parts = Math.ceil(tokens.length / (options.sliding.window - options.sliding.overlap));
+  let iteration = 0;
+  const N = 2;
+  const useMergeSort = parts >= Math.pow(2, N);
+  const chunkSize = Math.ceil(tokens.length / (parts * N));
+  if (useMergeSort) {
+    const chunks = chunk(tokens, chunkSize).map((x) => x.join(""));
+    const allSummaries = await Promise.all(chunks.map((chunk2) => this.summarize(chunk2, options)));
+    return this.summarize(allSummaries.join("\n\n============\n\n"), options);
+  }
+  const summaries = [];
+  let currentSummary = "";
+  for (let i = 0; i < tokens.length; i += options.sliding.window) {
+    const from = Math.max(0, i - options.sliding.overlap);
+    const to = Math.min(tokens.length, i + options.sliding.window + options.sliding.overlap);
+    const isFirst = i === 0;
+    const isLast = to >= tokens.length;
+    const slice = tokens.slice(from, to).join("");
+    if (iteration++ >= options.maxIterations) {
+      break;
+    }
+    const instructions = [
+      `At each step, you will receive a part of the text to summarize. Make sure to reply with the new summary in the tags ${START} and ${END}.`,
+      "Summarize the text and make sure that the main points are included.",
+      "Ignore any unnecessary details and focus on the main points.",
+      "Use short and concise sentences to increase readability and information density.",
+      "When looking at the new information, focus on: " + options.prompt
+    ];
+    if (isFirst) {
+      instructions.push(
+        "The current summary is empty. You need to generate a summary that covers the main points of the text."
+      );
+    }
+    let generationLength = options.length;
+    if (!isLast) {
+      generationLength = Math.min(
+        tokenizer.count(currentSummary) + options.length * options.intermediateFactor,
+        maxOutputSize
+      );
+      instructions.push(
+        "You need to amend the summary to include the new information. Make sure the summary is complete and covers all the main points."
+      );
+      instructions.push(`The current summary is ${currentSummary.length} tokens long.`);
+      instructions.push(`You can amend the summary to be up to ${generationLength} tokens long.`);
+    }
+    if (isLast) {
+      instructions.push(
+        "This is the last part you will have to summarize. Make sure the summary is complete and covers all the main points."
+      );
+      instructions.push(
+        `The current summary is ${currentSummary.length} tokens long. You need to make sure it is ${options.length} tokens or less.`
+      );
+      if (currentSummary.length > options.length) {
+        instructions.push(
+          `The current summary is already too long, so you need to shorten it to ${options.length} tokens while also including the new information.`
+        );
+      }
+    }
+    const output = await this.callModel({
+      systemPrompt: `
+You are summarizing a text. The text is split into ${parts} parts, and you are currently working on part ${iteration}.
+At every step, you will receive the current summary and a new part of the text. You need to amend the summary to include the new information (if needed).
+The summary needs to cover the main points of the text and must be concise.
+
+IMPORTANT INSTRUCTIONS:
+${instructions.map((x) => `- ${x.trim()}`).join("\n")}
+
+FORMAT OF THE SUMMARY:
+${options.format}
+`.trim(),
+      messages: [{ type: "text", content: format(currentSummary, slice), role: "user" }],
+      maxTokens: generationLength,
+      stopSequences: [END]
+    });
+    let result = output?.choices[0]?.content;
+    if (result.includes(START)) {
+      result = result.slice(result.indexOf(START) + START.length);
+    }
+    if (result.includes("\u25A0")) {
+      result = result.slice(0, result.indexOf("\u25A0"));
+    }
+    summaries.push(result);
+    currentSummary = result;
+  }
+  return currentSummary.trim();
+};
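
Worth noting from the code above: when the tokenized input spans at least 2^2 sliding windows, summarize recurses (each chunk is summarized in parallel, then the joined chunk summaries are summarized again); otherwise a single running summary is amended window by window. A usage sketch, reusing the zai instance from the previous example and an assumed longDocument string:

// `length` is the final token budget; intermediate summaries may grow up to
// `intermediateFactor` times that budget before the last pass compresses them.
const summary = await zai.summarize(longDocument, {
  prompt: 'decisions, owners and deadlines',
  length: 250,
  intermediateFactor: 4
})
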
package/dist/operations/text.js
ADDED
@@ -0,0 +1,46 @@
+import { z } from "@bpinternal/zui";
+import { clamp } from "lodash-es";
+import { Zai } from "../zai";
+import { PROMPT_INPUT_BUFFER, PROMPT_OUTPUT_BUFFER } from "./constants";
+const Options = z.object({
+  length: z.number().min(1).max(1e5).optional().describe("The maximum number of tokens to generate")
+});
+Zai.prototype.text = async function(prompt, _options) {
+  const options = Options.parse(_options ?? {});
+  const tokenizer = await this.getTokenizer();
+  prompt = tokenizer.truncate(prompt, Math.max(this.Model.input.maxTokens - PROMPT_INPUT_BUFFER, 100));
+  if (options.length) {
+    options.length = Math.min(this.Model.output.maxTokens - PROMPT_OUTPUT_BUFFER, options.length);
+  }
+  const instructions = [];
+  let chart = "";
+  if (options.length) {
+    const length = clamp(options.length * 0.75, 5, options.length);
+    instructions.push(`IMPORTANT: Length constraint: ${length} tokens/words`);
+    instructions.push(`The text must be standalone and complete in less than ${length} tokens/words`);
+  }
+  if (options.length && options.length <= 500) {
+    chart = `
+| Tokens | Text Length (approximate) |
+|-------------|--------------------------------------|
+| < 5 tokens | 1-3 words |
+| 5-10 tokens | 3-6 words |
+| 10-20 tokens| 6-15 words |
+| 20-50 tokens| A short sentence (15-30 words) |
+| 50-100 tokens| A medium sentence (30-70 words) |
+| 100-200 tokens| A short paragraph (70-150 words) |
+| 200-300 tokens| A medium paragraph (150-200 words) |
+| 300-500 tokens| A long paragraph (200-300 words) |`.trim();
+  }
+  const output = await this.callModel({
+    systemPrompt: `
+Generate a text that fulfills the user prompt below. Answer directly to the prompt, without any acknowledgements or fluff. Also, make sure the text is standalone and complete.
+${instructions.map((x) => `- ${x}`).join("\n")}
+${chart}
+`.trim(),
+    temperature: 0.7,
+    messages: [{ type: "text", content: prompt, role: "user" }],
+    maxTokens: options.length
+  });
+  return output?.choices?.[0]?.content;
+};
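
text is the simplest operation: one prompt in, free-form text out. The code asks the model for roughly 75% of the requested length (clamped to at least 5 tokens) and, for budgets of 500 tokens or less, embeds the token-to-words chart in the system prompt. A usage sketch, reusing the zai instance from the first example:

// The optional `length` caps maxTokens after being clamped to the
// model's output budget.
const tagline = await zai.text('Write a one-line tagline for a coffee shop', { length: 20 })
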
package/dist/utils.js
ADDED
@@ -0,0 +1,43 @@
+import { z } from "@bpinternal/zui";
+export const stringify = (input, beautify = true) => {
+  return typeof input === "string" && !!input.length ? input : input ? JSON.stringify(input, beautify ? null : void 0, beautify ? 2 : void 0) : "<input is null, false, undefined or empty>";
+};
+export const BotpressClient = z.custom(
+  (value) => typeof value === "object" && value !== null && "callAction" in value && typeof value.callAction === "function",
+  {
+    message: "Invalid Botpress Client. Make sure to pass an instance of @botpress/client"
+  }
+);
+export function fastHash(str) {
+  let hash = 0;
+  for (let i = 0; i < str.length; i++) {
+    hash = (hash << 5) - hash + str.charCodeAt(i);
+    hash |= 0;
+  }
+  return (hash >>> 0).toString(16);
+}
+export const takeUntilTokens = (arr, tokens, count) => {
+  const result = [];
+  let total = 0;
+  for (const value of arr) {
+    const valueTokens = count(value);
+    if (total + valueTokens > tokens) {
+      break;
+    }
+    total += valueTokens;
+    result.push(value);
+  }
+  return result;
+};
+export const GenerationMetadata = z.object({
+  model: z.string(),
+  cost: z.object({
+    input: z.number(),
+    output: z.number()
+  }).describe("Cost in $USD"),
+  latency: z.number().describe("Latency in milliseconds"),
+  tokens: z.object({
+    input: z.number(),
+    output: z.number()
+  }).describe("Number of tokens used")
+});
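
Two helpers deserve a note: fastHash is a 32-bit string hash rendered as lowercase hex (used by the operations above as the active-learning cache key), and takeUntilTokens greedily keeps items until the token budget would be exceeded. A small illustration (the hash value shown is illustrative, not the actual output):

import { fastHash, takeUntilTokens } from './utils'

fastHash('zai.rewrite|hello') // deterministic short hex string, e.g. 'a1b2c3d4' (value illustrative)

// Budget of 6 "tokens" with a character-count tokenizer: 'aa' (2) and 'bbbb' (4)
// fit exactly; 'cc' would push the total to 8, so iteration stops before it.
takeUntilTokens(['aa', 'bbbb', 'cc'], 6, (s) => s.length) // => ['aa', 'bbbb']
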
package/dist/zai.js
ADDED
@@ -0,0 +1,140 @@
+import { z } from "@bpinternal/zui";
+import { getWasmTokenizer } from "@botpress/wasm";
+import { TableAdapter } from "./adapters/botpress-table";
+import { MemoryAdapter } from "./adapters/memory";
+import { Models } from "./models";
+import { BotpressClient } from "./utils";
+const ActiveLearning = z.object({
+  enable: z.boolean().describe("Whether to enable active learning").default(false),
+  tableName: z.string().regex(
+    /^[A-Za-z0-9_/-]{1,100}Table$/,
+    "Namespace must be alphanumeric and contain only letters, numbers, underscores, hyphens and slashes"
+  ).describe("The name of the table to store active learning tasks").default("ActiveLearningTable"),
+  taskId: z.string().regex(
+    /^[A-Za-z0-9_/-]{1,100}$/,
+    "Namespace must be alphanumeric and contain only letters, numbers, underscores, hyphens and slashes"
+  ).describe("The ID of the task").default("default")
+});
+const ZaiConfig = z.object({
+  client: BotpressClient,
+  userId: z.string().describe("The ID of the user consuming the API").optional(),
+  retry: z.object({ maxRetries: z.number().min(0).max(100) }).default({ maxRetries: 3 }),
+  modelId: z.custom(
+    (value) => {
+      if (typeof value !== "string" || !value.includes("__")) {
+        return false;
+      }
+      return true;
+    },
+    {
+      message: "Invalid model ID"
+    }
+  ).describe("The ID of the model you want to use").default("openai__gpt-4o-mini-2024-07-18"),
+  activeLearning: ActiveLearning.default({ enable: false }),
+  namespace: z.string().regex(
+    /^[A-Za-z0-9_/-]{1,100}$/,
+    "Namespace must be alphanumeric and contain only letters, numbers, underscores, hyphens and slashes"
+  ).default("zai")
+});
+export class Zai {
+  static tokenizer = null;
+  client;
+  originalConfig;
+  userId;
+  integration;
+  model;
+  retry;
+  Model;
+  namespace;
+  adapter;
+  activeLearning;
+  constructor(config) {
+    this.originalConfig = config;
+    const parsed = ZaiConfig.parse(config);
+    this.client = parsed.client;
+    const [integration, modelId] = parsed.modelId.split("__");
+    if (!integration?.length || !modelId?.length) {
+      throw new Error(`Invalid model ID: ${parsed.modelId}. Expected format: <integration>__<modelId>`);
+    }
+    this.integration = integration;
+    this.model = modelId;
+    this.namespace = parsed.namespace;
+    this.userId = parsed.userId;
+    this.retry = parsed.retry;
+    this.Model = Models.find((m) => m.id === parsed.modelId);
+    this.activeLearning = parsed.activeLearning;
+    this.adapter = parsed.activeLearning?.enable ? new TableAdapter({ client: this.client, tableName: parsed.activeLearning.tableName }) : new MemoryAdapter([]);
+  }
+  /** @internal */
+  async callModel(props) {
+    let retries = this.retry.maxRetries;
+    while (retries-- >= 0) {
+      try {
+        return await this._callModel(props);
+      } catch (e) {
+        if (retries >= 0) {
+          await new Promise((resolve) => setTimeout(resolve, 1e3));
+        } else {
+          throw new Error("Failed to call model after multiple retries");
+        }
+      }
+    }
+    throw new Error("Failed to call model after multiple retries");
+  }
+  /** @internal */
+  async _callModel(props) {
+    let retries = this.retry.maxRetries;
+    do {
+      const start = Date.now();
+      const input = {
+        messages: [],
+        temperature: 0,
+        topP: 1,
+        model: { id: this.model },
+        userId: this.userId,
+        ...props
+      };
+      const { output } = await this.client.callAction({
+        type: `${this.integration}:generateContent`,
+        input
+      });
+      const latency = Date.now() - start;
+      return {
+        ...output,
+        metadata: {
+          model: this.model,
+          latency,
+          cost: { input: output.usage.inputCost, output: output.usage.outputCost },
+          tokens: { input: output.usage.inputTokens, output: output.usage.outputTokens }
+        }
+      };
+    } while (--retries > 0);
+  }
+  async getTokenizer() {
+    Zai.tokenizer ??= await (async () => {
+      while (!getWasmTokenizer) {
+        await new Promise((resolve) => setTimeout(resolve, 25));
+      }
+      return getWasmTokenizer();
+    })();
+    return Zai.tokenizer;
+  }
+  get taskId() {
+    if (!this.activeLearning.enable) {
+      return void 0;
+    }
+    return `${this.namespace}/${this.activeLearning.taskId}`.replace(/\/+/g, "/");
+  }
+  with(options) {
+    return new Zai({
+      ...this.originalConfig,
+      ...options
+    });
+  }
+  learn(taskId) {
+    return new Zai({
+      ...this.originalConfig,
+      activeLearning: { ...this.activeLearning, taskId, enable: true }
+    });
+  }
+}
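
with() and learn() both build a fresh Zai from the original config rather than mutating the instance, so a base client can be specialized per use case. A sketch, reusing the client from the earlier example (the alternate model ID is illustrative; any <integration>__<modelId> string accepted by the workspace works):

const base = new Zai({ client }) // defaults to openai__gpt-4o-mini-2024-07-18
const larger = base.with({ modelId: 'openai__gpt-4o-2024-11-20' }) // same config, different model (ID illustrative)
const learning = base.learn('support-replies') // active learning on, examples kept in ActiveLearningTable
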
package/package.json
CHANGED
@@ -1,46 +1,48 @@
 {
   "name": "@botpress/zai",
-  "version": "1.0.1",
-  "type": "module",
-  "private": false,
   "description": "Zui AI (zai) – An LLM utility library written on top of Zui and the Botpress API",
+  "version": "1.1.0",
+  "main": "./dist/index.js",
+  "types": "./dist/index.d.ts",
   "exports": {
-    "
-
-
-    "require": "./dist/index.cjs"
-  }
+    "types": "./dist/index.d.ts",
+    "require": "./dist/index.js",
+    "import": "./dist/index.js"
   },
   "scripts": {
-    "build": "
+    "build": "npm run build:types && npm run build:neutral",
+    "build:neutral": "esbuild src/**/*.ts src/*.ts --platform=neutral --outdir=dist",
+    "build:types": "tsup",
     "watch": "tsup --watch",
     "test": "vitest run --config vitest.config.ts",
     "test:update": "vitest -u run --config vitest.config.ts",
     "test:watch": "vitest --config vitest.config.ts",
     "build-with-latest-models": "pnpm run update-types && pnpm run update-models && pnpm run build",
-    "update-models": "ts-node ./
-    "update-types": "ts-node ./
+    "update-models": "ts-node ./scripts/update-models.mts",
+    "update-types": "ts-node ./scripts/update-types.mts"
   },
   "keywords": [],
   "author": "",
   "license": "ISC",
   "dependencies": {
-    "json5": "^2.2.
-    "jsonrepair": "^3.2
+    "json5": "^2.2.3",
+    "jsonrepair": "^3.11.2",
+    "lodash-es": "^4.17.21"
   },
   "devDependencies": {
     "@botpress/vai": "0.0.1-beta.7",
-    "@types/lodash": "^4.17.
-    "dotenv": "^16.
+    "@types/lodash-es": "^4.17.12",
+    "dotenv": "^16.4.7",
+    "esbuild": "^0.24.2",
+    "lodash": "^4.17.21",
     "ts-node": "^10.9.2",
     "tsup": "^8.3.5",
     "typescript": "^5.7.2",
-    "vitest": "^2.
+    "vitest": "^2.1.8"
  },
  "peerDependencies": {
-    "@botpress/client": "^0.
-    "@botpress/sdk": "^1.6.1",
+    "@botpress/client": "^0.40.0",
     "@botpress/wasm": "^1.0.0",
-    "
+    "@bpinternal/zui": "^0.13.4"
  }
 }
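
The packaging change is the notable part of this diff: "type": "module" and the separate CJS bundle (dist/index.cjs) are gone, and the exports map now points both the require and import conditions at the same ./dist/index.js, with types at ./dist/index.d.ts. A sketch of what the exports map declares for consumers (declared resolution only, not a claim that both paths were tested):

// ESM consumers: the "import" condition resolves to ./dist/index.js
import { Zai } from '@botpress/zai'

// CJS consumers: the "require" condition resolves to the same file
// const { Zai } = require('@botpress/zai')
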
package/src/adapters/adapter.ts
ADDED
@@ -0,0 +1,35 @@
+import { GenerationMetadata } from '../utils'
+
+export type SaveExampleProps<TInput, TOutput> = {
+  key: string
+  taskType: string
+  taskId: string
+  instructions: string
+  input: TInput
+  output: TOutput
+  explanation?: string
+  metadata: GenerationMetadata
+  status?: 'pending' | 'approved'
+}
+
+export type GetExamplesProps<TInput> = {
+  taskType: string
+  taskId: string
+  input: TInput
+}
+
+export abstract class Adapter {
+  abstract getExamples<TInput, TOutput>(
+    props: GetExamplesProps<TInput>
+  ): Promise<
+    Array<{
+      key: string
+      input: TInput
+      output: TOutput
+      explanation?: string
+      similarity: number
+    }>
+  >
+
+  abstract saveExample<TInput, TOutput>(props: SaveExampleProps<TInput, TOutput>): Promise<void>
+}
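
Adapter is the extension point behind active learning; MemoryAdapter and the Botpress TableAdapter added in this release both implement it. A minimal sketch of a custom implementation, assuming a relative import from the same package; the constant similarity score marks where a real adapter would rank examples against props.input:

import { Adapter, GetExamplesProps, SaveExampleProps } from './adapter'

class ArrayAdapter extends Adapter {
  private rows: SaveExampleProps<unknown, unknown>[] = []

  async getExamples<TInput, TOutput>(props: GetExamplesProps<TInput>) {
    return this.rows
      .filter((r) => r.taskId === props.taskId && r.taskType === props.taskType)
      .map((r) => ({
        key: r.key,
        input: r.input as TInput,
        output: r.output as TOutput,
        explanation: r.explanation,
        similarity: 1 // naive constant; no embedding-based retrieval in this sketch
      }))
  }

  async saveExample<TInput, TOutput>(props: SaveExampleProps<TInput, TOutput>): Promise<void> {
    this.rows.push(props as SaveExampleProps<unknown, unknown>)
  }
}
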