@bedrockio/ai 0.3.0 → 0.4.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/README.md +58 -17
- package/dist/cjs/BaseClient.js +242 -182
- package/dist/cjs/anthropic.js +115 -93
- package/dist/cjs/google.js +74 -80
- package/dist/cjs/index.js +23 -75
- package/dist/cjs/openai.js +114 -72
- package/dist/cjs/package.json +1 -0
- package/dist/cjs/utils/code.js +11 -0
- package/dist/cjs/utils/json.js +53 -0
- package/dist/cjs/utils/templates.js +83 -0
- package/dist/cjs/xai.js +11 -20
- package/dist/esm/BaseClient.js +243 -0
- package/dist/esm/anthropic.js +116 -0
- package/dist/esm/google.js +75 -0
- package/dist/esm/index.js +25 -0
- package/dist/esm/openai.js +113 -0
- package/dist/esm/utils/code.js +8 -0
- package/dist/esm/utils/json.js +50 -0
- package/dist/esm/utils/templates.js +76 -0
- package/dist/esm/xai.js +10 -0
- package/package.json +25 -18
- package/types/BaseClient.d.ts +67 -26
- package/types/BaseClient.d.ts.map +1 -1
- package/types/anthropic.d.ts +26 -2
- package/types/anthropic.d.ts.map +1 -1
- package/types/google.d.ts.map +1 -1
- package/types/index.d.ts +4 -11
- package/types/index.d.ts.map +1 -1
- package/types/openai.d.ts +45 -2
- package/types/openai.d.ts.map +1 -1
- package/types/utils/code.d.ts +2 -0
- package/types/utils/code.d.ts.map +1 -0
- package/types/utils/json.d.ts +2 -0
- package/types/utils/json.d.ts.map +1 -0
- package/types/utils/templates.d.ts +3 -0
- package/types/utils/templates.d.ts.map +1 -0
- package/types/utils.d.ts +4 -0
- package/types/utils.d.ts.map +1 -0
- package/types/xai.d.ts.map +1 -1
- package/.prettierignore +0 -1
- package/.prettierrc.cjs +0 -1
- package/__mocks__/@anthropic-ai/sdk.js +0 -43
- package/__mocks__/@google/generative-ai.js +0 -59
- package/__mocks__/openai.js +0 -48
- package/dist/cjs/util.js +0 -62
- package/src/BaseClient.js +0 -195
- package/src/anthropic.js +0 -97
- package/src/google.js +0 -91
- package/src/index.js +0 -72
- package/src/openai.js +0 -71
- package/src/util.js +0 -60
- package/src/xai.js +0 -19
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.parseCode = parseCode;
// Matches a fenced markdown code block (``` with an optional language
// tag) spanning the whole trimmed input; the "s" flag lets "." match
// across newlines.
const CODE_REG = /^```\w*(.+)```/s;
/**
 * Strips a surrounding markdown code fence from `content` if one wraps
 * the entire (trimmed) input, returning the trimmed inner code.
 * Input without a fence is returned unchanged.
 *
 * @param {string} content
 * @returns {string}
 */
function parseCode(content) {
    const match = content.trim().match(CODE_REG);
    return match ? match[1].trim() : content;
}
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createMessageExtractor = createMessageExtractor;
const partial_json_1 = require("partial-json");
/**
 * Creates a stream extractor for the given JSON keys. The returned
 * function accepts successive text deltas, accumulates them into a
 * buffer, and attempts to pull each key's string value out of the
 * (possibly incomplete) JSON seen so far using partial-json.
 *
 * @param {string[]} keys - Top-level JSON keys to extract.
 * @returns {(delta: string) => Array<{key: string, text: string, delta: string, done: boolean}>}
 */
function createMessageExtractor(keys) {
    let buffer = '';
    const extractors = keys.map((key) => createExtractor(key));
    return (delta) => {
        buffer += delta;
        return extractors
            .map((run) => run(buffer))
            .filter(Boolean);
    };
}
// Creates a per-key extractor. Tracks the last value seen so it can
// report only the newly-appended delta, and stops emitting once the
// value is complete.
function createExtractor(key) {
    let lastText = '';
    let done = false;
    return (buffer) => {
        if (done) {
            return;
        }
        const text = extractText(buffer, key);
        if (!text) {
            return;
        }
        // Don't finish while the buffer ends with a space as it
        // may be in the middle of trying to extract.
        if (text === lastText && !buffer.endsWith(' ')) {
            done = true;
        }
        const delta = text.slice(lastText.length);
        lastText = text;
        return { key, text, delta, done };
    };
}
// Parses the partial JSON buffer (allowing incomplete strings and
// objects) and returns the value for `key`, or '' if absent.
function extractText(input, key) {
    if (!input) {
        return;
    }
    const parsed = (0, partial_json_1.parse)(input, partial_json_1.STR | partial_json_1.OBJ);
    return parsed?.[key] || '';
}
|
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.loadTemplates = loadTemplates;
exports.renderTemplate = renderTemplate;
const promises_1 = __importDefault(require("fs/promises"));
const path_1 = __importDefault(require("path"));
const glob_1 = require("glob");
const mustache_1 = __importDefault(require("mustache"));
/**
 * Loads all markdown templates found in `dir`, keyed by file basename
 * without the ".md" extension.
 *
 * @param {string} dir - Directory to search for "*.md" files.
 * @returns {Promise<Object<string, string>>}
 * @throws {Error} when no templates are found.
 */
async function loadTemplates(dir) {
    const result = {};
    const files = await (0, glob_1.glob)(path_1.default.join(dir, '*.md'));
    if (!files.length) {
        throw new Error(`No templates found in: ${dir}.`);
    }
    for (let file of files) {
        const base = path_1.default.basename(file, '.md');
        result[base] = await loadTemplate(file);
    }
    return result;
}
/**
 * Renders a mustache template with `options` (and `options.params`)
 * merged as the view. Unknown tokens are left in place so the result
 * can be re-interpolated later (see wrapProxy).
 *
 * @param {string} template
 * @param {Object} options
 * @returns {string}
 */
function renderTemplate(template, options) {
    let params = {
        ...options,
        ...options.params,
    };
    params = mapObjects(params);
    params = wrapProxy(params);
    return mustache_1.default.render(template, params);
}
// Utils
async function loadTemplate(file) {
    return await promises_1.default.readFile(file, 'utf-8');
}
// Transform arrays and object to versions
// that are more understandable in the context
// of a template that may have meaningful whitespace.
function mapObjects(params) {
    const result = {};
    for (let [key, value] of Object.entries(params)) {
        if (Array.isArray(value)) {
            value = mapArray(value);
        }
        else if (value !== null && typeof value === 'object') {
            // Fix: `typeof null === 'object'`, so without the null check
            // a null param was stringified and rendered as the literal
            // string "null" instead of being left for mustache to skip.
            value = JSON.stringify(value, null, 2);
        }
        result[key] = value;
    }
    return result;
}
function mapArray(arr) {
    // Only map simple arrays of primitives.
    if (typeof arr[0] === 'string') {
        arr = arr
            .map((el) => {
            return `- ${el}`;
        })
            .join('\n');
    }
    return arr;
}
// Wrap params with a proxy object that reports
// as having all properties. If one is accessed
// that does not exist then return the original
// token. This way templates can be partially
// interpolated and re-interpolated later.
function wrapProxy(params) {
    return new Proxy(params, {
        has() {
            return true;
        },
        get(target, prop) {
            if (prop in target) {
                return target[prop];
            }
            else {
                // `prop` may be a Symbol (mustache probes the view), so
                // stringify explicitly before rebuilding the token.
                return `{{{${prop.toString()}}}}`;
            }
        },
    });
}
|
package/dist/cjs/xai.js
CHANGED
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.XAiClient = void 0;
const openai_js_1 = require("./openai.js");
/**
 * Client for xAI's Grok models. xAI exposes an OpenAI-compatible API,
 * so this simply points the OpenAI client at the x.ai endpoint.
 */
class XAiClient extends openai_js_1.OpenAiClient {
    static DEFAULT_MODEL = 'grok-4-fast';
    constructor(options) {
        // Caller options win except for the base URL, which is fixed.
        super({
            ...options,
            baseURL: 'https://api.x.ai/v1',
        });
    }
}
exports.XAiClient = XAiClient;
|
import { parseCode } from './utils/code.js';
import { createMessageExtractor } from './utils/json.js';
import { loadTemplates, renderTemplate } from './utils/templates.js';
/**
 * Shared base for all provider clients. Handles option normalization,
 * template resolution/rendering, structured (schema) output unwrapping,
 * and stream post-processing. Subclasses implement the "Protected"
 * methods below against their provider SDK.
 */
export default class BaseClient {
    constructor(options) {
        this.options = {
            // @ts-ignore
            model: this.constructor.DEFAULT_MODEL,
            ...options,
        };
        // Lazily populated by loadTemplates().
        this.templates = null;
    }
    // Public
    /**
     * Interpolates vars into the provided template as instructions and runs the
     * prompt.
     *
     * @param {PromptOptions} options
     */
    async prompt(options) {
        options = await this.normalizeOptions(options);
        const { input, output, stream, schema } = options;
        const response = await this.runPrompt(options);
        if (!stream) {
            this.debug('Response:', response);
        }
        if (output === 'raw') {
            return response;
        }
        let result;
        if (schema) {
            result = this.getStructuredResponse(response);
            // @ts-ignore
            if (options.hasWrappedSchema) {
                // Array schemas were wrapped in an object by normalizeSchema
                // (providers require a top-level object); unwrap here.
                result = result.items;
            }
        }
        else if (output === 'json') {
            result = JSON.parse(parseCode(this.getTextResponse(response)));
        }
        else {
            result = parseCode(this.getTextResponse(response));
        }
        if (output === 'messages') {
            return {
                result,
                ...this.getMessagesResponse(input, response),
            };
        }
        else {
            return result;
        }
    }
    /**
     * Streams the prompt response.
     *
     * @param {PromptOptions & StreamOptions} options
     * @returns {AsyncIterator}
     */
    async *stream(options) {
        options = await this.normalizeOptions(options);
        const extractor = this.getMessageExtractor(options);
        try {
            const stream = await this.runStream(options);
            // @ts-ignore
            for await (let event of stream) {
                this.debug('Event:', event);
                event = this.normalizeStreamEvent(event);
                if (event) {
                    yield event;
                }
                const extractedMessages = extractor?.(event) || [];
                for (let message of extractedMessages) {
                    const { key, delta, text, done } = message;
                    let extractEvent;
                    if (done) {
                        extractEvent = {
                            type: 'extract:done',
                            text,
                            key,
                        };
                    }
                    else {
                        extractEvent = {
                            type: 'extract:delta',
                            delta,
                            key,
                        };
                    }
                    this.debug('Extract:', extractEvent);
                    yield extractEvent;
                }
            }
        }
        catch (error) {
            // Surface provider errors as a terminal stream event instead of
            // throwing out of the generator.
            const { message, code } = error;
            yield {
                type: 'error',
                code,
                message,
            };
        }
    }
    /**
     * Resolves and renders a template with the given options as params.
     */
    async buildTemplate(options) {
        const template = await this.resolveTemplate(options);
        return renderTemplate(template, options);
    }
    // Protected
    runPrompt(options) {
        void options;
        throw new Error('Method not implemented.');
    }
    runStream(options) {
        void options;
        throw new Error('Method not implemented.');
    }
    getTextResponse(response) {
        void response;
        throw new Error('Method not implemented.');
    }
    /**
     * @returns {Object}
     */
    getStructuredResponse(response) {
        void response;
        throw new Error('Method not implemented.');
    }
    /**
     * @returns {Object}
     */
    getMessagesResponse(input, response) {
        void response;
        throw new Error('Method not implemented.');
    }
    /**
     * @returns {Object}
     */
    normalizeStreamEvent(event) {
        void event;
        throw new Error('Method not implemented.');
    }
    // Private
    async normalizeOptions(options) {
        options = {
            input: '',
            output: 'text',
            ...this.options,
            ...options,
        };
        options.input = this.normalizeInput(options);
        options.schema = this.normalizeSchema(options);
        options.instructions ||= await this.resolveInstructions(options);
        return options;
    }
    // Coerces a string input into a single-message array, nudging the
    // model toward valid JSON when json output was requested.
    normalizeInput(options) {
        let { input = '', output } = options;
        if (typeof input === 'string') {
            if (output === 'json') {
                input += '\nOutput only valid JSON.';
            }
            input = [
                {
                    role: 'user',
                    content: input,
                },
            ];
        }
        return input;
    }
    // Converts the schema to JSON schema form and wraps top-level arrays
    // in an object (providers require an object root). Sets
    // `hasWrappedSchema` so prompt() can unwrap the result.
    normalizeSchema(options) {
        let { schema } = options;
        if (!schema) {
            return;
        }
        // Convert to JSON schema.
        schema = schema.toJSON?.() || schema;
        if (schema?.type === 'array') {
            schema = {
                type: 'object',
                properties: {
                    items: schema,
                },
                required: ['items'],
                additionalProperties: false,
            };
            options.hasWrappedSchema = true;
        }
        return schema;
    }
    getMessageExtractor(options) {
        const { extractMessages } = options;
        if (!extractMessages) {
            return;
        }
        const messageExtractor = createMessageExtractor([extractMessages]);
        return (event) => {
            if (event?.type === 'delta') {
                // Fix: normalized delta events carry the chunk as `delta` or
                // `text` depending on the provider (Anthropic/Google emit
                // `text`); accept either so the JSON buffer never has the
                // string "undefined" appended to it.
                const chunk = event.delta ?? event.text;
                if (chunk != null) {
                    return messageExtractor(chunk);
                }
            }
        };
    }
    debug(message, arg) {
        if (this.options.debug) {
            // TODO: replace with logger when opentelemetry is removed
            // eslint-disable-next-line
            console.debug(`${message}\n${JSON.stringify(arg, null, 2)}\n`);
        }
    }
    async resolveInstructions(options) {
        if (options.template) {
            const template = await this.resolveTemplate(options);
            return renderTemplate(template, options);
        }
    }
    // Treats `template` as a template name first, falling back to
    // interpreting it as the literal template source.
    async resolveTemplate(options) {
        const { template } = options;
        await this.loadTemplates();
        return this.templates[template] || template;
    }
    async loadTemplates() {
        const { templates } = this.options;
        this.templates ||= await loadTemplates(templates);
    }
}
/**
 * @typedef {Object} PromptOptions
 * @property {string|PromptMessage[]} input - Input to use.
 * @property {string} [model] - The model to use.
 * @property {boolean} stream - Stream response.
 * @property {Object} [schema] - A JSON schema compatible object that defines the output shape.
 * @property {"raw" | "text" | "json" | "messages"} [output] - The return value type.
 * @property {Object} [params] - Params to be interpolated into the template.
 *   May also be passed as additional props to options.
 */
/**
 * @typedef {Object} StreamOptions
 * @property {string} [extractMessages] - Key in JSON response to extract a message stream from.
 */
/**
 * @typedef {Object} PromptMessage
 * @property {"system" | "user" | "assistant"} role
 * @property {string} content
 */
|
import Anthropic from '@anthropic-ai/sdk';
import BaseClient from './BaseClient.js';
const DEFAULT_TOKENS = 4096;
/**
 * Client for Anthropic (Claude) models. Structured output is implemented
 * by forcing a single "schema" tool call whose input conforms to the
 * requested JSON schema.
 */
export class AnthropicClient extends BaseClient {
    static DEFAULT_MODEL = 'claude-sonnet-4-5';
    constructor(options) {
        super(options);
        this.client = new Anthropic(options);
    }
    /**
     * Lists available models.
     * {@link https://docs.anthropic.com/en/docs/about-claude/models Documentation}
     */
    async models() {
        const { data } = await this.client.models.list();
        return data.map((o) => o.id);
    }
    async runPrompt(options) {
        const { input, model, temperature, instructions, stream = false, tokens = DEFAULT_TOKENS, } = options;
        // @ts-ignore
        return await this.client.messages.create({
            model,
            stream,
            temperature,
            max_tokens: tokens,
            system: instructions,
            ...this.getSchemaOptions(options),
            messages: input,
        });
    }
    async runStream(options) {
        return await this.runPrompt({
            ...options,
            output: 'raw',
            stream: true,
        });
    }
    getTextResponse(response) {
        const textBlock = response.content.find((block) => {
            return block.type === 'text';
        });
        return textBlock?.text || null;
    }
    getStructuredResponse(response) {
        // Structured output arrives as the forced "schema" tool call.
        const toolBlock = response.content.find((block) => {
            return block.type === 'tool_use';
        });
        return toolBlock?.input || null;
    }
    getMessagesResponse(input, response) {
        return {
            messages: [
                ...input,
                ...response.content
                    .filter((item) => {
                    return item.type === 'text';
                })
                    .map((item) => {
                    return {
                        role: 'assistant',
                        content: item.text,
                    };
                }),
            ],
        };
    }
    normalizeStreamEvent(event) {
        let { type } = event;
        if (type === 'content_block_start') {
            return {
                type: 'start',
            };
        }
        else if (type === 'content_block_stop') {
            return {
                type: 'stop',
            };
        }
        else if (type === 'content_block_delta') {
            return {
                type: 'delta',
                text: event.delta.text,
            };
        }
    }
    // Private
    /**
     * Builds the tool-call options that force schema-conforming JSON
     * output. Fix: reads the normalized schema from `options.schema` —
     * previously this read `options.output`, which is the output-format
     * string ("raw"|"text"|"json"|"messages") and never an object, so
     * `output?.type` was always undefined and structured output was
     * never actually requested from the API.
     */
    getSchemaOptions(options) {
        const { schema } = options;
        if (schema?.type) {
            let inputSchema = schema;
            if (inputSchema.type === 'array') {
                // Anthropic tool input schemas must be a top-level object.
                // (BaseClient.normalizeSchema already wraps arrays, so this is
                // a safety net for direct callers.)
                inputSchema = {
                    type: 'object',
                    properties: {
                        items: inputSchema,
                    },
                    required: ['items'],
                    additionalProperties: false,
                };
            }
            return {
                tools: [
                    {
                        name: 'schema',
                        description: 'Follow the schema for JSON output.',
                        input_schema: inputSchema,
                    },
                ],
                tool_choice: {
                    type: 'tool',
                    name: 'schema',
                },
            };
        }
    }
}
|
import { GoogleGenerativeAI } from '@google/generative-ai';
import BaseClient from './BaseClient.js';
const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp';
/**
 * Client for Google Gemini models.
 *
 * NOTE(review): this class still implements the older getCompletion /
 * getStream / getStreamedChunk API rather than BaseClient's
 * runPrompt / runStream, and `super.getStream` does not exist on the
 * current BaseClient — confirm whether this client has been migrated.
 */
export class GoogleClient extends BaseClient {
    constructor(options) {
        super(options);
        const { apiKey } = options;
        this.client = new GoogleGenerativeAI(apiKey);
    }
    /**
     * Lists available models.
     * {@link https://ai.google.dev/gemini-api/docs/models/gemini#gemini-2.0-flashl Documentation}
     */
    async models() {
        return [
            'gemini-2.0-flash-exp',
            'gemini-1.5-flash',
            'gemini-1.5-flash-8b',
            'gemini-1.5-pro',
        ];
    }
    async getCompletion(options) {
        const { model = DEFAULT_MODEL, output = 'text', stream = false } = options;
        const generator = this.client.getGenerativeModel({ model });
        // @ts-ignore
        const messages = await this.getMessages(options);
        const prompts = messages.map(({ content }) => content);
        const response = stream
            ? await generator.generateContentStream(prompts)
            : await generator.generateContent(prompts);
        if (output === 'raw') {
            return response;
        }
        // Flatten all candidate parts and return the first one.
        // @ts-ignore
        const parts = response.response.candidates.flatMap((candidate) => candidate.content.parts);
        return parts[0];
    }
    async getStream(options) {
        // @ts-ignore
        const response = await super.getStream(options);
        // @ts-ignore
        return response.stream;
    }
    getStreamedChunk(chunk, started) {
        const [candidate] = chunk.candidates;
        // Default to a plain chunk; override for the first and last events.
        let type = 'chunk';
        if (!started) {
            type = 'start';
        }
        else if (candidate.finishReason === 'STOP') {
            type = 'stop';
        }
        return {
            type,
            text: candidate.content.parts[0].text || '',
        };
    }
}
|
import { AnthropicClient } from './anthropic.js';
import { GoogleClient } from './google.js';
import { OpenAiClient } from './openai.js';
import { XAiClient } from './xai.js';
/**
 * Creates a provider client for the given platform. Both platform
 * names and model family aliases are accepted (e.g. "openai"/"gpt").
 *
 * @param {Object} [options]
 * @param {string} options.platform
 * @throws {Error} when no platform or an unknown platform is given.
 */
export function createClient(options = {}) {
    const { platform } = options;
    if (!platform) {
        throw new Error('No platform specified.');
    }
    switch (platform) {
        case 'openai':
        case 'gpt':
            return new OpenAiClient(options);
        case 'google':
        case 'gemini':
            return new GoogleClient(options);
        case 'anthropic':
        case 'claude':
            return new AnthropicClient(options);
        case 'xai':
        case 'grok':
            return new XAiClient(options);
        default:
            throw new Error(`Unknown platform "${platform}".`);
    }
}