@bedrockio/ai 0.3.0 → 0.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/CHANGELOG.md +25 -0
  2. package/README.md +58 -17
  3. package/dist/cjs/BaseClient.js +242 -182
  4. package/dist/cjs/anthropic.js +115 -93
  5. package/dist/cjs/google.js +74 -80
  6. package/dist/cjs/index.js +23 -75
  7. package/dist/cjs/openai.js +114 -72
  8. package/dist/cjs/package.json +1 -0
  9. package/dist/cjs/utils/code.js +11 -0
  10. package/dist/cjs/utils/json.js +53 -0
  11. package/dist/cjs/utils/templates.js +83 -0
  12. package/dist/cjs/xai.js +11 -20
  13. package/dist/esm/BaseClient.js +243 -0
  14. package/dist/esm/anthropic.js +116 -0
  15. package/dist/esm/google.js +75 -0
  16. package/dist/esm/index.js +25 -0
  17. package/dist/esm/openai.js +113 -0
  18. package/dist/esm/utils/code.js +8 -0
  19. package/dist/esm/utils/json.js +50 -0
  20. package/dist/esm/utils/templates.js +76 -0
  21. package/dist/esm/xai.js +10 -0
  22. package/package.json +25 -18
  23. package/types/BaseClient.d.ts +67 -26
  24. package/types/BaseClient.d.ts.map +1 -1
  25. package/types/anthropic.d.ts +26 -2
  26. package/types/anthropic.d.ts.map +1 -1
  27. package/types/google.d.ts.map +1 -1
  28. package/types/index.d.ts +4 -11
  29. package/types/index.d.ts.map +1 -1
  30. package/types/openai.d.ts +45 -2
  31. package/types/openai.d.ts.map +1 -1
  32. package/types/utils/code.d.ts +2 -0
  33. package/types/utils/code.d.ts.map +1 -0
  34. package/types/utils/json.d.ts +2 -0
  35. package/types/utils/json.d.ts.map +1 -0
  36. package/types/utils/templates.d.ts +3 -0
  37. package/types/utils/templates.d.ts.map +1 -0
  38. package/types/utils.d.ts +4 -0
  39. package/types/utils.d.ts.map +1 -0
  40. package/types/xai.d.ts.map +1 -1
  41. package/.prettierignore +0 -1
  42. package/.prettierrc.cjs +0 -1
  43. package/__mocks__/@anthropic-ai/sdk.js +0 -43
  44. package/__mocks__/@google/generative-ai.js +0 -59
  45. package/__mocks__/openai.js +0 -48
  46. package/dist/cjs/util.js +0 -62
  47. package/src/BaseClient.js +0 -195
  48. package/src/anthropic.js +0 -97
  49. package/src/google.js +0 -91
  50. package/src/index.js +0 -72
  51. package/src/openai.js +0 -71
  52. package/src/util.js +0 -60
  53. package/src/xai.js +0 -19
@@ -1,43 +0,0 @@
// Shared response fixture; set via MockAnthropicClient.setResponse().
let mock;

/**
 * Mock replacement for the Anthropic SDK client used in tests.
 * `messages.create()` returns the configured fixture, or an async
 * iterator of chunks when `stream: true` is passed.
 */
function MockAnthropicClient() {
  return {
    messages: {
      create(options) {
        return options.stream ? streamMock() : mock;
      },
    },
  };
}

// Registers the canned response returned by subsequent calls.
function setResponse(data) {
  mock = data;
}

// Splits the fixture text into three chunks mimicking the
// start/delta/stop event sequence of a real streamed response.
async function* streamMock() {
  const text = mock.content[0].text;
  const third = Math.floor(text.length / 3);
  yield wrapChunk(text.slice(0, third), 'content_block_start');
  yield wrapChunk(text.slice(third, 2 * third), 'content_block_delta');
  yield wrapChunk(text.slice(2 * third), 'message_stop');
}

// Wraps a text fragment in the chunk envelope shape the SDK emits.
function wrapChunk(str, type) {
  return {
    type,
    delta: {
      text: str,
    },
  };
}

MockAnthropicClient.setResponse = setResponse;

module.exports = MockAnthropicClient;
@@ -1,59 +0,0 @@
1
- let mock;
2
-
3
- class MockGoogleClient {
4
- constructor() {
5
- return {
6
- getGenerativeModel() {
7
- return {
8
- generateContent() {
9
- return mock;
10
- },
11
- generateContentStream() {
12
- return {
13
- stream: streamMock(),
14
- };
15
- },
16
- };
17
- },
18
- };
19
- }
20
- }
21
-
22
- function setResponse(data) {
23
- mock = data;
24
- }
25
-
26
- async function* streamMock() {
27
- const content = mock.response.candidates[0].content.parts[0].text;
28
- const size = Math.floor(content.length / 3);
29
- const one = content.slice(0, size);
30
- const two = content.slice(size, 2 * size);
31
- const three = content.slice(2 * size);
32
- yield wrapChunk(one);
33
- yield wrapChunk(two);
34
- yield wrapChunk(three, true);
35
- }
36
-
37
- function wrapChunk(str, finish) {
38
- return {
39
- candidates: [
40
- {
41
- ...(finish && {
42
- finishReason: 'STOP',
43
- }),
44
- content: {
45
- parts: [
46
- {
47
- text: str,
48
- },
49
- ],
50
- },
51
- },
52
- ],
53
- };
54
- }
55
-
56
- module.exports = {
57
- setResponse,
58
- GoogleGenerativeAI: MockGoogleClient,
59
- };
@@ -1,48 +0,0 @@
// Shared response fixture; set via MockOpenAiClient.setResponse().
let mock;

/**
 * Mock replacement for the OpenAI SDK client used in tests.
 * `chat.completions.create()` returns the configured fixture, or an
 * async iterator of delta chunks when `stream: true` is passed.
 */
function MockOpenAiClient() {
  return {
    chat: {
      completions: {
        create(options) {
          return options.stream ? streamMock() : mock;
        },
      },
    },
  };
}

// Splits the fixture text into three delta chunks to mimic streaming.
async function* streamMock() {
  const text = mock.choices[0].message.content;
  const third = Math.floor(text.length / 3);
  yield wrapChunk(text.slice(0, third));
  yield wrapChunk(text.slice(third, 2 * third));
  yield wrapChunk(text.slice(2 * third));
}

// Wraps a text fragment in the chunk envelope shape the SDK emits.
function wrapChunk(str) {
  return {
    choices: [
      {
        delta: {
          content: str,
        },
      },
    ],
  };
}

// Registers the canned response returned by subsequent calls.
function setResponse(data) {
  mock = data;
}

MockOpenAiClient.setResponse = setResponse;

module.exports = MockOpenAiClient;
package/dist/cjs/util.js DELETED
@@ -1,62 +0,0 @@
"use strict";

// Transpiled CommonJS module header: flags the module as ESM interop
// and wires up the public exports (the functions are hoisted, defined
// further down in this file).
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.loadTemplate = loadTemplate;
exports.loadTemplates = loadTemplates;
exports.transformResponse = transformResponse;
var _promises = _interopRequireDefault(require("fs/promises"));
var _path = _interopRequireDefault(require("path"));
// glob is consumed as a namespace, so no default interop is applied.
var _glob = require("glob");
// Babel helper: normalizes default exports across CJS/ESM modules.
function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
// Matches a fenced code block and captures its contents.
const CODE_REG = /^```\w*$(.+)```/ms;
// Matches the outermost JSON object or array embedded in free text.
const JSON_REG = /([{[].+[}\]])/s;

/**
 * Loads all markdown templates in a directory, keyed by basename.
 * @param {string} dir - Directory containing `*.md` template files.
 * @returns {Promise<Object.<string, string>>}
 * @throws {Error} When the directory contains no templates.
 */
async function loadTemplates(dir) {
  const result = {};
  const files = await (0, _glob.glob)(_path.default.join(dir, '*.md'));
  if (!files.length) {
    throw new Error(`No templates found in: ${dir}.`);
  }
  for (let file of files) {
    const base = _path.default.basename(file, '.md');
    result[base] = await loadTemplate(file);
  }
  return result;
}

/**
 * Reads a single template file as UTF-8 text.
 * @param {string} file - Path to the template file.
 */
async function loadTemplate(file) {
  return await _promises.default.readFile(file, 'utf-8');
}

/**
 * Shapes a completion into the requested output format.
 * @param {object} options
 * @param {"text"|"messages"|"json"|"code"} [options.output] - Output format.
 * @param {Array} options.messages - The conversation so far.
 * @param {object} options.message - The assistant response message.
 * @throws {Error} When the output type is not recognized.
 */
function transformResponse(options) {
  const {
    output = 'text',
    messages,
    message
  } = options;
  const content = message.content || message.text;
  if (output === 'text') {
    return content;
  } else if (output === 'messages') {
    return [...messages, message];
  } else if (output === 'json') {
    return parseJson(content);
  } else if (output === 'code') {
    return parseCode(content);
  } else {
    // Fixed: reaching here means an *unknown* type was provided
    // ("text" is the default), so the old "No output type provided."
    // message was misleading.
    throw new Error(`Unknown output type "${output}".`);
  }
}

// Extracts and parses the first JSON payload found in the content.
function parseJson(content) {
  try {
    return JSON.parse(content.match(JSON_REG)[0]);
  } catch (error) {
    throw new Error(`Unable to derive JSON from response:\n\n${content}`);
  }
}

// Extracts the body of the first fenced code block in the content.
function parseCode(content) {
  try {
    return content.match(CODE_REG)[1].trim();
  } catch (error) {
    throw new Error(`Unable to derive code from response:\n\n${content}`);
  }
}
package/src/BaseClient.js DELETED
@@ -1,195 +0,0 @@
1
- import Mustache from 'mustache';
2
-
3
- import { loadTemplates, loadTemplate } from './util.js';
4
-
// Splits a rendered template into role sections delimited by
// `--- role ---` divider lines.
const MESSAGES_REG = /(?:^|\n)-{3,}\s*(\w+)\s*-{3,}(.*?)(?=\n-{3,}|$)/gs;

/**
 * Abstract base for platform clients. Handles template resolution,
 * message parsing, and streaming plumbing; subclasses must implement
 * `getCompletion` and `getStreamedChunk`.
 */
export default class BaseClient {
  constructor(options) {
    this.options = options;
    // Lazily-populated cache of templates loaded from the templates dir.
    this.templates = null;
  }

  /**
   * Interpolates vars into the provided template and
   * runs the chat completion. The "output" option may
   * be omitted and will default to `"text"`.
   * {@link https://github.com/bedrockio/ai?tab=readme-ov-file#bedrockioai Documentation}
   *
   * @param {object} options
   * @param {string} options.model - The model to use.
   * @param {"raw" | "text" | "json" | "messages"} [options.output] - The output to use.
   * @param {Object.<string, any>} [options.other] - Additional props
   * will be interpolated in the template.
   */
  async prompt(options) {
    options = {
      ...this.options,
      ...options,
    };

    const messages = await this.getMessages(options);
    return await this.getCompletion({
      ...options,
      messages,
    });
  }

  /**
   * Streams the prompt response.
   * @returns {AsyncIterator}
   */
  async *stream(options) {
    const stream = await this.getStream(options);

    let started = false;

    // @ts-ignore
    for await (const chunk of stream) {
      const resolved = this.getStreamedChunk(chunk, started);
      started = true;

      // Chunks the subclass cannot map (unknown event types) are dropped.
      // @ts-ignore
      if (resolved) {
        yield resolved;
      }
    }
  }

  /**
   * Resolves the conversation messages for a prompt. A resolved
   * template may define multiple roles via `--- role ---` dividers;
   * otherwise the whole rendered template (or `options.text`) becomes
   * a single user message.
   * @throws {Error} When neither a template nor text is provided.
   */
  async getMessages(options) {
    const { text } = options;
    const template = await this.resolveTemplate(options);

    if (template) {
      const raw = render(template, options);

      const messages = [];
      for (let match of raw.matchAll(MESSAGES_REG)) {
        const [, role, content] = match;
        messages.push({
          role: role.toLowerCase(),
          content: content.trim(),
        });
      }

      // No role dividers: treat the whole template as one user message.
      if (!messages.length) {
        messages.push({
          role: 'user',
          content: raw.trim(),
        });
      }

      return messages;
    } else if (text) {
      return [
        {
          role: 'user',
          content: text,
        },
      ];
    } else {
      throw new Error('No input provided.');
    }
  }

  // Renders the resolved template with the given options interpolated.
  async buildTemplate(options) {
    const template = await this.resolveTemplate(options);
    return render(template, options);
  }

  // Loads and caches the templates directory on first use.
  async loadTemplates() {
    const { templates } = this.options;
    this.templates ||= await loadTemplates(templates);
  }

  // Resolves a template from an inline string, a direct .md file path,
  // or a named file in the templates directory.
  async resolveTemplate(options) {
    const { template, file } = options;
    if (template) {
      return template;
    } else if (file?.endsWith('.md')) {
      return await loadTemplate(file);
    } else if (file) {
      await this.loadTemplates();
      return this.templates[file];
    }
  }

  // Runs the prompt in streaming mode, returning the raw stream.
  async getStream(options) {
    return await this.prompt({
      ...options,
      output: 'raw',
      stream: true,
    });
  }

  // Abstract: subclasses must override.
  getCompletion(options) {
    void options;
    // Fixed: the error was previously constructed but never thrown,
    // so a subclass missing the override failed silently.
    throw new Error('Method not implemented.');
  }

  // Abstract: subclasses must override.
  getStreamedChunk(chunk, started) {
    void chunk;
    void started;
    // Fixed: the error was previously constructed but never thrown.
    throw new Error('Method not implemented.');
  }
}
// Renders a Mustache template with options and params merged (params
// win). Unknown tokens are preserved so templates can be partially
// interpolated and re-rendered later.
function render(template, options) {
  let params = {
    ...options,
    ...options.params,
  };

  params = mapObjects(params);
  params = wrapProxy(params);
  return Mustache.render(template, params);
}

// Transform arrays and objects to versions
// that are more understandable in the context
// of a template that may have meaningful whitespace.
function mapObjects(params) {
  const result = {};
  for (let [key, value] of Object.entries(params)) {
    if (Array.isArray(value)) {
      value = mapArray(value);
    } else if (value !== null && typeof value === 'object') {
      // Guard against null: typeof null === 'object', and stringifying
      // it rendered the literal text "null" into templates.
      value = JSON.stringify(value, null, 2);
    }
    result[key] = value;
  }
  return result;
}

// Converts simple arrays of strings into markdown-style bullet lists;
// other arrays pass through untouched.
function mapArray(arr) {
  // Only map simple arrays of primitives.
  if (typeof arr[0] === 'string') {
    arr = arr
      .map((el) => {
        return `- ${el}`;
      })
      .join('\n');
  }
  return arr;
}

// Wrap params with a proxy object that reports
// as having all properties. If one is accessed
// that does not exist then return the original
// token. This way templates can be partially
// interpolated and re-interpolated later.
function wrapProxy(params) {
  return new Proxy(params, {
    has() {
      return true;
    },

    get(target, prop) {
      if (prop in target) {
        return target[prop];
      } else {
        return `{{{${prop.toString()}}}}`;
      }
    },
  });
}
package/src/anthropic.js DELETED
@@ -1,97 +0,0 @@
1
- import Anthropic from '@anthropic-ai/sdk';
2
-
3
- import BaseClient from './BaseClient.js';
4
- import { transformResponse } from './util.js';
5
-
const MODELS_URL = 'https://docs.anthropic.com/en/docs/about-claude/models';
const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';

/**
 * Client for the Anthropic (Claude) platform.
 */
export class AnthropicClient extends BaseClient {
  constructor(options) {
    super(options);
    this.client = new Anthropic({
      ...options,
    });
  }

  /**
   * Lists available models.
   * {@link https://docs.anthropic.com/en/docs/about-claude/models Documentation}
   */
  async models() {
    const { data } = await this.client.models.list();
    return data.map((o) => o.id);
  }

  /**
   * Runs a chat completion against the Anthropic messages API.
   * System-role messages are split out into the `system` parameter.
   */
  async getCompletion(options) {
    const {
      model = DEFAULT_MODEL,
      max_tokens = 2048,
      output = 'text',
      stream = false,
      messages,
    } = options;

    const { system, user } = splitMessages(messages);

    // An explicit null/empty model bypasses the destructuring default.
    if (!model) {
      throw new Error(
        `No model specified. Available models are here: ${MODELS_URL}.`,
      );
    }

    const response = await this.client.messages.create({
      max_tokens,
      messages: user,
      system,
      model,
      stream,
    });

    if (output === 'raw') {
      return response;
    }

    // @ts-ignore
    const message = response.content[0];

    return transformResponse({
      ...options,
      messages,
      message,
    });
  }

  // Maps SDK stream events onto the shared start/chunk/stop shape;
  // unrecognized event types are dropped (returns undefined).
  getStreamedChunk(chunk) {
    const EVENT_TYPES = new Map([
      ['content_block_start', 'start'],
      ['content_block_delta', 'chunk'],
      ['message_stop', 'stop'],
    ]);

    // @ts-ignore
    const type = EVENT_TYPES.get(chunk.type);

    if (type) {
      return {
        type,
        text: chunk.delta?.text || '',
      };
    }
  }
}
85
-
/**
 * Partitions messages into the shape the Anthropic API expects:
 * system-role messages collapse into a single `system` string while
 * all other roles remain message objects.
 * @param {Array<{role: string, content: string}>} messages
 * @returns {{system: string, user: Array}}
 */
function splitMessages(messages) {
  const system = [];
  const user = [];
  for (let message of messages) {
    if (message.role === 'system') {
      // Fixed: previously the whole message object was pushed, so the
      // join below produced "[object Object]" instead of prompt text.
      system.push(message.content);
    } else {
      user.push(message);
    }
  }
  return { system: system.join('\n'), user };
}
package/src/google.js DELETED
@@ -1,91 +0,0 @@
1
- import { GoogleGenerativeAI } from '@google/generative-ai';
2
-
3
- import BaseClient from './BaseClient.js';
4
- import { transformResponse } from './util.js';
5
-
const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp';

/**
 * Client for the Google (Gemini) platform.
 */
export class GoogleClient extends BaseClient {
  constructor(options) {
    super(options);
    const { apiKey } = options;
    this.client = new GoogleGenerativeAI(apiKey);
  }

  /**
   * Lists available models.
   * {@link https://ai.google.dev/gemini-api/docs/models/gemini#gemini-2.0-flash Documentation}
   */
  async models() {
    return [
      'gemini-2.0-flash-exp',
      'gemini-1.5-flash',
      'gemini-1.5-flash-8b',
      'gemini-1.5-pro',
    ];
  }

  /**
   * Runs a completion (or streamed completion) against the Gemini API.
   * Message contents are passed as an array of prompt strings.
   */
  async getCompletion(options) {
    const { model = DEFAULT_MODEL, output = 'text', stream = false } = options;

    const generator = this.client.getGenerativeModel({
      model,
    });

    // Fixed: reuse the messages already resolved by BaseClient.prompt
    // when present; previously they were always recomputed, rendering
    // the template a second time.
    const messages = options.messages || (await this.getMessages(options));

    const prompts = messages.map((message) => {
      return message.content;
    });

    let response;

    if (stream) {
      response = await generator.generateContentStream(prompts);
    } else {
      response = await generator.generateContent(prompts);
    }

    if (output === 'raw') {
      return response;
    }

    // @ts-ignore
    const parts = response.response.candidates.flatMap((candidate) => {
      return candidate.content.parts;
    });
    const [message] = parts;

    return transformResponse({
      ...options,
      messages,
      message,
    });
  }

  // The Google SDK wraps the async iterator in a `stream` property.
  async getStream(options) {
    const response = await super.getStream(options);
    // @ts-ignore
    return response.stream;
  }

  // Maps Gemini stream chunks onto the shared start/chunk/stop shape.
  getStreamedChunk(chunk, started) {
    const [candidate] = chunk.candidates;

    let type;
    if (!started) {
      type = 'start';
    } else if (candidate.finishReason === 'STOP') {
      type = 'stop';
    } else {
      type = 'chunk';
    }

    if (type) {
      return {
        type,
        text: candidate.content.parts[0].text || '',
      };
    }
  }
}
package/src/index.js DELETED
@@ -1,72 +0,0 @@
1
- import { OpenAiClient } from './openai.js';
2
- import { GoogleClient } from './google.js';
3
- import { AnthropicClient } from './anthropic.js';
4
- import { XAiClient } from './xai.js';
5
-
/**
 * Factory class: constructing a Client validates the options and
 * returns the platform-specific client implementation.
 * @throws {Error} When platform, templates, or apiKey is missing.
 */
export class Client {
  constructor(options = {}) {
    const { platform, templates, apiKey } = options;
    if (!platform) {
      throw new Error('No platform specified.');
    }
    if (!templates) {
      throw new Error('No templates directory specified.');
    }
    if (!apiKey) {
      throw new Error('No API key specified.');
    }
    return getClientForPlatform(options);
  }
}
18
-
/**
 * Aggregates one client per configured platform and dispatches calls
 * by the `platform` option. The first configured platform acts as the
 * default when no platform is specified.
 */
export class MultiClient {
  constructor(options) {
    this.clients = {};

    for (let { name, apiKey } of options.platforms) {
      const client = getClientForPlatform({
        ...options,
        platform: name,
        apiKey,
      });
      this.clients[name] = client;
      // Keyed by the literal string "undefined" so that lookups with
      // no platform fall back to the first configured client.
      this.clients[undefined] ||= client;
    }
  }

  prompt(options) {
    return this.getClient(options).prompt(options);
  }

  stream(options) {
    return this.getClient(options).stream(options);
  }

  buildTemplate(options) {
    return this.getClient(options).buildTemplate(options);
  }

  // Looks up the client for the requested platform, falling back to
  // the default when `platform` is undefined.
  getClient(options) {
    const { platform } = options;
    const client = this.clients[platform];
    if (!client) {
      throw new Error(`Platform "${platform}" not found.`);
    }
    return client;
  }
}
58
-
// Resolves the concrete client class for a platform name or alias.
// Returns undefined when no platform is given; throws on unknown names.
function getClientForPlatform(options) {
  switch (options.platform) {
    case 'openai':
    case 'gpt':
      return new OpenAiClient(options);
    case 'google':
    case 'gemini':
      return new GoogleClient(options);
    case 'anthropic':
    case 'claude':
      return new AnthropicClient(options);
    case 'xai':
    case 'grok':
      return new XAiClient(options);
    default:
      if (options.platform) {
        throw new Error(`Unknown platform "${options.platform}".`);
      }
  }
}