@bedrockio/ai 0.1.0 → 0.2.0

package/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ ## 0.2.0
+
+ - Added Gemini
+
  ## 0.1.0
 
  - Initial commit
package/README.md CHANGED
@@ -6,6 +6,10 @@ usage.
 
  - [Install](#install)
  - [Usage](#usage)
+ - [Streaming](#streaming)
+ - [Templates](#templates)
+ - [Platforms](#platforms)
+ - [Models](#models)
 
  ## Install
 
@@ -42,26 +46,28 @@ const response = await client.prompt({
    text: 'a long yellow fruit',
    fruit: 'banana, apple, pear',
  });
+ ```
+
+ ## Streaming
 
+ Responses may be streamed:
+
+ ```js
  // Stream the results
  const stream = await client.stream({
    file: 'classify-fruits',
-   // ...
  });
 
  // Will return an AsyncIterator
  for await (const chunk of stream) {
    console.info(chunk.text);
  }
-
- // List available models
- const models = await client.models();
  ```
 
  ## Templates
 
- Template files must have be markdown (`.md`) and live in your templates
- directory. They may be a simple text description or delineated roles:
+ Template files must be markdown (`.md`) and live in your templates directory.
+ They may be a simple text prompt or delineated roles:
 
  ````
  --- SYSTEM ---
@@ -82,3 +88,19 @@ Please provide your response as a JSON object containing:
  {{text}}
  ```
  ````
+
+ ## Platforms
+
+ Currently supported platforms:
+
+ - OpenAI (ChatGPT)
+ - Anthropic (Claude)
+ - Google (Gemini)
+
+ ## Models
+
+ Available models can be listed with:
+
+ ```js
+ const models = await client.models();
+ ```
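
Worth noting for consumers of the new streaming API: the chunks yielded by `stream()` are normalized objects of the shape `{ type, text }`, where `type` is `'start'`, `'chunk'`, or `'stop'`. A minimal sketch of assembling the full reply (the template name, directory, and key are placeholders):

```js
import { Client } from '@bedrockio/ai';

const client = new Client({
  platform: 'gemini',
  apiKey: process.env.GOOGLE_API_KEY,
  templates: './templates',
});

let text = '';
for await (const chunk of client.stream({ file: 'classify-fruits' })) {
  // chunk is { type: 'start' | 'chunk' | 'stop', text: string }
  text += chunk.text;
}
console.info(text);
```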
@@ -1,6 +1,6 @@
  let mock;
 
- function Anthropic() {
+ function MockAnthropicClient() {
    return {
      messages: {
        create(options) {
@@ -38,6 +38,6 @@ function wrapChunk(str, type) {
    };
  }
 
- Anthropic.setResponse = setResponse;
+ MockAnthropicClient.setResponse = setResponse;
 
- module.exports = Anthropic;
+ module.exports = MockAnthropicClient;
@@ -0,0 +1,59 @@
+ let mock;
+
+ class MockGoogleClient {
+   constructor() {
+     return {
+       getGenerativeModel() {
+         return {
+           generateContent() {
+             return mock;
+           },
+           generateContentStream() {
+             return {
+               stream: streamMock(),
+             };
+           },
+         };
+       },
+     };
+   }
+ }
+
+ function setResponse(data) {
+   mock = data;
+ }
+
+ async function* streamMock() {
+   const content = mock.response.candidates[0].content.parts[0].text;
+   const size = Math.floor(content.length / 3);
+   const one = content.slice(0, size);
+   const two = content.slice(size, 2 * size);
+   const three = content.slice(2 * size);
+   yield wrapChunk(one);
+   yield wrapChunk(two);
+   yield wrapChunk(three, true);
+ }
+
+ function wrapChunk(str, finish) {
+   return {
+     candidates: [
+       {
+         ...(finish && {
+           finishReason: 'STOP',
+         }),
+         content: {
+           parts: [
+             {
+               text: str,
+             },
+           ],
+         },
+       },
+     ],
+   };
+ }
+
+ module.exports = {
+   setResponse,
+   GoogleGenerativeAI: MockGoogleClient,
+ };
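
The mock mirrors just enough of the `@google/generative-ai` surface (`getGenerativeModel`, `generateContent`, `generateContentStream`) for tests to drive it through `setResponse`. A hypothetical Jest usage, assuming the mock is wired in place of the real SDK via `jest.mock` or `moduleNameMapper`:

```js
// Hypothetical test; resolves to the mock above, not the real SDK.
const { setResponse } = require('@google/generative-ai');
const { Client } = require('@bedrockio/ai');

test('prompt returns the mocked Gemini text', async () => {
  setResponse({
    response: {
      candidates: [{ content: { parts: [{ text: 'banana' }] } }],
    },
  });
  const client = new Client({
    platform: 'gemini',
    apiKey: 'test',
    templates: './templates', // placeholder; the inline template below is used
  });
  const result = await client.prompt({
    template: 'Classify: {{text}}',
    text: 'a long yellow fruit',
  });
  expect(result).toBe('banana');
});
```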
@@ -1,6 +1,6 @@
  let mock;
 
- function OpenAI() {
+ function MockOpenAiClient() {
    return {
      chat: {
        completions: {
@@ -43,6 +43,6 @@ function setResponse(data) {
    mock = data;
  }
 
- OpenAI.setResponse = setResponse;
+ MockOpenAiClient.setResponse = setResponse;
 
- module.exports = OpenAI;
+ module.exports = MockOpenAiClient;
package/dist/cjs/BaseClient.js ADDED
@@ -0,0 +1,130 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.default = void 0;
+ var _mustache = _interopRequireDefault(require("mustache"));
+ var _util = require("./util.js");
+ function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
+ const MESSAGES_REG = /(?:^|\n)-{3,}\s*(\w+)\s*-{3,}(.*?)(?=\n-{3,}|$)/gs;
+ class BaseClient {
+   constructor(options) {
+     this.options = options;
+     this.templates = null;
+   }
+
+   /**
+    * Interpolates vars into the provided template and
+    * runs the chat completion. The "output" option may
+    * be omitted and will default to `"text"`.
+    * {@link https://github.com/bedrockio/ai?tab=readme-ov-file#bedrockioai Documentation}
+    *
+    * @param {object} options
+    * @param {string} options.model - The model to use.
+    * @param {"raw" | "text" | "json" | "messages"} [options.output] - The output to use.
+    * @param {Object.<string, any>} [options.other] - Additional props
+    * will be interpolated in the template.
+    */
+   async prompt(options) {
+     options = {
+       ...this.options,
+       ...options
+     };
+     const messages = await this.getMessages(options);
+     return await this.getCompletion({
+       ...options,
+       messages
+     });
+   }
+
+   /**
+    * Streams the prompt response.
+    * @returns {AsyncIterator}
+    */
+   async *stream(options) {
+     const stream = await this.getStream(options);
+     let started = false;
+
+     // @ts-ignore
+     for await (const chunk of stream) {
+       const resolved = this.getStreamedChunk(chunk, started);
+       started = true;
+
+       // @ts-ignore
+       if (resolved) {
+         yield resolved;
+       }
+     }
+   }
+   async getMessages(options) {
+     const template = await this.resolveTemplate(options);
+     const raw = _mustache.default.render(template, transformParams(options));
+     const messages = [];
+     for (let match of raw.matchAll(MESSAGES_REG)) {
+       const [, role, content] = match;
+       messages.push({
+         role: role.toLowerCase(),
+         content: content.trim()
+       });
+     }
+     if (!messages.length) {
+       messages.push({
+         role: 'user',
+         content: raw.trim()
+       });
+     }
+     return messages;
+   }
+   async loadTemplates() {
+     const {
+       templates
+     } = this.options;
+     this.templates ||= await (0, _util.loadTemplates)(templates);
+   }
+   async resolveTemplate(options) {
+     await this.loadTemplates();
+     let {
+       file,
+       template
+     } = options;
+     if (!template && file) {
+       template = this.templates[file];
+     }
+     if (!template) {
+       throw new Error('No template provided.');
+     }
+     return template;
+   }
+   async getStream(options) {
+     return await this.prompt({
+       ...options,
+       output: 'raw',
+       stream: true
+     });
+   }
+   getCompletion(options) {
+     void options;
+     throw new Error('Method not implemented.');
+   }
+   getStreamedChunk(chunk, started) {
+     void chunk;
+     void started;
+     throw new Error('Method not implemented.');
+   }
+ }
+ exports.default = BaseClient;
+ function transformParams(params) {
+   const result = {};
+   for (let [key, value] of Object.entries(params)) {
+     if (Array.isArray(value)) {
+       value = value.map(el => {
+         return `- ${el}`;
+       }).join('\n');
+     } else if (typeof value === 'object') {
+       value = JSON.stringify(value, null, 2);
+     }
+     result[key] = value;
+   }
+   return result;
+ }
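
The template-to-messages flow is the heart of BaseClient: mustache interpolates the params (arrays become bullet lists and objects become pretty-printed JSON via `transformParams`), then `MESSAGES_REG` splits any `--- ROLE ---` sections into chat messages, falling back to a single user message. A standalone sketch of the role split (not package code):

```js
const MESSAGES_REG = /(?:^|\n)-{3,}\s*(\w+)\s*-{3,}(.*?)(?=\n-{3,}|$)/gs;

const rendered = `
--- SYSTEM ---
You classify fruits.
--- USER ---
a long yellow fruit
`;

const messages = [];
for (const [, role, content] of rendered.matchAll(MESSAGES_REG)) {
  messages.push({ role: role.toLowerCase(), content: content.trim() });
}
console.info(messages);
// [
//   { role: 'system', content: 'You classify fruits.' },
//   { role: 'user', content: 'a long yellow fruit' }
// ]
```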
package/dist/cjs/anthropic.js CHANGED
@@ -1,19 +1,101 @@
  "use strict";
 
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.AnthropicClient = void 0;
  var _sdk = _interopRequireDefault(require("@anthropic-ai/sdk"));
+ var _BaseClient = _interopRequireDefault(require("./BaseClient.js"));
+ var _util = require("./util.js");
  function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
- const client = new _sdk.default({
-   apiKey: process.env['ANTHROPIC_API_KEY'] // This is the default and can be omitted
- });
- async function main() {
-   const message = await client.messages.create({
-     max_tokens: 1024,
-     messages: [{
-       role: 'user',
-       content: 'Hello, Claude'
-     }],
-     model: 'claude-3-5-sonnet-latest'
-   });
-   console.log(message.content);
+ const MODELS_URL = 'https://docs.anthropic.com/en/docs/about-claude/models';
+ const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';
+ class AnthropicClient extends _BaseClient.default {
+   constructor(options) {
+     super(options);
+     this.client = new _sdk.default({
+       ...options
+     });
+   }
+
+   /**
+    * Lists available models.
+    * {@link https://docs.anthropic.com/en/docs/about-claude/models Documentation}
+    */
+   async models() {
+     const {
+       data
+     } = await this.client.models.list();
+     return data.map(o => o.id);
+   }
+   async getCompletion(options) {
+     const {
+       model = DEFAULT_MODEL,
+       max_tokens = 2048,
+       output = 'text',
+       stream = false,
+       messages
+     } = options;
+     const {
+       client
+     } = this;
+     const {
+       system,
+       user
+     } = splitMessages(messages);
+     if (!model) {
+       throw new Error(`No model specified. Available models are here: ${MODELS_URL}.`);
+     }
+     const response = await client.messages.create({
+       max_tokens,
+       messages: user,
+       system,
+       model,
+       stream
+     });
+     if (output === 'raw') {
+       return response;
+     }
+
+     // @ts-ignore
+     const message = response.content[0];
+     return (0, _util.transformResponse)({
+       ...options,
+       messages,
+       message
+     });
+   }
+   getStreamedChunk(chunk) {
+     // @ts-ignore
+     let type;
+     if (chunk.type === 'content_block_start') {
+       type = 'start';
+     } else if (chunk.type === 'content_block_delta') {
+       type = 'chunk';
+     } else if (chunk.type === 'message_stop') {
+       type = 'stop';
+     }
+     if (type) {
+       return {
+         type,
+         text: chunk.delta?.text || ''
+       };
+     }
+   }
  }
- main();
+ exports.AnthropicClient = AnthropicClient;
+ function splitMessages(messages) {
+   const system = [];
+   const user = [];
+   for (let message of messages) {
+     if (message.role === 'system') {
+       system.push(message);
+     } else {
+       user.push(message);
+     }
+   }
+   return {
+     // Join the message contents (not the objects) into the system string.
+     system: system.map(message => message.content).join('\n'),
+     user
+   };
+ }
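
Anthropic's API takes system instructions as a top-level string rather than as chat messages, which is what `splitMessages` accounts for; schematically:

```js
const messages = [
  { role: 'system', content: 'You classify fruits.' },
  { role: 'user', content: 'a long yellow fruit' },
];

// The system contents collapse to a string; user messages stay as an array.
const system = messages
  .filter((message) => message.role === 'system')
  .map((message) => message.content)
  .join('\n');
const user = messages.filter((message) => message.role !== 'system');

console.info(system); // 'You classify fruits.'
console.info(user); // [ { role: 'user', content: 'a long yellow fruit' } ]
```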
package/dist/cjs/google.js ADDED
@@ -0,0 +1,94 @@
+ "use strict";
+
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.GoogleClient = void 0;
+ var _generativeAi = require("@google/generative-ai");
+ var _BaseClient = _interopRequireDefault(require("./BaseClient.js"));
+ var _util = require("./util.js");
+ function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
+ const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp';
+ class GoogleClient extends _BaseClient.default {
+   constructor(options) {
+     super(options);
+     const {
+       apiKey
+     } = options;
+     this.client = new _generativeAi.GoogleGenerativeAI(apiKey);
+   }
+
+   /**
+    * Lists available models.
+    * {@link https://ai.google.dev/gemini-api/docs/models/gemini#gemini-2.0-flashl Documentation}
+    */
+   async models() {
+     return ['gemini-2.0-flash-exp', 'gemini-1.5-flash', 'gemini-1.5-flash-8b', 'gemini-1.5-pro'];
+   }
+   async getCompletion(options) {
+     const {
+       model = DEFAULT_MODEL,
+       output = 'text',
+       stream = false
+     } = options;
+     const {
+       client
+     } = this;
+     const generator = client.getGenerativeModel({
+       model
+     });
+     const messages = await this.getMessages(options);
+     const prompts = messages.map(message => {
+       return message.content;
+     });
+     let response;
+     if (stream) {
+       response = await generator.generateContentStream(prompts);
+     } else {
+       response = await generator.generateContent(prompts);
+     }
+     // const response = await client.chat.completions.create({
+     //   model,
+     //   messages,
+     //   stream,
+     // });
+
+     if (output === 'raw') {
+       return response;
+     }
+
+     // @ts-ignore
+     const parts = response.response.candidates.flatMap(candidate => {
+       return candidate.content.parts;
+     });
+     const [message] = parts;
+     return (0, _util.transformResponse)({
+       ...options,
+       messages,
+       message
+     });
+   }
+   async getStream(options) {
+     const response = await super.getStream(options);
+     // @ts-ignore
+     return response.stream;
+   }
+   getStreamedChunk(chunk, started) {
+     const [candidate] = chunk.candidates;
+     let type;
+     if (!started) {
+       type = 'start';
+     } else if (candidate.finishReason === 'STOP') {
+       type = 'stop';
+     } else {
+       type = 'chunk';
+     }
+     if (type) {
+       return {
+         type,
+         text: candidate.content.parts[0].text || ''
+       };
+     }
+   }
+ }
+ exports.GoogleClient = GoogleClient;
package/dist/cjs/index.js CHANGED
@@ -1,3 +1,29 @@
  "use strict";
 
- console.info('ohai');
+ Object.defineProperty(exports, "__esModule", {
+   value: true
+ });
+ exports.Client = void 0;
+ var _openai = require("./openai.js");
+ var _google = require("./google.js");
+ var _anthropic = require("./anthropic.js");
+ class Client {
+   constructor(options) {
+     const {
+       platform,
+       ...rest
+     } = options;
+     if (platform === 'openai' || platform === 'gpt') {
+       return new _openai.OpenAiClient(rest);
+     } else if (platform === 'google' || platform === 'gemini') {
+       return new _google.GoogleClient(rest);
+     } else if (platform === 'anthropic' || platform === 'claude') {
+       return new _anthropic.AnthropicClient(rest);
+     } else if (platform) {
+       throw new Error(`Unknown platform "${platform}".`);
+     } else {
+       throw new Error('Platform required.');
+     }
+   }
+ }
+ exports.Client = Client;
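
Note that `Client` is a factory: the constructor returns one of the platform-specific clients rather than a `Client` instance, and each platform answers to two aliases. A usage sketch (the key is a placeholder):

```js
const { Client } = require('@bedrockio/ai');

// 'openai'/'gpt', 'google'/'gemini', and 'anthropic'/'claude' are equivalent pairs.
const client = new Client({
  platform: 'claude',
  apiKey: process.env.ANTHROPIC_API_KEY,
});

console.info(client.constructor.name); // 'AnthropicClient'
```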
package/dist/cjs/openai.js CHANGED
@@ -5,45 +5,71 @@ Object.defineProperty(exports, "__esModule", {
  });
  exports.OpenAiClient = void 0;
  var _openai = _interopRequireDefault(require("openai"));
- var _Wrapper = _interopRequireDefault(require("./Wrapper.js"));
+ var _BaseClient = _interopRequireDefault(require("./BaseClient.js"));
  var _util = require("./util.js");
  function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
  const DEFAULT_MODEL = 'gpt-4o';
- class OpenAiClient extends _Wrapper.default {
+ class OpenAiClient extends _BaseClient.default {
    constructor(options) {
      super(options);
      this.client = new _openai.default({
        ...options
      });
    }
-   async prompt(options) {
-     const messages = await this.getMessages(options);
-     return await runCompletion(this.client, messages, options);
+
+   /**
+    * Lists available models.
+    * {@link https://platform.openai.com/docs/models Documentation}
+    */
+   async models() {
+     const {
+       data
+     } = await this.client.models.list();
+     return data.map(o => o.id);
    }
- }
- exports.OpenAiClient = OpenAiClient;
- async function runCompletion(client, messages, options) {
-   const {
-     output = 'text',
-     model = DEFAULT_MODEL
-   } = options;
-   const response = await client.chat.completions.create({
-     model,
-     messages
-   });
-   let content = response.choices[0].message.content;
-   if (output === 'raw') {
-     return response;
-   } else if (output === 'text') {
-     return content;
-   } else if (output === 'messages') {
+   async getCompletion(options) {
+     const {
+       model = DEFAULT_MODEL,
+       output = 'text',
+       stream = false
+     } = options;
+     const {
+       client
+     } = this;
+     const messages = await this.getMessages(options);
+     const response = await client.chat.completions.create({
+       model,
+       messages,
+       stream
+     });
+     if (output === 'raw') {
+       return response;
+     }
      const {
        message
      } = response.choices[0];
-     return [...messages, message];
-   } else if (output === 'json') {
-     return (0, _util.parse)(content);
-   } else {
-     throw new Error(`Unknown output type "${output}".`);
+     return (0, _util.transformResponse)({
+       ...options,
+       messages,
+       message
+     });
+   }
+   getStreamedChunk(chunk, started) {
+     const [choice] = chunk.choices;
+     let type;
+     if (!started) {
+       type = 'start';
+     } else if (choice.finish_reason === 'stop') {
+       type = 'stop';
+     } else {
+       type = 'chunk';
+     }
+     if (type) {
+       return {
+         type,
+         text: choice.delta.content || ''
+       };
+     }
    }
- }
+ }
+ exports.OpenAiClient = OpenAiClient;
package/dist/cjs/util.js CHANGED
@@ -5,6 +5,7 @@ Object.defineProperty(exports, "__esModule", {
  });
  exports.loadTemplates = loadTemplates;
  exports.parse = parse;
+ exports.transformResponse = transformResponse;
  var _promises = _interopRequireDefault(require("fs/promises"));
  var _path = _interopRequireDefault(require("path"));
  var _glob = require("glob");
@@ -26,4 +27,21 @@ function parse(content) {
    } catch (error) {
      throw new Error('Unable to derive JSON object in response.');
    }
+ }
+ function transformResponse(options) {
+   const {
+     output = 'text',
+     messages,
+     message
+   } = options;
+   const content = message.content || message.text;
+   if (output === 'text') {
+     return content;
+   } else if (output === 'messages') {
+     return [...messages, message];
+   } else if (output === 'json') {
+     return parse(content);
+   } else {
+     throw new Error(`Unknown output type "${output}".`);
+   }
  }
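
The new `transformResponse` helper centralizes the output handling that previously lived only in the OpenAI client, so all three platforms now honor the same `output` option. A usage sketch (template name, directory, and key are placeholders):

```js
import { Client } from '@bedrockio/ai';

const client = new Client({
  platform: 'gpt',
  apiKey: process.env.OPENAI_API_KEY,
  templates: './templates',
});

// 'text' (the default): the reply content as a string.
const text = await client.prompt({ file: 'classify-fruits' });

// 'json': the reply parsed into an object (throws if no JSON can be derived).
const data = await client.prompt({ file: 'classify-fruits', output: 'json' });

// 'messages': the original messages plus the assistant reply.
const thread = await client.prompt({ file: 'classify-fruits', output: 'messages' });

// 'raw': the unmodified platform response (streaming also uses this internally).
const raw = await client.prompt({ file: 'classify-fruits', output: 'raw' });
```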
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@bedrockio/ai",
-   "version": "0.1.0",
+   "version": "0.2.0",
    "description": "Bedrock wrapper for common AI chatbots.",
    "type": "module",
    "scripts": {
@@ -26,6 +26,7 @@
    },
    "dependencies": {
      "@anthropic-ai/sdk": "^0.33.1",
+     "@google/generative-ai": "^0.21.0",
      "glob": "^11.0.1",
      "mustache": "^4.2.0",
      "openai": "^4.79.1"
package/src/BaseClient.js CHANGED
@@ -40,11 +40,7 @@ export default class BaseClient {
   * @returns {AsyncIterator}
   */
  async *stream(options) {
-   const stream = await this.prompt({
-     ...options,
-     output: 'raw',
-     stream: true,
-   });
+   const stream = await this.getStream(options);
 
    let started = false;
 
@@ -104,6 +100,14 @@ export default class BaseClient {
    return template;
  }
 
+ async getStream(options) {
+   return await this.prompt({
+     ...options,
+     output: 'raw',
+     stream: true,
+   });
+ }
+
  getCompletion(options) {
    void options;
    throw new Error('Method not implemented.');
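
This refactor pulls the raw-stream request out of `stream()` into an overridable `getStream()`. The payoff is in the new Google client, which must unwrap its SDK's `{ stream }` envelope before iteration; the pattern, as a schematic sketch (not package code):

```js
import BaseClient from './BaseClient.js';

// Reshape what stream() iterates by overriding getStream()
// instead of reimplementing stream() itself.
class ExampleClient extends BaseClient {
  async getStream(options) {
    const response = await super.getStream(options);
    // e.g. Gemini returns { response, stream }; hand back only the iterable.
    return response.stream;
  }
}
```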
package/src/anthropic.js CHANGED
@@ -16,6 +16,7 @@ export class AnthropicClient extends BaseClient {
 
    /**
     * Lists available models.
+    * {@link https://docs.anthropic.com/en/docs/about-claude/models Documentation}
     */
    async models() {
      const { data } = await this.client.models.list();
package/src/google.js ADDED
@@ -0,0 +1,94 @@
+ import { GoogleGenerativeAI } from '@google/generative-ai';
+
+ import BaseClient from './BaseClient.js';
+ import { transformResponse } from './util.js';
+
+ const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp';
+
+ export class GoogleClient extends BaseClient {
+   constructor(options) {
+     super(options);
+     const { apiKey } = options;
+     this.client = new GoogleGenerativeAI(apiKey);
+   }
+
+   /**
+    * Lists available models.
+    * {@link https://ai.google.dev/gemini-api/docs/models/gemini#gemini-2.0-flashl Documentation}
+    */
+   async models() {
+     return [
+       'gemini-2.0-flash-exp',
+       'gemini-1.5-flash',
+       'gemini-1.5-flash-8b',
+       'gemini-1.5-pro',
+     ];
+   }
+
+   async getCompletion(options) {
+     const { model = DEFAULT_MODEL, output = 'text', stream = false } = options;
+     const { client } = this;
+
+     const generator = client.getGenerativeModel({ model });
+
+     const messages = await this.getMessages(options);
+
+     const prompts = messages.map((message) => {
+       return message.content;
+     });
+
+     let response;
+
+     if (stream) {
+       response = await generator.generateContentStream(prompts);
+     } else {
+       response = await generator.generateContent(prompts);
+     }
+     // const response = await client.chat.completions.create({
+     //   model,
+     //   messages,
+     //   stream,
+     // });
+
+     if (output === 'raw') {
+       return response;
+     }
+
+     // @ts-ignore
+     const parts = response.response.candidates.flatMap((candidate) => {
+       return candidate.content.parts;
+     });
+     const [message] = parts;
+
+     return transformResponse({
+       ...options,
+       messages,
+       message,
+     });
+   }
+   async getStream(options) {
+     const response = await super.getStream(options);
+     // @ts-ignore
+     return response.stream;
+   }
+
+   getStreamedChunk(chunk, started) {
+     const [candidate] = chunk.candidates;
+
+     let type;
+     if (!started) {
+       type = 'start';
+     } else if (candidate.finishReason === 'STOP') {
+       type = 'stop';
+     } else {
+       type = 'chunk';
+     }
+
+     if (type) {
+       return {
+         type,
+         text: candidate.content.parts[0].text || '',
+       };
+     }
+   }
+ }
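
One behavioral caveat worth knowing: GoogleClient flattens the parsed messages to bare strings before calling the SDK, so role information (e.g. a SYSTEM section) is not forwarded to Gemini as such; schematically:

```js
const messages = [
  { role: 'system', content: 'You classify fruits.' },
  { role: 'user', content: 'a long yellow fruit' },
];

// What generateContent()/generateContentStream() actually receive:
const prompts = messages.map((message) => message.content);
console.info(prompts); // [ 'You classify fruits.', 'a long yellow fruit' ]
```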
package/src/index.js CHANGED
@@ -1,4 +1,5 @@
  import { OpenAiClient } from './openai.js';
+ import { GoogleClient } from './google.js';
  import { AnthropicClient } from './anthropic.js';
 
  export class Client {
@@ -6,6 +7,8 @@ export class Client {
      const { platform, ...rest } = options;
      if (platform === 'openai' || platform === 'gpt') {
        return new OpenAiClient(rest);
+     } else if (platform === 'google' || platform === 'gemini') {
+       return new GoogleClient(rest);
      } else if (platform === 'anthropic' || platform === 'claude') {
        return new AnthropicClient(rest);
      } else if (platform) {
package/src/openai.js CHANGED
@@ -15,6 +15,7 @@ export class OpenAiClient extends BaseClient {
 
    /**
     * Lists available models.
+    * {@link https://platform.openai.com/docs/models Documentation}
     */
    async models() {
      const { data } = await this.client.models.list();
package/types/BaseClient.d.ts ADDED
@@ -0,0 +1,39 @@
+ export default class BaseClient {
+   constructor(options: any);
+   options: any;
+   templates: any;
+   /**
+    * Interpolates vars into the provided template and
+    * runs the chat completion. The "output" option may
+    * be omitted and will default to `"text"`.
+    * {@link https://github.com/bedrockio/ai?tab=readme-ov-file#bedrockioai Documentation}
+    *
+    * @param {object} options
+    * @param {string} options.model - The model to use.
+    * @param {"raw" | "text" | "json" | "messages"} [options.output] - The output to use.
+    * @param {Object.<string, any>} [options.other] - Additional props
+    * will be interpolated in the template.
+    */
+   prompt(options: {
+     model: string;
+     output?: "raw" | "text" | "json" | "messages";
+     other?: {
+       [x: string]: any;
+     };
+   }): Promise<void>;
+   /**
+    * Streams the prompt response.
+    * @returns {AsyncIterator}
+    */
+   stream(options: any): AsyncIterator<any, any, any>;
+   getMessages(options: any): Promise<{
+     role: any;
+     content: any;
+   }[]>;
+   loadTemplates(): Promise<void>;
+   resolveTemplate(options: any): Promise<any>;
+   getStream(options: any): Promise<void>;
+   getCompletion(options: any): void;
+   getStreamedChunk(chunk: any, started: any): void;
+ }
+ //# sourceMappingURL=BaseClient.d.ts.map
package/types/BaseClient.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"BaseClient.d.ts","sourceRoot":"","sources":["../src/BaseClient.js"],"names":[],"mappings":"AAMA;IACE,0BAGC;IAFC,aAAsB;IACtB,eAAqB;IAGvB;;;;;;;;;;;OAWG;IACH,gBALG;QAAwB,KAAK,EAArB,MAAM;QACyC,MAAM,GAArD,KAAK,GAAG,MAAM,GAAG,MAAM,GAAG,UAAU;QACL,KAAK,GAApC;gBAAQ,MAAM,GAAE,GAAG;SAAC;KAE9B,iBAYA;IAED;;;OAGG;IACH,mDAeC;IAED;;;SAqBC;IAED,+BAGC;IAED,4CAcC;IAED,uCAMC;IAED,kCAGC;IAED,iDAIC;CACF"}
package/types/anthropic.d.ts ADDED
@@ -0,0 +1,16 @@
+ export class AnthropicClient extends BaseClient {
+   client: Anthropic;
+   /**
+    * Lists available models.
+    * {@link https://docs.anthropic.com/en/docs/about-claude/models Documentation}
+    */
+   models(): Promise<string[]>;
+   getCompletion(options: any): Promise<any>;
+   getStreamedChunk(chunk: any): {
+     type: string;
+     text: any;
+   };
+ }
+ import BaseClient from './BaseClient.js';
+ import Anthropic from '@anthropic-ai/sdk';
+ //# sourceMappingURL=anthropic.d.ts.map
package/types/anthropic.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../src/anthropic.js"],"names":[],"mappings":"AAQA;IAGI,kBAEE;IAGJ;;;OAGG;IACH,4BAGC;IAED,0CAsCC;IAED;;;MAiBC;CACF;uBAjFsB,iBAAiB;sBAFlB,mBAAmB"}
package/types/google.d.ts ADDED
@@ -0,0 +1,17 @@
+ export class GoogleClient extends BaseClient {
+   client: GoogleGenerativeAI;
+   /**
+    * Lists available models.
+    * {@link https://ai.google.dev/gemini-api/docs/models/gemini#gemini-2.0-flashl Documentation}
+    */
+   models(): Promise<string[]>;
+   getCompletion(options: any): Promise<any>;
+   getStream(options: any): Promise<any>;
+   getStreamedChunk(chunk: any, started: any): {
+     type: string;
+     text: any;
+   };
+ }
+ import BaseClient from './BaseClient.js';
+ import { GoogleGenerativeAI } from '@google/generative-ai';
+ //# sourceMappingURL=google.d.ts.map
package/types/google.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../src/google.js"],"names":[],"mappings":"AAOA;IAII,2BAA4C;IAG9C;;;OAGG;IACH,4BAOC;IAED,0CAwCC;IACD,sCAIC;IAED;;;MAkBC;CACF;uBA3FsB,iBAAiB;mCAFL,uBAAuB"}
package/types/index.d.ts CHANGED
@@ -1 +1,4 @@
+ export class Client {
+   constructor(options: any);
+ }
  //# sourceMappingURL=index.d.ts.map
package/types/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.js"],"names":[],"mappings":""}
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.js"],"names":[],"mappings":"AAIA;IACE,0BAaC;CACF"}
package/types/openai.d.ts ADDED
@@ -0,0 +1,16 @@
+ export class OpenAiClient extends BaseClient {
+   client: OpenAI;
+   /**
+    * Lists available models.
+    * {@link https://platform.openai.com/docs/models Documentation}
+    */
+   models(): Promise<string[]>;
+   getCompletion(options: any): Promise<any>;
+   getStreamedChunk(chunk: any, started: any): {
+     type: string;
+     text: any;
+   };
+ }
+ import BaseClient from './BaseClient.js';
+ import OpenAI from 'openai';
+ //# sourceMappingURL=openai.d.ts.map
package/types/openai.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../src/openai.js"],"names":[],"mappings":"AAOA;IAGI,eAEE;IAGJ;;;OAGG;IACH,4BAGC;IAED,0CAsBC;IAED;;;MAkBC;CACF;uBAjEsB,iBAAiB;mBAFrB,QAAQ"}
package/types/util.d.ts ADDED
@@ -0,0 +1,4 @@
+ export function loadTemplates(dir: any): Promise<{}>;
+ export function parse(content: any): any;
+ export function transformResponse(options: any): any;
+ //# sourceMappingURL=util.d.ts.map
package/types/util.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"util.d.ts","sourceRoot":"","sources":["../src/util.js"],"names":[],"mappings":"AAQA,qDAUC;AAED,yCAOC;AAED,qDAYC"}
package/dist/cjs/Wrapper.js DELETED
@@ -1,63 +0,0 @@
- "use strict";
-
- Object.defineProperty(exports, "__esModule", {
-   value: true
- });
- exports.default = void 0;
- var _mustache = _interopRequireDefault(require("mustache"));
- var _util = require("./util.js");
- function _interopRequireDefault(e) { return e && e.__esModule ? e : { default: e }; }
- const MESSAGES_REG = /(?:^|\n)-{3,}\s*(\w+)\s*-{3,}(.*?)(?=\n-{3,}|$)/gs;
- class Wrapper {
-   constructor(options) {
-     this.options = options;
-   }
-   async getMessages(options) {
-     const template = await this.resolveTemplate(options);
-     const raw = _mustache.default.render(template, transformParams(options));
-     const messages = [];
-     for (let match of raw.matchAll(MESSAGES_REG)) {
-       const [, role, content] = match;
-       messages.push({
-         role: role.toLowerCase(),
-         content: content.trim()
-       });
-     }
-     return messages;
-   }
-   async loadTemplates() {
-     const {
-       templates
-     } = this.options;
-     this.templates ||= await (0, _util.loadTemplates)(templates);
-   }
-   async resolveTemplate(options) {
-     await this.loadTemplates();
-     let {
-       file,
-       template
-     } = options;
-     if (!template && file) {
-       template = this.templates[file];
-     }
-     if (!template) {
-       throw new Error('No template provided.');
-     }
-     return template;
-   }
- }
- exports.default = Wrapper;
- function transformParams(params) {
-   const result = {};
-   for (let [key, value] of Object.entries(params)) {
-     if (Array.isArray(value)) {
-       value = value.map(el => {
-         return `- ${el}`;
-       }).join('\n');
-     } else if (typeof value === 'object') {
-       value = JSON.stringify(value, null, 2);
-     }
-     result[key] = value;
-   }
-   return result;
- }