@bedrockio/ai 0.7.2 → 0.8.0

This diff reflects the changes between package versions as published to one of the supported public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,14 @@
+ ## 0.8.0
+
+ - Simplified return values.
+ - Restored basic Gemini functionality.
+ - Normalize options inputs.
+ - Normalize stream event outputs.
+
+ ## 0.7.3
+
+ - Allow debug on individual calls.
+
  ## 0.7.2

  - Handling long filenames.
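
The "Simplified return values" entry is the breaking change of this release: the `raw`, `text`, and `messages` output modes are gone, and a prompt now always resolves to a single object carrying the parsed result, the raw provider response, the updated message history, and normalized usage. A sketch of the new shape, assuming a constructed client and a public `prompt()` method (the diffs below show the new return statement but not the public method name):

```js
const { result, response, messages, usage } = await client.prompt({
  input: 'Summarize this changelog.',
});
// result   -> parsed text or JSON (the old default return value)
// response -> the raw provider response (replaces output: 'raw')
// messages -> history including the assistant reply (replaces output: 'messages')
// usage    -> normalized token counts: { input_tokens, output_tokens }
```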
@@ -26,10 +26,7 @@ class BaseClient {
  const { output, stream, schema } = options;
  const response = await this.runPrompt(options);
  if (!stream) {
- this.debug('Response:', response);
- }
- if (output === 'raw') {
- return response;
+ this.debug('Response:', response, options);
  }
  let result;
  if (schema) {
@@ -45,15 +42,11 @@ class BaseClient {
  else {
  result = (0, code_js_1.parseCode)(this.getTextResponse(response));
  }
- if (output === 'messages') {
- return {
- result,
- ...this.getMessagesResponse(response, options),
- };
- }
- else {
- return result;
- }
+ return {
+ result,
+ response,
+ ...this.normalizeResponse(response, options),
+ };
  }
  /**
  * Streams the prompt response.
@@ -68,8 +61,8 @@ class BaseClient {
  const stream = await this.runStream(options);
  // @ts-ignore
  for await (let event of stream) {
- this.debug('Event:', event);
- event = this.normalizeStreamEvent(event);
+ this.debug('Event:', event, options);
+ event = this.normalizeStreamEvent(event, options);
  if (event) {
  yield event;
  }
@@ -91,7 +84,7 @@ class BaseClient {
  key,
  };
  }
- this.debug('Extract:', extractEvent);
+ this.debug('Extract:', extractEvent, options);
  yield extractEvent;
  }
  }
@@ -136,15 +129,17 @@ class BaseClient {
  /**
  * @returns {Object}
  */
- getMessagesResponse(input, response) {
+ normalizeResponse(response, options) {
  void response;
+ void options;
  throw new Error('Method not implemented.');
  }
  /**
  * @returns {Object}
  */
- normalizeStreamEvent(event) {
+ normalizeStreamEvent(event, options) {
  void event;
+ void options;
  throw new Error('Method not implemented.');
  }
  // Private
@@ -160,7 +155,18 @@ class BaseClient {
  };
  }
  normalizeInputs(options) {
- const { template, params, output = 'text' } = options;
+ const { output = 'text' } = options;
+ let { system, messages } = this.normalizeTemplateMessages(options);
+ if (output === 'json') {
+ system = [system, 'Output only valid JSON.'].join('\n\n');
+ }
+ return {
+ system,
+ messages: [...messages, ...this.normalizeOptionsMessages(options)],
+ };
+ }
+ normalizeTemplateMessages(options) {
+ const { template, params } = options;
  const { sections } = this.renderer.run({
  params,
  template,
@@ -183,23 +189,25 @@ class BaseClient {
  ];
  }
  }
- messages = [...messages, ...this.normalizeInput(options)];
- if (output === 'json') {
- system = [system, 'Output only valid JSON.'].join('\n\n');
- }
+ system = system.trim();
  return {
  system,
  messages,
  };
  }
- normalizeInput(options) {
- const { input = '' } = options;
- return [
- {
- role: 'user',
- content: input,
- },
- ];
+ normalizeOptionsMessages(options) {
+ const input = options.input || options.messages;
+ if (Array.isArray(input)) {
+ return input;
+ }
+ else {
+ return [
+ {
+ role: 'user',
+ content: input || '',
+ },
+ ];
+ }
  }
  normalizeSchema(options) {
  let { schema } = options;
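
The new normalizeOptionsMessages above means a prompt can now be seeded either with a plain `input` string or a full `messages` array. A short sketch of the two call shapes, assuming a constructed `client` and a public `prompt()` method (suggested by `PromptOptions` and `runPrompt`, not spelled out in this diff):

```js
// Single-turn: a string becomes one user message.
await client.prompt({ input: 'Hello!' });
// -> messages: [{ role: 'user', content: 'Hello!' }]

// Multi-turn: an array is passed through as-is,
// appended after any template-derived messages.
await client.prompt({
  messages: [
    { role: 'user', content: 'Hello!' },
    { role: 'assistant', content: 'Hi! How can I help?' },
    { role: 'user', content: 'Tell me a joke.' },
  ],
});
```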
@@ -237,8 +245,8 @@ class BaseClient {
  }
  };
  }
- debug(message, arg) {
- if (this.options.debug) {
+ debug(message, arg, options) {
+ if (options.debug) {
  // TODO: replace with logger when opentelemetry is removed
  // eslint-disable-next-line
  console.debug(`${message}\n${JSON.stringify(arg, null, 2)}\n`);
@@ -252,7 +260,7 @@ exports.default = BaseClient;
  * @property {string} [model] - The model to use.
  * @property {boolean} stream - Stream response.
  * @property {Object} [schema] - A JSON schema compatible object that defines the output shape.
- * @property {"raw" | "text" | "json" | "messages"} [output] - The return value type.
+ * @property {"text" | "json"} [output] - The result output type.
  * @property {Object} [params] - Params to be interpolated into the template.
  * May also be passed as additional props to options.
  */
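
Per the 0.7.3 changelog entry, debug now keys off the per-call options rather than client state (see the `debug(message, arg, options)` change above). A usage sketch, with `client` assumed:

```js
// Logs params, the raw response, and stream events for this call only.
await client.prompt({
  input: 'Hello!',
  debug: true,
});
```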
@@ -22,31 +22,30 @@ class AnthropicClient extends BaseClient_js_1.default {
  return data.map((o) => o.id);
  }
  async runPrompt(options) {
- const { input, model, temperature, instructions, stream = false, tokens = DEFAULT_TOKENS, } = options;
+ const { model, system, messages, temperature, stream = false, tokens = DEFAULT_TOKENS, } = options;
  const params = {
  model,
  stream,
+ system,
+ messages,
  temperature,
  max_tokens: tokens,
- system: instructions,
- messages: input,
  ...this.getToolOptions(options),
  };
  const clientOptions = this.getClientOptions(params);
- this.debug('Params:', params);
- this.debug('Options:', options);
+ this.debug('Params:', params, options);
+ this.debug('Options:', options, options);
  // @ts-ignore
  return await this.client.messages.create(params, clientOptions);
  }
  async runStream(options) {
  return await this.runPrompt({
  ...options,
- output: 'raw',
  stream: true,
  });
  }
  getTextResponse(response) {
- const textBlock = response.content.find((block) => {
+ const textBlock = response?.content.find((block) => {
  return block.type === 'text';
  });
  return textBlock?.text || null;
@@ -57,7 +56,7 @@ class AnthropicClient extends BaseClient_js_1.default {
  });
  return toolBlock?.input || null;
  }
- getMessagesResponse(response, options) {
+ normalizeResponse(response, options) {
  const { messages } = options;
  return {
  messages: [
@@ -73,24 +72,41 @@ class AnthropicClient extends BaseClient_js_1.default {
  };
  }),
  ],
+ usage: this.normalizeUsage(response),
  };
  }
- normalizeStreamEvent(event) {
+ normalizeUsage(response) {
+ return {
+ input_tokens: response.usage.input_tokens,
+ output_tokens: response.usage.output_tokens,
+ };
+ }
+ normalizeStreamEvent(event, options) {
  let { type } = event;
+ options.buffer ||= '';
  if (type === 'content_block_start') {
  return {
  type: 'start',
  };
  }
- else if (type === 'content_block_stop') {
+ else if (type === 'content_block_delta') {
+ options.buffer += event.delta.text;
  return {
- type: 'stop',
+ type: 'delta',
+ delta: event.delta.text,
  };
  }
- else if (type === 'content_block_delta') {
+ else if (type === 'message_delta') {
  return {
- type: 'delta',
- text: event.delta.text,
+ type: 'stop',
+ messages: [
+ ...options.messages,
+ {
+ role: 'assistant',
+ content: options.buffer,
+ },
+ ],
+ usage: this.normalizeUsage(event),
  };
  }
  }
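
Stream events are now normalized to the same `start` / `delta` / `stop` vocabulary across providers, with incremental text under `delta` (previously `text`) and the final event carrying the accumulated messages and usage. A consumption sketch; the public streaming method name is not visible in this diff, so `stream()` here is an assumption standing in for the generator shown in BaseClient above:

```js
for await (const event of client.stream({ input: 'Hello!' })) {
  if (event.type === 'delta') {
    process.stdout.write(event.delta); // incremental text
  } else if (event.type === 'stop') {
    console.log(event.messages); // history including the assistant reply
    console.log(event.usage); // { input_tokens, output_tokens }
  }
}
```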
@@ -4,14 +4,16 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.GoogleClient = void 0;
- const generative_ai_1 = require("@google/generative-ai");
+ const genai_1 = require("@google/genai");
  const BaseClient_js_1 = __importDefault(require("./BaseClient.js"));
- const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp';
+ const DEFAULT_MODEL = 'gemini-2.5-flash';
  class GoogleClient extends BaseClient_js_1.default {
  constructor(options) {
  super(options);
  const { apiKey } = options;
- this.client = new generative_ai_1.GoogleGenerativeAI(apiKey);
+ this.client = new genai_1.GoogleGenAI({
+ apiKey,
+ });
  }
  /**
  * Lists available models.
@@ -19,64 +21,97 @@ class GoogleClient extends BaseClient_js_1.default {
  */
  async models() {
  return [
- 'gemini-2.0-flash-exp',
- 'gemini-1.5-flash',
- 'gemini-1.5-flash-8b',
+ // Gemini 3 (Nov 2025)
+ 'gemini-3-pro-preview',
+ // Gemini 2.5
+ 'gemini-2.5-pro',
+ 'gemini-2.5-flash',
+ // Gemini 2.0
+ 'gemini-2.0-flash',
+ // Gemini 1.5 (legacy but still available)
  'gemini-1.5-pro',
+ 'gemini-1.5-flash',
  ];
  }
- async getCompletion(options) {
- const { model = DEFAULT_MODEL, output = 'text', stream = false } = options;
- const { client } = this;
- const generator = client.getGenerativeModel({
- model,
- });
- // @ts-ignore
- const messages = await this.getMessages(options);
- const prompts = messages.map((message) => {
- return message.content;
- });
- let response;
- if (stream) {
- response = await generator.generateContentStream(prompts);
- }
- else {
- response = await generator.generateContent(prompts);
- }
- if (output === 'raw') {
- return response;
- }
- // @ts-ignore
- const parts = response.response.candidates.flatMap((candidate) => {
- return candidate.content.parts;
+ async runPrompt(options) {
+ const { model = DEFAULT_MODEL, messages, system } = options;
+ const contents = messages.map((message) => {
+ const { role, content } = message;
+ return {
+ role,
+ parts: [
+ {
+ text: content,
+ },
+ ],
+ };
  });
- const [message] = parts;
- return message;
+ const params = {
+ model,
+ contents,
+ ...(system && {
+ config: {
+ systemInstruction: system,
+ },
+ }),
+ };
+ return await this.client.models.generateContent(params);
+ }
+ runStream(options) {
+ const params = this.getParams(options);
+ return this.client.models.generateContentStream(params);
+ }
+ getTextResponse(response) {
+ return response.text;
+ }
+ normalizeResponse(response, options) {
+ const { messages } = options;
+ return {
+ messages: [
+ ...messages,
+ {
+ role: 'assistant',
+ content: response.text,
+ },
+ ],
+ usage: this.normalizeUsage(response),
+ };
  }
- async getStream(options) {
- // @ts-ignore
- const response = await super.getStream(options);
- // @ts-ignore
- return response.stream;
+ normalizeUsage(response) {
+ return {
+ input_tokens: response.usageMetadata.promptTokenCount,
+ output_tokens: response.usageMetadata.candidatesTokenCount,
+ };
  }
- getStreamedChunk(chunk, started) {
- const [candidate] = chunk.candidates;
- let type;
- if (!started) {
- type = 'start';
- }
- else if (candidate.finishReason === 'STOP') {
- type = 'stop';
- }
- else {
- type = 'chunk';
- }
- if (type) {
+ getParams(options) {
+ const { model = DEFAULT_MODEL, messages, system } = options;
+ const contents = messages.map((message) => {
+ const { role, content } = message;
  return {
- type,
- text: candidate.content.parts[0].text || '',
+ role,
+ parts: [
+ {
+ text: content,
+ },
+ ],
  };
- }
+ });
+ return {
+ model,
+ contents,
+ ...(system && {
+ config: {
+ systemInstruction: system,
+ },
+ }),
+ };
+ }
+ normalizeStreamEvent(event) {
+ // Note Gemini doesn't provide different events, only a single GenerateContentResponse.
+ return {
+ type: 'delta',
+ delta: event.text,
+ };
  }
  }
  exports.GoogleClient = GoogleClient;
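
"Restored basic Gemini functionality" is this migration from the deprecated @google/generative-ai SDK to @google/genai. The new code path reduces to a direct generateContent call; a minimal standalone sketch of that SDK usage (the env var name is an assumption):

```js
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
const response = await ai.models.generateContent({
  model: 'gemini-2.5-flash',
  contents: [{ role: 'user', parts: [{ text: 'Hello!' }] }],
  config: { systemInstruction: 'Answer tersely.' },
});
// The new SDK exposes output directly; no more response.response.candidates.
console.log(response.text);
```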
@@ -58,7 +58,7 @@ class OpenAiClient extends BaseClient_js_1.default {
  verbosity,
  },
  };
- this.debug('Params:', params);
+ this.debug('Params:', params, options);
  // @ts-ignore
  return await this.client.responses.create(params);
  }
@@ -69,7 +69,7 @@ class OpenAiClient extends BaseClient_js_1.default {
  });
  }
  getTextResponse(response) {
- return response.output_text;
+ return response?.output_text;
  }
  getStructuredResponse(response) {
  // Note here that certain cases (tool usage etc)
@@ -90,7 +90,7 @@ class OpenAiClient extends BaseClient_js_1.default {
  const last = outputs[outputs.length - 1];
  return JSON.parse(last.text);
  }
- getMessagesResponse(response, options) {
+ normalizeResponse(response, options) {
  const { messages } = options;
  return {
  messages: [
@@ -103,6 +103,13 @@ class OpenAiClient extends BaseClient_js_1.default {
  // Note that this ability currently only
  // exists for OpenAI compatible providers.
  prevResponseId: response.id,
+ usage: this.normalizeUsage(response),
+ };
+ }
+ normalizeUsage(response) {
+ return {
+ input_tokens: response.usage.input_tokens,
+ output_tokens: response.usage.output_tokens,
  };
  }
  // Private
@@ -128,7 +135,7 @@ class OpenAiClient extends BaseClient_js_1.default {
  };
  }
  }
- normalizeStreamEvent(event) {
+ normalizeStreamEvent(event, options) {
  const { type } = event;
  if (type === 'response.created') {
  return {
@@ -137,10 +144,20 @@ class OpenAiClient extends BaseClient_js_1.default {
  };
  }
  else if (type === 'response.completed') {
+ const output = event.response.output.find((item) => {
+ return item.type === 'message';
+ });
  return {
  type: 'stop',
  id: event.response.id,
- usage: event.response.usage,
+ messages: [
+ ...options.messages,
+ {
+ role: 'assistant',
+ content: output?.content[0].text,
+ },
+ ],
+ usage: this.normalizeUsage(event.response),
  };
  }
  else if (type === 'response.output_text.delta') {
@@ -149,12 +166,6 @@ class OpenAiClient extends BaseClient_js_1.default {
  delta: event.delta,
  };
  }
- else if (type === 'response.output_text.done') {
- return {
- type: 'done',
- text: event.text,
- };
- }
  }
  }
  exports.OpenAiClient = OpenAiClient;
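
With the normalizeUsage additions, all three providers now report token usage in one shape instead of their native fields. A sketch, with `client` assumed:

```js
const { usage } = await client.prompt({ input: 'Hello!' });
console.log(usage.input_tokens, usage.output_tokens);
// Anthropic: from response.usage.input_tokens / output_tokens
// OpenAI:    from response.usage.input_tokens / output_tokens
// Google:    from usageMetadata.promptTokenCount / candidatesTokenCount
```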
@@ -3,6 +3,9 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.parseCode = parseCode;
  const CODE_REG = /^```\w*(.+)```/s;
  function parseCode(content) {
+ if (!content) {
+ return '';
+ }
  const match = content.trim().match(CODE_REG);
  if (match) {
  content = match[1].trim();
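
parseCode strips a surrounding markdown code fence from model output, and the guard added above makes it tolerate empty input instead of throwing on `.trim()`. A behavior sketch of the function above:

```js
// Built with repeat() only to avoid nesting backtick fences in this example.
const fenced = '`'.repeat(3) + 'json\n{"a": 1}\n' + '`'.repeat(3);
parseCode(fenced); // -> '{"a": 1}'
parseCode('plain text'); // -> 'plain text' (no fence to strip)
parseCode(null); // -> '' (new in 0.8.0; previously threw)
```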
package/src/BaseClient.js CHANGED
@@ -24,10 +24,7 @@ export default class BaseClient {
  const { output, stream, schema } = options;
  const response = await this.runPrompt(options);
  if (!stream) {
- this.debug('Response:', response);
- }
- if (output === 'raw') {
- return response;
+ this.debug('Response:', response, options);
  }
  let result;
  if (schema) {
@@ -43,15 +40,11 @@ export default class BaseClient {
  else {
  result = parseCode(this.getTextResponse(response));
  }
- if (output === 'messages') {
- return {
- result,
- ...this.getMessagesResponse(response, options),
- };
- }
- else {
- return result;
- }
+ return {
+ result,
+ response,
+ ...this.normalizeResponse(response, options),
+ };
  }
  /**
  * Streams the prompt response.
@@ -66,8 +59,8 @@ export default class BaseClient {
  const stream = await this.runStream(options);
  // @ts-ignore
  for await (let event of stream) {
- this.debug('Event:', event);
- event = this.normalizeStreamEvent(event);
+ this.debug('Event:', event, options);
+ event = this.normalizeStreamEvent(event, options);
  if (event) {
  yield event;
  }
@@ -89,7 +82,7 @@ export default class BaseClient {
  key,
  };
  }
- this.debug('Extract:', extractEvent);
+ this.debug('Extract:', extractEvent, options);
  yield extractEvent;
  }
  }
@@ -134,15 +127,17 @@ export default class BaseClient {
  /**
  * @returns {Object}
  */
- getMessagesResponse(input, response) {
+ normalizeResponse(response, options) {
  void response;
+ void options;
  throw new Error('Method not implemented.');
  }
  /**
  * @returns {Object}
  */
- normalizeStreamEvent(event) {
+ normalizeStreamEvent(event, options) {
  void event;
+ void options;
  throw new Error('Method not implemented.');
  }
  // Private
@@ -158,7 +153,18 @@ export default class BaseClient {
  };
  }
  normalizeInputs(options) {
- const { template, params, output = 'text' } = options;
+ const { output = 'text' } = options;
+ let { system, messages } = this.normalizeTemplateMessages(options);
+ if (output === 'json') {
+ system = [system, 'Output only valid JSON.'].join('\n\n');
+ }
+ return {
+ system,
+ messages: [...messages, ...this.normalizeOptionsMessages(options)],
+ };
+ }
+ normalizeTemplateMessages(options) {
+ const { template, params } = options;
  const { sections } = this.renderer.run({
  params,
  template,
@@ -181,23 +187,25 @@ export default class BaseClient {
  ];
  }
  }
- messages = [...messages, ...this.normalizeInput(options)];
- if (output === 'json') {
- system = [system, 'Output only valid JSON.'].join('\n\n');
- }
+ system = system.trim();
  return {
  system,
  messages,
  };
  }
- normalizeInput(options) {
- const { input = '' } = options;
- return [
- {
- role: 'user',
- content: input,
- },
- ];
+ normalizeOptionsMessages(options) {
+ const input = options.input || options.messages;
+ if (Array.isArray(input)) {
+ return input;
+ }
+ else {
+ return [
+ {
+ role: 'user',
+ content: input || '',
+ },
+ ];
+ }
  }
  normalizeSchema(options) {
  let { schema } = options;
@@ -235,8 +243,8 @@ export default class BaseClient {
  }
  };
  }
- debug(message, arg) {
- if (this.options.debug) {
+ debug(message, arg, options) {
+ if (options.debug) {
  // TODO: replace with logger when opentelemetry is removed
  // eslint-disable-next-line
  console.debug(`${message}\n${JSON.stringify(arg, null, 2)}\n`);
@@ -249,7 +257,7 @@ export default class BaseClient {
  * @property {string} [model] - The model to use.
  * @property {boolean} stream - Stream response.
  * @property {Object} [schema] - A JSON schema compatible object that defines the output shape.
- * @property {"raw" | "text" | "json" | "messages"} [output] - The return value type.
+ * @property {"text" | "json"} [output] - The result output type.
  * @property {Object} [params] - Params to be interpolated into the template.
  * May also be passed as additional props to options.
  */
package/src/anthropic.js CHANGED
@@ -16,31 +16,30 @@ export class AnthropicClient extends BaseClient {
  return data.map((o) => o.id);
  }
  async runPrompt(options) {
- const { input, model, temperature, instructions, stream = false, tokens = DEFAULT_TOKENS, } = options;
+ const { model, system, messages, temperature, stream = false, tokens = DEFAULT_TOKENS, } = options;
  const params = {
  model,
  stream,
+ system,
+ messages,
  temperature,
  max_tokens: tokens,
- system: instructions,
- messages: input,
  ...this.getToolOptions(options),
  };
  const clientOptions = this.getClientOptions(params);
- this.debug('Params:', params);
- this.debug('Options:', options);
+ this.debug('Params:', params, options);
+ this.debug('Options:', options, options);
  // @ts-ignore
  return await this.client.messages.create(params, clientOptions);
  }
  async runStream(options) {
  return await this.runPrompt({
  ...options,
- output: 'raw',
  stream: true,
  });
  }
  getTextResponse(response) {
- const textBlock = response.content.find((block) => {
+ const textBlock = response?.content.find((block) => {
  return block.type === 'text';
  });
  return textBlock?.text || null;
@@ -51,7 +50,7 @@ export class AnthropicClient extends BaseClient {
  });
  return toolBlock?.input || null;
  }
- getMessagesResponse(response, options) {
+ normalizeResponse(response, options) {
  const { messages } = options;
  return {
  messages: [
@@ -67,24 +66,41 @@ export class AnthropicClient extends BaseClient {
  };
  }),
  ],
+ usage: this.normalizeUsage(response),
  };
  }
- normalizeStreamEvent(event) {
+ normalizeUsage(response) {
+ return {
+ input_tokens: response.usage.input_tokens,
+ output_tokens: response.usage.output_tokens,
+ };
+ }
+ normalizeStreamEvent(event, options) {
  let { type } = event;
+ options.buffer ||= '';
  if (type === 'content_block_start') {
  return {
  type: 'start',
  };
  }
- else if (type === 'content_block_stop') {
+ else if (type === 'content_block_delta') {
+ options.buffer += event.delta.text;
  return {
- type: 'stop',
+ type: 'delta',
+ delta: event.delta.text,
  };
  }
- else if (type === 'content_block_delta') {
+ else if (type === 'message_delta') {
  return {
- type: 'delta',
- text: event.delta.text,
+ type: 'stop',
+ messages: [
+ ...options.messages,
+ {
+ role: 'assistant',
+ content: options.buffer,
+ },
+ ],
+ usage: this.normalizeUsage(event),
  };
  }
  }
package/src/google.js CHANGED
@@ -1,11 +1,13 @@
- import { GoogleGenerativeAI } from '@google/generative-ai';
+ import { GoogleGenAI } from '@google/genai';
  import BaseClient from './BaseClient.js';
- const DEFAULT_MODEL = 'models/gemini-2.0-flash-exp';
+ const DEFAULT_MODEL = 'gemini-2.5-flash';
  export class GoogleClient extends BaseClient {
  constructor(options) {
  super(options);
  const { apiKey } = options;
- this.client = new GoogleGenerativeAI(apiKey);
+ this.client = new GoogleGenAI({
+ apiKey,
+ });
  }
  /**
  * Lists available models.
@@ -13,63 +15,96 @@ export class GoogleClient extends BaseClient {
  */
  async models() {
  return [
- 'gemini-2.0-flash-exp',
- 'gemini-1.5-flash',
- 'gemini-1.5-flash-8b',
+ // Gemini 3 (Nov 2025)
+ 'gemini-3-pro-preview',
+ // Gemini 2.5
+ 'gemini-2.5-pro',
+ 'gemini-2.5-flash',
+ // Gemini 2.0
+ 'gemini-2.0-flash',
+ // Gemini 1.5 (legacy but still available)
  'gemini-1.5-pro',
+ 'gemini-1.5-flash',
  ];
  }
- async getCompletion(options) {
- const { model = DEFAULT_MODEL, output = 'text', stream = false } = options;
- const { client } = this;
- const generator = client.getGenerativeModel({
- model,
- });
- // @ts-ignore
- const messages = await this.getMessages(options);
- const prompts = messages.map((message) => {
- return message.content;
- });
- let response;
- if (stream) {
- response = await generator.generateContentStream(prompts);
- }
- else {
- response = await generator.generateContent(prompts);
- }
- if (output === 'raw') {
- return response;
- }
- // @ts-ignore
- const parts = response.response.candidates.flatMap((candidate) => {
- return candidate.content.parts;
+ async runPrompt(options) {
+ const { model = DEFAULT_MODEL, messages, system } = options;
+ const contents = messages.map((message) => {
+ const { role, content } = message;
+ return {
+ role,
+ parts: [
+ {
+ text: content,
+ },
+ ],
+ };
  });
- const [message] = parts;
- return message;
+ const params = {
+ model,
+ contents,
+ ...(system && {
+ config: {
+ systemInstruction: system,
+ },
+ }),
+ };
+ return await this.client.models.generateContent(params);
+ }
+ runStream(options) {
+ const params = this.getParams(options);
+ return this.client.models.generateContentStream(params);
+ }
+ getTextResponse(response) {
+ return response.text;
+ }
+ normalizeResponse(response, options) {
+ const { messages } = options;
+ return {
+ messages: [
+ ...messages,
+ {
+ role: 'assistant',
+ content: response.text,
+ },
+ ],
+ usage: this.normalizeUsage(response),
+ };
  }
- async getStream(options) {
- // @ts-ignore
- const response = await super.getStream(options);
- // @ts-ignore
- return response.stream;
+ normalizeUsage(response) {
+ return {
+ input_tokens: response.usageMetadata.promptTokenCount,
+ output_tokens: response.usageMetadata.candidatesTokenCount,
+ };
  }
- getStreamedChunk(chunk, started) {
- const [candidate] = chunk.candidates;
- let type;
- if (!started) {
- type = 'start';
- }
- else if (candidate.finishReason === 'STOP') {
- type = 'stop';
- }
- else {
- type = 'chunk';
- }
- if (type) {
+ getParams(options) {
+ const { model = DEFAULT_MODEL, messages, system } = options;
+ const contents = messages.map((message) => {
+ const { role, content } = message;
  return {
- type,
- text: candidate.content.parts[0].text || '',
+ role,
+ parts: [
+ {
+ text: content,
+ },
+ ],
  };
- }
+ });
+ return {
+ model,
+ contents,
+ ...(system && {
+ config: {
+ systemInstruction: system,
+ },
+ }),
+ };
+ }
+ normalizeStreamEvent(event) {
+ // Note Gemini doesn't provide different events, only a single GenerateContentResponse.
+ return {
+ type: 'delta',
+ delta: event.text,
+ };
  }
  }
package/src/openai.js CHANGED
@@ -52,7 +52,7 @@ export class OpenAiClient extends BaseClient {
  verbosity,
  },
  };
- this.debug('Params:', params);
+ this.debug('Params:', params, options);
  // @ts-ignore
  return await this.client.responses.create(params);
  }
@@ -63,7 +63,7 @@ export class OpenAiClient extends BaseClient {
  });
  }
  getTextResponse(response) {
- return response.output_text;
+ return response?.output_text;
  }
  getStructuredResponse(response) {
  // Note here that certain cases (tool usage etc)
@@ -84,7 +84,7 @@ export class OpenAiClient extends BaseClient {
  const last = outputs[outputs.length - 1];
  return JSON.parse(last.text);
  }
- getMessagesResponse(response, options) {
+ normalizeResponse(response, options) {
  const { messages } = options;
  return {
  messages: [
@@ -97,6 +97,13 @@ export class OpenAiClient extends BaseClient {
  // Note that this ability currently only
  // exists for OpenAI compatible providers.
  prevResponseId: response.id,
+ usage: this.normalizeUsage(response),
+ };
+ }
+ normalizeUsage(response) {
+ return {
+ input_tokens: response.usage.input_tokens,
+ output_tokens: response.usage.output_tokens,
  };
  }
  // Private
@@ -122,7 +129,7 @@ export class OpenAiClient extends BaseClient {
  };
  }
  }
- normalizeStreamEvent(event) {
+ normalizeStreamEvent(event, options) {
  const { type } = event;
  if (type === 'response.created') {
  return {
@@ -131,10 +138,20 @@ export class OpenAiClient extends BaseClient {
  };
  }
  else if (type === 'response.completed') {
+ const output = event.response.output.find((item) => {
+ return item.type === 'message';
+ });
  return {
  type: 'stop',
  id: event.response.id,
- usage: event.response.usage,
+ messages: [
+ ...options.messages,
+ {
+ role: 'assistant',
+ content: output?.content[0].text,
+ },
+ ],
+ usage: this.normalizeUsage(event.response),
  };
  }
  else if (type === 'response.output_text.delta') {
@@ -143,12 +160,6 @@ export class OpenAiClient extends BaseClient {
  delta: event.delta,
  };
  }
- else if (type === 'response.output_text.done') {
- return {
- type: 'done',
- text: event.text,
- };
- }
  }
  }
  // Categories
package/src/utils/code.js CHANGED
@@ -1,5 +1,8 @@
  const CODE_REG = /^```\w*(.+)```/s;
  export function parseCode(content) {
+ if (!content) {
+ return '';
+ }
  const match = content.trim().match(CODE_REG);
  if (match) {
  content = match[1].trim();
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@bedrockio/ai",
- "version": "0.7.2",
+ "version": "0.8.0",
  "description": "Bedrock wrapper for common AI chatbots.",
  "type": "module",
  "scripts": {
@@ -35,9 +35,9 @@
  "url": "https://github.com/bedrockio/router"
  },
  "dependencies": {
- "@anthropic-ai/sdk": "^0.66.0",
+ "@anthropic-ai/sdk": "^0.71.2",
  "@bedrockio/templates": "^0.3.1",
- "@google/generative-ai": "^0.21.0",
+ "@google/genai": "^1.34.0",
  "openai": "^6.3.0",
  "partial-json": "^0.1.7"
  },
package/types/BaseClient.d.ts CHANGED
@@ -32,11 +32,11 @@ export default class BaseClient {
  /**
  * @returns {Object}
  */
- getMessagesResponse(input: any, response: any): any;
+ normalizeResponse(response: any, options: any): any;
  /**
  * @returns {Object}
  */
- normalizeStreamEvent(event: any): any;
+ normalizeStreamEvent(event: any, options: any): any;
  /**
  * @returns {Object}
  */
@@ -45,16 +45,17 @@ export default class BaseClient {
  system: string;
  messages: any[];
  };
- normalizeInput(options: any): {
- role: string;
- content: any;
- }[];
+ normalizeTemplateMessages(options: any): {
+ system: string;
+ messages: any[];
+ };
+ normalizeOptionsMessages(options: any): any[];
  normalizeSchema(options: any): {
  schema: any;
  hasWrappedSchema: boolean;
  };
  getMessageExtractor(options: any): (event: any) => any;
- debug(message: any, arg: any): void;
+ debug(message: any, arg: any, options: any): void;
  }
  export type PromptOptions = {
  /**
@@ -74,9 +75,9 @@ export type PromptOptions = {
  */
  schema?: any;
  /**
- * - The return value type.
+ * - The result output type.
  */
- output?: "raw" | "text" | "json" | "messages";
+ output?: "text" | "json";
  /**
  * - Params to be interpolated into the template.
  * May also be passed as additional props to options.
package/types/BaseClient.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"BaseClient.d.ts","sourceRoot":"","sources":["../src/BaseClient.js"],"names":[],"mappings":"AAKA;IACE,0BASC;IARC,aAIC;IACD,2BAEE;IAKJ;;;;;OAKG;IACH,gBAFW,aAAa,gBAuCvB;IAED;;;;;OAKG;IACH,gBAHW,aAAa,GAAG,aAAa,gCAsDvC;IAED;;;;OAIG;IACH,wBAFW,MAAM,OAIhB;IAID,8BAGC;IAED,8BAGC;IAED,qCAGC;IAED;;OAEG;IACH,0CAGC;IAED;;OAEG;IACH,oDAGC;IAED;;OAEG;IACH,sCAGC;IAID;;OAEG;IACH,oCAOC;IAED;;;MAuCC;IAED;;;QAQC;IAED;;;MA4BC;IAED,uDAWC;IAED,oCAMC;CACF;;;;;WAIa,MAAM,GAAC,aAAa,EAAE;;;;YACtB,MAAM;;;;YACN,OAAO;;;;;;;;aAEP,KAAK,GAAG,MAAM,GAAG,MAAM,GAAG,UAAU;;;;;;;;;;;sBAOpC,MAAM;;;UAKN,QAAQ,GAAG,MAAM,GAAG,WAAW;aAC/B,MAAM;;iCArTa,sBAAsB"}
+ {"version":3,"file":"BaseClient.d.ts","sourceRoot":"","sources":["../src/BaseClient.js"],"names":[],"mappings":"AAKA;IACE,0BASC;IARC,aAIC;IACD,2BAEE;IAKJ;;;;;OAKG;IACH,gBAFW,aAAa,gBAgCvB;IAED;;;;;OAKG;IACH,gBAHW,aAAa,GAAG,aAAa,gCAsDvC;IAED;;;;OAIG;IACH,wBAFW,MAAM,OAIhB;IAID,8BAGC;IAED,8BAGC;IAED,qCAGC;IAED;;OAEG;IACH,0CAGC;IAED;;OAEG;IACH,oDAIC;IAED;;OAEG;IACH,oDAIC;IAID;;OAEG;IACH,oCAOC;IAED;;;MAaC;IAED;;;MAmCC;IAED,8CAaC;IAED;;;MA4BC;IAED,uDAWC;IAED,kDAMC;CACF;;;;;WAIa,MAAM,GAAC,aAAa,EAAE;;;;YACtB,MAAM;;;;YACN,OAAO;;;;;;;;aAEP,MAAM,GAAG,MAAM;;;;;;;;;;;sBAOf,MAAM;;;UAKN,QAAQ,GAAG,MAAM,GAAG,WAAW;aAC/B,MAAM;;iCAhUa,sBAAsB"}
package/types/anthropic.d.ts CHANGED
@@ -13,15 +13,35 @@ export class AnthropicClient extends BaseClient {
  _request_id?: string | null;
  } & import("@anthropic-ai/sdk/core/streaming.js").Stream<Anthropic.Messages.RawMessageStreamEvent>>;
  getTextResponse(response: any): any;
- getMessagesResponse(response: any, options: any): {
+ normalizeResponse(response: any, options: any): {
  messages: any[];
+ usage: {
+ input_tokens: any;
+ output_tokens: any;
+ };
+ };
+ normalizeUsage(response: any): {
+ input_tokens: any;
+ output_tokens: any;
  };
- normalizeStreamEvent(event: any): {
+ normalizeStreamEvent(event: any, options: any): {
+ type: string;
+ delta?: undefined;
+ messages?: undefined;
+ usage?: undefined;
+ } | {
  type: string;
- text?: undefined;
+ delta: any;
+ messages?: undefined;
+ usage?: undefined;
  } | {
  type: string;
- text: any;
+ messages: any[];
+ usage: {
+ input_tokens: any;
+ output_tokens: any;
+ };
+ delta?: undefined;
  };
  getToolOptions(options: any): {
  tools: any;
package/types/anthropic.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../src/anthropic.js"],"names":[],"mappings":"AAMA;IACE,6BAA2C;IAIzC,kBAAoC;IAGtC;;;OAGG;IACH,4BAGC;IAED;;wGA2BC;IAED;;wGAMC;IAED,oCAKC;IASD;;MAiBC;IAED;;;;;;MAgBC;IAID;;;;;;;;;;MAsCC;IAID;;;;MAOC;IAED;;;;MAQC;CACF;uBA3KsB,iBAAiB;sBAFlB,mBAAmB"}
+ {"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../src/anthropic.js"],"names":[],"mappings":"AAMA;IACE,6BAA2C;IAIzC,kBAAoC;IAGtC;;;OAGG;IACH,4BAGC;IAED;;wGA2BC;IAED;;wGAKC;IAED,oCAKC;IASD;;;;;;MAkBC;IAED;;;MAKC;IAED;;;;;;;;;;;;;;;;;;MA0BC;IAID;;;;;;;;;;MAsCC;IAID;;;;MAOC;IAED;;;;MAQC;CACF;uBA5LsB,iBAAiB;sBAFlB,mBAAmB"}
package/types/google.d.ts CHANGED
@@ -1,17 +1,36 @@
  export class GoogleClient extends BaseClient {
- client: GoogleGenerativeAI;
+ client: GoogleGenAI;
  /**
  * Lists available models.
  * {@link https://ai.google.dev/gemini-api/docs/models/gemini#gemini-2.0-flashl Documentation}
  */
  models(): Promise<string[]>;
- getCompletion(options: any): Promise<any>;
- getStream(options: any): Promise<any>;
- getStreamedChunk(chunk: any, started: any): {
+ runPrompt(options: any): Promise<import("@google/genai").GenerateContentResponse>;
+ runStream(options: any): Promise<AsyncGenerator<import("@google/genai").GenerateContentResponse, any, any>>;
+ getTextResponse(response: any): any;
+ normalizeResponse(response: any, options: any): {
+ messages: any[];
+ usage: {
+ input_tokens: any;
+ output_tokens: any;
+ };
+ };
+ normalizeUsage(response: any): {
+ input_tokens: any;
+ output_tokens: any;
+ };
+ getParams(options: any): {
+ config: {
+ systemInstruction: any;
+ };
+ model: any;
+ contents: any;
+ };
+ normalizeStreamEvent(event: any): {
  type: string;
- text: any;
+ delta: any;
  };
  }
  import BaseClient from './BaseClient.js';
- import { GoogleGenerativeAI } from '@google/generative-ai';
+ import { GoogleGenAI } from '@google/genai';
  //# sourceMappingURL=google.d.ts.map
package/types/google.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../src/google.js"],"names":[],"mappings":"AAMA;IAII,2BAA4C;IAG9C;;;OAGG;IACH,4BAOC;IAED,0CAkCC;IACD,sCAKC;IAED;;;MAkBC;CACF;uBArFsB,iBAAiB;mCAFL,uBAAuB"}
+ {"version":3,"file":"google.d.ts","sourceRoot":"","sources":["../src/google.js"],"names":[],"mappings":"AAMA;IAII,oBAEE;IAGJ;;;OAGG;IACH,4BAgBC;IAED,kFA0BC;IAED,4GAGC;IAED,oCAEC;IAED;;;;;;MAYC;IAED;;;MAKC;IAED;;;;;;MAwBC;IAED;;;MAMC;CACF;uBA9HsB,iBAAiB;4BAFZ,eAAe"}
package/types/openai.d.ts CHANGED
@@ -14,9 +14,17 @@ export class OpenAiClient extends BaseClient {
  _request_id?: string | null;
  } & import("openai/core/streaming.js").Stream<OpenAI.Responses.ResponseStreamEvent>>;
  getTextResponse(response: any): any;
- getMessagesResponse(response: any, options: any): {
+ normalizeResponse(response: any, options: any): {
  messages: any[];
  prevResponseId: any;
+ usage: {
+ input_tokens: any;
+ output_tokens: any;
+ };
+ };
+ normalizeUsage(response: any): {
+ input_tokens: any;
+ output_tokens: any;
  };
  getOutputFormat(options: any): {
  type: string;
@@ -29,30 +37,27 @@ export class OpenAiClient extends BaseClient {
  strict: boolean;
  schema: any;
  };
- normalizeStreamEvent(event: any): {
+ normalizeStreamEvent(event: any, options: any): {
  type: string;
  id: any;
+ messages?: undefined;
  usage?: undefined;
  delta?: undefined;
- text?: undefined;
  } | {
  type: string;
  id: any;
- usage: any;
+ messages: any[];
+ usage: {
+ input_tokens: any;
+ output_tokens: any;
+ };
  delta?: undefined;
- text?: undefined;
  } | {
  type: string;
  delta: any;
  id?: undefined;
+ messages?: undefined;
  usage?: undefined;
- text?: undefined;
- } | {
- type: string;
- text: any;
- id?: undefined;
- usage?: undefined;
- delta?: undefined;
  };
  }
  export type OpenAICategory = any | "all" | "general" | "reasoning" | "lightweight" | "moderation" | "embedding" | "speech" | "audio" | "image" | "code" | "legacy";
package/types/openai.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../src/openai.js"],"names":[],"mappings":"AAIA;IACE,6BAAoC;IAIlC,eAAiC;IAGnC;;;;OAIG;IACH,kBAHW,cAAc,qBAgCxB;IAED;;yFAiCC;IAED;;yFAKC;IAED,oCAEC;IAwBD;;;MAcC;IAID;;;;;;;;;;MAmBC;IAED;;;;;;;;;;;;;;;;;;;;;;;;MAyBC;CACF;6BAQA,GAAC,GAAK,KAAK,GACL,SAAS,GACT,WAAW,GACX,aAAa,GACb,YAAY,GACZ,WAAW,GACX,QAAQ,GACR,OAAO,GACP,OAAO,GACP,MAAM,GACN,QAAQ;uBArMQ,iBAAiB;mBAFrB,QAAQ"}
+ {"version":3,"file":"openai.d.ts","sourceRoot":"","sources":["../src/openai.js"],"names":[],"mappings":"AAIA;IACE,6BAAoC;IAIlC,eAAiC;IAGnC;;;;OAIG;IACH,kBAHW,cAAc,qBAgCxB;IAED;;yFAiCC;IAED;;yFAKC;IAED,oCAEC;IAwBD;;;;;;;MAeC;IAED;;;MAKC;IAID;;;;;;;;;;MAmBC;IAED;;;;;;;;;;;;;;;;;;;;;MA8BC;CACF;6BAQA,GAAC,GAAK,KAAK,GACL,SAAS,GACT,WAAW,GACX,aAAa,GACb,YAAY,GACZ,WAAW,GACX,QAAQ,GACR,OAAO,GACP,OAAO,GACP,MAAM,GACN,QAAQ;uBAlNQ,iBAAiB;mBAFrB,QAAQ"}
package/types/utils/code.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"code.d.ts","sourceRoot":"","sources":["../../src/utils/code.js"],"names":[],"mappings":"AAEA,6CAMC"}
+ {"version":3,"file":"code.d.ts","sourceRoot":"","sources":["../../src/utils/code.js"],"names":[],"mappings":"AAEA,6CAUC"}