@adobe/spacecat-shared-gpt-client 1.2.21 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,17 @@
1
+ # [@adobe/spacecat-shared-gpt-client-v1.3.0](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-gpt-client-v1.2.22...@adobe/spacecat-shared-gpt-client-v1.3.0) (2024-11-12)
2
+
3
+
4
+ ### Features
5
+
6
+ * SITES-26591 [Import Assistant] Expand GPT client to user chat endpoint ([#436](https://github.com/adobe/spacecat-shared/issues/436)) ([4aaca2e](https://github.com/adobe/spacecat-shared/commit/4aaca2ede0fb9b019c8c88c2f7afd396065088b1))
7
+
8
+ # [@adobe/spacecat-shared-gpt-client-v1.2.22](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-gpt-client-v1.2.21...@adobe/spacecat-shared-gpt-client-v1.2.22) (2024-11-11)
9
+
10
+
11
+ ### Bug Fixes
12
+
13
+ * **deps:** update external fixes ([#434](https://github.com/adobe/spacecat-shared/issues/434)) ([b71b615](https://github.com/adobe/spacecat-shared/commit/b71b61528513821f9e34c50a095d47cb4e14f8db))
14
+
1
15
  # [@adobe/spacecat-shared-gpt-client-v1.2.21](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-gpt-client-v1.2.20...@adobe/spacecat-shared-gpt-client-v1.2.21) (2024-11-08)
2
16
 
3
17
 
package/README.md CHANGED
@@ -42,7 +42,12 @@ try {
42
42
 
43
43
  ### Fetching Insights
44
44
 
45
+ #### Via Capability Execution endpoint
46
+
45
47
  ```javascript
48
+ /**
49
+ * Fetch insights using Firefall's capability execution endpoint.
50
+ */
46
51
  async function fetchInsights(prompt) {
47
52
  try {
48
53
  const client = FirefallClient.createFrom({
@@ -58,7 +63,7 @@ async function fetchInsights(prompt) {
58
63
  log: console,
59
64
  });
60
65
 
61
- const insights = await client.fetch(prompt);
66
+ const insights = await client.fetchCapabilityExecution(prompt);
62
67
  console.log('Insights:', insights);
63
68
  } catch (error) {
64
69
  console.error('Failed to fetch insights:', error.message);
@@ -68,6 +73,41 @@ async function fetchInsights(prompt) {
68
73
  fetchInsights('How can we improve customer satisfaction?');
69
74
  ```
70
75
 
76
+ #### Via Chat Completions endpoint
77
+
78
+ ```javascript
79
+ /**
80
+ * Fetch completions using Firefall's chat completions endpoint.
81
+ */
82
+ async function fetchCompletions(prompt) {
83
+ try {
84
+ const client = FirefallClient.createFrom({
85
+ env: {
86
+ FIREFALL_API_ENDPOINT: 'https://api.firefall.example.com',
87
+ FIREFALL_API_KEY: 'yourApiKey',
88
+ IMS_HOST: 'ims.example.com',
89
+ IMS_CLIENT_ID: 'yourClientId',
90
+ IMS_CLIENT_CODE: 'yourClientCode',
91
+ IMS_CLIENT_SECRET: 'yourClientSecret',
92
+ },
93
+ log: console,
94
+ });
95
+ const options = {
96
+ imageUrls: ['data:image/png;base64,iVBORw0KGgoAAAA...='],
97
+ model: 'gpt-4-vision',
98
+ responseFormat: undefined,
99
+ };
100
+
101
+ const response = await client.fetchChatCompletion(prompt, options);
102
+ console.log('Response:', JSON.stringify(response));
103
+ } catch (error) {
104
+ console.error('Failed to fetch chat completion:', error.message);
105
+ }
106
+ }
107
+
108
+ fetchCompletions('Identify all food items in this image');
109
+ ```
110
+
71
111
  Ensure that you replace `'path/to/firefall-client'` with the actual path to the `FirefallClient` class in your project and adjust the configuration parameters according to your Firefall API credentials.
72
112
 
73
113
  ## Testing
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@adobe/spacecat-shared-gpt-client",
3
- "version": "1.2.21",
3
+ "version": "1.3.0",
4
4
  "description": "Shared modules of the Spacecat Services - GPT Client",
5
5
  "type": "module",
6
6
  "engines": {
@@ -37,12 +37,12 @@
37
37
  "@adobe/fetch": "4.1.9",
38
38
  "@adobe/helix-universal": "5.0.6",
39
39
  "@adobe/spacecat-shared-ims-client": "1.3.21",
40
- "@adobe/spacecat-shared-utils": "1.22.3"
40
+ "@adobe/spacecat-shared-utils": "1.22.4"
41
41
  },
42
42
  "devDependencies": {
43
43
  "chai": "5.1.2",
44
44
  "chai-as-promised": "8.0.0",
45
- "nock": "13.5.5",
45
+ "nock": "13.5.6",
46
46
  "sinon": "19.0.2",
47
47
  "sinon-chai": "4.0.0",
48
48
  "typescript": "5.6.3"
@@ -16,7 +16,14 @@ import { hasText, isObject, isValidUrl } from '@adobe/spacecat-shared-utils';
16
16
 
17
17
  import { fetch as httpFetch } from '../utils.js';
18
18
 
19
- function validateFirefallResponse(response) {
19
+ const USER_ROLE_IMAGE_URL_TYPE = 'image_url';
20
+ const USER_ROLE_TEXT_TYPE = 'text';
21
+ const SYSTEM_ROLE = 'system';
22
+ const USER_ROLE = 'user';
23
+ const AZURE_CHAT_OPENAI_LLM_TYPE = 'azure_chat_openai';
24
+ const JSON_OBJECT_RESPONSE_FORMAT = 'json_object';
25
+
26
+ function validateCapabilityExecutionResponse(response) {
20
27
  return !(!isObject(response)
21
28
  || !Array.isArray(response.generations)
22
29
  || response.generations.length === 0
@@ -25,6 +32,17 @@ function validateFirefallResponse(response) {
25
32
  || !hasText(response.generations[0][0].text));
26
33
  }
27
34
 
35
+ function validateChatCompletionResponse(response) {
36
+ return isObject(response)
37
+ && Array.isArray(response?.choices)
38
+ && response.choices.length > 0
39
+ && response.choices[0]?.message;
40
+ }
41
+
42
+ function isBase64UrlImage(base64String) {
43
+ return base64String.startsWith('data:image') && base64String.endsWith('=') && base64String.includes('base64');
44
+ }
45
+
28
46
  export default class FirefallClient {
29
47
  static createFrom(context) {
30
48
  const { log = console } = context;
@@ -89,15 +107,10 @@ export default class FirefallClient {
89
107
  this.log.debug(`${message}: took ${duration}ms`);
90
108
  }
91
109
 
92
- async #submitJob(prompt) {
110
+ async #submitPrompt(body, path) {
93
111
  const apiAuth = await this.#getApiAuth();
94
112
 
95
- const body = JSON.stringify({
96
- input: prompt,
97
- capability_name: this.config.capabilityName,
98
- });
99
-
100
- const url = createUrl(`${this.config.apiEndpoint}/v2/capability_execution/job`);
113
+ const url = createUrl(`${this.config.apiEndpoint}${path}`);
101
114
  const headers = {
102
115
  'Content-Type': 'application/json',
103
116
  Authorization: `Bearer ${apiAuth}`,
@@ -121,7 +134,7 @@ export default class FirefallClient {
121
134
  }
122
135
 
123
136
  /* eslint-disable no-await-in-loop */
124
- async #pollJobStatus(jobId) {
137
+ async #pollJobStatus(jobId, path) {
125
138
  const apiAuth = await this.#getApiAuth();
126
139
 
127
140
  let jobStatusResponse;
@@ -130,7 +143,7 @@ export default class FirefallClient {
130
143
  (resolve) => { setTimeout(resolve, this.config.pollInterval); },
131
144
  ); // Wait for 2 seconds before polling
132
145
 
133
- const url = `${this.config.apiEndpoint}/v2/capability_execution/job/${jobId}`;
146
+ const url = `${this.config.apiEndpoint}${path}/${jobId}`;
134
147
  const headers = {
135
148
  Authorization: `Bearer ${apiAuth}`,
136
149
  'x-api-key': this.config.apiKey,
@@ -161,23 +174,130 @@ export default class FirefallClient {
161
174
  return jobStatusResponse;
162
175
  }
163
176
 
164
- async fetch(prompt) {
177
+ /**
178
+ * Fetches data from Firefall Chat Completion API.
179
+ * @param prompt The text prompt to provide to Firefall
180
+ * @param options The options for the call, with optional properties:
181
+ * - imageUrls: An array of URLs of the images to provide to Firefall
182
+ * - model: LLM Model to use (default: gpt-4-turbo). Use 'gpt-4-vision' with images.
183
+ * - responseFormat: The response format to request from Firefall (accepts: json_object)
184
+ * @returns {Object} - AI response
185
+ */
186
+ async fetchChatCompletion(prompt, options = {}) {
187
+ const { imageUrls, responseFormat, model: llmModel = 'gpt-4-turbo' } = options || {};
188
+ const hasImageUrls = imageUrls && imageUrls.length > 0;
189
+
190
+ const getBody = () => {
191
+ const userRole = {
192
+ role: USER_ROLE,
193
+ content: [
194
+ {
195
+ type: USER_ROLE_TEXT_TYPE,
196
+ text: prompt,
197
+ },
198
+ ],
199
+ };
200
+
201
+ if (hasImageUrls) {
202
+ imageUrls
203
+ .filter((iu) => isValidUrl(iu) || isBase64UrlImage(iu))
204
+ .forEach((imageUrl) => {
205
+ userRole.content.push({
206
+ type: USER_ROLE_IMAGE_URL_TYPE,
207
+ image_url: {
208
+ url: imageUrl,
209
+ },
210
+ });
211
+ });
212
+ }
213
+
214
+ const body = {
215
+ llm_metadata: {
216
+ model_name: llmModel,
217
+ llm_type: AZURE_CHAT_OPENAI_LLM_TYPE,
218
+ },
219
+ messages: [
220
+ userRole,
221
+ ],
222
+ };
223
+ if (responseFormat === JSON_OBJECT_RESPONSE_FORMAT) {
224
+ body.response_format = {
225
+ type: JSON_OBJECT_RESPONSE_FORMAT,
226
+ };
227
+ body.messages.push({
228
+ role: SYSTEM_ROLE,
229
+ content: 'You are a helpful assistant designed to output JSON.',
230
+ });
231
+ }
232
+
233
+ return body;
234
+ };
235
+
236
+ // Validate inputs
165
237
  if (!hasText(prompt)) {
166
238
  throw new Error('Invalid prompt received');
167
239
  }
240
+ if (hasImageUrls && !Array.isArray(imageUrls)) {
241
+ throw new Error('imageUrls must be an array.');
242
+ }
168
243
 
244
+ let chatSubmissionResponse;
169
245
  try {
170
246
  const startTime = process.hrtime.bigint();
171
- const jobSubmissionResponse = await this.#submitJob(prompt);
172
- const jobStatusResponse = await this.#pollJobStatus(jobSubmissionResponse.job_id);
173
- this.#logDuration('Firefall API call', startTime);
247
+ const body = getBody();
248
+
249
+ chatSubmissionResponse = await this.#submitPrompt(
250
+ JSON.stringify(body),
251
+ '/v2/chat/completions',
252
+ );
253
+ this.#logDuration('Firefall API Chat Completion call', startTime);
254
+ } catch (error) {
255
+ this.log.error('Error while fetching data from Firefall chat API: ', error.message);
256
+ throw error;
257
+ }
258
+
259
+ if (!validateChatCompletionResponse(chatSubmissionResponse)) {
260
+ this.log.error(
261
+ 'Could not obtain data from Firefall: Invalid response format.',
262
+ );
263
+ throw new Error('Invalid response format.');
264
+ }
265
+ if (!chatSubmissionResponse.choices.some((ch) => hasText(ch?.message?.content))) {
266
+ throw new Error('Prompt completed but no output was found.');
267
+ }
268
+
269
+ return chatSubmissionResponse;
270
+ }
271
+
272
+ /**
273
+ * Fetches data from Firefall API.
274
+ * @param prompt The text prompt to provide to Firefall
275
+ * @returns {string} - AI response
276
+ */
277
+ async fetchCapabilityExecution(prompt) {
278
+ if (!hasText(prompt)) {
279
+ throw new Error('Invalid prompt received');
280
+ }
281
+
282
+ try {
283
+ const startTime = process.hrtime.bigint();
284
+
285
+ const body = JSON.stringify({
286
+ input: prompt,
287
+ capability_name: this.config.capabilityName,
288
+ });
289
+ const path = '/v2/capability_execution/job';
290
+
291
+ const jobSubmissionResponse = await this.#submitPrompt(body, path);
292
+ const jobStatusResponse = await this.#pollJobStatus(jobSubmissionResponse.job_id, path);
293
+ this.#logDuration('Firefall API Capability Execution call', startTime);
174
294
 
175
295
  const { output } = jobStatusResponse;
176
296
  if (!output || !output.capability_response) {
177
297
  throw new Error('Job completed but no output was found');
178
298
  }
179
299
 
180
- if (!validateFirefallResponse(output.capability_response)) {
300
+ if (!validateCapabilityExecutionResponse(output.capability_response)) {
181
301
  this.log.error('Could not obtain data from Firefall: Invalid response format.');
182
302
  throw new Error('Invalid response format.');
183
303
  }
@@ -189,8 +309,15 @@ export default class FirefallClient {
189
309
 
190
310
  return result.text;
191
311
  } catch (error) {
192
- this.log.error('Error while fetching data from Firefall API: ', error.message);
312
+ this.log.error('Error while fetching data from Firefall Capability Execution API: ', error.message);
193
313
  throw error;
194
314
  }
195
315
  }
316
+
317
+ /**
318
+ * @deprecated since version 1.2.19. Use fetchCapabilityExecution instead.
319
+ */
320
+ async fetch(prompt) {
321
+ return this.fetchCapabilityExecution(prompt);
322
+ }
196
323
  }