@adobe/spacecat-shared-gpt-client 1.5.20 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/README.md +136 -0
- package/package.json +1 -1
- package/src/clients/azure-openai-client.js +218 -0
- package/src/clients/index.d.ts +21 -81
- package/src/index.js +2 -0
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,17 @@
+ # [@adobe/spacecat-shared-gpt-client-v1.6.0](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-gpt-client-v1.5.21...@adobe/spacecat-shared-gpt-client-v1.6.0) (2025-09-03)
+
+
+ ### Features
+
+ * **gpt-client:** add optional system prompt to Azure OpenAI client ([#945](https://github.com/adobe/spacecat-shared/issues/945)) ([90f93e5](https://github.com/adobe/spacecat-shared/commit/90f93e5074ad0fd2df464f01b30edc4fe98b94cc))
+
+ # [@adobe/spacecat-shared-gpt-client-v1.5.21](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-gpt-client-v1.5.20...@adobe/spacecat-shared-gpt-client-v1.5.21) (2025-08-25)
+
+
+ ### Bug Fixes
+
+ * adding azure openai api client ([#931](https://github.com/adobe/spacecat-shared/issues/931)) ([3ed0204](https://github.com/adobe/spacecat-shared/commit/3ed020423a506f0a9f4abdc84b5afc64aa58a649))
+
  # [@adobe/spacecat-shared-gpt-client-v1.5.20](https://github.com/adobe/spacecat-shared/compare/@adobe/spacecat-shared-gpt-client-v1.5.19...@adobe/spacecat-shared-gpt-client-v1.5.20) (2025-08-09)

package/README.md
CHANGED
@@ -1,5 +1,141 @@
  # Spacecat Shared - GPT Client

+ ## Azure OpenAI
+
+ The `AzureOpenAIClient` library provides a streamlined way to interact with Azure OpenAI's Chat Completions API, enabling applications to fetch AI-generated responses based on provided prompts. Designed with simplicity and efficiency in mind, this client handles all aspects of communication with Azure OpenAI, including request authentication, error handling, and response parsing.
+
+ ### Configuration
+
+ To use the `AzureOpenAIClient`, you need to configure it with the following parameters:
+
+ - `AZURE_OPENAI_ENDPOINT`: The endpoint URL for your Azure OpenAI resource (e.g., `https://your-resource.openai.azure.com`).
+ - `AZURE_OPENAI_KEY`: Your API key for accessing the Azure OpenAI API.
+ - `AZURE_API_VERSION`: The API version to use (e.g., `2024-02-01`).
+ - `AZURE_COMPLETION_DEPLOYMENT`: The deployment name for your Azure OpenAI model (e.g., `gpt-4o`).
+
+ **All parameters are required.** The client will throw an error if any of these configuration values are missing or invalid.
+
+ These parameters can be set through environment variables or passed directly to the `AzureOpenAIClient.createFrom` method.
+
+ ### Usage Examples
+
+ #### Instantiating the Azure OpenAI Client
+
+ ```javascript
+ import AzureOpenAIClient from 'path/to/azure-openai-client';
+
+ // Assuming environment variables are set
+ const context = {
+   env: process.env,
+   log: console, // Using console for logging in this example
+ };
+
+ try {
+   const client = AzureOpenAIClient.createFrom(context);
+   console.log('AzureOpenAIClient created successfully.');
+ } catch (error) {
+   console.error('Error creating AzureOpenAIClient:', error.message);
+ }
+ ```
+
+ #### Fetching Chat Completions
+
+ ```javascript
+ /**
+  * Fetch chat completions using Azure OpenAI's Chat Completions API.
+  */
+ async function fetchChatCompletion(prompt) {
+   try {
+     const client = AzureOpenAIClient.createFrom({
+       env: {
+         AZURE_OPENAI_ENDPOINT: 'https://your-resource.openai.azure.com',
+         AZURE_OPENAI_KEY: 'your-api-key',
+         AZURE_API_VERSION: '2024-02-01',
+         AZURE_COMPLETION_DEPLOYMENT: 'gpt-4o',
+       },
+       log: console,
+     });
+
+     const response = await client.fetchChatCompletion(prompt);
+     console.log('Response:', JSON.stringify(response));
+   } catch (error) {
+     console.error('Failed to fetch chat completion:', error.message);
+   }
+ }
+
+ fetchChatCompletion('What is the capital of France?');
+ ```
+
+ #### Using Images with Chat Completions
+
+ ```javascript
+ /**
+  * Fetch chat completions with image analysis using Azure OpenAI.
+  */
+ async function fetchChatCompletionWithImages(prompt, imageUrls) {
+   try {
+     const client = AzureOpenAIClient.createFrom({
+       env: {
+         AZURE_OPENAI_ENDPOINT: 'https://your-resource.openai.azure.com',
+         AZURE_OPENAI_KEY: 'your-api-key',
+         AZURE_API_VERSION: '2024-02-01',
+         AZURE_COMPLETION_DEPLOYMENT: 'gpt-4o',
+       },
+       log: console,
+     });
+
+     const options = {
+       imageUrls: imageUrls, // Array of image URLs or base64 data
+     };
+
+     const response = await client.fetchChatCompletion(prompt, options);
+     console.log('Response:', JSON.stringify(response));
+   } catch (error) {
+     console.error('Failed to fetch chat completion with images:', error.message);
+   }
+ }
+
+ // Example with image URLs
+ fetchChatCompletionWithImages(
+   'Identify all food items in this image',
+   ['https://example.com/food-image.jpg', 'data:image/png;base64,iVBORw0KGgoAAAA...=']
+ );
+ ```
+
+ #### Requesting JSON Responses
+
+ ```javascript
+ /**
+  * Fetch chat completions with JSON response format.
+  */
+ async function fetchJSONResponse(prompt) {
+   try {
+     const client = AzureOpenAIClient.createFrom({
+       env: {
+         AZURE_OPENAI_ENDPOINT: 'https://your-resource.openai.azure.com',
+         AZURE_OPENAI_KEY: 'your-api-key',
+         AZURE_API_VERSION: '2024-02-01',
+         AZURE_COMPLETION_DEPLOYMENT: 'gpt-4o',
+       },
+       log: console,
+     });
+
+     const options = {
+       responseFormat: 'json_object',
+     };
+
+     const response = await client.fetchChatCompletion(prompt, options);
+     console.log('JSON Response:', JSON.stringify(response));
+   } catch (error) {
+     console.error('Failed to fetch JSON response:', error.message);
+   }
+ }
+
+ fetchJSONResponse('Provide a list of 3 colors in JSON format');
+ ```
+
+ Ensure that you replace `'path/to/azure-openai-client'` with the actual path to the `AzureOpenAIClient` class in your project and adjust the configuration parameters according to your Azure OpenAI resource credentials.
+
  ## Firefall
  The `FirefallClient` library offers a streamlined way to interact with the Firefall API, enabling applications to fetch insights, recommendations, and codes based on provided prompts. Designed with simplicity and efficiency in mind, this client handles all aspects of communication with the Firefall API, including request authentication, error handling, and response parsing.

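Note that the 1.6.0 feature (the optional system prompt, see the changelog above) is not yet covered by the README examples. A minimal sketch, assuming the `systemPrompt` option accepted by `fetchChatCompletion` in the client source below:

```javascript
import AzureOpenAIClient from 'path/to/azure-openai-client';

// Sketch only: the systemPrompt option name comes from the client implementation below.
// Expects AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_KEY, AZURE_API_VERSION, and
// AZURE_COMPLETION_DEPLOYMENT to be set in the environment.
const client = AzureOpenAIClient.createFrom({ env: process.env, log: console });

const response = await client.fetchChatCompletion(
  'Summarize the main points of this page.',
  { systemPrompt: 'You are a concise technical writing assistant.' },
);
console.log(response.choices[0].message.content);
```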
package/src/clients/azure-openai-client.js
ADDED
@@ -0,0 +1,218 @@
+ /*
+  * Copyright 2025 Adobe. All rights reserved.
+  * This file is licensed to you under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License. You may obtain a copy
+  * of the License at http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software distributed under
+  * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
+  * OF ANY KIND, either express or implied. See the License for the specific language
+  * governing permissions and limitations under the License.
+  */
+
+ import { createUrl } from '@adobe/fetch';
+ import { hasText, isObject, isValidUrl } from '@adobe/spacecat-shared-utils';
+
+ import { fetch as httpFetch, sanitizeHeaders } from '../utils.js';
+
+ const USER_ROLE_IMAGE_URL_TYPE = 'image_url';
+ const USER_ROLE_TEXT_TYPE = 'text';
+ const SYSTEM_ROLE = 'system';
+ const USER_ROLE = 'user';
+ const JSON_OBJECT_RESPONSE_FORMAT = 'json_object';
+
+ function validateChatCompletionResponse(response) {
+   return isObject(response)
+     && Array.isArray(response?.choices)
+     && response.choices.length > 0
+     && response.choices[0]?.message;
+ }
+
+ function isBase64UrlImage(base64String) {
+   return base64String.startsWith('data:image') && base64String.endsWith('=') && base64String.includes('base64');
+ }
+
+ export default class AzureOpenAIClient {
+   static createFrom(context) {
+     const { log = console } = context;
+
+     const {
+       AZURE_OPENAI_ENDPOINT: apiEndpoint,
+       AZURE_OPENAI_KEY: apiKey,
+       AZURE_API_VERSION: apiVersion,
+       AZURE_COMPLETION_DEPLOYMENT: deploymentName,
+     } = context.env;
+
+     if (!isValidUrl(apiEndpoint)) {
+       throw new Error('Missing Azure OpenAI API endpoint');
+     }
+
+     if (!hasText(apiKey)) {
+       throw new Error('Missing Azure OpenAI API key');
+     }
+
+     if (!hasText(apiVersion)) {
+       throw new Error('Missing Azure OpenAI API version');
+     }
+
+     if (!hasText(deploymentName)) {
+       throw new Error('Missing Azure OpenAI deployment name');
+     }
+
+     return new AzureOpenAIClient({
+       apiEndpoint,
+       apiKey,
+       apiVersion,
+       deploymentName,
+     }, log);
+   }
+
+   /**
+    * Creates a new Azure OpenAI client
+    *
+    * @param {Object} config - The configuration object.
+    * @param {string} config.apiEndpoint - The API endpoint for Azure OpenAI.
+    * @param {string} config.apiKey - The API Key for Azure OpenAI.
+    * @param {string} config.apiVersion - The API version for Azure OpenAI.
+    * @param {string} config.deploymentName - The deployment name for Azure OpenAI.
+    * @param {Object} log - The Logger.
+    * @returns {AzureOpenAIClient} - the Azure OpenAI client.
+    */
+   constructor(config, log) {
+     this.config = config;
+     this.log = log;
+   }
+
+   #logDuration(message, startTime) {
+     const endTime = process.hrtime.bigint();
+     const duration = (endTime - startTime) / BigInt(1e6);
+     this.log.debug(`${message}: took ${duration}ms`);
+   }
+
+   /**
+    * Submit a prompt to the Azure OpenAI API.
+    * @param body The body of the request.
+    * @param path The Azure OpenAI API path.
+    * @returns {Promise<unknown>}
+    */
+   async #submitPrompt(body, path) {
+     const url = createUrl(`${this.config.apiEndpoint}${path}?api-version=${this.config.apiVersion}`);
+     const headers = {
+       'Content-Type': 'application/json',
+       'api-key': this.config.apiKey,
+     };
+
+     this.log.info(`[Azure OpenAI API Call]: ${url}, Headers: ${JSON.stringify(sanitizeHeaders(headers))}`);
+
+     const response = await httpFetch(url, {
+       method: 'POST',
+       headers,
+       body,
+     });
+
+     if (!response.ok) {
+       const errorBody = await response.text();
+       throw new Error(`API call failed with status code ${response.status} and body: ${errorBody}`);
+     }
+
+     return response.json();
+   }
+
+   /**
+    * Fetches data from Azure OpenAI Chat Completion API.
+    * @param prompt The text prompt to provide to Azure OpenAI
+    * @param options The options for the call, with optional properties:
+    * - imageUrls: An array of URLs of the images to provide to Azure OpenAI
+    * - responseFormat: The response format to request from Azure OpenAI
+    *   (accepts: json_object)
+    * - systemPrompt: An optional system prompt, prepended as a system message
+    * @returns {Object} - AI response
+    */
+   async fetchChatCompletion(prompt, options = {}) {
+     const {
+       imageUrls,
+       responseFormat,
+       systemPrompt,
+     } = options || {};
+     const hasImageUrls = imageUrls && imageUrls.length > 0;
+
+     const getBody = () => {
+       const userRole = {
+         role: USER_ROLE,
+         content: [
+           {
+             type: USER_ROLE_TEXT_TYPE,
+             text: prompt,
+           },
+         ],
+       };
+
+       if (hasImageUrls) {
+         imageUrls
+           .filter((iu) => isValidUrl(iu) || isBase64UrlImage(iu))
+           .forEach((imageUrl) => {
+             userRole.content.push({
+               type: USER_ROLE_IMAGE_URL_TYPE,
+               image_url: {
+                 url: imageUrl,
+               },
+             });
+           });
+       }
+
+       const body = {
+         messages: [
+           userRole,
+         ],
+       };
+
+       if (systemPrompt) {
+         body.messages.unshift({
+           role: SYSTEM_ROLE,
+           content: systemPrompt,
+         });
+       } else if (responseFormat === JSON_OBJECT_RESPONSE_FORMAT) {
+         body.response_format = {
+           type: JSON_OBJECT_RESPONSE_FORMAT,
+         };
+         body.messages.unshift({
+           role: SYSTEM_ROLE,
+           content: 'You are a helpful assistant designed to output JSON.',
+         });
+       }
+
+       return body;
+     };
+
+     // Validate inputs
+     if (!hasText(prompt)) {
+       throw new Error('Invalid prompt received');
+     }
+     if (hasImageUrls && !Array.isArray(imageUrls)) {
+       throw new Error('imageUrls must be an array.');
+     }
+
+     let chatSubmissionResponse;
+     try {
+       const startTime = process.hrtime.bigint();
+       const body = getBody();
+
+       chatSubmissionResponse = await this.#submitPrompt(JSON.stringify(body), `/openai/deployments/${this.config.deploymentName}/chat/completions`);
+       this.#logDuration('Azure OpenAI API Chat Completion call', startTime);
+     } catch (error) {
+       this.log.error('Error while fetching data from Azure OpenAI chat API: ', error.message);
+       throw error;
+     }
+
+     if (!validateChatCompletionResponse(chatSubmissionResponse)) {
+       this.log.error(
+         'Could not obtain data from Azure OpenAI: Invalid response format.',
+       );
+       throw new Error('Invalid response format.');
+     }
+     if (!chatSubmissionResponse.choices.some((ch) => hasText(ch?.message?.content))) {
+       throw new Error('Prompt completed but no output was found.');
+     }
+
+     return chatSubmissionResponse;
+   }
+ }

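The client returns the raw chat completion object after validating it. A minimal sketch of how a caller might consume that object, assuming a `client` created via `AzureOpenAIClient.createFrom` as in the README examples:

```javascript
// Sketch: extracting the text content from the returned chat completion.
// The shape mirrors what validateChatCompletionResponse() checks above:
// choices[0].message.content holds the model output.
const completion = await client.fetchChatCompletion(
  'Provide a list of 3 colors in JSON format',
  { responseFormat: 'json_object' },
);

// In json_object mode the content is expected to be a JSON string.
const colors = JSON.parse(completion.choices[0].message.content);
console.log(colors);
```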
package/src/clients/index.d.ts
CHANGED
@@ -12,111 +12,51 @@

  import type { UniversalContext } from '@adobe/helix-universal';

- export class
+ export class AzureOpenAIClient {
    /**
-    * Creates a new
-    * @param {UniversalContext} context The UniversalContext to use for creating the
-    *
+    * Creates a new AzureOpenAIClient instance from the given UniversalContext.
+    * @param {UniversalContext} context - The UniversalContext to use for creating the
+    * AzureOpenAIClient.
+    * @returns {AzureOpenAIClient} The AzureOpenAIClient instance.
     */
-   static createFrom(context: UniversalContext):
+   static createFrom(context: UniversalContext): AzureOpenAIClient;

    /**
-    *
-    * @param {string} prompt The prompt to send to the Firefall GPT API.
-    * @returns {Promise<string>} The response from the Firefall GPT API.
-    * @deprecated since version 1.2.19. Use fetchCapabilityExecution instead.
-    */
-   fetch(prompt: string): Promise<string>;
-
-   /**
-    * Fetches data from Firefall Chat Completion API.
+    * Fetches data from Azure OpenAI Chat Completion API.
     *
-    * @param {string} prompt - The text prompt to provide to
+    * @param {string} prompt - The text prompt to provide to Azure OpenAI
     * @param {object} [options] - The options for the call, with optional properties:
-    * - imageUrls: An array of URLs of the images to provide to
-    * -
-    * Use 'gpt-4-vision' with images.
-    * JSON mode is only currently supported with the following models: gpt-35-turbo-1106, gpt-4-turbo
+    * - imageUrls: An array of URLs of the images to provide to Azure OpenAI
+    * - responseFormat: The response format to request from Azure OpenAI (accepts: json_object)
     * @returns {Promise<object>} A promise that resolves to an object containing the chat completion.
     *
     * The returned object has the following structure:
     *
     * @example
     * {
-    *   "
-    *   "
+    *   "id": string,
+    *   "object": string,
+    *   "created": number,
     *   "model": string,
     *   "choices": [
     *     {
-    *       "finish_reason": string,
     *       "index": number,
     *       "message": {
     *         "role": string,
-    *         "content": string
-    *         "function_call": object | null
+    *         "content": string
     *       },
-    *       "
-    *       "hate": {
-    *         "filtered": boolean,
-    *         "severity": string
-    *       },
-    *       "self_harm": {
-    *         "filtered": boolean,
-    *         "severity": string
-    *       },
-    *       "sexual": {
-    *         "filtered": boolean,
-    *         "severity": string
-    *       },
-    *       "violence": {
-    *         "filtered": boolean,
-    *         "severity": string
-    *       }
-    *     },
-    *     "logprobs": object | null
+    *       "finish_reason": string
     *     }
     *   ],
-    *   "created_at": string,
     *   "usage": {
-    *     "completion_tokens": number,
     *     "prompt_tokens": number,
+    *     "completion_tokens": number,
     *     "total_tokens": number
-    *   }
-    *   "prompt_filter_results": [
-    *     {
-    *       "prompt_index": number,
-    *       "content_filter_results": {
-    *         "hate": {
-    *           "filtered": boolean,
-    *           "severity": string
-    *         },
-    *         "jailbreak": {
-    *           "filtered": boolean,
-    *           "detected": boolean
-    *         },
-    *         "self_harm": {
-    *           "filtered": boolean,
-    *           "severity": string
-    *         },
-    *         "sexual": {
-    *           "filtered": boolean,
-    *           "severity": string
-    *         },
-    *         "violence": {
-    *           "filtered": boolean,
-    *           "severity": string
-    *         }
-    *       }
-    *     }
-    *   ]
+    *   }
     * }
     */
-   fetchChatCompletion(prompt: string, options?:
-
-
-
-    * @param prompt The text prompt to provide to Firefall
-    * @returns {Promise<string>} - AI response
-    */
-   fetchCapabilityExecution(prompt: string): Promise<string>;
+   fetchChatCompletion(prompt: string, options?: {
+     imageUrls?: string[];
+     responseFormat?: string;
+   }): Promise<object>;
  }
package/src/index.js
CHANGED
@@ -12,8 +12,10 @@

  import FirefallClient from './clients/firefall-client.js';
  import GenvarClient from './clients/genvar-client.js';
+ import AzureOpenAIClient from './clients/azure-openai-client.js';

  export {
    FirefallClient,
    GenvarClient,
+   AzureOpenAIClient,
  };
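With this export in place, consumers can import the client from the package entry point instead of a relative path. A minimal sketch:

```javascript
import { AzureOpenAIClient } from '@adobe/spacecat-shared-gpt-client';

// Assumes the Azure OpenAI environment variables described in the README are set.
const client = AzureOpenAIClient.createFrom({ env: process.env, log: console });
const response = await client.fetchChatCompletion('What is the capital of France?');
console.log(response.choices[0].message.content);
```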