@squidcloud/client 1.0.349 → 1.0.350
This diff shows the content of publicly released package versions as published to the supported public registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in those registries.
@@ -1,6 +1,6 @@
 import { AiAgentId, AiContextId, IntegrationId } from './communication.public-types';
 import { IntegrationType } from './integration.public-types';
-import {
+import { AiFunctionId, AiFunctionIdWithContext } from './backend.public-types';
 /**
  * The supported OpenAI models.
  * @category AI
@@ -47,15 +47,15 @@ export declare const OPENAI_IMAGE_MODEL_NAMES: readonly ["dall-e-3"];
 /**
  * @category AI
  */
-export declare const OPENAI_AUDIO_TRANSCRIPTION_MODEL_NAMES: readonly ["whisper-1"];
+export declare const OPENAI_AUDIO_TRANSCRIPTION_MODEL_NAMES: readonly ["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"];
 /**
  * @category AI
  */
-export declare const OPENAI_AUDIO_CREATE_SPEECH_MODEL_NAMES: readonly ["tts-1", "tts-1-hd"];
+export declare const OPENAI_AUDIO_CREATE_SPEECH_MODEL_NAMES: readonly ["tts-1", "tts-1-hd", "gpt-4o-mini-tts"];
 /**
  * @category AI
  */
-export declare const OPENAI_AUDIO_MODEL_NAMES: readonly ["whisper-1", "tts-1", "tts-1-hd"];
+export declare const OPENAI_AUDIO_MODEL_NAMES: readonly ["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe", "tts-1", "tts-1-hd", "gpt-4o-mini-tts"];
 /**
  * @category AI
  */
@@ -71,11 +71,11 @@ export declare const AI_IMAGE_MODEL_NAMES: readonly ["dall-e-3", "stable-diffusi
 /**
  * @category AI
  */
-export declare const AI_AUDIO_TRANSCRIPTION_MODEL_NAMES: readonly ["whisper-1"];
+export declare const AI_AUDIO_TRANSCRIPTION_MODEL_NAMES: readonly ["whisper-1", "gpt-4o-transcribe", "gpt-4o-mini-transcribe"];
 /**
  * @category AI
  */
-export declare const AI_AUDIO_CREATE_SPEECH_MODEL_NAMES: readonly ["tts-1", "tts-1-hd"];
+export declare const AI_AUDIO_CREATE_SPEECH_MODEL_NAMES: readonly ["tts-1", "tts-1-hd", "gpt-4o-mini-tts"];
 /**
  * @category AI
  */
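The audio model-name constants above are readonly tuples; the model-name unions referenced further down (such as OpenAiAudioTranscriptionModelName) are typically derived from tuples like these. A minimal sketch of that pattern, assuming the constant is re-exported from the package entry point; the derivation is illustrative, not necessarily how the package defines the type:

import { OPENAI_AUDIO_TRANSCRIPTION_MODEL_NAMES } from '@squidcloud/client';

// Derive a string-literal union from the readonly tuple.
type TranscriptionModelName = (typeof OPENAI_AUDIO_TRANSCRIPTION_MODEL_NAMES)[number];
// => 'whisper-1' | 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe'

// Validate arbitrary input against the same tuple at runtime.
function isTranscriptionModel(name: string): name is TranscriptionModelName {
  return (OPENAI_AUDIO_TRANSCRIPTION_MODEL_NAMES as readonly string[]).includes(name);
}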
@@ -155,7 +155,11 @@ export type AiGenerateImageOptions = DallEOptions | StableDiffusionCoreOptions |
 /**
  * @category AI
  */
-export type
+export type OpenAiAudioTranscribeOptions = WhisperTranscribeOptions | Gpt4oTranscribeOptions;
+/**
+ * @category AI
+ */
+export type AiAudioTranscribeOptions = OpenAiAudioTranscribeOptions;
 /**
  * @category AI
  */
@@ -198,19 +202,33 @@ export interface DallEOptions extends BaseAiGenerateImageOptions {
     /** The number of images to generate; defaults to 1 and limited to 1. */
     numberOfImagesToGenerate?: 1;
 }
+interface BaseOpenAiAudioTranscribeOptions extends BaseAiAudioTranscribeOptions {
+    /** Specifies the model for audio transcription. */
+    modelName: OpenAiAudioTranscriptionModelName;
+    /** The temperature for sampling during transcription; defaults to model-specific value. */
+    temperature?: number;
+    /** An optional prompt to guide the transcription process. */
+    prompt?: string;
+}
 /**
  * Options for transcribing audio using the Whisper model.
  * @category AI
  */
-export interface
+export interface WhisperTranscribeOptions extends BaseOpenAiAudioTranscribeOptions {
     /** Specifies the Whisper-1 model for audio transcription. */
     modelName: 'whisper-1';
     /** The format of the transcription response; defaults to 'json'. */
     responseFormat?: 'json' | 'text' | 'srt' | 'verbose_json' | 'vtt';
-
-
-
-
+}
+/**
+ * Options for transcribing audio using the GPT-4o model.
+ * @category AI
+ */
+export interface Gpt4oTranscribeOptions extends BaseOpenAiAudioTranscribeOptions {
+    /** Specifies the Whisper-1 model for audio transcription. */
+    modelName: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe';
+    /** The format of the transcription response; defaults to 'json'. */
+    responseFormat?: 'json';
 }
 /**
  * Options for creating speech using OpenAI's text-to-speech models.
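A brief sketch of the two new transcription option shapes, assuming these types are re-exported from the package entry point and that BaseAiAudioTranscribeOptions adds no required members beyond what this diff shows; how the options are passed to a transcription call is outside this diff:

import type { Gpt4oTranscribeOptions, WhisperTranscribeOptions } from '@squidcloud/client';

// Whisper keeps the full set of response formats.
const whisperOptions: WhisperTranscribeOptions = {
  modelName: 'whisper-1',
  responseFormat: 'verbose_json',
  temperature: 0.2,
};

// The GPT-4o transcription models accept only the 'json' response format.
const gpt4oOptions: Gpt4oTranscribeOptions = {
  modelName: 'gpt-4o-mini-transcribe',
  prompt: 'Expect technical vocabulary about databases.', // hypothetical prompt text
};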
@@ -220,9 +238,11 @@ export interface OpenAiCreateSpeechOptions extends BaseAiAudioCreateSpeechOption
     /** The OpenAI model to use for speech creation (e.g., 'tts-1' or 'tts-1-hd'). */
     modelName: OpenAiAudioCreateSpeechModelName;
     /** The voice to use for speech synthesis; defaults to model-specific value. */
-    voice?: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
+    voice?: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
     /** The format of the audio output; defaults to 'mp3'. */
     responseFormat?: OpenAiCreateSpeechFormat;
+    /** An optional prompt to guide the speech synthesis process. */
+    instructions?: string;
     /** The speed of the speech; defaults to 1.0. */
     speed?: number;
 }
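A minimal sketch of speech options using one of the newly added voices and the new instructions field, again assuming OpenAiCreateSpeechOptions is re-exported from the package entry point and that BaseAiAudioCreateSpeechOptions adds no required members not shown here:

import type { OpenAiCreateSpeechOptions } from '@squidcloud/client';

// 'gpt-4o-mini-tts' is the newly supported speech model; 'coral' is one of the
// added voices, and 'instructions' is the new optional steering field.
const speechOptions: OpenAiCreateSpeechOptions = {
  modelName: 'gpt-4o-mini-tts',
  voice: 'coral',
  instructions: 'Speak slowly and in a calm tone.', // hypothetical instruction text
  speed: 1.0,
};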
@@ -354,7 +374,7 @@ export interface BaseAiAgentChatOptions {
      * The parameter values must be valid serializable JSON values.
      * Overrides the stored value.
      */
-    functions?: Array<
+    functions?: Array<AiFunctionId | AiFunctionIdWithContext>;
     /** Instructions to include with the prompt. */
     instructions?: string;
     /** A set of filters that will limit the context the AI can access. */
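With the updated functions field, a chat call can mix bare function IDs with IDs that carry context. A hedged sketch (the function IDs 'searchInvoices' and 'getWeather' are hypothetical, and the types are assumed to be re-exported from the package entry point):

import type { AiFunctionId, AiFunctionIdWithContext } from '@squidcloud/client';

// Entries may be a plain function ID or an ID plus JSON-serializable context.
const functions: Array<AiFunctionId | AiFunctionIdWithContext> = [
  'searchInvoices', // hypothetical function ID
  { name: 'getWeather', context: { units: 'metric' } }, // hypothetical ID and context
];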
@@ -1,4 +1,6 @@
 /** Backend related public types that should be available on the client SDK */
+/** A type alias for a string that represents an AI function ID. */
+export type AiFunctionId = string;
 /**
  * A type alias for a service identifier.
  * @category Queue
@@ -11,9 +13,9 @@ export type FunctionName = string;
 /**
  * Function name with contextual data, where the data must be serializable as JSON.
  */
-export interface
-    /** The
-    name:
+export interface AiFunctionIdWithContext {
+    /** The ID of the AI function as described in @aiFunction decorator. */
+    name: AiFunctionId;
     /** A record of contextual data associated with the function call. */
     context: Record<string, unknown>;
 }
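Since a functions entry is now either a bare AiFunctionId (a string) or an AiFunctionIdWithContext object, a simple type guard can distinguish the two forms; a sketch under the same re-export assumption:

import type { AiFunctionId, AiFunctionIdWithContext } from '@squidcloud/client';

// A plain string is a bare AiFunctionId; an object carries contextual data.
function hasContext(fn: AiFunctionId | AiFunctionIdWithContext): fn is AiFunctionIdWithContext {
  return typeof fn !== 'string';
}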