@firebase/ai 2.4.0-canary.91c218db2 → 2.4.0-canary.bc5a7c4a7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ai-public.d.ts +110 -9
- package/dist/ai.d.ts +110 -9
- package/dist/esm/index.esm.js +129 -39
- package/dist/esm/index.esm.js.map +1 -1
- package/dist/esm/src/methods/live-session.d.ts +64 -9
- package/dist/esm/src/types/live-responses.d.ts +21 -3
- package/dist/esm/src/types/requests.d.ts +23 -0
- package/dist/esm/src/types/responses.d.ts +21 -0
- package/dist/index.cjs.js +129 -39
- package/dist/index.cjs.js.map +1 -1
- package/dist/index.node.cjs.js +129 -39
- package/dist/index.node.cjs.js.map +1 -1
- package/dist/index.node.mjs +129 -39
- package/dist/index.node.mjs.map +1 -1
- package/dist/src/methods/live-session.d.ts +64 -9
- package/dist/src/types/live-responses.d.ts +21 -3
- package/dist/src/types/requests.d.ts +23 -0
- package/dist/src/types/responses.d.ts +21 -0
- package/package.json +8 -8
|
@@ -53,32 +53,65 @@ export declare class LiveSession {
|
|
|
53
53
|
*/
|
|
54
54
|
send(request: string | Array<string | Part>, turnComplete?: boolean): Promise<void>;
|
|
55
55
|
/**
|
|
56
|
-
* Sends
|
|
56
|
+
* Sends text to the server in realtime.
|
|
57
57
|
*
|
|
58
|
-
* @
|
|
58
|
+
* @example
|
|
59
|
+
* ```javascript
|
|
60
|
+
* liveSession.sendTextRealtime("Hello, how are you?");
|
|
61
|
+
* ```
|
|
62
|
+
*
|
|
63
|
+
* @param text - The text data to send.
|
|
59
64
|
* @throws If this session has been closed.
|
|
60
65
|
*
|
|
61
66
|
* @beta
|
|
62
67
|
*/
|
|
63
|
-
|
|
68
|
+
sendTextRealtime(text: string): Promise<void>;
|
|
64
69
|
/**
|
|
65
|
-
* Sends
|
|
70
|
+
* Sends audio data to the server in realtime.
|
|
66
71
|
*
|
|
67
|
-
* @
|
|
72
|
+
* @remarks The server requires that the audio data is base64-encoded 16-bit PCM at 16kHz
|
|
73
|
+
* little-endian.
|
|
74
|
+
*
|
|
75
|
+
* @example
|
|
76
|
+
* ```javascript
|
|
77
|
+
* // const pcmData = ... base64-encoded 16-bit PCM at 16kHz little-endian.
|
|
78
|
+
* const blob = { mimeType: "audio/pcm", data: pcmData };
|
|
79
|
+
* liveSession.sendAudioRealtime(blob);
|
|
80
|
+
* ```
|
|
81
|
+
*
|
|
82
|
+
* @param blob - The base64-encoded PCM data to send to the server in realtime.
|
|
68
83
|
* @throws If this session has been closed.
|
|
69
84
|
*
|
|
70
85
|
* @beta
|
|
71
86
|
*/
|
|
72
|
-
|
|
87
|
+
sendAudioRealtime(blob: GenerativeContentBlob): Promise<void>;
|
|
73
88
|
/**
|
|
74
|
-
* Sends
|
|
89
|
+
* Sends video data to the server in realtime.
|
|
75
90
|
*
|
|
76
|
-
* @
|
|
91
|
+
* @remarks The server requires that the video is sent as individual video frames at 1 FPS. It
|
|
92
|
+
* is recommended to set `mimeType` to `image/jpeg`.
|
|
93
|
+
*
|
|
94
|
+
* @example
|
|
95
|
+
* ```javascript
|
|
96
|
+
* // const videoFrame = ... base64-encoded JPEG data
|
|
97
|
+
* const blob = { mimeType: "image/jpeg", data: videoFrame };
|
|
98
|
+
* liveSession.sendVideoRealtime(blob);
|
|
99
|
+
* ```
|
|
100
|
+
* @param blob - The base64-encoded video data to send to the server in realtime.
|
|
77
101
|
* @throws If this session has been closed.
|
|
78
102
|
*
|
|
79
103
|
* @beta
|
|
80
104
|
*/
|
|
81
|
-
|
|
105
|
+
sendVideoRealtime(blob: GenerativeContentBlob): Promise<void>;
|
|
106
|
+
/**
|
|
107
|
+
* Sends function responses to the server.
|
|
108
|
+
*
|
|
109
|
+
* @param functionResponses - The function responses to send.
|
|
110
|
+
* @throws If this session has been closed.
|
|
111
|
+
*
|
|
112
|
+
* @beta
|
|
113
|
+
*/
|
|
114
|
+
sendFunctionResponses(functionResponses: FunctionResponse[]): Promise<void>;
|
|
82
115
|
/**
|
|
83
116
|
* Yields messages received from the server.
|
|
84
117
|
* This can only be used by one consumer at a time.
|
|
@@ -96,4 +129,26 @@ export declare class LiveSession {
|
|
|
96
129
|
* @beta
|
|
97
130
|
*/
|
|
98
131
|
close(): Promise<void>;
|
|
132
|
+
/**
|
|
133
|
+
* Sends realtime input to the server.
|
|
134
|
+
*
|
|
135
|
+
* @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
|
|
136
|
+
*
|
|
137
|
+
* @param mediaChunks - The media chunks to send.
|
|
138
|
+
* @throws If this session has been closed.
|
|
139
|
+
*
|
|
140
|
+
* @beta
|
|
141
|
+
*/
|
|
142
|
+
sendMediaChunks(mediaChunks: GenerativeContentBlob[]): Promise<void>;
|
|
143
|
+
/**
|
|
144
|
+
* @deprecated Use `sendTextRealtime()`, `sendAudioRealtime()`, and `sendVideoRealtime()` instead.
|
|
145
|
+
*
|
|
146
|
+
* Sends a stream of {@link GenerativeContentBlob}.
|
|
147
|
+
*
|
|
148
|
+
* @param mediaChunkStream - The stream of {@link GenerativeContentBlob} to send.
|
|
149
|
+
* @throws If this session has been closed.
|
|
150
|
+
*
|
|
151
|
+
* @beta
|
|
152
|
+
*/
|
|
153
|
+
sendMediaStream(mediaChunkStream: ReadableStream<GenerativeContentBlob>): Promise<void>;
|
|
99
154
|
}
|
|
@@ -15,7 +15,8 @@
|
|
|
15
15
|
* limitations under the License.
|
|
16
16
|
*/
|
|
17
17
|
import { Content, FunctionResponse, GenerativeContentBlob, Part } from './content';
|
|
18
|
-
import { LiveGenerationConfig, Tool, ToolConfig } from './requests';
|
|
18
|
+
import { AudioTranscriptionConfig, LiveGenerationConfig, Tool, ToolConfig } from './requests';
|
|
19
|
+
import { Transcription } from './responses';
|
|
19
20
|
/**
|
|
20
21
|
* User input that is sent to the model.
|
|
21
22
|
*
|
|
@@ -25,6 +26,8 @@ export interface _LiveClientContent {
|
|
|
25
26
|
clientContent: {
|
|
26
27
|
turns: [Content];
|
|
27
28
|
turnComplete: boolean;
|
|
29
|
+
inputTranscription?: Transcription;
|
|
30
|
+
outputTranscription?: Transcription;
|
|
28
31
|
};
|
|
29
32
|
}
|
|
30
33
|
/**
|
|
@@ -34,7 +37,13 @@ export interface _LiveClientContent {
|
|
|
34
37
|
*/
|
|
35
38
|
export interface _LiveClientRealtimeInput {
|
|
36
39
|
realtimeInput: {
|
|
37
|
-
|
|
40
|
+
text?: string;
|
|
41
|
+
audio?: GenerativeContentBlob;
|
|
42
|
+
video?: GenerativeContentBlob;
|
|
43
|
+
/**
|
|
44
|
+
* @deprecated Use `text`, `audio`, and `video` instead.
|
|
45
|
+
*/
|
|
46
|
+
mediaChunks?: GenerativeContentBlob[];
|
|
38
47
|
};
|
|
39
48
|
}
|
|
40
49
|
/**
|
|
@@ -53,9 +62,18 @@ export interface _LiveClientToolResponse {
|
|
|
53
62
|
export interface _LiveClientSetup {
|
|
54
63
|
setup: {
|
|
55
64
|
model: string;
|
|
56
|
-
generationConfig?:
|
|
65
|
+
generationConfig?: _LiveGenerationConfig;
|
|
57
66
|
tools?: Tool[];
|
|
58
67
|
toolConfig?: ToolConfig;
|
|
59
68
|
systemInstruction?: string | Part | Content;
|
|
69
|
+
inputAudioTranscription?: AudioTranscriptionConfig;
|
|
70
|
+
outputAudioTranscription?: AudioTranscriptionConfig;
|
|
60
71
|
};
|
|
61
72
|
}
|
|
73
|
+
/**
|
|
74
|
+
* The Live Generation Config.
|
|
75
|
+
*
|
|
76
|
+
* The public API ({@link LiveGenerationConfig}) has `inputAudioTranscription` and `outputAudioTranscription`,
|
|
77
|
+
* but the server expects these fields to be in the top-level `setup` message. This was a conscious API decision.
|
|
78
|
+
*/
|
|
79
|
+
export type _LiveGenerationConfig = Omit<LiveGenerationConfig, 'inputAudioTranscription' | 'outputAudioTranscription'>;
|
|
@@ -167,6 +167,24 @@ export interface LiveGenerationConfig {
|
|
|
167
167
|
* The modalities of the response.
|
|
168
168
|
*/
|
|
169
169
|
responseModalities?: ResponseModality[];
|
|
170
|
+
/**
|
|
171
|
+
* Enables transcription of audio input.
|
|
172
|
+
*
|
|
173
|
+
 * When enabled, the model will respond with transcriptions of your audio input in the `inputTranscription` property
|
|
174
|
+
* in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
|
|
175
|
+
* messages, so you may only receive small amounts of text per message. For example, if you ask the model
|
|
176
|
+
* "How are you today?", the model may transcribe that input across three messages, broken up as "How a", "re yo", "u today?".
|
|
177
|
+
*/
|
|
178
|
+
inputAudioTranscription?: AudioTranscriptionConfig;
|
|
179
|
+
/**
|
|
180
|
+
 * Enables transcription of audio output.
|
|
181
|
+
*
|
|
182
|
+
* When enabled, the model will respond with transcriptions of its audio output in the `outputTranscription` property
|
|
183
|
+
* in {@link LiveServerContent} messages. Note that the transcriptions are broken up across
|
|
184
|
+
* messages, so you may only receive small amounts of text per message. For example, if the model says
|
|
185
|
+
* "How are you today?", the model may transcribe that output across three messages, broken up as "How a", "re yo", "u today?".
|
|
186
|
+
*/
|
|
187
|
+
outputAudioTranscription?: AudioTranscriptionConfig;
|
|
170
188
|
}
|
|
171
189
|
/**
|
|
172
190
|
* Params for {@link GenerativeModel.startChat}.
|
|
@@ -439,3 +457,8 @@ export interface SpeechConfig {
|
|
|
439
457
|
*/
|
|
440
458
|
voiceConfig?: VoiceConfig;
|
|
441
459
|
}
|
|
460
|
+
/**
|
|
461
|
+
* The audio transcription configuration.
|
|
462
|
+
*/
|
|
463
|
+
export interface AudioTranscriptionConfig {
|
|
464
|
+
}
|
|
@@ -516,6 +516,27 @@ export interface LiveServerContent {
|
|
|
516
516
|
* model was not interrupted.
|
|
517
517
|
*/
|
|
518
518
|
interrupted?: boolean;
|
|
519
|
+
/**
|
|
520
|
+
* Transcription of the audio that was input to the model.
|
|
521
|
+
*/
|
|
522
|
+
inputTranscription?: Transcription;
|
|
523
|
+
/**
|
|
524
|
+
* Transcription of the audio output from the model.
|
|
525
|
+
*/
|
|
526
|
+
outputTranscription?: Transcription;
|
|
527
|
+
}
|
|
528
|
+
/**
|
|
529
|
+
* Transcription of audio. This can be returned from a {@link LiveGenerativeModel} if transcription
|
|
530
|
+
* is enabled with the `inputAudioTranscription` or `outputAudioTranscription` properties on
|
|
531
|
+
* the {@link LiveGenerationConfig}.
|
|
532
|
+
*
|
|
533
|
+
* @beta
|
|
534
|
+
*/
|
|
535
|
+
export interface Transcription {
|
|
536
|
+
/**
|
|
537
|
+
* The text transcription of the audio.
|
|
538
|
+
*/
|
|
539
|
+
text?: string;
|
|
519
540
|
}
|
|
520
541
|
/**
|
|
521
542
|
* A request from the model for the client to execute one or more functions.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@firebase/ai",
|
|
3
|
-
"version": "2.4.0-canary.
|
|
3
|
+
"version": "2.4.0-canary.bc5a7c4a7",
|
|
4
4
|
"description": "The Firebase AI SDK",
|
|
5
5
|
"author": "Firebase <firebase-support@google.com> (https://firebase.google.com/)",
|
|
6
6
|
"engines": {
|
|
@@ -48,19 +48,19 @@
|
|
|
48
48
|
"trusted-type-check": "tsec -p tsconfig.json --noEmit"
|
|
49
49
|
},
|
|
50
50
|
"peerDependencies": {
|
|
51
|
-
"@firebase/app": "0.14.4-canary.
|
|
52
|
-
"@firebase/app-types": "0.9.3-canary.
|
|
51
|
+
"@firebase/app": "0.14.4-canary.bc5a7c4a7",
|
|
52
|
+
"@firebase/app-types": "0.9.3-canary.bc5a7c4a7"
|
|
53
53
|
},
|
|
54
54
|
"dependencies": {
|
|
55
|
-
"@firebase/app-check-interop-types": "0.3.3-canary.
|
|
56
|
-
"@firebase/component": "0.7.0-canary.
|
|
57
|
-
"@firebase/logger": "0.5.0-canary.
|
|
58
|
-
"@firebase/util": "1.13.0-canary.
|
|
55
|
+
"@firebase/app-check-interop-types": "0.3.3-canary.bc5a7c4a7",
|
|
56
|
+
"@firebase/component": "0.7.0-canary.bc5a7c4a7",
|
|
57
|
+
"@firebase/logger": "0.5.0-canary.bc5a7c4a7",
|
|
58
|
+
"@firebase/util": "1.13.0-canary.bc5a7c4a7",
|
|
59
59
|
"tslib": "^2.1.0"
|
|
60
60
|
},
|
|
61
61
|
"license": "Apache-2.0",
|
|
62
62
|
"devDependencies": {
|
|
63
|
-
"@firebase/app": "0.14.4-canary.
|
|
63
|
+
"@firebase/app": "0.14.4-canary.bc5a7c4a7",
|
|
64
64
|
"@rollup/plugin-json": "6.1.0",
|
|
65
65
|
"rollup": "2.79.2",
|
|
66
66
|
"rollup-plugin-replace": "2.2.0",
|