@livekit/agents-plugin-baseten 1.0.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/LICENSE +201 -0
  2. package/README.md +92 -0
  3. package/dist/index.cjs +48 -0
  4. package/dist/index.cjs.map +1 -0
  5. package/dist/index.d.cts +5 -0
  6. package/dist/index.d.ts +5 -0
  7. package/dist/index.d.ts.map +1 -0
  8. package/dist/index.js +21 -0
  9. package/dist/index.js.map +1 -0
  10. package/dist/llm.cjs +143 -0
  11. package/dist/llm.cjs.map +1 -0
  12. package/dist/llm.d.cts +44 -0
  13. package/dist/llm.d.ts +44 -0
  14. package/dist/llm.d.ts.map +1 -0
  15. package/dist/llm.js +117 -0
  16. package/dist/llm.js.map +1 -0
  17. package/dist/llm.test.cjs +14 -0
  18. package/dist/llm.test.cjs.map +1 -0
  19. package/dist/llm.test.d.cts +2 -0
  20. package/dist/llm.test.d.ts +2 -0
  21. package/dist/llm.test.d.ts.map +1 -0
  22. package/dist/llm.test.js +13 -0
  23. package/dist/llm.test.js.map +1 -0
  24. package/dist/stt.cjs +271 -0
  25. package/dist/stt.cjs.map +1 -0
  26. package/dist/stt.d.cts +18 -0
  27. package/dist/stt.d.ts +18 -0
  28. package/dist/stt.d.ts.map +1 -0
  29. package/dist/stt.js +246 -0
  30. package/dist/stt.js.map +1 -0
  31. package/dist/stt.test.cjs +9 -0
  32. package/dist/stt.test.cjs.map +1 -0
  33. package/dist/stt.test.d.cts +2 -0
  34. package/dist/stt.test.d.ts +2 -0
  35. package/dist/stt.test.d.ts.map +1 -0
  36. package/dist/stt.test.js +8 -0
  37. package/dist/stt.test.js.map +1 -0
  38. package/dist/tts.cjs +161 -0
  39. package/dist/tts.cjs.map +1 -0
  40. package/dist/tts.d.cts +45 -0
  41. package/dist/tts.d.ts +45 -0
  42. package/dist/tts.d.ts.map +1 -0
  43. package/dist/tts.js +141 -0
  44. package/dist/tts.js.map +1 -0
  45. package/dist/tts.test.cjs +9 -0
  46. package/dist/tts.test.cjs.map +1 -0
  47. package/dist/tts.test.d.cts +2 -0
  48. package/dist/tts.test.d.ts +2 -0
  49. package/dist/tts.test.d.ts.map +1 -0
  50. package/dist/tts.test.js +8 -0
  51. package/dist/tts.test.js.map +1 -0
  52. package/dist/types.cjs +17 -0
  53. package/dist/types.cjs.map +1 -0
  54. package/dist/types.d.cts +54 -0
  55. package/dist/types.d.ts +54 -0
  56. package/dist/types.d.ts.map +1 -0
  57. package/dist/types.js +1 -0
  58. package/dist/types.js.map +1 -0
  59. package/package.json +68 -0
  60. package/src/index.ts +20 -0
  61. package/src/llm.test.ts +16 -0
  62. package/src/llm.ts +172 -0
  63. package/src/stt.test.ts +11 -0
  64. package/src/stt.ts +298 -0
  65. package/src/tts.test.ts +11 -0
  66. package/src/tts.ts +202 -0
  67. package/src/types.ts +55 -0
package/src/stt.ts ADDED
@@ -0,0 +1,298 @@
+ // SPDX-FileCopyrightText: 2024 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+ import { type AudioBuffer, AudioByteStream, Task, log, stt, waitForAbort } from '@livekit/agents';
+ import type { AudioFrame } from '@livekit/rtc-node';
+ import { WebSocket } from 'ws';
+ import type { BasetenSttOptions } from './types.js';
+
+ const defaultSTTOptions: Partial<BasetenSttOptions> = {
+   environment: 'production',
+   encoding: 'pcm_s16le',
+   sampleRate: 16000,
+   bufferSizeSeconds: 0.032,
+   enablePartialTranscripts: true,
+   partialTranscriptIntervalS: 0.5,
+   finalTranscriptMaxDurationS: 5,
+   audioLanguage: 'en',
+   languageDetectionOnly: false,
+   vadThreshold: 0.5,
+   vadMinSilenceDurationMs: 300,
+   vadSpeechPadMs: 30,
+ };
+
+ export class STT extends stt.STT {
+   #opts: BasetenSttOptions;
+   #logger = log();
+   label = 'baseten.STT';
+
+   constructor(opts: Partial<BasetenSttOptions> = {}) {
+     super({
+       streaming: true,
+       interimResults: opts.enablePartialTranscripts ?? defaultSTTOptions.enablePartialTranscripts!,
+     });
+
+     const apiKey = opts.apiKey ?? process.env.BASETEN_API_KEY;
+     const modelId = opts.modelId ?? process.env.BASETEN_STT_MODEL_ID;
+
+     if (!apiKey) {
+       throw new Error(
+         'Baseten API key is required, either pass it as `apiKey` or set $BASETEN_API_KEY',
+       );
+     }
+     if (!modelId) {
+       throw new Error(
+         'Baseten model ID is required, either pass it as `modelId` or set $BASETEN_STT_MODEL_ID',
+       );
+     }
+
+     this.#opts = {
+       ...defaultSTTOptions,
+       ...opts,
+       apiKey,
+       modelId,
+     } as BasetenSttOptions;
+   }
+
+   // eslint-disable-next-line
+   async _recognize(_: AudioBuffer): Promise<stt.SpeechEvent> {
+     throw new Error('Recognize is not supported on Baseten STT');
+   }
+
+   updateOptions(opts: Partial<BasetenSttOptions>) {
+     this.#opts = { ...this.#opts, ...opts };
+   }
+
+   stream(): SpeechStream {
+     return new SpeechStream(this, this.#opts);
+   }
+ }
+
+ export class SpeechStream extends stt.SpeechStream {
+   #opts: BasetenSttOptions;
+   #logger = log();
+   #speaking = false;
+   #requestId = '';
+   label = 'baseten.SpeechStream';
+
+   constructor(stt: STT, opts: BasetenSttOptions) {
+     super(stt, opts.sampleRate);
+     this.#opts = opts;
+     this.closed = false;
+   }
+
+   private getWsUrl(): string {
+     return `wss://model-${this.#opts.modelId}.api.baseten.co/environments/${this.#opts.environment}/websocket`;
+   }
+
+   protected async run() {
+     const maxRetry = 32;
+     let retries = 0;
+
+     while (!this.input.closed && !this.closed) {
+       const url = this.getWsUrl();
+       const headers = {
+         Authorization: `Api-Key ${this.#opts.apiKey}`,
+       };
+
+       const ws = new WebSocket(url, { headers });
+
+       try {
+         await new Promise((resolve, reject) => {
+           ws.on('open', resolve);
+           ws.on('error', (error) => reject(error));
+           ws.on('close', (code) => reject(`WebSocket returned ${code}`));
+         });
+
+         await this.#runWS(ws);
+       } catch (e) {
+         if (!this.closed && !this.input.closed) {
+           if (retries >= maxRetry) {
+             throw new Error(`failed to connect to Baseten after ${retries} attempts: ${e}`);
+           }
+
+           const delay = Math.min(retries * 5, 10);
+           retries++;
+
+           this.#logger.warn(
+             `failed to connect to Baseten, retrying in ${delay} seconds: ${e} (${retries}/${maxRetry})`,
+           );
+           await new Promise((resolve) => setTimeout(resolve, delay * 1000));
+         } else {
+           this.#logger.warn(
+             `Baseten disconnected, connection is closed: ${e} (inputClosed: ${this.input.closed}, isClosed: ${this.closed})`,
+           );
+         }
+       }
+     }
+
+     this.closed = true;
+   }
+
+   async #runWS(ws: WebSocket) {
+     let closing = false;
+
+     // Send initial metadata
+     const metadata = {
+       streaming_vad_config: {
+         threshold: this.#opts.vadThreshold,
+         min_silence_duration_ms: this.#opts.vadMinSilenceDurationMs,
+         speech_pad_ms: this.#opts.vadSpeechPadMs,
+       },
+       streaming_params: {
+         encoding: this.#opts.encoding ?? 'pcm_s16le',
+         sample_rate: this.#opts.sampleRate ?? 16000,
+         enable_partial_transcripts: this.#opts.enablePartialTranscripts,
+         partial_transcript_interval_s: this.#opts.partialTranscriptIntervalS,
+         final_transcript_max_duration_s: this.#opts.finalTranscriptMaxDurationS,
+       },
+       whisper_params: {
+         prompt: this.#opts.prompt,
+         audio_language: this.#opts.audioLanguage ?? 'en',
+         language_detection_only: this.#opts.languageDetectionOnly ?? false,
+       },
+     };
+     ws.send(JSON.stringify(metadata));
+
+     const sendTask = async () => {
+       const sampleRate = this.#opts.sampleRate ?? 16000;
+       const samplesPerChunk = sampleRate === 16000 ? 512 : 256;
+       const audioByteStream = new AudioByteStream(sampleRate, 1, samplesPerChunk);
+
+       try {
+         while (!this.closed) {
+           const result = await this.input.next();
+           if (result.done) {
+             break;
+           }
+
+           const data = result.value;
+
+           let frames: AudioFrame[];
+           if (data === SpeechStream.FLUSH_SENTINEL) {
+             // Flush any remaining buffered audio
+             frames = audioByteStream.flush();
+           } else {
+             if (data.sampleRate !== sampleRate || data.channels !== 1) {
+               throw new Error(
+                 `sample rate or channel count mismatch: expected ${sampleRate}Hz/1ch, got ${data.sampleRate}Hz/${data.channels}ch`,
+               );
+             }
+             frames = audioByteStream.write(data.data.buffer as ArrayBuffer);
+           }
+
+           for (const frame of frames) {
+             const buffer = Buffer.from(
+               frame.data.buffer,
+               frame.data.byteOffset,
+               frame.data.byteLength,
+             );
+             ws.send(buffer);
+           }
+         }
+       } finally {
+         closing = true;
+         ws.close();
+       }
+     };
+
+     const listenTask = Task.from(async (controller) => {
+       const listenMessage = new Promise<void>((resolve, reject) => {
+         ws.on('message', (data) => {
+           try {
+             let jsonString: string;
+
+             if (typeof data === 'string') {
+               jsonString = data;
+             } else if (data instanceof Buffer) {
+               jsonString = data.toString('utf-8');
+             } else if (Array.isArray(data)) {
+               jsonString = Buffer.concat(data).toString('utf-8');
+             } else {
+               return;
+             }
+
+             const msg = JSON.parse(jsonString);
+
+             // Parse response format matching Python implementation
+             const isFinal = msg.is_final ?? true;
+             const segments = msg.segments ?? [];
+             const transcript = msg.transcript ?? '';
+             const confidence = msg.confidence ?? 0.0;
+             const languageCode = msg.language_code ?? this.#opts.audioLanguage;
+
+             // Skip if no transcript text
+             if (!transcript) {
+               this.#logger.debug('Received non-transcript message:', msg);
+               return;
+             }
+
+             // Emit START_OF_SPEECH if not already speaking (only for interim or first final)
+             if (!this.#speaking && !isFinal) {
+               this.#speaking = true;
+               this.queue.put({ type: stt.SpeechEventType.START_OF_SPEECH });
+             }
+
+             // Extract timing from segments
+             const startTime = segments.length > 0 ? segments[0].start ?? 0.0 : 0.0;
+             const endTime = segments.length > 0 ? segments[segments.length - 1].end ?? 0.0 : 0.0;
+
+             const speechData: stt.SpeechData = {
+               language: languageCode!,
+               text: transcript,
+               startTime,
+               endTime,
+               confidence,
+             };
+
+             // Handle interim vs final transcripts (matching Python implementation)
+             if (!isFinal) {
+               // Interim transcript
+               this.queue.put({
+                 type: stt.SpeechEventType.INTERIM_TRANSCRIPT,
+                 alternatives: [speechData],
+               });
+             } else {
+               // Final transcript
+               this.queue.put({
+                 type: stt.SpeechEventType.FINAL_TRANSCRIPT,
+                 alternatives: [speechData],
+               });
+
+               // Emit END_OF_SPEECH after final transcript
+               if (this.#speaking) {
+                 this.#speaking = false;
+                 this.queue.put({ type: stt.SpeechEventType.END_OF_SPEECH });
+               }
+             }
+
+             if (this.closed || closing) {
+               resolve();
+             }
+           } catch (err) {
+             this.#logger.error(`STT: Error processing message: ${data}`);
+             reject(err);
+           }
+         });
+
+         ws.on('error', (err) => {
+           if (!closing) {
+             reject(err);
+           }
+         });
+
+         ws.on('close', () => {
+           if (!closing) {
+             resolve();
+           }
+         });
+       });
+
+       await Promise.race([listenMessage, waitForAbort(controller.signal)]);
+     }, this.abortController);
+
+     await Promise.all([sendTask(), listenTask.result]);
+     closing = true;
+     ws.close();
+   }
+ }
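
For orientation, a minimal usage sketch of the STT class above. It assumes the package root re-exports STT (per package/src/index.ts) and that SpeechStream is async-iterable, as in other LiveKit agents plugins; the frame source is illustrative and not part of the package:

    import { STT } from '@livekit/agents-plugin-baseten';

    // Falls back to $BASETEN_API_KEY and $BASETEN_STT_MODEL_ID when omitted.
    const basetenStt = new STT({
      sampleRate: 16000, // pushed AudioFrames must be 16kHz mono to match
      enablePartialTranscripts: true,
    });

    const stream = basetenStt.stream();
    // feedMicrophoneFrames(stream) — hypothetical helper pushing 16kHz mono frames
    for await (const event of stream) {
      // INTERIM_TRANSCRIPT and FINAL_TRANSCRIPT events carry the text in alternatives[0]
      if (event.alternatives?.length) {
        console.log(event.type, event.alternatives[0].text);
      }
    }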
package/src/tts.test.ts ADDED
@@ -0,0 +1,11 @@
+ // SPDX-FileCopyrightText: 2024 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+ import { tts } from '@livekit/agents-plugins-test';
+ import { describe } from 'vitest';
+ import { STT } from './stt.js';
+ import { TTS } from './tts.js';
+
+ describe('Baseten', async () => {
+   await tts(new TTS(), new STT(), { streaming: false });
+ });
package/src/tts.ts ADDED
@@ -0,0 +1,202 @@
+ // SPDX-FileCopyrightText: 2024 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+ import {
+   type APIConnectOptions,
+   AudioByteStream,
+   shortuuid,
+   tts,
+   waitForAbort,
+ } from '@livekit/agents';
+ import type { AudioFrame } from '@livekit/rtc-node';
+ import type { BasetenTTSOptions } from './types.js';
+
+ const defaultTTSOptions: Partial<BasetenTTSOptions> = {
+   voice: 'tara',
+   language: 'en',
+   temperature: 0.6,
+ };
+
+ /**
+  * Baseten TTS implementation (chunked HTTP synthesis, 24kHz mono)
+  */
+ export class TTS extends tts.TTS {
+   private opts: BasetenTTSOptions;
+   label = 'baseten.TTS';
+   private abortController = new AbortController();
+   constructor(opts: Partial<BasetenTTSOptions> = {}) {
+     /**
+      * Baseten audio is 24kHz mono.
+      * The Orpheus model generates audio chunks that are processed as they arrive,
+      * which reduces latency and improves agent responsiveness.
+      */
+     super(24000, 1, { streaming: false });
+
+     // Apply defaults and environment fallbacks.
+     const apiKey = opts.apiKey ?? process.env.BASETEN_API_KEY;
+     const modelEndpoint = opts.modelEndpoint ?? process.env.BASETEN_MODEL_ENDPOINT;
+
+     if (!apiKey) {
+       throw new Error(
+         'Baseten API key is required, either pass it as `apiKey` or set $BASETEN_API_KEY',
+       );
+     }
+     if (!modelEndpoint) {
+       throw new Error(
+         'Baseten model endpoint is required, either pass it as `modelEndpoint` or set $BASETEN_MODEL_ENDPOINT',
+       );
+     }
+
+     this.opts = {
+       ...defaultTTSOptions,
+       ...opts,
+       apiKey,
+       modelEndpoint,
+     } as BasetenTTSOptions;
+   }
+
+   updateOptions(opts: Partial<Omit<BasetenTTSOptions, 'apiKey' | 'modelEndpoint'>>) {
+     this.opts = {
+       ...this.opts,
+       ...opts,
+     } as BasetenTTSOptions;
+   }
+
+   /**
+    * Synthesize speech for a given piece of text. Returns a `ChunkedStream`
+    * which will asynchronously fetch audio from Baseten and push frames into
+    * LiveKit's playback pipeline. If you need to cancel synthesis you can
+    * call {@link ChunkedStream.stop} on the returned object.
+    */
+   synthesize(
+     text: string,
+     connOptions?: APIConnectOptions,
+     abortSignal?: AbortSignal,
+   ): ChunkedStream {
+     return new ChunkedStream(this, text, this.opts, connOptions, abortSignal);
+   }
+
+   stream(): tts.SynthesizeStream {
+     throw new Error('Streaming is not supported on Baseten TTS');
+   }
+
+   async close(): Promise<void> {
+     this.abortController.abort();
+   }
+ }
+
+ /**
+  * Internal helper that performs the actual HTTP request and converts the
+  * response into audio frames. It inherits from `tts.ChunkedStream` to
+  * integrate with LiveKit's event and cancellation framework.
+  *
+  * This implementation streams audio chunks as they arrive from the Baseten
+  * model endpoint, processing them incrementally instead of waiting for the
+  * complete response.
+  */
+ export class ChunkedStream extends tts.ChunkedStream {
+   label = 'baseten.ChunkedStream';
+   private readonly opts: BasetenTTSOptions;
+
+   constructor(
+     tts: TTS,
+     text: string,
+     opts: BasetenTTSOptions,
+     connOptions?: APIConnectOptions,
+     abortSignal?: AbortSignal,
+   ) {
+     super(text, tts, connOptions, abortSignal);
+     this.opts = opts;
+   }
+
+   /**
+    * Execute the synthesis request. This method is automatically invoked
+    * by the base class when the stream starts. It performs a POST request
+    * to the configured `modelEndpoint` with the input text and optional
+    * parameters. Audio chunks are streamed as they arrive and transformed
+    * into a sequence of `AudioFrame` objects that are enqueued immediately
+    * for playback.
+    */
+   protected async run() {
+     const { apiKey, modelEndpoint, voice, language, temperature, maxTokens } = this.opts;
+     const payload: Record<string, unknown> = {
+       prompt: this.inputText,
+     };
+     if (voice) payload.voice = voice;
+     if (language) payload.language = language;
+     if (temperature !== undefined) payload.temperature = temperature;
+     if (maxTokens !== undefined) payload.max_tokens = maxTokens;
+
+     const headers: Record<string, string> = {
+       Authorization: `Api-Key ${apiKey}`,
+       'Content-Type': 'application/json',
+     };
+
+     const response = await fetch(modelEndpoint, {
+       method: 'POST',
+       headers,
+       body: JSON.stringify(payload),
+       signal: this.abortSignal,
+     });
+
+     if (!response.ok) {
+       let errText: string;
+       try {
+         errText = await response.text();
+       } catch {
+         errText = response.statusText;
+       }
+       throw new Error(`Baseten TTS request failed: ${response.status} ${errText}`);
+     }
+
+     // Stream the response body as chunks arrive
+     if (!response.body) {
+       throw new Error('Response body is not available for streaming');
+     }
+
+     const requestId = shortuuid();
+     const audioByteStream = new AudioByteStream(24000, 1);
+     const reader = response.body.getReader();
+
+     try {
+       let lastFrame: AudioFrame | undefined;
+       const sendLastFrame = (segmentId: string, final: boolean) => {
+         if (lastFrame) {
+           this.queue.put({ requestId, segmentId, frame: lastFrame, final });
+           lastFrame = undefined;
+         }
+       };
+
+       // waitForAbort internally sets up an abort listener on the abort signal;
+       // it must live outside the loop to avoid re-registering the listener on
+       // every iteration.
+       const abortPromise = waitForAbort(this.abortSignal);
+
+       while (!this.abortSignal.aborted) {
+         const result = await Promise.race([reader.read(), abortPromise]);
+
+         if (result === undefined) break; // aborted
+
+         const { done, value } = result;
+
+         if (done) {
+           break;
+         }
+
+         // Convert the chunk to audio frames by handing the Uint8Array's
+         // underlying buffer to AudioByteStream.
+         const frames = audioByteStream.write(value.buffer);
+
+         for (const frame of frames) {
+           sendLastFrame(requestId, false);
+           lastFrame = frame;
+         }
+       }
+
+       // Send the final frame
+       sendLastFrame(requestId, true);
+     } finally {
+       reader.releaseLock();
+       this.queue.close();
+     }
+   }
+ }
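
A hedged usage sketch for the TTS class above, assuming the package root re-exports TTS and that ChunkedStream is async-iterable like other LiveKit agents TTS streams; the endpoint URL is hypothetical. synthesize() is the supported path, since stream() throws:

    import { TTS } from '@livekit/agents-plugin-baseten';

    const basetenTts = new TTS({
      // hypothetical Baseten predict URL; $BASETEN_MODEL_ENDPOINT also works
      modelEndpoint: 'https://model-abc123.api.baseten.co/environments/production/predict',
      voice: 'tara',
      temperature: 0.6,
    });

    // The returned ChunkedStream yields 24kHz mono AudioFrames as HTTP chunks arrive.
    for await (const audio of basetenTts.synthesize('Hello from Baseten!')) {
      // audio.frame holds the PCM data; audio.final marks the last frame.
      console.log(`frame: ${audio.frame.samplesPerChannel} samples, final=${audio.final}`);
    }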
package/src/types.ts ADDED
@@ -0,0 +1,55 @@
+ // SPDX-FileCopyrightText: 2024 LiveKit, Inc.
+ //
+ // SPDX-License-Identifier: Apache-2.0
+
+ /**
+  * Baseten plugin types and interfaces
+  */
+
+ /**
+  * Options for configuring the Baseten LLM.
+  * Since Baseten provides an OpenAI-compatible API, these options
+  * map to standard OpenAI parameters.
+  */
+ export interface BasetenLLMOptions {
+   apiKey?: string;
+   model: string;
+   temperature?: number;
+   maxTokens?: number;
+   user?: string;
+   toolChoice?: 'none' | 'auto' | 'required' | { type: 'function'; function: { name: string } };
+   parallelToolCalls?: boolean;
+ }
+
+ /**
+  * Options for configuring the Baseten STT service
+  */
+ export interface BasetenSttOptions {
+   apiKey: string;
+   modelId: string;
+   environment?: string;
+   encoding?: string;
+   sampleRate?: number;
+   bufferSizeSeconds?: number;
+   vadThreshold?: number;
+   vadMinSilenceDurationMs?: number;
+   vadSpeechPadMs?: number;
+   enablePartialTranscripts?: boolean;
+   partialTranscriptIntervalS?: number;
+   finalTranscriptMaxDurationS?: number;
+   audioLanguage?: string;
+   prompt?: string;
+   languageDetectionOnly?: boolean;
+ }
+
+ /**
+  * Options for configuring the Baseten TTS service
+  */
+ export interface BasetenTTSOptions {
+   apiKey: string;
+   modelEndpoint: string;
+   voice?: string;
+   language?: string;
+   temperature?: number;
+   maxTokens?: number;
+ }
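
The three interfaces mirror the plugin's three components. A combined configuration sketch with illustrative values (the model slug, deployment id, and endpoint URL are hypothetical; import from './types.js' instead if the package root does not re-export the types):

    import type {
      BasetenLLMOptions,
      BasetenSttOptions,
      BasetenTTSOptions,
    } from '@livekit/agents-plugin-baseten';

    const llmOpts: BasetenLLMOptions = {
      model: 'my-org/my-chat-model', // hypothetical slug on the OpenAI-compatible API
      temperature: 0.7,
      toolChoice: 'auto',
    };

    const sttOpts: BasetenSttOptions = {
      apiKey: process.env.BASETEN_API_KEY!,
      modelId: 'abc123', // hypothetical Baseten deployment id
      audioLanguage: 'en',
      vadThreshold: 0.5,
    };

    const ttsOpts: BasetenTTSOptions = {
      apiKey: process.env.BASETEN_API_KEY!,
      modelEndpoint: 'https://model-abc123.api.baseten.co/environments/production/predict', // hypothetical
      voice: 'tara',
    };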