@opensecret/react 1.4.1 → 1.4.2

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package content exactly as it appears in the public registry.
package/dist/index.d.ts CHANGED
@@ -53,6 +53,7 @@ declare namespace api {
53
53
  uploadDocument,
54
54
  checkDocumentStatus,
55
55
  uploadDocumentWithPolling,
56
+ transcribeAudio,
56
57
  LoginResponse,
57
58
  UserResponse,
58
59
  KVListItem,
@@ -78,7 +79,9 @@ declare namespace api {
78
79
  ApiKeyCreateResponse,
79
80
  ApiKeyListResponse,
80
81
  DocumentStatusRequest,
81
- DocumentStatusResponse
82
+ DocumentStatusResponse,
83
+ WhisperTranscriptionRequest,
84
+ WhisperTranscriptionResponse
82
85
  }
83
86
  }
84
87
 
@@ -1386,6 +1389,36 @@ export declare type OpenSecretContextType = {
1386
1389
  * Names are unique per user, so this uniquely identifies the key to delete.
1387
1390
  */
1388
1391
  deleteApiKey: typeof api.deleteApiKey;
1392
+ /**
1393
+ * Transcribes audio using the Whisper API
1394
+ * @param file - The audio file to transcribe (File or Blob object)
1395
+ * @param options - Optional transcription parameters
1396
+ * @returns A promise resolving to the transcription response
1397
+ * @throws {Error} If the user is not authenticated or transcription fails
1398
+ *
1399
+ * @description
1400
+ * This function transcribes audio using OpenAI's Whisper model via the encrypted API.
1401
+ *
1402
+ * Options:
1403
+ * - model: Model to use (default: "whisper-large-v3", routes to Tinfoil's whisper-large-v3-turbo)
1404
+ * - language: Optional ISO-639-1 language code (e.g., "en", "es", "fr")
1405
+ * - prompt: Optional context or previous segment transcript
1406
+ * - response_format: Format of the response (default: "json")
1407
+ * - temperature: Sampling temperature between 0 and 1 (default: 0.0)
1408
+ *
1409
+ * Supported audio formats: MP3, WAV, MP4, M4A, FLAC, OGG, WEBM
1410
+ *
1411
+ * Example usage:
1412
+ * ```typescript
1413
+ * const audioFile = new File([audioData], "recording.mp3", { type: "audio/mpeg" });
1414
+ * const result = await context.transcribeAudio(audioFile, {
1415
+ * language: "en",
1416
+ * prompt: "This is a technical discussion about AI"
1417
+ * });
1418
+ * console.log(result.text);
1419
+ * ```
1420
+ */
1421
+ transcribeAudio: typeof api.transcribeAudio;
1389
1422
  };
1390
1423
 
1391
1424
  /**
@@ -2130,6 +2163,54 @@ declare type ThirdPartyTokenResponse = {
2130
2163
  token: string;
2131
2164
  };
2132
2165
 
2166
+ /**
2167
+ * Transcribes audio using the Whisper API
2168
+ * @param file - The audio file to transcribe (File or Blob object)
2169
+ * @param options - Optional transcription parameters
2170
+ * @returns A promise resolving to the transcription response
2171
+ * @throws {Error} If:
2172
+ * - The user is not authenticated
2173
+ * - The file cannot be read
2174
+ * - The transcription fails
2175
+ *
2176
+ * @description
2177
+ * This function transcribes audio using OpenAI's Whisper model via the encrypted API.
2178
+ *
2179
+ * Options:
2180
+ * - model: Model to use (default: "whisper-large-v3", routes to Tinfoil's whisper-large-v3-turbo)
2181
+ * - language: Optional ISO-639-1 language code (e.g., "en", "es", "fr")
2182
+ * - prompt: Optional context or previous segment transcript
2183
+ * - response_format: Format of the response (default: "json")
2184
+ * - temperature: Sampling temperature between 0 and 1 (default: 0.0)
2185
+ *
2186
+ * Supported audio formats:
2187
+ * - MP3 (audio/mpeg)
2188
+ * - WAV (audio/wav)
2189
+ * - MP4 (audio/mp4)
2190
+ * - M4A (audio/m4a)
2191
+ * - FLAC (audio/flac)
2192
+ * - OGG (audio/ogg)
2193
+ * - WEBM (audio/webm)
2194
+ *
2195
+ * Example usage:
2196
+ * ```typescript
2197
+ * const audioFile = new File([audioData], "recording.mp3", { type: "audio/mpeg" });
2198
+ * const result = await transcribeAudio(audioFile, {
2199
+ * language: "en",
2200
+ * prompt: "This is a technical discussion about AI"
2201
+ * });
2202
+ * console.log(result.text);
2203
+ * ```
2204
+ */
2205
+ declare function transcribeAudio(file: File | Blob, options?: {
2206
+ model?: string;
2207
+ language?: string;
2208
+ prompt?: string;
2209
+ response_format?: string;
2210
+ temperature?: number;
2211
+ apiKey?: string;
2212
+ }): Promise<WhisperTranscriptionResponse>;
2213
+
2133
2214
  declare function updateEmailSettings(orgId: string, projectId: string, settings: EmailSettings): Promise<EmailSettings>;
2134
2215
 
2135
2216
  declare function updateMemberRole(orgId: string, userId: string, role: string): Promise<OrganizationMember>;
@@ -2234,4 +2315,19 @@ declare function verifyEmail(code: string): Promise<void>;
2234
2315
  */
2235
2316
  declare function verifyPlatformEmail(code: string): Promise<void>;
2236
2317
 
2318
+ declare type WhisperTranscriptionRequest = {
2319
+ file: string;
2320
+ filename: string;
2321
+ content_type: string;
2322
+ model: string;
2323
+ language?: string;
2324
+ prompt?: string;
2325
+ response_format?: string;
2326
+ temperature?: number;
2327
+ };
2328
+
2329
+ declare type WhisperTranscriptionResponse = {
2330
+ text: string;
2331
+ };
2332
+
2237
2333
  export { }