@audicle/sdk 0.1.0
- package/README.md +244 -0
- package/dist/index.cjs +538 -0
- package/dist/index.d.cts +966 -0
- package/dist/index.d.ts +966 -0
- package/dist/index.js +492 -0
- package/package.json +44 -0
package/README.md
ADDED
@@ -0,0 +1,244 @@
# Audicle Node.js SDK

The official Node.js/TypeScript client for the [Audicle](https://audicle.ai) transcription API. Supports batch transcription, real-time streaming over WebSocket, and full transcript management.

## Installation

```bash
npm install @audicle/sdk
```

## Quick Start

```typescript
import { Audicle } from "@audicle/sdk";

const client = new Audicle({ apiKey: "ak_..." });

// Transcribe a file — returns completed transcript synchronously for small files
const result = await client.transcribe({
  file: new Blob([audioBuffer], { type: "audio/wav" }),
});
console.log(result.result.text);
```

## Batch Transcription

### From a file

```typescript
import { readFileSync } from "fs";

const audio = readFileSync("recording.mp3");
const result = await client.transcribe({
  file: new Blob([audio], { type: "audio/mpeg" }),
});
```

### From a URL

```typescript
const result = await client.transcribe({
  audioUrl: "https://example.com/audio.mp3",
});
```

### With options

```typescript
const result = await client.transcribe(
  { file: audioBlob },
  {
    model: "default",
    language: "en",
    speakerLabels: true,
    wordTimestamps: true,
    webhookUrl: "https://example.com/webhook",
    metadata: { source: "upload" },
  },
);
```

### Async mode for large files

Small files are transcribed synchronously (the result is returned inline). For large files, or when you want an immediate acknowledgment:

```typescript
// Force async — returns immediately with status "queued"
const job = await client.transcribe({ file: largeFile }, { async: true });

// Poll until complete
const done = await client.transcripts.wait(job.id);
console.log(done.result.text);
```

### Idempotency

```typescript
const result = await client.transcribe(
  { file: audioBlob },
  { idempotencyKey: "upload-abc-123" },
);
// Retrying with the same key returns the cached result
```

## Transcripts

```typescript
// Get a transcript
const txn = await client.transcripts.get("txn_abc123");

// Get as plain text, SRT, or VTT
const text = await client.transcripts.getText("txn_abc123");
const srt = await client.transcripts.getSrt("txn_abc123");
const vtt = await client.transcripts.getVtt("txn_abc123");

// List with filters
const page = await client.transcripts.list({
  status: "completed",
  limit: 25,
});

// Auto-paginate through all results
for await (const txn of client.transcripts.listAll()) {
  console.log(txn.id, txn.status);
}

// Delete
await client.transcripts.delete("txn_abc123");
```
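
To persist an export, the caption formats pair naturally with the filesystem. A minimal sketch, assuming `getSrt` resolves to the caption text as a plain string (as the example above suggests):

```typescript
import { writeFileSync } from "fs";

// Fetch SRT captions for a finished transcript and write them to disk.
const srt = await client.transcripts.getSrt("txn_abc123");
writeFileSync("recording.srt", srt);
```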

### Polling

```typescript
// Wait for a queued/processing transcript to finish
const result = await client.transcripts.wait("txn_abc123", {
  interval: 2000, // poll every 2s (default: 3s, minimum: 500ms)
  timeout: 60000, // give up after 60s (default: 5 min)
});
```

## Real-Time Streaming

Stream audio over WebSocket for live transcription:

```typescript
const stream = client.streaming.transcribe({
  model: "default",
  sample_rate: 16000,
  encoding: "pcm_s16le",
});

stream.on("session.begin", (msg) => {
  console.log(`Session started: ${msg.id}`);
});

stream.on("transcript", (msg) => {
  if (msg.is_final) {
    console.log(msg.transcript.text);
  }
});

stream.on("session.end", (msg) => {
  console.log(`Done — ${msg.usage.duration_seconds}s`);
});

// Send PCM audio chunks
stream.sendAudio(pcmBuffer);

// Signal end of audio
stream.finalize();
```
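
For pre-recorded audio you can drive the same session from a file by sending it in fixed-size chunks; a minimal sketch, assuming a raw 16 kHz `pcm_s16le` file and the `sendAudio`/`finalize` calls shown above:

```typescript
import { createReadStream } from "fs";

// Read raw PCM (16 kHz, s16le) in ~100 ms chunks:
// 16000 samples/s * 2 bytes * 0.1 s = 3200 bytes per chunk.
const audio = createReadStream("recording.raw", { highWaterMark: 3200 });

for await (const chunk of audio) {
  stream.sendAudio(chunk as Buffer);
}

// No more audio to send
stream.finalize();
```

When simulating a live source, you may also want to pace the chunks to roughly match real-time playback instead of sending them as fast as the file can be read.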

### Models

| Model | Description |
|---|---|
| `default` | Whisper-based streaming transcription |
| `turbo` | Faster Whisper variant |
| `deepgram-nova-3` | Deepgram Nova 3 (supports interim results + VAD) |
| `gpt-realtime-mini` | OpenAI Realtime |
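
To use a different model, pass its identifier when opening the stream. A minimal sketch of selecting `deepgram-nova-3` and logging interim results; it assumes non-final hypotheses arrive as `transcript` events with `is_final: false`, in line with the handler shown earlier:

```typescript
const nova = client.streaming.transcribe({
  model: "deepgram-nova-3",
  sample_rate: 16000,
  encoding: "pcm_s16le",
});

nova.on("transcript", (msg) => {
  if (msg.is_final) {
    console.log(`final:   ${msg.transcript.text}`);
  } else {
    // deepgram-nova-3 supports interim results (see table above)
    console.log(`interim: ${msg.transcript.text}`);
  }
});
```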

### Observing a session

Watch a live transcription session from the dashboard or another client:

```typescript
const observer = client.streaming.observe("txn_abc123");

observer.on("transcript", (msg) => {
  console.log(msg.transcript.text);
});
```

## Usage

```typescript
const usage = await client.usage.get({
  startDate: "2025-01-01",
  endDate: "2025-01-31",
  granularity: "day", // "hour" | "day"
});

console.log(`${usage.total_requests} requests`);
console.log(`${usage.total_duration_seconds}s total audio`);
console.log(`$${(usage.total_cost_cents / 100).toFixed(2)} cost`);
```

## Health Check

```typescript
const health = await client.health();
// { status: "healthy", version: "1.0.0", checks: { ... } }
```

## Error Handling

All API errors are thrown as typed exceptions:

```typescript
import {
  AudicleApiError,
  AudicleAuthError,
  AudicleNotFoundError,
  AudicleRateLimitError,
  AudicleTimeoutError,
} from "@audicle/sdk";

try {
  await client.transcripts.get("txn_nonexistent");
} catch (err) {
  if (err instanceof AudicleNotFoundError) {
    console.log(err.status); // 404
    console.log(err.code); // "transcription_not_found"
    console.log(err.message); // "Transcription not found"
    console.log(err.requestId); // "req_..."
  }
}
```

| Error Class | Status | When |
|---|---|---|
| `AudicleAuthError` | 401 | Invalid or missing API key |
| `AudicleNotFoundError` | 404 | Resource doesn't exist |
| `AudicleRateLimitError` | 429 | Too many requests |
| `AudicleApiError` | any | All other API errors (400, 402, 500, etc.) |
| `AudicleTimeoutError` | — | `transcripts.wait()` exceeded timeout |
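
Rate-limit errors are a natural candidate for retries. A minimal sketch of a retry helper built on `AudicleRateLimitError`; the backoff values are illustrative, and it assumes the call is safe to repeat (for example, paired with an `idempotencyKey`):

```typescript
import { AudicleRateLimitError } from "@audicle/sdk";

async function withRetries<T>(fn: () => Promise<T>, attempts = 3): Promise<T> {
  for (let i = 0; ; i++) {
    try {
      return await fn();
    } catch (err) {
      if (!(err instanceof AudicleRateLimitError) || i >= attempts - 1) throw err;
      // Exponential backoff: 1s, 2s, 4s, ...
      await new Promise((resolve) => setTimeout(resolve, 1000 * 2 ** i));
    }
  }
}

const result = await withRetries(() =>
  client.transcribe({ file: audioBlob }, { idempotencyKey: "upload-abc-123" }),
);
```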

## Configuration

```typescript
const client = new Audicle({
  apiKey: "ak_...", // required
  baseUrl: "https://api.audicle.ai", // default
});
```

## Requirements

- Node.js 18+ or any runtime with `fetch` and `WebSocket` globals
- Works in Node.js, Bun, Deno, and Cloudflare Workers

## License

MIT