@mux/ai 0.1.6 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2168 +0,0 @@
- // src/workflows/burned-in-captions.ts
- import { generateObject } from "ai";
-
- // node_modules/dedent/dist/dedent.mjs
- function ownKeys(object, enumerableOnly) {
-   var keys = Object.keys(object);
-   if (Object.getOwnPropertySymbols) {
-     var symbols = Object.getOwnPropertySymbols(object);
-     enumerableOnly && (symbols = symbols.filter(function(sym) {
-       return Object.getOwnPropertyDescriptor(object, sym).enumerable;
-     })), keys.push.apply(keys, symbols);
-   }
-   return keys;
- }
- function _objectSpread(target) {
-   for (var i = 1; i < arguments.length; i++) {
-     var source = null != arguments[i] ? arguments[i] : {};
-     i % 2 ? ownKeys(Object(source), true).forEach(function(key) {
-       _defineProperty(target, key, source[key]);
-     }) : Object.getOwnPropertyDescriptors ? Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)) : ownKeys(Object(source)).forEach(function(key) {
-       Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key));
-     });
-   }
-   return target;
- }
- function _defineProperty(obj, key, value) {
-   key = _toPropertyKey(key);
-   if (key in obj) {
-     Object.defineProperty(obj, key, { value, enumerable: true, configurable: true, writable: true });
-   } else {
-     obj[key] = value;
-   }
-   return obj;
- }
- function _toPropertyKey(arg) {
-   var key = _toPrimitive(arg, "string");
-   return typeof key === "symbol" ? key : String(key);
- }
- function _toPrimitive(input, hint) {
-   if (typeof input !== "object" || input === null) return input;
-   var prim = input[Symbol.toPrimitive];
-   if (prim !== void 0) {
-     var res = prim.call(input, hint || "default");
-     if (typeof res !== "object") return res;
-     throw new TypeError("@@toPrimitive must return a primitive value.");
-   }
-   return (hint === "string" ? String : Number)(input);
- }
- var dedent = createDedent({});
- var dedent_default = dedent;
- function createDedent(options) {
-   dedent2.withOptions = (newOptions) => createDedent(_objectSpread(_objectSpread({}, options), newOptions));
-   return dedent2;
-   function dedent2(strings, ...values) {
-     const raw = typeof strings === "string" ? [strings] : strings.raw;
-     const {
-       alignValues = false,
-       escapeSpecialCharacters = Array.isArray(strings),
-       trimWhitespace = true
-     } = options;
-     let result = "";
-     for (let i = 0; i < raw.length; i++) {
-       let next = raw[i];
-       if (escapeSpecialCharacters) {
-         next = next.replace(/\\\n[ \t]*/g, "").replace(/\\`/g, "`").replace(/\\\$/g, "$").replace(/\\\{/g, "{");
-       }
-       result += next;
-       if (i < values.length) {
-         const value = alignValues ? alignValue(values[i], result) : values[i];
-         result += value;
-       }
-     }
-     const lines = result.split("\n");
-     let mindent = null;
-     for (const l of lines) {
-       const m = l.match(/^(\s+)\S+/);
-       if (m) {
-         const indent = m[1].length;
-         if (!mindent) {
-           mindent = indent;
-         } else {
-           mindent = Math.min(mindent, indent);
-         }
-       }
-     }
-     if (mindent !== null) {
-       const m = mindent;
-       result = lines.map((l) => l[0] === " " || l[0] === "\t" ? l.slice(m) : l).join("\n");
-     }
-     if (trimWhitespace) {
-       result = result.trim();
-     }
-     if (escapeSpecialCharacters) {
-       result = result.replace(/\\n/g, "\n");
-     }
-     return result;
-   }
- }
- function alignValue(value, precedingText) {
-   if (typeof value !== "string" || !value.includes("\n")) {
-     return value;
-   }
-   const currentLine = precedingText.slice(precedingText.lastIndexOf("\n") + 1);
-   const indentMatch = currentLine.match(/^(\s+)/);
-   if (indentMatch) {
-     const indent = indentMatch[1];
-     return value.replace(/\n/g, `
- ${indent}`);
-   }
-   return value;
- }
-
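Note: the inlined dedent helper above strips the common leading indentation from tagged template literals and trims surrounding whitespace (trimWhitespace defaults to true). A minimal standalone sketch using the published dedent package:

import dedent from "dedent";

const prompt = dedent`
  <role>
  You are a helpful assistant.
  </role>
`;
// The shared two-space indent is stripped and outer newlines are trimmed:
console.log(prompt); // "<role>\nYou are a helpful assistant.\n</role>"
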
- // src/workflows/burned-in-captions.ts
- import { z as z2 } from "zod";
-
- // src/lib/client-factory.ts
- import Mux from "@mux/mux-node";
-
- // src/env.ts
- import path from "path";
- import { config } from "dotenv";
- import { expand } from "dotenv-expand";
- import { z } from "zod";
- expand(config({
-   path: path.resolve(
-     process.cwd(),
-     process.env.NODE_ENV === "test" ? ".env.test" : ".env"
-   )
- }));
- function optionalString(description, message) {
-   return z.preprocess(
-     (value) => typeof value === "string" && value.trim().length === 0 ? void 0 : value,
-     z.string().trim().min(1, message).optional()
-   ).describe(description);
- }
- function requiredString(description, message) {
-   return z.preprocess(
-     (value) => typeof value === "string" ? value.trim().length > 0 ? value.trim() : void 0 : value,
-     z.string().trim().min(1, message)
-   ).describe(description);
- }
- var EnvSchema = z.object({
-   NODE_ENV: z.string().default("development").describe("Runtime environment."),
-   MUX_TOKEN_ID: requiredString("Mux access token ID.", "Required to access Mux APIs"),
-   MUX_TOKEN_SECRET: requiredString("Mux access token secret.", "Required to access Mux APIs"),
-   MUX_SIGNING_KEY: optionalString("Mux signing key ID for signed playback URLs.", "Used to sign playback URLs"),
-   MUX_PRIVATE_KEY: optionalString("Mux signing private key for signed playback URLs.", "Used to sign playback URLs"),
-   OPENAI_API_KEY: optionalString("OpenAI API key for OpenAI-backed workflows.", "OpenAI API key"),
-   ANTHROPIC_API_KEY: optionalString("Anthropic API key for Claude-backed workflows.", "Anthropic API key"),
-   GOOGLE_GENERATIVE_AI_API_KEY: optionalString("Google Generative AI API key for Gemini-backed workflows.", "Google Generative AI API key"),
-   ELEVENLABS_API_KEY: optionalString("ElevenLabs API key for audio translation.", "ElevenLabs API key"),
-   HIVE_API_KEY: optionalString("Hive Visual Moderation API key.", "Hive API key"),
-   S3_ENDPOINT: optionalString("S3-compatible endpoint for uploads.", "S3 endpoint"),
-   S3_REGION: optionalString("S3 region (defaults to 'auto' when omitted)."),
-   S3_BUCKET: optionalString("Bucket used for caption and audio uploads.", "S3 bucket"),
-   S3_ACCESS_KEY_ID: optionalString("Access key ID for S3-compatible uploads.", "S3 access key id"),
-   S3_SECRET_ACCESS_KEY: optionalString("Secret access key for S3-compatible uploads.", "S3 secret access key")
- });
- function parseEnv() {
-   const parsedEnv = EnvSchema.safeParse(process.env);
-   if (!parsedEnv.success) {
-     console.error("\u274C Invalid env:");
-     console.error(JSON.stringify(parsedEnv.error.flatten().fieldErrors, null, 2));
-     process.exit(1);
-   }
-   return parsedEnv.data;
- }
- var env = parseEnv();
- var env_default = env;
-
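Note: optionalString and requiredString wrap zod's preprocess so that blank or whitespace-only .env values behave as if they were unset rather than failing the min(1) check. A standalone sketch of the same pattern, using zod directly:

import { z } from "zod";

const optional = z.preprocess(
  (v) => (typeof v === "string" && v.trim().length === 0 ? undefined : v),
  z.string().trim().min(1).optional()
);

optional.parse("");        // -> undefined (blank .env values are dropped)
optional.parse("  key  "); // -> "key"
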
- // src/lib/providers.ts
- import { createAnthropic } from "@ai-sdk/anthropic";
- import { createGoogleGenerativeAI } from "@ai-sdk/google";
- import { createOpenAI } from "@ai-sdk/openai";
- var DEFAULT_LANGUAGE_MODELS = {
-   openai: "gpt-5-mini",
-   anthropic: "claude-haiku-4-5",
-   google: "gemini-2.5-flash"
- };
- var DEFAULT_EMBEDDING_MODELS = {
-   openai: "text-embedding-3-small",
-   google: "gemini-embedding-001"
- };
- function requireEnv(value, name) {
-   if (!value) {
-     throw new Error(`Missing ${name}. Set ${name} in your environment or pass it in options.`);
-   }
-   return value;
- }
- function resolveLanguageModel(options = {}) {
-   const provider = options.provider || "openai";
-   const modelId = options.model || DEFAULT_LANGUAGE_MODELS[provider];
-   switch (provider) {
-     case "openai": {
-       const apiKey = options.openaiApiKey ?? env_default.OPENAI_API_KEY;
-       requireEnv(apiKey, "OPENAI_API_KEY");
-       const openai = createOpenAI({
-         apiKey
-       });
-       return {
-         provider,
-         modelId,
-         model: openai(modelId)
-       };
-     }
-     case "anthropic": {
-       const apiKey = options.anthropicApiKey ?? env_default.ANTHROPIC_API_KEY;
-       requireEnv(apiKey, "ANTHROPIC_API_KEY");
-       const anthropic = createAnthropic({
-         apiKey
-       });
-       return {
-         provider,
-         modelId,
-         model: anthropic(modelId)
-       };
-     }
-     case "google": {
-       const apiKey = options.googleApiKey ?? env_default.GOOGLE_GENERATIVE_AI_API_KEY;
-       requireEnv(apiKey, "GOOGLE_GENERATIVE_AI_API_KEY");
-       const google = createGoogleGenerativeAI({
-         apiKey
-       });
-       return {
-         provider,
-         modelId,
-         model: google(modelId)
-       };
-     }
-     default: {
-       const exhaustiveCheck = provider;
-       throw new Error(`Unsupported provider: ${exhaustiveCheck}`);
-     }
-   }
- }
- function resolveEmbeddingModel(options = {}) {
-   const provider = options.provider || "openai";
-   const modelId = options.model || DEFAULT_EMBEDDING_MODELS[provider];
-   switch (provider) {
-     case "openai": {
-       const apiKey = options.openaiApiKey ?? env_default.OPENAI_API_KEY;
-       requireEnv(apiKey, "OPENAI_API_KEY");
-       const openai = createOpenAI({
-         apiKey
-       });
-       return {
-         provider,
-         modelId,
-         model: openai.embedding(modelId)
-       };
-     }
-     case "google": {
-       const apiKey = options.googleApiKey ?? env_default.GOOGLE_GENERATIVE_AI_API_KEY;
-       requireEnv(apiKey, "GOOGLE_GENERATIVE_AI_API_KEY");
-       const google = createGoogleGenerativeAI({
-         apiKey
-       });
-       return {
-         provider,
-         modelId,
-         model: google.textEmbeddingModel(modelId)
-       };
-     }
-     default: {
-       const exhaustiveCheck = provider;
-       throw new Error(`Unsupported embedding provider: ${exhaustiveCheck}`);
-     }
-   }
- }
-
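Note: when options.model is omitted, resolveLanguageModel falls back to the per-provider defaults in DEFAULT_LANGUAGE_MODELS. A hypothetical call inside this module's scope, assuming OPENAI_API_KEY is set:

const { modelId } = resolveLanguageModel({ provider: "openai" });
// modelId === "gpt-5-mini" (from DEFAULT_LANGUAGE_MODELS above)
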
- // src/lib/client-factory.ts
- function validateCredentials(options, requiredProvider) {
-   const muxTokenId = options.muxTokenId ?? env_default.MUX_TOKEN_ID;
-   const muxTokenSecret = options.muxTokenSecret ?? env_default.MUX_TOKEN_SECRET;
-   const openaiApiKey = options.openaiApiKey ?? env_default.OPENAI_API_KEY;
-   const anthropicApiKey = options.anthropicApiKey ?? env_default.ANTHROPIC_API_KEY;
-   const googleApiKey = options.googleApiKey ?? env_default.GOOGLE_GENERATIVE_AI_API_KEY;
-   if (!muxTokenId || !muxTokenSecret) {
-     throw new Error(
-       "Mux credentials are required. Provide muxTokenId and muxTokenSecret in options or set MUX_TOKEN_ID and MUX_TOKEN_SECRET environment variables."
-     );
-   }
-   if (requiredProvider === "openai" && !openaiApiKey) {
-     throw new Error(
-       "OpenAI API key is required. Provide openaiApiKey in options or set OPENAI_API_KEY environment variable."
-     );
-   }
-   if (requiredProvider === "anthropic" && !anthropicApiKey) {
-     throw new Error(
-       "Anthropic API key is required. Provide anthropicApiKey in options or set ANTHROPIC_API_KEY environment variable."
-     );
-   }
-   if (requiredProvider === "google" && !googleApiKey) {
-     throw new Error(
-       "Google Generative AI API key is required. Provide googleApiKey in options or set GOOGLE_GENERATIVE_AI_API_KEY environment variable."
-     );
-   }
-   return {
-     muxTokenId,
-     muxTokenSecret,
-     openaiApiKey,
-     anthropicApiKey,
-     googleApiKey
-   };
- }
- function createMuxClient(credentials) {
-   if (!credentials.muxTokenId || !credentials.muxTokenSecret) {
-     throw new Error("Mux credentials are required. Provide muxTokenId and muxTokenSecret in options or set MUX_TOKEN_ID and MUX_TOKEN_SECRET environment variables.");
-   }
-   return new Mux({
-     tokenId: credentials.muxTokenId,
-     tokenSecret: credentials.muxTokenSecret
-   });
- }
- function createWorkflowClients(options, provider) {
-   const providerToUse = provider || options.provider || "openai";
-   const credentials = validateCredentials(options, providerToUse);
-   const languageModel = resolveLanguageModel({
-     ...options,
-     provider: providerToUse
-   });
-   return {
-     mux: createMuxClient(credentials),
-     languageModel,
-     credentials
-   };
- }
-
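Note: createWorkflowClients is the shared entry point the workflows below use; it validates credentials (options first, then the env vars parsed in src/env.ts) and pairs a Mux SDK client with a resolved language model. A hedged sketch with placeholder credentials:

const clients = createWorkflowClients(
  { muxTokenId: "MUX_TOKEN_ID", muxTokenSecret: "MUX_TOKEN_SECRET" },
  "openai"
);
// clients.mux           -> authenticated Mux SDK client
// clients.languageModel -> { provider, modelId, model } for the AI SDK calls
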
- // src/lib/image-download.ts
- import { Buffer } from "buffer";
- import pRetry, { AbortError } from "p-retry";
- var DEFAULT_OPTIONS = {
-   timeout: 1e4,
-   retries: 3,
-   retryDelay: 1e3,
-   maxRetryDelay: 1e4,
-   exponentialBackoff: true
- };
- async function downloadImageAsBase64(url, options = {}) {
-   const opts = { ...DEFAULT_OPTIONS, ...options };
-   let attemptCount = 0;
-   return pRetry(
-     async () => {
-       attemptCount++;
-       const controller = new AbortController();
-       const timeoutId = setTimeout(() => controller.abort(), opts.timeout);
-       try {
-         const response = await fetch(url, {
-           signal: controller.signal,
-           headers: {
-             "User-Agent": "@mux/ai image downloader"
-           }
-         });
-         clearTimeout(timeoutId);
-         if (!response.ok) {
-           if (response.status >= 400 && response.status < 500 && response.status !== 429) {
-             throw new AbortError(`HTTP ${response.status}: ${response.statusText}`);
-           }
-           throw new Error(`HTTP ${response.status}: ${response.statusText}`);
-         }
-         const contentType = response.headers.get("content-type");
-         if (!contentType?.startsWith("image/")) {
-           throw new AbortError(`Invalid content type: ${contentType}. Expected image/*`);
-         }
-         const arrayBuffer = await response.arrayBuffer();
-         const buffer = Buffer.from(arrayBuffer);
-         if (buffer.length === 0) {
-           throw new AbortError("Downloaded image is empty");
-         }
-         const base64Data = `data:${contentType};base64,${buffer.toString("base64")}`;
-         return {
-           base64Data,
-           buffer,
-           url,
-           contentType,
-           sizeBytes: buffer.length,
-           attempts: attemptCount
-         };
-       } catch (error) {
-         clearTimeout(timeoutId);
-         if (error instanceof AbortError) {
-           throw error;
-         }
-         if (error instanceof Error) {
-           if (error.name === "AbortError") {
-             throw new Error(`Request timeout after ${opts.timeout}ms`);
-           }
-           throw new Error(`Download failed: ${error.message}`);
-         }
-         throw new Error("Unknown download error");
-       }
-     },
-     {
-       retries: opts.retries,
-       minTimeout: opts.retryDelay,
-       maxTimeout: opts.maxRetryDelay,
-       factor: opts.exponentialBackoff ? 2 : 1,
-       randomize: true,
-       // Add jitter to prevent thundering herd
-       onFailedAttempt: (error) => {
-         console.warn(`Image download attempt ${error.attemptNumber} failed for ${url}`);
-         if (error.retriesLeft > 0) {
-           console.warn(`Retrying... (${error.retriesLeft} attempts left)`);
-         }
-       }
-     }
-   );
- }
- async function downloadImagesAsBase64(urls, options = {}, maxConcurrent = 5) {
-   const results = [];
-   for (let i = 0; i < urls.length; i += maxConcurrent) {
-     const batch = urls.slice(i, i + maxConcurrent);
-     const batchPromises = batch.map((url) => downloadImageAsBase64(url, options));
-     const batchResults = await Promise.all(batchPromises);
-     results.push(...batchResults);
-   }
-   return results;
- }
-
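Note the retry split above: 4xx responses (except 429) and non-image content types throw p-retry's AbortError, which stops retrying immediately, while 5xx, 429, and network errors are retried with exponential backoff and jitter. An illustrative call in this module's scope (the playback ID is a placeholder):

const image = await downloadImageAsBase64(
  "https://image.mux.com/PLAYBACK_ID/thumbnail.png",
  { timeout: 5000, retries: 2 }
);
console.log(image.contentType, image.sizeBytes, image.attempts);
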
- // src/lib/mux-assets.ts
- function getPlaybackId(asset) {
-   const playbackIds = asset.playback_ids || [];
-   const publicPlaybackId = playbackIds.find((pid) => pid.policy === "public");
-   if (publicPlaybackId?.id) {
-     return { id: publicPlaybackId.id, policy: "public" };
-   }
-   const signedPlaybackId = playbackIds.find((pid) => pid.policy === "signed");
-   if (signedPlaybackId?.id) {
-     return { id: signedPlaybackId.id, policy: "signed" };
-   }
-   throw new Error(
-     "No public or signed playback ID found for this asset. A public or signed playback ID is required. DRM playback IDs are not currently supported."
-   );
- }
- async function getPlaybackIdForAsset(mux, assetId) {
-   const asset = await mux.video.assets.retrieve(assetId);
-   const { id: playbackId, policy } = getPlaybackId(asset);
-   return { asset, playbackId, policy };
- }
-
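Note: getPlaybackId prefers a public playback ID and only falls back to a signed one; DRM-only assets throw. A small sketch of that ordering:

const { id, policy } = getPlaybackId({
  playback_ids: [
    { id: "signed-id", policy: "signed" },
    { id: "public-id", policy: "public" },
  ],
});
// id === "public-id", policy === "public"
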
- // src/lib/prompt-builder.ts
- function renderSection(section) {
-   const { tag, content, attributes } = section;
-   const XML_NAME_PATTERN = /^[A-Z_][\w.:-]*$/i;
-   const assertValidXmlName = (name, context) => {
-     if (!XML_NAME_PATTERN.test(name)) {
-       throw new Error(`Invalid XML ${context} name: "${name}"`);
-     }
-   };
-   const escapeXmlText = (value) => value.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
-   const escapeXmlAttribute = (value) => escapeXmlText(value).replace(/"/g, "&quot;");
-   if (!content.trim()) {
-     return "";
-   }
-   assertValidXmlName(tag, "tag");
-   const attrString = attributes ? ` ${Object.entries(attributes).map(([key, value]) => {
-     assertValidXmlName(key, "attribute");
-     return `${key}="${escapeXmlAttribute(value)}"`;
-   }).join(" ")}` : "";
-   const safeContent = escapeXmlText(content.trim());
-   return `<${tag}${attrString}>
- ${safeContent}
- </${tag}>`;
- }
- function resolveSection(defaultSection, override) {
-   if (override === void 0) {
-     return defaultSection;
-   }
-   if (typeof override === "string") {
-     return { ...defaultSection, content: override };
-   }
-   return override;
- }
- function createPromptBuilder(config2) {
-   const { template, sectionOrder } = config2;
-   const getSection = (section, override) => {
-     const resolved = resolveSection(template[section], override);
-     return renderSection(resolved);
-   };
-   const build = (overrides) => {
-     const sections = sectionOrder.map((sectionKey) => getSection(sectionKey, overrides?.[sectionKey])).filter(Boolean);
-     return sections.join("\n\n");
-   };
-   const buildWithContext = (overrides, additionalSections) => {
-     const basePrompt = build(overrides);
-     if (!additionalSections?.length) {
-       return basePrompt;
-     }
-     const additional = additionalSections.map(renderSection).filter(Boolean).join("\n\n");
-     return additional ? `${basePrompt}
-
- ${additional}` : basePrompt;
-   };
-   return {
-     template,
-     build,
-     buildWithContext,
-     getSection
-   };
- }
- function createTranscriptSection(transcriptText, format = "plain text") {
-   return {
-     tag: "transcript",
-     content: transcriptText,
-     attributes: { format }
-   };
- }
- function createToneSection(instruction) {
-   return {
-     tag: "tone",
-     content: instruction
-   };
- }
-
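Note: createPromptBuilder assembles prompts from XML-style sections, escaping text and attribute values along the way. A sketch of the output shape, in this module's scope:

const xml = renderSection({
  tag: "transcript",
  content: "Hello & welcome",
  attributes: { format: "plain text" },
});
// '<transcript format="plain text">\nHello &amp; welcome\n</transcript>'
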
- // src/lib/url-signing.ts
- import Mux2 from "@mux/mux-node";
- function resolveSigningContext(config2) {
-   const keyId = config2.muxSigningKey ?? env_default.MUX_SIGNING_KEY;
-   const keySecret = config2.muxPrivateKey ?? env_default.MUX_PRIVATE_KEY;
-   if (!keyId || !keySecret) {
-     return void 0;
-   }
-   return { keyId, keySecret };
- }
- function createSigningClient(context) {
-   return new Mux2({
-     // These are not needed for signing, but the SDK requires them
-     // Using empty strings as we only need the jwt functionality
-     tokenId: env_default.MUX_TOKEN_ID || "",
-     tokenSecret: env_default.MUX_TOKEN_SECRET || "",
-     jwtSigningKey: context.keyId,
-     jwtPrivateKey: context.keySecret
-   });
- }
- async function signPlaybackId(playbackId, context, type = "video", params) {
-   const client = createSigningClient(context);
-   const stringParams = params ? Object.fromEntries(
-     Object.entries(params).map(([key, value]) => [key, String(value)])
-   ) : void 0;
-   return client.jwt.signPlaybackId(playbackId, {
-     type,
-     expiration: context.expiration || "1h",
-     params: stringParams
-   });
- }
- async function signUrl(url, playbackId, context, type = "video", params) {
-   const token = await signPlaybackId(playbackId, context, type, params);
-   const separator = url.includes("?") ? "&" : "?";
-   return `${url}${separator}token=${token}`;
- }
-
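Note: signUrl signs the playback ID with the Mux SDK's JWT helper and appends the token as a query parameter. An illustrative call (key material and playback ID are placeholders):

const signed = await signUrl(
  "https://image.mux.com/PLAYBACK_ID/storyboard.png",
  "PLAYBACK_ID",
  { keyId: "SIGNING_KEY_ID", keySecret: "PRIVATE_KEY" },
  "storyboard",
  { width: 640 }
);
// -> "https://image.mux.com/PLAYBACK_ID/storyboard.png?token=eyJ..."
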
- // src/primitives/storyboards.ts
- var DEFAULT_STORYBOARD_WIDTH = 640;
- async function getStoryboardUrl(playbackId, width = DEFAULT_STORYBOARD_WIDTH, signingContext) {
-   const baseUrl = `https://image.mux.com/${playbackId}/storyboard.png`;
-   if (signingContext) {
-     return signUrl(baseUrl, playbackId, signingContext, "storyboard", { width });
-   }
-   return `${baseUrl}?width=${width}`;
- }
-
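Note: in the unsigned branch above, the width travels as a plain query parameter:

const url = await getStoryboardUrl("PLAYBACK_ID");
// -> "https://image.mux.com/PLAYBACK_ID/storyboard.png?width=640"
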
- // src/workflows/burned-in-captions.ts
- var burnedInCaptionsSchema = z2.object({
-   hasBurnedInCaptions: z2.boolean(),
-   confidence: z2.number().min(0).max(1),
-   detectedLanguage: z2.string().nullable()
- });
- var SYSTEM_PROMPT = dedent_default`
- <role>
- You are an expert at analyzing video frames to detect burned-in captions (also called open captions or hardcoded subtitles).
- These are text overlays that are permanently embedded in the video image, common on TikTok, Instagram Reels, and other social media platforms.
- </role>
-
- <critical_note>
- Burned-in captions must appear consistently across MOST frames in the storyboard.
- Text appearing in only 1-2 frames at the end is typically marketing copy, taglines, or end-cards - NOT burned-in captions.
- </critical_note>
-
- <confidence_scoring>
- Use this rubric to determine your confidence score (0.0-1.0):
-
- Score 1.0: Definitive captions - text overlays visible in most frames, consistent positioning, content changes between frames indicating dialogue/narration, clear caption-style formatting
- Score 0.7-0.9: Strong evidence - captions visible across multiple frames with consistent placement, but minor ambiguity (e.g., some frames unclear, atypical styling)
- Score 0.4-0.6: Moderate evidence - text present in several frames but uncertain classification (e.g., could be captions or persistent on-screen graphics, ambiguous formatting)
- Score 0.1-0.3: Weak evidence - minimal text detected, appears in only a few frames, likely marketing copy or end-cards rather than captions
- Score 0.0: No captions - no text overlays detected, or text is clearly not captions (logos, watermarks, scene content, single end-card)
- </confidence_scoring>
-
- <context>
- You receive storyboard images containing multiple sequential frames extracted from a video.
- These frames are arranged in a grid and represent the visual progression of the content over time.
- Read frames left-to-right, top-to-bottom to understand the temporal sequence.
- </context>
-
- <capabilities>
- Detect and analyze text overlays in video frames
- Distinguish between captions and other text elements (marketing, logos, UI)
- Identify language of detected caption text
- Assess confidence in caption detection
- </capabilities>
-
- <constraints>
- Only classify as burned-in captions when evidence is clear across multiple frames
- Base decisions on observable visual evidence
- Return structured data matching the requested schema
- </constraints>`;
- var burnedInCaptionsPromptBuilder = createPromptBuilder({
-   template: {
-     task: {
-       tag: "task",
-       content: dedent_default`
- Analyze the provided video storyboard to detect burned-in captions (hardcoded subtitles).
- Count frames with text vs no text, note position consistency and whether text changes across frames.
- Decide if captions exist, with confidence (0.0-1.0) and detected language if any.`
-     },
-     analysisSteps: {
-       tag: "analysis_steps",
-       content: dedent_default`
- 1. COUNT how many frames contain text overlays vs. how many don't
- 2. Check if text appears in consistent positions across multiple frames
- 3. Verify text changes content between frames (indicating dialogue/narration)
- 4. Ensure text has caption-style formatting (contrasting colors, readable fonts)
- 5. If captions are detected, identify the language of the text`
-     },
-     positiveIndicators: {
-       tag: "classify_as_captions",
-       content: dedent_default`
- ONLY classify as burned-in captions if:
- - Text appears in multiple frames (not just 1-2 end frames)
- - Text positioning is consistent across those frames
- - Content suggests dialogue, narration, or subtitles (not marketing)
- - Formatting looks like captions (not graphics/logos)`
-     },
-     negativeIndicators: {
-       tag: "not_captions",
-       content: dedent_default`
- DO NOT classify as burned-in captions:
- - Marketing taglines appearing only in final 1-2 frames
- - Single words or phrases that don't change between frames
- - Graphics, logos, watermarks, or UI elements
- - Text that's part of the original scene content
- - End-cards with calls-to-action or brand messaging`
-     }
-   },
-   sectionOrder: ["task", "analysisSteps", "positiveIndicators", "negativeIndicators"]
- });
- function buildUserPrompt(promptOverrides) {
-   return burnedInCaptionsPromptBuilder.build(promptOverrides);
- }
- var DEFAULT_PROVIDER = "openai";
- async function hasBurnedInCaptions(assetId, options = {}) {
-   const {
-     provider = DEFAULT_PROVIDER,
-     model,
-     imageSubmissionMode = "url",
-     imageDownloadOptions,
-     promptOverrides,
-     ...config2
-   } = options;
-   const userPrompt = buildUserPrompt(promptOverrides);
-   const clients = createWorkflowClients(
-     { ...config2, model },
-     provider
-   );
-   const { playbackId, policy } = await getPlaybackIdForAsset(clients.mux, assetId);
-   const signingContext = resolveSigningContext(options);
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   const imageUrl = await getStoryboardUrl(playbackId, 640, policy === "signed" ? signingContext : void 0);
-   const analyzeStoryboard = async (imageDataUrl) => {
-     const response = await generateObject({
-       model: clients.languageModel.model,
-       schema: burnedInCaptionsSchema,
-       abortSignal: options.abortSignal,
-       experimental_telemetry: { isEnabled: true },
-       messages: [
-         {
-           role: "system",
-           content: SYSTEM_PROMPT
-         },
-         {
-           role: "user",
-           content: [
-             { type: "text", text: userPrompt },
-             { type: "image", image: imageDataUrl }
-           ]
-         }
-       ]
-     });
-     return {
-       result: response.object,
-       usage: {
-         inputTokens: response.usage.inputTokens,
-         outputTokens: response.usage.outputTokens,
-         totalTokens: response.usage.totalTokens,
-         reasoningTokens: response.usage.reasoningTokens,
-         cachedInputTokens: response.usage.cachedInputTokens
-       }
-     };
-   };
-   let analysisResponse;
-   if (imageSubmissionMode === "base64") {
-     const downloadResult = await downloadImageAsBase64(imageUrl, imageDownloadOptions);
-     analysisResponse = await analyzeStoryboard(downloadResult.base64Data);
-   } else {
-     analysisResponse = await analyzeStoryboard(imageUrl);
-   }
-   if (!analysisResponse.result) {
-     throw new Error("No analysis result received from AI provider");
-   }
-   return {
-     assetId,
-     hasBurnedInCaptions: analysisResponse.result.hasBurnedInCaptions ?? false,
-     confidence: analysisResponse.result.confidence ?? 0,
-     detectedLanguage: analysisResponse.result.detectedLanguage ?? null,
-     storyboardUrl: imageUrl,
-     usage: analysisResponse.usage
-   };
- }
-
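Putting the workflow together, a hedged usage sketch of hasBurnedInCaptions (the asset ID is hypothetical):

const result = await hasBurnedInCaptions("YOUR_ASSET_ID", {
  provider: "anthropic",
  imageSubmissionMode: "base64",
});
if (result.hasBurnedInCaptions && result.confidence >= 0.7) {
  console.log(`Detected ${result.detectedLanguage ?? "unknown"} captions`);
}
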
- // src/workflows/chapters.ts
- import { generateObject as generateObject2 } from "ai";
- import { z as z3 } from "zod";
-
- // src/lib/retry.ts
- var DEFAULT_RETRY_OPTIONS = {
-   maxRetries: 3,
-   baseDelay: 2e3,
-   maxDelay: 1e4
- };
- function defaultShouldRetry(error, _attempt) {
-   return Boolean(error.message && error.message.includes("Timeout while downloading"));
- }
- function calculateDelay(attempt, baseDelay, maxDelay) {
-   const exponentialDelay = baseDelay * 2 ** (attempt - 1);
-   const delayWithJitter = exponentialDelay * (0.5 + Math.random() * 0.5);
-   return Math.min(delayWithJitter, maxDelay);
- }
- async function withRetry(fn, {
-   maxRetries = DEFAULT_RETRY_OPTIONS.maxRetries,
-   baseDelay = DEFAULT_RETRY_OPTIONS.baseDelay,
-   maxDelay = DEFAULT_RETRY_OPTIONS.maxDelay,
-   shouldRetry = defaultShouldRetry
- } = {}) {
-   let lastError;
-   for (let attempt = 0; attempt <= maxRetries; attempt++) {
-     try {
-       return await fn();
-     } catch (error) {
-       lastError = error instanceof Error ? error : new Error(String(error));
-       const isLastAttempt = attempt === maxRetries;
-       if (isLastAttempt || !shouldRetry(lastError, attempt + 1)) {
-         throw lastError;
-       }
-       const delay2 = calculateDelay(attempt + 1, baseDelay, maxDelay);
-       console.warn(
-         `Attempt ${attempt + 1} failed: ${lastError.message}. Retrying in ${Math.round(delay2)}ms...`
-       );
-       await new Promise((resolve) => setTimeout(resolve, delay2));
-     }
-   }
-   throw lastError || new Error("Retry failed with unknown error");
- }
-
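Note: calculateDelay doubles the base delay per attempt and multiplies by a random factor in [0.5, 1.0), capped at maxDelay. With the defaults (baseDelay 2000 ms, maxDelay 10000 ms):

// attempt 1: 2000 * 2^0 = 2000 ms -> jittered to 1000-2000 ms
// attempt 2: 2000 * 2^1 = 4000 ms -> jittered to 2000-4000 ms
// attempt 3: 2000 * 2^2 = 8000 ms -> jittered to 4000-8000 ms (under the cap)
const delayMs = calculateDelay(2, 2000, 10000); // somewhere in [2000, 4000)
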
- // src/primitives/transcripts.ts
- function getReadyTextTracks(asset) {
-   return (asset.tracks || []).filter(
-     (track) => track.type === "text" && track.status === "ready"
-   );
- }
- function findCaptionTrack(asset, languageCode) {
-   const tracks = getReadyTextTracks(asset);
-   if (!tracks.length)
-     return void 0;
-   if (!languageCode) {
-     return tracks[0];
-   }
-   return tracks.find(
-     (track) => track.text_type === "subtitles" && track.language_code === languageCode
-   );
- }
- function extractTextFromVTT(vttContent) {
-   if (!vttContent.trim()) {
-     return "";
-   }
-   const lines = vttContent.split("\n");
-   const textLines = [];
-   for (let i = 0; i < lines.length; i++) {
-     const line = lines[i].trim();
-     if (!line)
-       continue;
-     if (line === "WEBVTT")
-       continue;
-     if (line.startsWith("NOTE "))
-       continue;
-     if (line.includes("-->"))
-       continue;
-     if (/^[\w-]+$/.test(line) && !line.includes(" "))
-       continue;
-     if (line.startsWith("STYLE") || line.startsWith("REGION"))
-       continue;
-     const cleanLine = line.replace(/<[^>]*>/g, "").trim();
-     if (cleanLine) {
-       textLines.push(cleanLine);
-     }
-   }
-   return textLines.join(" ").replace(/\s+/g, " ").trim();
- }
- function vttTimestampToSeconds(timestamp) {
-   const parts = timestamp.split(":");
-   if (parts.length !== 3)
-     return 0;
-   const hours = Number.parseInt(parts[0], 10) || 0;
-   const minutes = Number.parseInt(parts[1], 10) || 0;
-   const seconds = Number.parseFloat(parts[2]) || 0;
-   return hours * 3600 + minutes * 60 + seconds;
- }
- function extractTimestampedTranscript(vttContent) {
-   if (!vttContent.trim()) {
-     return "";
-   }
-   const lines = vttContent.split("\n");
-   const segments = [];
-   for (let i = 0; i < lines.length; i++) {
-     const line = lines[i].trim();
-     if (line.includes("-->")) {
-       const startTime = line.split(" --> ")[0].trim();
-       const timeInSeconds = vttTimestampToSeconds(startTime);
-       let j = i + 1;
-       while (j < lines.length && !lines[j].trim()) {
-         j++;
-       }
-       if (j < lines.length) {
-         const text = lines[j].trim().replace(/<[^>]*>/g, "");
-         if (text) {
-           segments.push({ time: timeInSeconds, text });
-         }
-       }
-     }
-   }
-   return segments.map((segment) => `[${Math.floor(segment.time)}s] ${segment.text}`).join("\n");
- }
- function parseVTTCues(vttContent) {
-   if (!vttContent.trim())
-     return [];
-   const lines = vttContent.split("\n");
-   const cues = [];
-   for (let i = 0; i < lines.length; i++) {
-     const line = lines[i].trim();
-     if (line.includes("-->")) {
-       const [startStr, endStr] = line.split(" --> ").map((s) => s.trim());
-       const startTime = vttTimestampToSeconds(startStr);
-       const endTime = vttTimestampToSeconds(endStr.split(" ")[0]);
-       const textLines = [];
-       let j = i + 1;
-       while (j < lines.length && lines[j].trim() && !lines[j].includes("-->")) {
-         const cleanLine = lines[j].trim().replace(/<[^>]*>/g, "");
-         if (cleanLine)
-           textLines.push(cleanLine);
-         j++;
-       }
-       if (textLines.length > 0) {
-         cues.push({
-           startTime,
-           endTime,
-           text: textLines.join(" ")
-         });
-       }
-     }
-   }
-   return cues;
- }
- async function buildTranscriptUrl(playbackId, trackId, signingContext) {
-   const baseUrl = `https://stream.mux.com/${playbackId}/text/${trackId}.vtt`;
-   if (signingContext) {
-     return signUrl(baseUrl, playbackId, signingContext, "video");
-   }
-   return baseUrl;
- }
- async function fetchTranscriptForAsset(asset, playbackId, options = {}) {
-   const { languageCode, cleanTranscript = true, signingContext } = options;
-   const track = findCaptionTrack(asset, languageCode);
-   if (!track) {
-     return { transcriptText: "" };
-   }
-   if (!track.id) {
-     return { transcriptText: "", track };
-   }
-   const transcriptUrl = await buildTranscriptUrl(playbackId, track.id, signingContext);
-   try {
-     const response = await fetch(transcriptUrl);
-     if (!response.ok) {
-       return { transcriptText: "", transcriptUrl, track };
-     }
-     const rawVtt = await response.text();
-     const transcriptText = cleanTranscript ? extractTextFromVTT(rawVtt) : rawVtt;
-     return { transcriptText, transcriptUrl, track };
-   } catch (error) {
-     console.warn("Failed to fetch transcript:", error);
-     return { transcriptText: "", transcriptUrl, track };
-   }
- }
-
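A quick sketch of the VTT helpers above on a two-cue fragment:

const vtt = `WEBVTT

00:00:01.000 --> 00:00:03.000
Hello there.

00:00:04.000 --> 00:00:06.000
Welcome back.`;

extractTextFromVTT(vtt);               // "Hello there. Welcome back."
vttTimestampToSeconds("00:01:30.500"); // 90.5
parseVTTCues(vtt)[0];                  // { startTime: 1, endTime: 3, text: "Hello there." }
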
- // src/workflows/chapters.ts
- var chapterSchema = z3.object({
-   startTime: z3.number(),
-   title: z3.string()
- });
- var chaptersSchema = z3.object({
-   chapters: z3.array(chapterSchema)
- });
- var DEFAULT_PROVIDER2 = "openai";
- var SYSTEM_PROMPT2 = `Your role is to segment the following captions into chunked chapters, summarising each chapter with a title.
-
- Analyze the transcript and create logical chapter breaks based on topic changes, major transitions, or distinct sections of content. Each chapter should represent a meaningful segment of the video.
-
- You must respond with valid JSON in exactly this format:
- {
- "chapters": [
- {"startTime": 0, "title": "Introduction"},
- {"startTime": 45.5, "title": "Main Topic Discussion"},
- {"startTime": 120.0, "title": "Conclusion"}
- ]
- }
-
- Important rules:
- startTime must be in seconds (not HH:MM:SS format)
- Always start with startTime: 0 for the first chapter
- Create 3-8 chapters depending on content length and natural breaks
- Chapter titles should be concise and descriptive
- Do not include any text before or after the JSON
- The JSON must be valid and parseable`;
- async function generateChapters(assetId, languageCode, options = {}) {
-   const { provider = DEFAULT_PROVIDER2, model, abortSignal } = options;
-   const clients = createWorkflowClients({ ...options, model }, provider);
-   const { asset: assetData, playbackId, policy } = await getPlaybackIdForAsset(clients.mux, assetId);
-   const signingContext = resolveSigningContext(options);
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   const transcriptResult = await fetchTranscriptForAsset(assetData, playbackId, {
-     languageCode,
-     cleanTranscript: false,
-     // keep timestamps for chapter segmentation
-     signingContext: policy === "signed" ? signingContext : void 0
-   });
-   if (!transcriptResult.track || !transcriptResult.transcriptText) {
-     const availableLanguages = getReadyTextTracks(assetData).map((t) => t.language_code).filter(Boolean).join(", ");
-     throw new Error(
-       `No caption track found for language '${languageCode}'. Available languages: ${availableLanguages || "none"}`
-     );
-   }
-   const timestampedTranscript = extractTimestampedTranscript(transcriptResult.transcriptText);
-   if (!timestampedTranscript) {
-     throw new Error("No usable content found in caption track");
-   }
-   let chaptersData = null;
-   try {
-     const response = await withRetry(
-       () => generateObject2({
-         model: clients.languageModel.model,
-         schema: chaptersSchema,
-         abortSignal,
-         messages: [
-           {
-             role: "system",
-             content: SYSTEM_PROMPT2
-           },
-           {
-             role: "user",
-             content: timestampedTranscript
-           }
-         ]
-       })
-     );
-     chaptersData = response.object;
-   } catch (error) {
-     throw new Error(
-       `Failed to generate chapters with ${provider}: ${error instanceof Error ? error.message : "Unknown error"}`
-     );
-   }
-   if (!chaptersData || !chaptersData.chapters) {
-     throw new Error("No chapters generated from AI response");
-   }
-   const validChapters = chaptersData.chapters.filter((chapter) => typeof chapter.startTime === "number" && typeof chapter.title === "string").sort((a, b) => a.startTime - b.startTime);
-   if (validChapters.length === 0) {
-     throw new Error("No valid chapters found in AI response");
-   }
-   if (validChapters[0].startTime !== 0) {
-     validChapters[0].startTime = 0;
-   }
-   return {
-     assetId,
-     languageCode,
-     chapters: validChapters
-   };
- }
-
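A hedged usage sketch of generateChapters (asset ID and language are hypothetical); note that the first chapter is forced to startTime 0:

const { chapters } = await generateChapters("YOUR_ASSET_ID", "en", {
  provider: "openai",
});
// e.g. [{ startTime: 0, title: "Introduction" }, { startTime: 45.5, ... }]
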
- // src/workflows/embeddings.ts
- import { embed } from "ai";
-
- // src/primitives/text-chunking.ts
- function estimateTokenCount(text) {
-   const words = text.trim().split(/\s+/).length;
-   return Math.ceil(words / 0.75);
- }
- function chunkByTokens(text, maxTokens, overlapTokens = 0) {
-   if (!text.trim()) {
-     return [];
-   }
-   const chunks = [];
-   const words = text.trim().split(/\s+/);
-   const wordsPerChunk = Math.floor(maxTokens * 0.75);
-   const overlapWords = Math.floor(overlapTokens * 0.75);
-   let chunkIndex = 0;
-   let currentPosition = 0;
-   while (currentPosition < words.length) {
-     const chunkWords = words.slice(
-       currentPosition,
-       currentPosition + wordsPerChunk
-     );
-     const chunkText2 = chunkWords.join(" ");
-     const tokenCount = estimateTokenCount(chunkText2);
-     chunks.push({
-       id: `chunk-${chunkIndex}`,
-       text: chunkText2,
-       tokenCount
-     });
-     currentPosition += wordsPerChunk - overlapWords;
-     chunkIndex++;
-     if (currentPosition <= (chunkIndex - 1) * (wordsPerChunk - overlapWords)) {
-       break;
-     }
-   }
-   return chunks;
- }
- function createChunkFromCues(cues, index) {
-   const text = cues.map((c) => c.text).join(" ");
-   return {
-     id: `chunk-${index}`,
-     text,
-     tokenCount: estimateTokenCount(text),
-     startTime: cues[0].startTime,
-     endTime: cues[cues.length - 1].endTime
-   };
- }
- function chunkVTTCues(cues, maxTokens, overlapCues = 2) {
-   if (cues.length === 0)
-     return [];
-   const chunks = [];
-   let currentCues = [];
-   let currentTokens = 0;
-   let chunkIndex = 0;
-   for (let i = 0; i < cues.length; i++) {
-     const cue = cues[i];
-     const cueTokens = estimateTokenCount(cue.text);
-     if (currentTokens + cueTokens > maxTokens && currentCues.length > 0) {
-       chunks.push(createChunkFromCues(currentCues, chunkIndex));
-       chunkIndex++;
-       const overlapStart = Math.max(0, currentCues.length - overlapCues);
-       currentCues = currentCues.slice(overlapStart);
-       currentTokens = currentCues.reduce(
-         (sum, c) => sum + estimateTokenCount(c.text),
-         0
-       );
-     }
-     currentCues.push(cue);
-     currentTokens += cueTokens;
-   }
-   if (currentCues.length > 0) {
-     chunks.push(createChunkFromCues(currentCues, chunkIndex));
-   }
-   return chunks;
- }
- function chunkText(text, strategy) {
-   switch (strategy.type) {
-     case "token": {
-       return chunkByTokens(text, strategy.maxTokens, strategy.overlap ?? 0);
-     }
-     default: {
-       const exhaustiveCheck = strategy;
-       throw new Error(`Unsupported chunking strategy: ${exhaustiveCheck}`);
-     }
-   }
- }
-
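Note: estimateTokenCount assumes roughly 0.75 words per token, and chunkByTokens converts the token budget back into words the same way. A worked example:

// 300 words      -> ceil(300 / 0.75) = 400 estimated tokens
// maxTokens 500  -> floor(500 * 0.75) = 375 words per chunk
// overlap 100    -> floor(100 * 0.75) = 75 overlapping words
const chunks = chunkByTokens("word ".repeat(1000).trim(), 500, 100);
// -> chunks of up to 375 words, each advancing 300 words past the last
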
- // src/workflows/embeddings.ts
- var DEFAULT_PROVIDER3 = "openai";
- var DEFAULT_CHUNKING_STRATEGY = {
-   type: "token",
-   maxTokens: 500,
-   overlap: 100
- };
- var DEFAULT_BATCH_SIZE = 5;
- function averageEmbeddings(embeddings) {
-   if (embeddings.length === 0) {
-     return [];
-   }
-   const dimensions = embeddings[0].length;
-   const averaged = Array.from({ length: dimensions }, () => 0);
-   for (const embedding of embeddings) {
-     for (let i = 0; i < dimensions; i++) {
-       averaged[i] += embedding[i];
-     }
-   }
-   for (let i = 0; i < dimensions; i++) {
-     averaged[i] /= embeddings.length;
-   }
-   return averaged;
- }
- async function generateChunkEmbeddings(chunks, model, batchSize, abortSignal) {
-   const results = [];
-   for (let i = 0; i < chunks.length; i += batchSize) {
-     const batch = chunks.slice(i, i + batchSize);
-     const batchResults = await Promise.all(
-       batch.map(async (chunk) => {
-         const response = await withRetry(
-           () => embed({
-             model,
-             value: chunk.text,
-             abortSignal
-           })
-         );
-         return {
-           chunkId: chunk.id,
-           embedding: response.embedding,
-           metadata: {
-             startTime: chunk.startTime,
-             endTime: chunk.endTime,
-             tokenCount: chunk.tokenCount
-           }
-         };
-       })
-     );
-     results.push(...batchResults);
-   }
-   return results;
- }
- async function generateVideoEmbeddings(assetId, options = {}) {
-   const {
-     provider = DEFAULT_PROVIDER3,
-     model,
-     languageCode,
-     chunkingStrategy = DEFAULT_CHUNKING_STRATEGY,
-     batchSize = DEFAULT_BATCH_SIZE,
-     abortSignal
-   } = options;
-   const credentials = validateCredentials(options, provider === "google" ? "google" : "openai");
-   const muxClient = createMuxClient(credentials);
-   const embeddingModel = resolveEmbeddingModel({ ...options, provider, model });
-   const { asset: assetData, playbackId, policy } = await getPlaybackIdForAsset(
-     muxClient,
-     assetId
-   );
-   const signingContext = resolveSigningContext(options);
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   const useVttChunking = chunkingStrategy.type === "vtt";
-   const transcriptResult = await fetchTranscriptForAsset(assetData, playbackId, {
-     languageCode,
-     cleanTranscript: !useVttChunking,
-     signingContext: policy === "signed" ? signingContext : void 0
-   });
-   if (!transcriptResult.track || !transcriptResult.transcriptText) {
-     const availableLanguages = getReadyTextTracks(assetData).map((t) => t.language_code).filter(Boolean).join(", ");
-     throw new Error(
-       `No caption track found${languageCode ? ` for language '${languageCode}'` : ""}. Available languages: ${availableLanguages || "none"}`
-     );
-   }
-   const transcriptText = transcriptResult.transcriptText;
-   if (!transcriptText.trim()) {
-     throw new Error("Transcript is empty");
-   }
-   const chunks = useVttChunking ? chunkVTTCues(
-     parseVTTCues(transcriptText),
-     chunkingStrategy.maxTokens,
-     chunkingStrategy.overlapCues
-   ) : chunkText(transcriptText, chunkingStrategy);
-   if (chunks.length === 0) {
-     throw new Error("No chunks generated from transcript");
-   }
-   let chunkEmbeddings;
-   try {
-     chunkEmbeddings = await generateChunkEmbeddings(
-       chunks,
-       embeddingModel.model,
-       batchSize,
-       abortSignal
-     );
-   } catch (error) {
-     throw new Error(
-       `Failed to generate embeddings with ${provider}: ${error instanceof Error ? error.message : "Unknown error"}`
-     );
-   }
-   if (chunkEmbeddings.length === 0) {
-     throw new Error("No embeddings generated");
-   }
-   const averagedEmbedding = averageEmbeddings(chunkEmbeddings.map((ce) => ce.embedding));
-   const totalTokens = chunks.reduce((sum, chunk) => sum + chunk.tokenCount, 0);
-   return {
-     assetId,
-     chunks: chunkEmbeddings,
-     averagedEmbedding,
-     provider,
-     model: embeddingModel.modelId,
-     metadata: {
-       totalChunks: chunks.length,
-       totalTokens,
-       chunkingStrategy: JSON.stringify(chunkingStrategy),
-       embeddingDimensions: chunkEmbeddings[0].embedding.length,
-       generatedAt: (/* @__PURE__ */ new Date()).toISOString()
-     }
-   };
- }
-
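A hedged usage sketch of generateVideoEmbeddings; the "vtt" strategy keeps per-chunk start/end timestamps, while averagedEmbedding provides a single whole-video vector:

const result = await generateVideoEmbeddings("YOUR_ASSET_ID", {
  provider: "openai",
  chunkingStrategy: { type: "vtt", maxTokens: 500, overlapCues: 2 },
});
console.log(result.metadata.totalChunks, result.metadata.embeddingDimensions);
console.log(result.chunks[0].metadata.startTime); // seconds into the video
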
- // src/primitives/thumbnails.ts
- async function getThumbnailUrls(playbackId, duration, options = {}) {
-   const { interval = 10, width = 640, signingContext } = options;
-   const timestamps = [];
-   if (duration <= 50) {
-     const spacing = duration / 6;
-     for (let i = 1; i <= 5; i++) {
-       timestamps.push(Math.round(i * spacing));
-     }
-   } else {
-     for (let time = 0; time < duration; time += interval) {
-       timestamps.push(time);
-     }
-   }
-   const baseUrl = `https://image.mux.com/${playbackId}/thumbnail.png`;
-   const urlPromises = timestamps.map(async (time) => {
-     if (signingContext) {
-       return signUrl(baseUrl, playbackId, signingContext, "thumbnail", { time, width });
-     }
-     return `${baseUrl}?time=${time}&width=${width}`;
-   });
-   return Promise.all(urlPromises);
- }
-
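Note: the sampling rule above takes five evenly spaced frames for clips of 50 seconds or less, and one frame every interval seconds otherwise. For example:

// duration 30s           -> spacing 5s -> timestamps [5, 10, 15, 20, 25]
// duration 120s, interval 10 -> timestamps [0, 10, 20, ..., 110]
const urls = await getThumbnailUrls("PLAYBACK_ID", 30);
// -> 5 URLs like "https://image.mux.com/PLAYBACK_ID/thumbnail.png?time=5&width=640"
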
- // src/workflows/moderation.ts
- var DEFAULT_THRESHOLDS = {
-   sexual: 0.7,
-   violence: 0.8
- };
- var DEFAULT_PROVIDER4 = "openai";
- var HIVE_ENDPOINT = "https://api.thehive.ai/api/v2/task/sync";
- var HIVE_SEXUAL_CATEGORIES = [
-   "general_nsfw",
-   "general_suggestive",
-   "yes_sexual_activity",
-   "female_underwear",
-   "male_underwear",
-   "bra",
-   "panties",
-   "sex_toys",
-   "nudity_female",
-   "nudity_male",
-   "cleavage",
-   "swimwear"
- ];
- var HIVE_VIOLENCE_CATEGORIES = [
-   "gun_in_hand",
-   "gun_not_in_hand",
-   "animated_gun",
-   "knife_in_hand",
-   "knife_not_in_hand",
-   "culinary_knife_not_in_hand",
-   "culinary_knife_in_hand",
-   "very_bloody",
-   "a_little_bloody",
-   "other_blood",
-   "hanging",
-   "noose",
-   "human_corpse",
-   "animated_corpse",
-   "emaciated_body",
-   "self_harm",
-   "animal_abuse",
-   "fights",
-   "garm_death_injury_or_military_conflict"
- ];
- async function processConcurrently(items, processor, maxConcurrent = 5) {
-   const results = [];
-   for (let i = 0; i < items.length; i += maxConcurrent) {
-     const batch = items.slice(i, i + maxConcurrent);
-     const batchPromises = batch.map(processor);
-     const batchResults = await Promise.all(batchPromises);
-     results.push(...batchResults);
-   }
-   return results;
- }
- async function requestOpenAIModeration(imageUrls, apiKey, model, maxConcurrent = 5, submissionMode = "url", downloadOptions) {
-   const targetUrls = submissionMode === "base64" ? (await downloadImagesAsBase64(imageUrls, downloadOptions, maxConcurrent)).map(
-     (img) => ({ url: img.url, image: img.base64Data })
-   ) : imageUrls.map((url) => ({ url, image: url }));
-   const moderate = async (entry) => {
-     try {
-       const res = await fetch("https://api.openai.com/v1/moderations", {
-         method: "POST",
-         headers: {
-           "Content-Type": "application/json",
-           "Authorization": `Bearer ${apiKey}`
-         },
-         body: JSON.stringify({
-           model,
-           input: [
-             {
-               type: "image_url",
-               image_url: {
-                 url: entry.image
-               }
-             }
-           ]
-         })
-       });
-       const json = await res.json();
-       if (!res.ok) {
-         throw new Error(
-           `OpenAI moderation error: ${res.status} ${res.statusText} - ${JSON.stringify(json)}`
-         );
-       }
-       const categoryScores = json.results?.[0]?.category_scores || {};
-       return {
-         url: entry.url,
-         sexual: categoryScores.sexual || 0,
-         violence: categoryScores.violence || 0,
-         error: false
-       };
-     } catch (error) {
-       console.error("OpenAI moderation failed:", error);
-       return {
-         url: entry.url,
-         sexual: 0,
-         violence: 0,
-         error: true
-       };
-     }
-   };
-   return processConcurrently(targetUrls, moderate, maxConcurrent);
- }
- function getHiveCategoryScores(classes, categoryNames) {
-   const scoreMap = Object.fromEntries(
-     classes.map((c) => [c.class, c.score])
-   );
-   const scores = categoryNames.map((category) => scoreMap[category] || 0);
-   return Math.max(...scores, 0);
- }
- async function requestHiveModeration(imageUrls, apiKey, maxConcurrent = 5, submissionMode = "url", downloadOptions) {
-   const targets = submissionMode === "base64" ? (await downloadImagesAsBase64(imageUrls, downloadOptions, maxConcurrent)).map((img) => ({
-     url: img.url,
-     source: {
-       kind: "file",
-       buffer: img.buffer,
-       contentType: img.contentType
-     }
-   })) : imageUrls.map((url) => ({
-     url,
-     source: { kind: "url", value: url }
-   }));
-   const moderate = async (entry) => {
-     try {
-       const formData = new FormData();
-       if (entry.source.kind === "url") {
-         formData.append("url", entry.source.value);
-       } else {
-         const extension = entry.source.contentType.split("/")[1] || "jpg";
-         const blob = new Blob([entry.source.buffer], {
-           type: entry.source.contentType
-         });
-         formData.append("media", blob, `thumbnail.${extension}`);
-       }
-       const res = await fetch(HIVE_ENDPOINT, {
-         method: "POST",
-         headers: {
-           Accept: "application/json",
-           Authorization: `Token ${apiKey}`
-         },
-         body: formData
-       });
-       const json = await res.json().catch(() => void 0);
-       if (!res.ok) {
-         throw new Error(
-           `Hive moderation error: ${res.status} ${res.statusText} - ${JSON.stringify(json)}`
-         );
-       }
-       const classes = json?.status?.[0]?.response?.output?.[0]?.classes || [];
-       return {
-         url: entry.url,
-         sexual: getHiveCategoryScores(classes, HIVE_SEXUAL_CATEGORIES),
-         violence: getHiveCategoryScores(classes, HIVE_VIOLENCE_CATEGORIES),
-         error: false
-       };
-     } catch (error) {
-       console.error("Hive moderation failed:", error);
-       return {
-         url: entry.url,
-         sexual: 0,
-         violence: 0,
-         error: true
-       };
-     }
-   };
-   return processConcurrently(targets, moderate, maxConcurrent);
- }
- async function getModerationScores(assetId, options = {}) {
-   const {
-     provider = DEFAULT_PROVIDER4,
-     model = provider === "openai" ? "omni-moderation-latest" : void 0,
-     thresholds = DEFAULT_THRESHOLDS,
-     thumbnailInterval = 10,
-     thumbnailWidth = 640,
-     maxConcurrent = 5,
-     imageSubmissionMode = "url",
-     imageDownloadOptions
-   } = options;
-   const credentials = validateCredentials(options, provider === "openai" ? "openai" : void 0);
-   const muxClient = createMuxClient(credentials);
-   const { asset, playbackId, policy } = await getPlaybackIdForAsset(muxClient, assetId);
-   const duration = asset.duration || 0;
-   const signingContext = resolveSigningContext(options);
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   const thumbnailUrls = await getThumbnailUrls(playbackId, duration, {
-     interval: thumbnailInterval,
-     width: thumbnailWidth,
-     signingContext: policy === "signed" ? signingContext : void 0
-   });
-   let thumbnailScores;
-   if (provider === "openai") {
-     const apiKey = credentials.openaiApiKey;
-     if (!apiKey) {
-       throw new Error("OpenAI API key is required for moderation. Set OPENAI_API_KEY or pass openaiApiKey.");
-     }
-     thumbnailScores = await requestOpenAIModeration(
-       thumbnailUrls,
-       apiKey,
-       model || "omni-moderation-latest",
-       maxConcurrent,
-       imageSubmissionMode,
-       imageDownloadOptions
-     );
-   } else if (provider === "hive") {
-     const hiveApiKey = options.hiveApiKey || env_default.HIVE_API_KEY;
-     if (!hiveApiKey) {
-       throw new Error("Hive API key is required for moderation. Set HIVE_API_KEY or pass hiveApiKey.");
-     }
-     thumbnailScores = await requestHiveModeration(
-       thumbnailUrls,
-       hiveApiKey,
-       maxConcurrent,
-       imageSubmissionMode,
-       imageDownloadOptions
-     );
-   } else {
-     throw new Error(`Unsupported moderation provider: ${provider}`);
-   }
-   const maxSexual = Math.max(...thumbnailScores.map((s) => s.sexual));
-   const maxViolence = Math.max(...thumbnailScores.map((s) => s.violence));
-   const finalThresholds = { ...DEFAULT_THRESHOLDS, ...thresholds };
-   return {
-     assetId,
-     thumbnailScores,
-     maxScores: {
-       sexual: maxSexual,
-       violence: maxViolence
-     },
-     exceedsThreshold: maxSexual > finalThresholds.sexual || maxViolence > finalThresholds.violence,
-     thresholds: finalThresholds
-   };
- }
-
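A hedged usage sketch of getModerationScores; exceedsThreshold compares the per-category maxima across all sampled thumbnails against the merged thresholds:

const report = await getModerationScores("YOUR_ASSET_ID", {
  provider: "hive",
  thresholds: { sexual: 0.5, violence: 0.8 },
});
if (report.exceedsThreshold) {
  console.warn("Flagged:", report.maxScores);
}
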
- // src/workflows/summarization.ts
- import { generateObject as generateObject3 } from "ai";
- import { z as z4 } from "zod";
- var SUMMARY_KEYWORD_LIMIT = 10;
- var summarySchema = z4.object({
-   keywords: z4.array(z4.string()),
-   title: z4.string(),
-   description: z4.string()
- });
- var TONE_INSTRUCTIONS = {
-   normal: "Provide a clear, straightforward analysis.",
-   sassy: "Answer with a sassy, playful attitude and personality.",
-   professional: "Provide a professional, executive-level analysis suitable for business reporting."
- };
- var summarizationPromptBuilder = createPromptBuilder({
-   template: {
-     task: {
-       tag: "task",
-       content: "Analyze the storyboard frames and generate metadata that captures the essence of the video content."
-     },
-     title: {
-       tag: "title_requirements",
-       content: dedent_default`
-         A short, compelling headline that immediately communicates the subject or action.
-         Aim for brevity - typically under 10 words. Think of how a news headline or video card title would read.
-         Start with the primary subject, action, or topic - never begin with "A video of" or similar phrasing.
-         Use active, specific language.`
-     },
-     description: {
-       tag: "description_requirements",
-       content: dedent_default`
-         A concise summary (2-4 sentences) that describes what happens across the video.
-         Cover the main subjects, actions, setting, and any notable progression visible across frames.
-         Write in present tense. Be specific about observable details rather than making assumptions.
-         If the transcript provides dialogue or narration, incorporate key points but prioritize visual content.`
-     },
-     keywords: {
-       tag: "keywords_requirements",
-       content: dedent_default`
-         Specific, searchable terms (up to 10) that capture:
-         - Primary subjects (people, animals, objects)
-         - Actions and activities being performed
-         - Setting and environment
-         - Notable objects or tools
-         - Style or genre (if applicable)
-         Prefer concrete nouns and action verbs over abstract concepts.
-         Use lowercase. Avoid redundant or overly generic terms like "video" or "content".`
-     },
-     qualityGuidelines: {
-       tag: "quality_guidelines",
-       content: dedent_default`
-         - Examine all frames to understand the full context and progression
-         - Be precise: "golden retriever" is better than "dog" when identifiable
-         - Capture the narrative: what begins, develops, and concludes
-         - Balance brevity with informativeness`
-     }
-   },
-   sectionOrder: ["task", "title", "description", "keywords", "qualityGuidelines"]
- });
- var SYSTEM_PROMPT3 = dedent_default`
-   <role>
-   You are a video content analyst specializing in storyboard interpretation and multimodal analysis.
-   </role>
-
-   <context>
-   You receive storyboard images containing multiple sequential frames extracted from a video.
-   These frames are arranged in a grid and represent the visual progression of the content over time.
-   Read frames left-to-right, top-to-bottom to understand the temporal sequence.
-   </context>
-
-   <transcript_guidance>
-   When a transcript is provided alongside the storyboard:
-   - Use it to understand spoken content, dialogue, narration, and audio context
-   - Correlate transcript content with visual frames to build a complete picture
-   - Extract key terminology, names, and specific language used by speakers
-   - Let the transcript inform keyword selection, especially for topics not visually obvious
-   - Prioritize visual content for the description, but enrich it with transcript insights
-   - If transcript and visuals conflict, trust the visual evidence
-   </transcript_guidance>
-
-   <capabilities>
-   - Extract meaning from visual sequences
-   - Identify subjects, actions, settings, and narrative arcs
-   - Generate accurate, searchable metadata
-   - Synthesize visual and transcript information when provided
-   </capabilities>
-
-   <constraints>
-   - Only describe what is clearly observable in the frames or explicitly stated in the transcript
-   - Do not fabricate details or make unsupported assumptions
-   - Return structured data matching the requested schema
-   </constraints>`;
- function buildUserPrompt2({
-   tone,
-   transcriptText,
-   isCleanTranscript = true,
-   promptOverrides
- }) {
-   const contextSections = [createToneSection(TONE_INSTRUCTIONS[tone])];
-   if (transcriptText) {
-     const format = isCleanTranscript ? "plain text" : "WebVTT";
-     contextSections.push(createTranscriptSection(transcriptText, format));
-   }
-   return summarizationPromptBuilder.buildWithContext(promptOverrides, contextSections);
- }
- var DEFAULT_PROVIDER5 = "openai";
- var DEFAULT_TONE = "normal";
- function normalizeKeywords(keywords) {
-   if (!Array.isArray(keywords) || keywords.length === 0) {
-     return [];
-   }
-   const uniqueLowercase = /* @__PURE__ */ new Set();
-   const normalized = [];
-   for (const keyword of keywords) {
-     const trimmed = keyword?.trim();
-     if (!trimmed) {
-       continue;
-     }
-     const lower = trimmed.toLowerCase();
-     if (uniqueLowercase.has(lower)) {
-       continue;
-     }
-     uniqueLowercase.add(lower);
-     normalized.push(trimmed);
-     if (normalized.length === SUMMARY_KEYWORD_LIMIT) {
-       break;
-     }
-   }
-   return normalized;
- }
- async function getSummaryAndTags(assetId, options) {
-   const {
-     provider = DEFAULT_PROVIDER5,
-     model,
-     tone = DEFAULT_TONE,
-     includeTranscript = true,
-     cleanTranscript = true,
-     imageSubmissionMode = "url",
-     imageDownloadOptions,
-     abortSignal,
-     promptOverrides
-   } = options ?? {};
-   const clients = createWorkflowClients(
-     { ...options, model },
-     provider
-   );
-   const { asset: assetData, playbackId, policy } = await getPlaybackIdForAsset(clients.mux, assetId);
-   const signingContext = resolveSigningContext(options ?? {});
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   const transcriptText = includeTranscript ? (await fetchTranscriptForAsset(assetData, playbackId, {
-     cleanTranscript,
-     signingContext: policy === "signed" ? signingContext : void 0
-   })).transcriptText : "";
-   const userPrompt = buildUserPrompt2({
-     tone,
-     transcriptText,
-     isCleanTranscript: cleanTranscript,
-     promptOverrides
-   });
-   const imageUrl = await getStoryboardUrl(playbackId, 640, policy === "signed" ? signingContext : void 0);
-   const analyzeStoryboard = async (imageDataUrl) => {
-     const response = await generateObject3({
-       model: clients.languageModel.model,
-       schema: summarySchema,
-       abortSignal,
-       messages: [
-         {
-           role: "system",
-           content: SYSTEM_PROMPT3
-         },
-         {
-           role: "user",
-           content: [
-             { type: "text", text: userPrompt },
-             { type: "image", image: imageDataUrl }
-           ]
-         }
-       ]
-     });
-     return response.object;
-   };
-   let aiAnalysis = null;
-   try {
-     if (imageSubmissionMode === "base64") {
-       const downloadResult = await downloadImageAsBase64(imageUrl, imageDownloadOptions);
-       aiAnalysis = await analyzeStoryboard(downloadResult.base64Data);
-     } else {
-       aiAnalysis = await withRetry(() => analyzeStoryboard(imageUrl));
-     }
-   } catch (error) {
-     throw new Error(
-       `Failed to analyze video content with ${provider}: ${error instanceof Error ? error.message : "Unknown error"}`
-     );
-   }
-   if (!aiAnalysis) {
-     throw new Error(`Failed to analyze video content for asset ${assetId}`);
-   }
-   if (!aiAnalysis.title) {
-     throw new Error(`Failed to generate title for asset ${assetId}`);
-   }
-   if (!aiAnalysis.description) {
-     throw new Error(`Failed to generate description for asset ${assetId}`);
-   }
-   return {
-     assetId,
-     title: aiAnalysis.title,
-     description: aiAnalysis.description,
-     tags: normalizeKeywords(aiAnalysis.keywords),
-     storyboardUrl: imageUrl
-   };
- }
-
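// --- Editorial usage sketch (illustrative only; not part of the package diff).
// Calling getSummaryAndTags as defined above, assuming MUX_TOKEN_ID,
// MUX_TOKEN_SECRET, and OPENAI_API_KEY are set. "YOUR_ASSET_ID" is a
// placeholder.
const summary = await getSummaryAndTags("YOUR_ASSET_ID", {
  tone: "professional",
  includeTranscript: true // default; fold the transcript into the analysis
});
console.log(summary.title, summary.description, summary.tags);
// --- End editorial sketch.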
- // src/workflows/translate-audio.ts
- import { GetObjectCommand, S3Client } from "@aws-sdk/client-s3";
- import { Upload } from "@aws-sdk/lib-storage";
- import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
- import Mux3 from "@mux/mux-node";
- var STATIC_RENDITION_POLL_INTERVAL_MS = 5e3;
- var STATIC_RENDITION_MAX_ATTEMPTS = 36;
- var delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
- function getReadyAudioStaticRendition(asset) {
-   const files = asset.static_renditions?.files;
-   if (!files || files.length === 0) {
-     return void 0;
-   }
-   return files.find(
-     (rendition) => rendition.name === "audio.m4a" && rendition.status === "ready"
-   );
- }
- var hasReadyAudioStaticRendition = (asset) => Boolean(getReadyAudioStaticRendition(asset));
- async function requestStaticRenditionCreation(muxClient, assetId) {
-   console.log("\u{1F4FC} Requesting static rendition from Mux...");
-   try {
-     await muxClient.video.assets.createStaticRendition(assetId, {
-       resolution: "audio-only"
-     });
-     console.log("\u{1F4FC} Static rendition request accepted by Mux.");
-   } catch (error) {
-     const statusCode = error?.status ?? error?.statusCode;
-     const messages = error?.error?.messages;
-     const alreadyDefined = messages?.some((message2) => message2.toLowerCase().includes("already defined")) ?? error?.message?.toLowerCase().includes("already defined");
-     if (statusCode === 409 || alreadyDefined) {
-       console.log("\u2139\uFE0F Static rendition already requested. Waiting for it to finish...");
-       return;
-     }
-     const message = error instanceof Error ? error.message : "Unknown error";
-     throw new Error(`Failed to request static rendition from Mux: ${message}`);
-   }
- }
- async function waitForAudioStaticRendition({
-   assetId,
-   muxClient,
-   initialAsset
- }) {
-   let currentAsset = initialAsset;
-   if (hasReadyAudioStaticRendition(currentAsset)) {
-     return currentAsset;
-   }
-   const status = currentAsset.static_renditions?.status ?? "not_requested";
-   if (status === "not_requested" || status === void 0) {
-     await requestStaticRenditionCreation(muxClient, assetId);
-   } else if (status === "errored") {
-     console.log("\u26A0\uFE0F Previous static rendition request errored. Creating a new one...");
-     await requestStaticRenditionCreation(muxClient, assetId);
-   } else {
-     console.log(`\u2139\uFE0F Static rendition already ${status}. Waiting for it to finish...`);
-   }
-   for (let attempt = 1; attempt <= STATIC_RENDITION_MAX_ATTEMPTS; attempt++) {
-     await delay(STATIC_RENDITION_POLL_INTERVAL_MS);
-     currentAsset = await muxClient.video.assets.retrieve(assetId);
-     if (hasReadyAudioStaticRendition(currentAsset)) {
-       console.log("\u2705 Audio static rendition is ready!");
-       return currentAsset;
-     }
-     const currentStatus = currentAsset.static_renditions?.status || "unknown";
-     console.log(
-       `\u231B Waiting for static rendition (attempt ${attempt}/${STATIC_RENDITION_MAX_ATTEMPTS}) \u2192 ${currentStatus}`
-     );
-     if (currentStatus === "errored") {
-       throw new Error(
-         "Mux failed to create the static rendition for this asset. Please check the asset in the Mux dashboard."
-       );
-     }
-   }
-   throw new Error(
-     "Timed out waiting for the static rendition to become ready. Please try again in a moment."
-   );
- }
- async function translateAudio(assetId, toLanguageCode, options = {}) {
-   const {
-     provider = "elevenlabs",
-     numSpeakers = 0,
-     // 0 = auto-detect
-     muxTokenId,
-     muxTokenSecret,
-     elevenLabsApiKey,
-     uploadToMux = true
-   } = options;
-   if (provider !== "elevenlabs") {
-     throw new Error("Only ElevenLabs provider is currently supported for audio translation");
-   }
-   const muxId = muxTokenId ?? env_default.MUX_TOKEN_ID;
-   const muxSecret = muxTokenSecret ?? env_default.MUX_TOKEN_SECRET;
-   const elevenLabsKey = elevenLabsApiKey ?? env_default.ELEVENLABS_API_KEY;
-   const s3Endpoint = options.s3Endpoint ?? env_default.S3_ENDPOINT;
-   const s3Region = options.s3Region ?? env_default.S3_REGION ?? "auto";
-   const s3Bucket = options.s3Bucket ?? env_default.S3_BUCKET;
-   const s3AccessKeyId = options.s3AccessKeyId ?? env_default.S3_ACCESS_KEY_ID;
-   const s3SecretAccessKey = options.s3SecretAccessKey ?? env_default.S3_SECRET_ACCESS_KEY;
-   if (!muxId || !muxSecret) {
-     throw new Error("Mux credentials are required. Provide muxTokenId and muxTokenSecret in options or set MUX_TOKEN_ID and MUX_TOKEN_SECRET environment variables.");
-   }
-   if (!elevenLabsKey) {
-     throw new Error("ElevenLabs API key is required. Provide elevenLabsApiKey in options or set ELEVENLABS_API_KEY environment variable.");
-   }
-   if (uploadToMux && (!s3Endpoint || !s3Bucket || !s3AccessKeyId || !s3SecretAccessKey)) {
-     throw new Error("S3 configuration is required for uploading to Mux. Provide s3Endpoint, s3Bucket, s3AccessKeyId, and s3SecretAccessKey in options or set S3_ENDPOINT, S3_BUCKET, S3_ACCESS_KEY_ID, and S3_SECRET_ACCESS_KEY environment variables.");
-   }
-   const mux = new Mux3({
-     tokenId: muxId,
-     tokenSecret: muxSecret
-   });
-   console.log(`\u{1F3AC} Fetching Mux asset: ${assetId}`);
-   const { asset: initialAsset, playbackId, policy } = await getPlaybackIdForAsset(mux, assetId);
-   const signingContext = resolveSigningContext(options);
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   console.log("\u{1F50D} Checking for audio-only static rendition...");
-   let currentAsset = initialAsset;
-   if (!hasReadyAudioStaticRendition(currentAsset)) {
-     console.log("\u274C No ready audio static rendition found. Requesting one now...");
-     currentAsset = await waitForAudioStaticRendition({
-       assetId,
-       muxClient: mux,
-       initialAsset: currentAsset
-     });
-   }
-   const audioRendition = getReadyAudioStaticRendition(currentAsset);
-   if (!audioRendition) {
-     throw new Error(
-       "Unable to obtain an audio-only static rendition for this asset. Please verify static renditions are enabled in Mux."
-     );
-   }
-   let audioUrl = `https://stream.mux.com/${playbackId}/audio.m4a`;
-   if (policy === "signed" && signingContext) {
-     audioUrl = await signUrl(audioUrl, playbackId, signingContext, "video");
-   }
-   console.log(`\u2705 Found audio rendition: ${audioUrl}`);
-   console.log(`\u{1F399}\uFE0F Creating ElevenLabs dubbing job (auto-detect \u2192 ${toLanguageCode})`);
-   let dubbingId;
-   try {
-     const audioResponse = await fetch(audioUrl);
-     if (!audioResponse.ok) {
-       throw new Error(`Failed to fetch audio file: ${audioResponse.statusText}`);
-     }
-     const audioBuffer = await audioResponse.arrayBuffer();
-     const audioBlob = new Blob([audioBuffer], { type: "audio/mp4" });
-     const audioFile = audioBlob;
-     const formData = new FormData();
-     formData.append("file", audioFile);
-     formData.append("target_lang", toLanguageCode);
-     formData.append("num_speakers", numSpeakers.toString());
-     formData.append("name", `Mux Asset ${assetId} - auto to ${toLanguageCode}`);
-     const dubbingResponse = await fetch("https://api.elevenlabs.io/v1/dubbing", {
-       method: "POST",
-       headers: {
-         "xi-api-key": elevenLabsKey
-       },
-       body: formData
-     });
-     if (!dubbingResponse.ok) {
-       throw new Error(`ElevenLabs API error: ${dubbingResponse.statusText}`);
-     }
-     const dubbingData = await dubbingResponse.json();
-     dubbingId = dubbingData.dubbing_id;
-     console.log(`\u2705 Dubbing job created: ${dubbingId}`);
-     console.log(`\u23F1\uFE0F Expected duration: ${dubbingData.expected_duration_sec}s`);
-   } catch (error) {
-     throw new Error(`Failed to create ElevenLabs dubbing job: ${error instanceof Error ? error.message : "Unknown error"}`);
-   }
-   console.log("\u23F3 Waiting for dubbing to complete...");
-   let dubbingStatus = "dubbing";
-   let pollAttempts = 0;
-   const maxPollAttempts = 180;
-   while (dubbingStatus === "dubbing" && pollAttempts < maxPollAttempts) {
-     await new Promise((resolve) => setTimeout(resolve, 1e4));
-     pollAttempts++;
-     try {
-       const statusResponse = await fetch(`https://api.elevenlabs.io/v1/dubbing/${dubbingId}`, {
-         headers: {
-           "xi-api-key": elevenLabsKey
-         }
-       });
-       if (!statusResponse.ok) {
-         throw new Error(`Status check failed: ${statusResponse.statusText}`);
-       }
-       const statusData = await statusResponse.json();
-       dubbingStatus = statusData.status;
-       console.log(`\u{1F4CA} Status check ${pollAttempts}: ${dubbingStatus}`);
-       if (dubbingStatus === "failed") {
-         throw new Error("ElevenLabs dubbing job failed");
-       }
-     } catch (error) {
-       throw new Error(`Failed to check dubbing status: ${error instanceof Error ? error.message : "Unknown error"}`);
-     }
-   }
-   if (dubbingStatus !== "dubbed") {
-     throw new Error(`Dubbing job timed out or failed. Final status: ${dubbingStatus}`);
-   }
-   console.log("\u2705 Dubbing completed successfully!");
-   if (!uploadToMux) {
-     return {
-       assetId,
-       targetLanguageCode: toLanguageCode,
-       dubbingId
-     };
-   }
-   console.log("\u{1F4E5} Downloading dubbed audio from ElevenLabs...");
-   let dubbedAudioBuffer;
-   try {
-     const audioUrl2 = `https://api.elevenlabs.io/v1/dubbing/${dubbingId}/audio/${toLanguageCode}`;
-     const audioResponse = await fetch(audioUrl2, {
-       headers: {
-         "xi-api-key": elevenLabsKey
-       }
-     });
-     if (!audioResponse.ok) {
-       throw new Error(`Failed to fetch dubbed audio: ${audioResponse.statusText}`);
-     }
-     dubbedAudioBuffer = await audioResponse.arrayBuffer();
-     console.log(`\u2705 Downloaded dubbed audio (${dubbedAudioBuffer.byteLength} bytes)`);
-   } catch (error) {
-     throw new Error(`Failed to download dubbed audio: ${error instanceof Error ? error.message : "Unknown error"}`);
-   }
-   console.log("\u{1F4E4} Uploading dubbed audio to S3-compatible storage...");
-   const s3Client = new S3Client({
-     region: s3Region,
-     endpoint: s3Endpoint,
-     credentials: {
-       accessKeyId: s3AccessKeyId,
-       secretAccessKey: s3SecretAccessKey
-     },
-     forcePathStyle: true
-   });
-   const audioKey = `audio-translations/${assetId}/auto-to-${toLanguageCode}-${Date.now()}.m4a`;
-   let presignedUrl;
-   try {
-     const upload = new Upload({
-       client: s3Client,
-       params: {
-         Bucket: s3Bucket,
-         Key: audioKey,
-         Body: new Uint8Array(dubbedAudioBuffer),
-         ContentType: "audio/mp4"
-       }
-     });
-     await upload.done();
-     console.log(`\u2705 Audio uploaded successfully to: ${audioKey}`);
-     const getObjectCommand = new GetObjectCommand({
-       Bucket: s3Bucket,
-       Key: audioKey
-     });
-     presignedUrl = await getSignedUrl(s3Client, getObjectCommand, {
-       expiresIn: 3600
-       // 1 hour
-     });
-     console.log(`\u{1F517} Generated presigned URL (expires in 1 hour)`);
-   } catch (error) {
-     throw new Error(`Failed to upload audio to S3: ${error instanceof Error ? error.message : "Unknown error"}`);
-   }
-   console.log("\u{1F3AC} Adding translated audio track to Mux asset...");
-   let uploadedTrackId;
-   try {
-     const languageName = new Intl.DisplayNames(["en"], { type: "language" }).of(toLanguageCode) || toLanguageCode.toUpperCase();
-     const trackName = `${languageName} (auto-dubbed)`;
-     const trackResponse = await mux.video.assets.createTrack(assetId, {
-       type: "audio",
-       language_code: toLanguageCode,
-       name: trackName,
-       url: presignedUrl
-     });
-     uploadedTrackId = trackResponse.id;
-     console.log(`\u2705 Audio track added to Mux asset with ID: ${uploadedTrackId}`);
-     console.log(`\u{1F3B5} Track name: "${trackName}"`);
-   } catch (error) {
-     console.warn(`\u26A0\uFE0F Failed to add audio track to Mux asset: ${error instanceof Error ? error.message : "Unknown error"}`);
-     console.log("\u{1F517} You can manually add the track using this presigned URL:");
-     console.log(presignedUrl);
-   }
-   return {
-     assetId,
-     targetLanguageCode: toLanguageCode,
-     dubbingId,
-     uploadedTrackId,
-     presignedUrl
-   };
- }
-
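// --- Editorial usage sketch (illustrative only; not part of the package diff).
// Dubbing an asset's audio into Spanish ("es") with translateAudio as defined
// above. Assumes MUX_TOKEN_ID, MUX_TOKEN_SECRET, ELEVENLABS_API_KEY, and the
// S3_* environment variables are set; "YOUR_ASSET_ID" is a placeholder.
const dub = await translateAudio("YOUR_ASSET_ID", "es", {
  numSpeakers: 0 // 0 = let ElevenLabs auto-detect the speaker count
});
console.log(dub.dubbingId, dub.uploadedTrackId);
// --- End editorial sketch.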
- // src/workflows/translate-captions.ts
- import { GetObjectCommand as GetObjectCommand2, S3Client as S3Client2 } from "@aws-sdk/client-s3";
- import { Upload as Upload2 } from "@aws-sdk/lib-storage";
- import { getSignedUrl as getSignedUrl2 } from "@aws-sdk/s3-request-presigner";
- import { generateObject as generateObject4 } from "ai";
- import { z as z5 } from "zod";
- var translationSchema = z5.object({
-   translation: z5.string()
- });
- var DEFAULT_PROVIDER6 = "openai";
- async function translateCaptions(assetId, fromLanguageCode, toLanguageCode, options) {
-   const {
-     provider = DEFAULT_PROVIDER6,
-     model,
-     s3Endpoint: providedS3Endpoint,
-     s3Region: providedS3Region,
-     s3Bucket: providedS3Bucket,
-     s3AccessKeyId: providedS3AccessKeyId,
-     s3SecretAccessKey: providedS3SecretAccessKey,
-     uploadToMux: uploadToMuxOption,
-     ...clientConfig
-   } = options;
-   const resolvedProvider = provider;
-   const s3Endpoint = providedS3Endpoint ?? env_default.S3_ENDPOINT;
-   const s3Region = providedS3Region ?? env_default.S3_REGION ?? "auto";
-   const s3Bucket = providedS3Bucket ?? env_default.S3_BUCKET;
-   const s3AccessKeyId = providedS3AccessKeyId ?? env_default.S3_ACCESS_KEY_ID;
-   const s3SecretAccessKey = providedS3SecretAccessKey ?? env_default.S3_SECRET_ACCESS_KEY;
-   const uploadToMux = uploadToMuxOption !== false;
-   const clients = createWorkflowClients(
-     { ...clientConfig, provider: resolvedProvider, model },
-     resolvedProvider
-   );
-   if (uploadToMux && (!s3Endpoint || !s3Bucket || !s3AccessKeyId || !s3SecretAccessKey)) {
-     throw new Error("S3 configuration is required for uploading to Mux. Provide s3Endpoint, s3Bucket, s3AccessKeyId, and s3SecretAccessKey in options or set S3_ENDPOINT, S3_BUCKET, S3_ACCESS_KEY_ID, and S3_SECRET_ACCESS_KEY environment variables.");
-   }
-   const { asset: assetData, playbackId, policy } = await getPlaybackIdForAsset(clients.mux, assetId);
-   const signingContext = resolveSigningContext(options);
-   if (policy === "signed" && !signingContext) {
-     throw new Error(
-       "Signed playback ID requires signing credentials. Provide muxSigningKey and muxPrivateKey in options or set MUX_SIGNING_KEY and MUX_PRIVATE_KEY environment variables."
-     );
-   }
-   if (!assetData.tracks) {
-     throw new Error("No tracks found for this asset");
-   }
-   const sourceTextTrack = assetData.tracks.find(
-     (track) => track.type === "text" && track.status === "ready" && track.language_code === fromLanguageCode
-   );
-   if (!sourceTextTrack) {
-     throw new Error(`No ready text track found with language code '${fromLanguageCode}' for this asset`);
-   }
-   let vttUrl = `https://stream.mux.com/${playbackId}/text/${sourceTextTrack.id}.vtt`;
-   if (policy === "signed" && signingContext) {
-     vttUrl = await signUrl(vttUrl, playbackId, signingContext, "video");
-   }
-   let vttContent;
-   try {
-     const vttResponse = await fetch(vttUrl);
-     if (!vttResponse.ok) {
-       throw new Error(`Failed to fetch VTT file: ${vttResponse.statusText}`);
-     }
-     vttContent = await vttResponse.text();
-   } catch (error) {
-     throw new Error(`Failed to fetch VTT content: ${error instanceof Error ? error.message : "Unknown error"}`);
-   }
-   console.log(`\u2705 Found VTT content for language '${fromLanguageCode}'`);
-   let translatedVtt;
-   try {
-     const response = await generateObject4({
-       model: clients.languageModel.model,
-       schema: translationSchema,
-       abortSignal: options.abortSignal,
-       messages: [
-         {
-           role: "user",
-           content: `Translate the following VTT subtitle file from ${fromLanguageCode} to ${toLanguageCode}. Preserve all timestamps and VTT formatting exactly as they appear. Return JSON with a single key "translation" containing the translated VTT.
-
- ${vttContent}`
-         }
-       ]
-     });
-     translatedVtt = response.object.translation;
-   } catch (error) {
-     throw new Error(`Failed to translate VTT with ${resolvedProvider}: ${error instanceof Error ? error.message : "Unknown error"}`);
-   }
-   console.log(`
- \u2705 Translation completed successfully!`);
-   if (!uploadToMux) {
-     console.log(`\u2705 VTT translated to ${toLanguageCode} successfully!`);
-     return {
-       assetId,
-       sourceLanguageCode: fromLanguageCode,
-       targetLanguageCode: toLanguageCode,
-       originalVtt: vttContent,
-       translatedVtt
-     };
-   }
-   console.log("\u{1F4E4} Uploading translated VTT to S3-compatible storage...");
-   const s3Client = new S3Client2({
-     region: s3Region,
-     endpoint: s3Endpoint,
-     credentials: {
-       accessKeyId: s3AccessKeyId,
-       secretAccessKey: s3SecretAccessKey
-     },
-     forcePathStyle: true
-     // Often needed for non-AWS S3 services
-   });
-   const vttKey = `translations/${assetId}/${fromLanguageCode}-to-${toLanguageCode}-${Date.now()}.vtt`;
-   let presignedUrl;
-   try {
-     const upload = new Upload2({
-       client: s3Client,
-       params: {
-         Bucket: s3Bucket,
-         Key: vttKey,
-         Body: translatedVtt,
-         ContentType: "text/vtt"
-       }
-     });
-     await upload.done();
-     console.log(`\u2705 VTT uploaded successfully to: ${vttKey}`);
-     const getObjectCommand = new GetObjectCommand2({
-       Bucket: s3Bucket,
-       Key: vttKey
-     });
-     presignedUrl = await getSignedUrl2(s3Client, getObjectCommand, {
-       expiresIn: 3600
-       // 1 hour
-     });
-     console.log(`\u{1F517} Generated presigned URL (expires in 1 hour)`);
-   } catch (error) {
-     throw new Error(`Failed to upload VTT to S3: ${error instanceof Error ? error.message : "Unknown error"}`);
-   }
-   console.log("\u{1F4F9} Adding translated track to Mux asset...");
-   let uploadedTrackId;
-   try {
-     const languageName = new Intl.DisplayNames(["en"], { type: "language" }).of(toLanguageCode) || toLanguageCode.toUpperCase();
-     const trackName = `${languageName} (auto-translated)`;
-     const trackResponse = await clients.mux.video.assets.createTrack(assetId, {
-       type: "text",
-       text_type: "subtitles",
-       language_code: toLanguageCode,
-       name: trackName,
-       url: presignedUrl
-     });
-     uploadedTrackId = trackResponse.id;
-     console.log(`\u2705 Track added to Mux asset with ID: ${uploadedTrackId}`);
-     console.log(`\u{1F4CB} Track name: "${trackName}"`);
-   } catch (error) {
-     console.warn(`\u26A0\uFE0F Failed to add track to Mux asset: ${error instanceof Error ? error.message : "Unknown error"}`);
-     console.log("\u{1F517} You can manually add the track using this presigned URL:");
-     console.log(presignedUrl);
-   }
-   return {
-     assetId,
-     sourceLanguageCode: fromLanguageCode,
-     targetLanguageCode: toLanguageCode,
-     originalVtt: vttContent,
-     translatedVtt,
-     uploadedTrackId,
-     presignedUrl
-   };
- }
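// --- Editorial usage sketch (illustrative only; not part of the package diff).
// Translating an asset's existing English text track to French with
// translateCaptions as defined above; the result is uploaded back to the asset
// by default. Assumes Mux, OpenAI, and S3 credentials are set via environment
// variables; "YOUR_ASSET_ID" is a placeholder.
const captions = await translateCaptions("YOUR_ASSET_ID", "en", "fr", {});
console.log(captions.uploadedTrackId ?? "track not uploaded");
// --- End editorial sketch.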
- export {
-   SUMMARY_KEYWORD_LIMIT,
-   burnedInCaptionsSchema,
-   chapterSchema,
-   chaptersSchema,
-   generateChapters,
-   generateVideoEmbeddings,
-   getModerationScores,
-   getSummaryAndTags,
-   hasBurnedInCaptions,
-   summarySchema,
-   translateAudio,
-   translateCaptions,
-   translationSchema
- };
- //# sourceMappingURL=index.mjs.map