@mux/ai 0.3.1 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -83,12 +83,12 @@ S3_SECRET_ACCESS_KEY=your-secret-key
 
  | Workflow | Description | Providers | Default Models | Mux Asset Requirements | Cloud Infrastructure Requirements |
  | ------------------------------------------------------------------------ | ----------------------------------------------------------------- | ------------------------- | ------------------------------------------------------------------ | ---------------------- | --------------------------------- |
- | [`getSummaryAndTags`](./docs/WORKFLOWS.md#video-summarization)<br/>[API](./docs/API.md#getsummaryandtagsassetid-options) · [Source](./src/workflows/summarization.ts) | Generate titles, descriptions, and tags for an asset | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-2.5-flash` (Google) | Video (required), Captions (optional) | None |
+ | [`getSummaryAndTags`](./docs/WORKFLOWS.md#video-summarization)<br/>[API](./docs/API.md#getsummaryandtagsassetid-options) · [Source](./src/workflows/summarization.ts) | Generate titles, descriptions, and tags for an asset | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-3-flash-preview` (Google) | Video (required), Captions (optional) | None |
  | [`getModerationScores`](./docs/WORKFLOWS.md#content-moderation)<br/>[API](./docs/API.md#getmoderationscoresassetid-options) · [Source](./src/workflows/moderation.ts) | Detect inappropriate (sexual or violent) content in an asset | OpenAI, Hive | `omni-moderation-latest` (OpenAI) or Hive visual moderation task | Video (required) | None |
- | [`hasBurnedInCaptions`](./docs/WORKFLOWS.md#burned-in-caption-detection)<br/>[API](./docs/API.md#hasburnedincaptionsassetid-options) · [Source](./src/workflows/burned-in-captions.ts) | Detect burned-in captions (hardcoded subtitles) in an asset | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-2.5-flash` (Google) | Video (required) | None |
- | [`generateChapters`](./docs/WORKFLOWS.md#chapter-generation)<br/>[API](./docs/API.md#generatechaptersassetid-languagecode-options) · [Source](./src/workflows/chapters.ts) | Generate chapter markers for an asset using the transcript | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-2.5-flash` (Google) | Video (required), Captions (required) | None |
+ | [`hasBurnedInCaptions`](./docs/WORKFLOWS.md#burned-in-caption-detection)<br/>[API](./docs/API.md#hasburnedincaptionsassetid-options) · [Source](./src/workflows/burned-in-captions.ts) | Detect burned-in captions (hardcoded subtitles) in an asset | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-3-flash-preview` (Google) | Video (required) | None |
+ | [`generateChapters`](./docs/WORKFLOWS.md#chapter-generation)<br/>[API](./docs/API.md#generatechaptersassetid-languagecode-options) · [Source](./src/workflows/chapters.ts) | Generate chapter markers for an asset using the transcript | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-3-flash-preview` (Google) | Video (required), Captions (required) | None |
  | [`generateVideoEmbeddings`](./docs/WORKFLOWS.md#video-embeddings)<br/>[API](./docs/API.md#generatevideoembeddingsassetid-options) · [Source](./src/workflows/embeddings.ts) | Generate vector embeddings for an asset's transcript chunks | OpenAI, Google | `text-embedding-3-small` (OpenAI), `gemini-embedding-001` (Google) | Video (required), Captions (required) | None |
- | [`translateCaptions`](./docs/WORKFLOWS.md#caption-translation)<br/>[API](./docs/API.md#translatecaptionsassetid-fromlanguagecode-tolanguagecode-options) · [Source](./src/workflows/translate-captions.ts) | Translate an asset's captions into different languages | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-2.5-flash` (Google) | Video (required), Captions (required) | AWS S3 (if `uploadToMux=true`) |
+ | [`translateCaptions`](./docs/WORKFLOWS.md#caption-translation)<br/>[API](./docs/API.md#translatecaptionsassetid-fromlanguagecode-tolanguagecode-options) · [Source](./src/workflows/translate-captions.ts) | Translate an asset's captions into different languages | OpenAI, Anthropic, Google | `gpt-5.1` (OpenAI), `claude-sonnet-4-5` (Anthropic), `gemini-3-flash-preview` (Google) | Video (required), Captions (required) | AWS S3 (if `uploadToMux=true`) |
  | [`translateAudio`](./docs/WORKFLOWS.md#audio-dubbing)<br/>[API](./docs/API.md#translateaudioassetid-tolanguagecode-options) · [Source](./src/workflows/translate-audio.ts) | Create AI-dubbed audio tracks in different languages for an asset | ElevenLabs only | ElevenLabs Dubbing API | Video (required), Audio (required) | AWS S3 (if `uploadToMux=true`) |
 
  ## Compatability with Workflow DevKit
@@ -233,7 +233,7 @@ for (const chunk of result.chunks) {
 
  # Key Features
 
- - **Cost-Effective by Default**: Uses affordable frontier models like `gpt-5.1`, `claude-sonnet-4-5`, and `gemini-2.5-flash` to keep analysis costs low while maintaining high quality results
+ - **Cost-Effective by Default**: Uses affordable frontier models like `gpt-5.1`, `claude-sonnet-4-5`, and `gemini-3-flash-preview` to keep analysis costs low while maintaining high quality results
  - **Multi-modal Analysis**: Combines storyboard images with video transcripts for richer understanding
  - **Tone Control**: Choose between neutral, playful, or professional analysis styles for summarization
  - **Prompt Customization**: Override specific prompt sections to tune workflows to your exact use case
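Both the workflows table and the Key Features bullet above now name `gemini-3-flash-preview` as the Google default. The sketch below shows how a caller might pin a different model rather than rely on that default; the import path and the `provider`/`model` option names are assumptions for illustration only, not the documented API (see docs/API.md).

```ts
// Hypothetical sketch — import path and option names are assumptions, not the documented API.
import { getSummaryAndTags } from "@mux/ai";

const summary = await getSummaryAndTags("your-mux-asset-id", {
  // Pin a specific Google model instead of the new default (gemini-3-flash-preview).
  provider: "google", // assumed option name
  model: "gemini-2.5-flash", // assumed option name
});

console.log(summary);
```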
package/dist/index.js CHANGED
@@ -404,7 +404,7 @@ import { createOpenAI } from "@ai-sdk/openai";
  var DEFAULT_LANGUAGE_MODELS = {
    openai: "gpt-5.1",
    anthropic: "claude-sonnet-4-5",
-   google: "gemini-2.5-flash"
+   google: "gemini-3-flash-preview"
  };
  var DEFAULT_EMBEDDING_MODELS = {
    openai: "text-embedding-3-small",
@@ -1338,57 +1338,57 @@ async function processConcurrently(items, processor, maxConcurrent = 5) {
    }
    return results;
  }
+ async function moderateImageWithOpenAI(entry) {
+   "use step";
+   const apiKey = getApiKeyFromEnv("openai");
+   try {
+     const res = await fetch("https://api.openai.com/v1/moderations", {
+       method: "POST",
+       headers: {
+         "Content-Type": "application/json",
+         "Authorization": `Bearer ${apiKey}`
+       },
+       body: JSON.stringify({
+         model: entry.model,
+         input: [
+           {
+             type: "image_url",
+             image_url: {
+               url: entry.image
+             }
+           }
+         ]
+       })
+     });
+     const json = await res.json();
+     if (!res.ok) {
+       throw new Error(
+         `OpenAI moderation error: ${res.status} ${res.statusText} - ${JSON.stringify(json)}`
+       );
+     }
+     const categoryScores = json.results?.[0]?.category_scores || {};
+     return {
+       url: entry.url,
+       sexual: categoryScores.sexual || 0,
+       violence: categoryScores.violence || 0,
+       error: false
+     };
+   } catch (error) {
+     console.error("OpenAI moderation failed:", error);
+     return {
+       url: entry.url,
+       sexual: 0,
+       violence: 0,
+       error: true
+     };
+   }
+ }
  async function requestOpenAIModeration(imageUrls, model, maxConcurrent = 5, submissionMode = "url", downloadOptions) {
    "use step";
    const targetUrls = submissionMode === "base64" ? (await downloadImagesAsBase64(imageUrls, downloadOptions, maxConcurrent)).map(
      (img) => ({ url: img.url, image: img.base64Data, model })
    ) : imageUrls.map((url) => ({ url, image: url, model }));
-   const moderate = async (entry) => {
-     "use step";
-     const apiKey = getApiKeyFromEnv("openai");
-     try {
-       const res = await fetch("https://api.openai.com/v1/moderations", {
-         method: "POST",
-         headers: {
-           "Content-Type": "application/json",
-           "Authorization": `Bearer ${apiKey}`
-         },
-         body: JSON.stringify({
-           model: entry.model,
-           input: [
-             {
-               type: "image_url",
-               image_url: {
-                 url: entry.image
-               }
-             }
-           ]
-         })
-       });
-       const json = await res.json();
-       if (!res.ok) {
-         throw new Error(
-           `OpenAI moderation error: ${res.status} ${res.statusText} - ${JSON.stringify(json)}`
-         );
-       }
-       const categoryScores = json.results?.[0]?.category_scores || {};
-       return {
-         url: entry.url,
-         sexual: categoryScores.sexual || 0,
-         violence: categoryScores.violence || 0,
-         error: false
-       };
-     } catch (error) {
-       console.error("OpenAI moderation failed:", error);
-       return {
-         url: entry.url,
-         sexual: 0,
-         violence: 0,
-         error: true
-       };
-     }
-   };
-   return processConcurrently(targetUrls, moderate, maxConcurrent);
+   return processConcurrently(targetUrls, moderateImageWithOpenAI, maxConcurrent);
  }
  function getHiveCategoryScores(classes, categoryNames) {
    const scoreMap = Object.fromEntries(
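The refactored hunk above passes the named `moderateImageWithOpenAI` step directly to `processConcurrently(items, processor, maxConcurrent = 5)`. The helper's full body is not visible in this diff, so the following is only a plausible sketch of a concurrency-limited map with that signature, not the package's actual implementation.

```ts
// Sketch of a concurrency-limited map matching the signature in the hunk header;
// the real body of processConcurrently is not shown in this diff.
async function processConcurrently<T, R>(
  items: T[],
  processor: (item: T) => Promise<R>,
  maxConcurrent = 5
): Promise<R[]> {
  const results: R[] = new Array(items.length);
  let next = 0;

  // Each worker repeatedly claims the next unprocessed index until none remain.
  async function worker(): Promise<void> {
    while (next < items.length) {
      const index = next++;
      results[index] = await processor(items[index]);
    }
  }

  await Promise.all(
    Array.from({ length: Math.min(maxConcurrent, items.length) }, worker)
  );
  return results;
}
```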
@@ -1397,6 +1397,51 @@ function getHiveCategoryScores(classes, categoryNames) {
    const scores = categoryNames.map((category) => scoreMap[category] || 0);
    return Math.max(...scores, 0);
  }
+ async function moderateImageWithHive(entry) {
+   "use step";
+   const apiKey = getApiKeyFromEnv("hive");
+   try {
+     const formData = new FormData();
+     if (entry.source.kind === "url") {
+       formData.append("url", entry.source.value);
+     } else {
+       const extension = entry.source.contentType.split("/")[1] || "jpg";
+       const blob = new Blob([entry.source.buffer], {
+         type: entry.source.contentType
+       });
+       formData.append("media", blob, `thumbnail.${extension}`);
+     }
+     const res = await fetch(HIVE_ENDPOINT, {
+       method: "POST",
+       headers: {
+         Accept: "application/json",
+         Authorization: `Token ${apiKey}`
+       },
+       body: formData
+     });
+     const json = await res.json().catch(() => void 0);
+     if (!res.ok) {
+       throw new Error(
+         `Hive moderation error: ${res.status} ${res.statusText} - ${JSON.stringify(json)}`
+       );
+     }
+     const classes = json?.status?.[0]?.response?.output?.[0]?.classes || [];
+     return {
+       url: entry.url,
+       sexual: getHiveCategoryScores(classes, HIVE_SEXUAL_CATEGORIES),
+       violence: getHiveCategoryScores(classes, HIVE_VIOLENCE_CATEGORIES),
+       error: false
+     };
+   } catch (error) {
+     console.error("Hive moderation failed:", error);
+     return {
+       url: entry.url,
+       sexual: 0,
+       violence: 0,
+       error: true
+     };
+   }
+ }
  async function requestHiveModeration(imageUrls, maxConcurrent = 5, submissionMode = "url", downloadOptions) {
    "use step";
    const targets = submissionMode === "base64" ? (await downloadImagesAsBase64(imageUrls, downloadOptions, maxConcurrent)).map((img) => ({
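`moderateImageWithHive` reduces Hive's `classes` array to one sexual and one violence score via `getHiveCategoryScores`. A self-contained sketch of that reduction follows; the `Object.fromEntries` mapping and the sample class names are assumptions, since the relevant line and the category constants fall outside this hunk.

```ts
// Sketch of the category-score reduction; the mapping inside Object.fromEntries and
// the class names below are assumptions (the real line and constants are outside this hunk).
type HiveClass = { class: string; score: number };

function getHiveCategoryScores(classes: HiveClass[], categoryNames: string[]): number {
  const scoreMap = Object.fromEntries(classes.map((c) => [c.class, c.score] as const));
  const scores = categoryNames.map((category) => scoreMap[category] || 0);
  return Math.max(...scores, 0); // the trailing 0 keeps empty inputs at score 0
}

// Hypothetical Hive classes payload: the highest matching category score wins.
const sample: HiveClass[] = [
  { class: "general_suggestive", score: 0.12 },
  { class: "yes_violence", score: 0.02 },
];
console.log(getHiveCategoryScores(sample, ["general_suggestive", "general_nsfw"])); // 0.12
```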
@@ -1410,52 +1455,7 @@ async function requestHiveModeration(imageUrls, maxConcurrent = 5, submissionMod
      url,
      source: { kind: "url", value: url }
    }));
-   const moderate = async (entry) => {
-     "use step";
-     const apiKey = getApiKeyFromEnv("hive");
-     try {
-       const formData = new FormData();
-       if (entry.source.kind === "url") {
-         formData.append("url", entry.source.value);
-       } else {
-         const extension = entry.source.contentType.split("/")[1] || "jpg";
-         const blob = new Blob([entry.source.buffer], {
-           type: entry.source.contentType
-         });
-         formData.append("media", blob, `thumbnail.${extension}`);
-       }
-       const res = await fetch(HIVE_ENDPOINT, {
-         method: "POST",
-         headers: {
-           Accept: "application/json",
-           Authorization: `Token ${apiKey}`
-         },
-         body: formData
-       });
-       const json = await res.json().catch(() => void 0);
-       if (!res.ok) {
-         throw new Error(
-           `Hive moderation error: ${res.status} ${res.statusText} - ${JSON.stringify(json)}`
-         );
-       }
-       const classes = json?.status?.[0]?.response?.output?.[0]?.classes || [];
-       return {
-         url: entry.url,
-         sexual: getHiveCategoryScores(classes, HIVE_SEXUAL_CATEGORIES),
-         violence: getHiveCategoryScores(classes, HIVE_VIOLENCE_CATEGORIES),
-         error: false
-       };
-     } catch (error) {
-       console.error("Hive moderation failed:", error);
-       return {
-         url: entry.url,
-         sexual: 0,
-         violence: 0,
-         error: true
-       };
-     }
-   };
-   return processConcurrently(targets, moderate, maxConcurrent);
+   return processConcurrently(targets, moderateImageWithHive, maxConcurrent);
  }
  async function getModerationScores(assetId, options = {}) {
    "use workflow";