@vargai/sdk 0.1.1 → 0.1.2

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (46)
  1. package/.github/workflows/ci.yml +23 -0
  2. package/.husky/README.md +102 -0
  3. package/.husky/commit-msg +9 -0
  4. package/.husky/pre-commit +12 -0
  5. package/.husky/pre-push +9 -0
  6. package/.size-limit.json +8 -0
  7. package/.test-hooks.ts +5 -0
  8. package/CONTRIBUTING.md +150 -0
  9. package/LICENSE.md +53 -0
  10. package/README.md +7 -0
  11. package/action/captions/index.ts +202 -12
  12. package/action/captions/tiktok.ts +538 -0
  13. package/action/cut/index.ts +119 -0
  14. package/action/fade/index.ts +116 -0
  15. package/action/merge/index.ts +177 -0
  16. package/action/remove/index.ts +184 -0
  17. package/action/split/index.ts +133 -0
  18. package/action/transition/index.ts +154 -0
  19. package/action/trim/index.ts +117 -0
  20. package/bun.lock +299 -8
  21. package/cli/index.ts +1 -1
  22. package/commitlint.config.js +22 -0
  23. package/index.ts +12 -0
  24. package/lib/ass.ts +547 -0
  25. package/lib/fal.ts +75 -1
  26. package/lib/ffmpeg.ts +400 -0
  27. package/lib/higgsfield/example.ts +22 -29
  28. package/lib/higgsfield/index.ts +3 -2
  29. package/lib/higgsfield/soul.ts +0 -5
  30. package/lib/remotion/SKILL.md +240 -21
  31. package/lib/remotion/cli.ts +34 -0
  32. package/package.json +20 -3
  33. package/pipeline/cookbooks/scripts/animate-frames-parallel.ts +83 -0
  34. package/pipeline/cookbooks/scripts/combine-scenes.sh +53 -0
  35. package/pipeline/cookbooks/scripts/generate-frames-parallel.ts +98 -0
  36. package/pipeline/cookbooks/scripts/still-to-video.sh +37 -0
  37. package/pipeline/cookbooks/text-to-tiktok.md +669 -0
  38. package/scripts/.gitkeep +0 -0
  39. package/service/music/index.ts +29 -14
  40. package/tsconfig.json +1 -1
  41. package/HIGGSFIELD_REWRITE_SUMMARY.md +0 -300
  42. package/TEST_RESULTS.md +0 -122
  43. package/output.txt +0 -1
  44. package/scripts/produce-menopause-campaign.sh +0 -202
  45. package/test-import.ts +0 -7
  46. package/test-services.ts +0 -97
package/lib/fal.ts CHANGED
@@ -15,6 +15,7 @@ interface FalImageToVideoArgs {
   imageUrl: string; // can be url or local file path
   modelVersion?: string;
   duration?: 5 | 10;
+  tailImageUrl?: string; // end frame for looping
 }
 
 /**
@@ -52,6 +53,7 @@ interface FalTextToVideoArgs {
   prompt: string;
   modelVersion?: string;
   duration?: 5 | 10;
+  aspectRatio?: "16:9" | "9:16" | "1:1";
 }
 
 export async function imageToVideo(args: FalImageToVideoArgs) {
@@ -60,9 +62,15 @@ export async function imageToVideo(args: FalImageToVideoArgs) {
   console.log(`[fal] starting image-to-video: ${modelId}`);
   console.log(`[fal] prompt: ${args.prompt}`);
   console.log(`[fal] image: ${args.imageUrl}`);
+  if (args.tailImageUrl) {
+    console.log(`[fal] tail image (loop): ${args.tailImageUrl}`);
+  }
 
   // upload local file if needed
   const imageUrl = await ensureImageUrl(args.imageUrl);
+  const tailImageUrl = args.tailImageUrl
+    ? await ensureImageUrl(args.tailImageUrl)
+    : undefined;
 
   try {
     const result = await fal.subscribe(modelId, {
@@ -70,6 +78,7 @@ export async function imageToVideo(args: FalImageToVideoArgs) {
       prompt: args.prompt,
       image_url: imageUrl,
       duration: args.duration || 5,
+      ...(tailImageUrl && { tail_image_url: tailImageUrl }),
     },
     logs: true,
     onQueueUpdate: (update: {
@@ -103,6 +112,7 @@ export async function textToVideo(args: FalTextToVideoArgs) {
     input: {
       prompt: args.prompt,
       duration: args.duration || 5,
+      aspect_ratio: args.aspectRatio || "16:9",
     },
     logs: true,
     onQueueUpdate: (update: {
@@ -302,6 +312,61 @@ export async function wan25(args: FalWan25Args) {
   }
 }
 
+interface FalTextToMusicArgs {
+  prompt?: string;
+  tags?: string[];
+  lyricsPrompt?: string;
+  seed?: number;
+  promptStrength?: number;
+  balanceStrength?: number;
+  numSongs?: 1 | 2;
+  outputFormat?: "flac" | "mp3" | "wav" | "ogg" | "m4a";
+  outputBitRate?: 128 | 192 | 256 | 320;
+  bpm?: number | "auto";
+}
+
+export async function textToMusic(args: FalTextToMusicArgs) {
+  const modelId = "fal-ai/sonauto/bark";
+
+  console.log(`[fal] starting text-to-music: ${modelId}`);
+  if (args.prompt) console.log(`[fal] prompt: ${args.prompt}`);
+  if (args.tags) console.log(`[fal] tags: ${args.tags.join(", ")}`);
+
+  try {
+    const result = await fal.subscribe(modelId, {
+      input: {
+        prompt: args.prompt,
+        tags: args.tags,
+        lyrics_prompt: args.lyricsPrompt,
+        seed: args.seed,
+        prompt_strength: args.promptStrength,
+        balance_strength: args.balanceStrength,
+        num_songs: args.numSongs,
+        output_format: args.outputFormat,
+        output_bit_rate: args.outputBitRate,
+        bpm: args.bpm,
+      },
+      logs: true,
+      onQueueUpdate: (update: {
+        status: string;
+        logs?: Array<{ message: string }>;
+      }) => {
+        if (update.status === "IN_PROGRESS") {
+          console.log(
+            `[fal] ${update.logs?.map((l) => l.message).join(" ") || "processing..."}`,
+          );
+        }
+      },
+    });
+
+    console.log("[fal] completed!");
+    return result;
+  } catch (error) {
+    console.error("[fal] error:", error);
+    throw error;
+  }
+}
+
 // cli runner
 if (import.meta.main) {
   const [command, ...args] = process.argv.slice(2);
@@ -336,10 +401,13 @@ examples:
     if (!args[0]) {
       console.log(`
 usage:
-  bun run lib/fal.ts text_to_video <prompt> [duration]
+  bun run lib/fal.ts text_to_video <prompt> [duration] [aspect_ratio]
 
 examples:
   bun run lib/fal.ts text_to_video "ocean waves crashing" 5
+  bun run lib/fal.ts text_to_video "man walking in rain" 10 9:16
+
+aspect_ratio: 16:9 (landscape), 9:16 (portrait/tiktok), 1:1 (square)
 `);
       process.exit(1);
     }
@@ -348,9 +416,15 @@ examples:
       console.error("duration must be 5 or 10");
       process.exit(1);
     }
+    const aspectRatio = args[2] as "16:9" | "9:16" | "1:1" | undefined;
+    if (aspectRatio && !["16:9", "9:16", "1:1"].includes(aspectRatio)) {
+      console.error("aspect_ratio must be 16:9, 9:16, or 1:1");
+      process.exit(1);
+    }
     const t2vResult = await textToVideo({
       prompt: args[0],
       duration: duration === "10" ? 10 : 5,
+      aspectRatio: aspectRatio || "16:9",
     });
     console.log(JSON.stringify(t2vResult, null, 2));
     break;
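For orientation, here is a minimal usage sketch of the new fal.ts surface: the tailImageUrl loop frame, the aspectRatio option, and textToMusic. The import path, file paths, and prompts are hypothetical, and the result shape is whatever fal.subscribe returns; this is a sketch against the signatures shown above, not part of the package.

// usage sketch (hypothetical paths and prompts) for the additions above
import { imageToVideo, textToMusic, textToVideo } from "./lib/fal";

// seamless loop: pass the opening frame as the tail image too
const loop = await imageToVideo({
  prompt: "slow cinematic push-in",
  imageUrl: "./frames/first.png",
  tailImageUrl: "./frames/first.png", // end frame for looping
  duration: 5,
});

// portrait output for tiktok-style clips
const vertical = await textToVideo({
  prompt: "man walking in rain",
  duration: 10,
  aspectRatio: "9:16",
});

// background track via the sonauto model; all fields are optional
const music = await textToMusic({
  tags: ["lo-fi", "chill"],
  outputFormat: "mp3",
  outputBitRate: 192,
  bpm: "auto",
});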
package/lib/ffmpeg.ts CHANGED
@@ -260,6 +260,34 @@ export interface ProbeResult {
   format: string;
 }
 
+export interface FadeVideoOptions {
+  input: string;
+  output: string;
+  type: "in" | "out" | "both";
+  duration: number;
+}
+
+export interface XfadeOptions {
+  input1: string;
+  input2: string;
+  output: string;
+  transition:
+    | "crossfade"
+    | "dissolve"
+    | "wipeleft"
+    | "wiperight"
+    | "slideup"
+    | "slidedown";
+  duration: number;
+  fit?: "pad" | "crop" | "blur" | "stretch";
+}
+
+export interface SplitAtTimestampsOptions {
+  input: string;
+  timestamps: number[];
+  outputPrefix: string;
+}
+
 export async function probe(input: string): Promise<ProbeResult> {
   if (!input) {
     throw new Error("input is required");
@@ -304,6 +332,378 @@ export async function probe(input: string): Promise<ProbeResult> {
   });
 }
 
+/**
+ * get video duration in seconds
+ */
+export async function getVideoDuration(input: string): Promise<number> {
+  const result = await probe(input);
+  return result.duration;
+}
+
+/**
+ * apply fade in/out effects to video
+ */
+export async function fadeVideo(options: FadeVideoOptions): Promise<string> {
+  const { input, output, type, duration } = options;
+
+  if (!input || !output) {
+    throw new Error("input and output are required");
+  }
+
+  if (!existsSync(input)) {
+    throw new Error(`input file not found: ${input}`);
+  }
+
+  console.log(`[ffmpeg] applying fade ${type} effect...`);
+
+  const videoDuration = await getVideoDuration(input);
+  const filters: string[] = [];
+
+  if (type === "in" || type === "both") {
+    filters.push(`fade=t=in:st=0:d=${duration}`);
+  }
+
+  if (type === "out" || type === "both") {
+    const fadeOutStart = videoDuration - duration;
+    filters.push(`fade=t=out:st=${fadeOutStart}:d=${duration}`);
+  }
+
+  // Also fade audio
+  const audioFilters: string[] = [];
+  if (type === "in" || type === "both") {
+    audioFilters.push(`afade=t=in:st=0:d=${duration}`);
+  }
+  if (type === "out" || type === "both") {
+    const fadeOutStart = videoDuration - duration;
+    audioFilters.push(`afade=t=out:st=${fadeOutStart}:d=${duration}`);
+  }
+
+  return new Promise((resolve, reject) => {
+    const command = ffmpeg(input);
+
+    if (filters.length > 0) {
+      command.videoFilters(filters);
+    }
+    if (audioFilters.length > 0) {
+      command.audioFilters(audioFilters);
+    }
+
+    command
+      .output(output)
+      .on("end", () => {
+        console.log(`[ffmpeg] saved to ${output}`);
+        resolve(output);
+      })
+      .on("error", (err) => {
+        console.error(`[ffmpeg] error:`, err);
+        reject(err);
+      })
+      .run();
+  });
+}
+
+/**
+ * check if video has audio track
+ */
+async function hasAudioTrack(input: string): Promise<boolean> {
+  return new Promise((resolve) => {
+    ffmpeg.ffprobe(input, (err, metadata) => {
+      if (err) {
+        resolve(false);
+        return;
+      }
+      const audioStream = metadata.streams.find(
+        (s) => s.codec_type === "audio",
+      );
+      resolve(!!audioStream);
+    });
+  });
+}
+
+/**
+ * Build scale filter for fitting video to target resolution
+ * @param fit - how to handle aspect ratio differences
+ * @param targetW - target width
+ * @param targetH - target height
+ * @param inputLabel - input stream label (e.g., "1:v")
+ * @param outputLabel - output stream label (e.g., "v1scaled")
+ */
+function buildScaleFilter(
+  fit: "pad" | "crop" | "blur" | "stretch",
+  targetW: number,
+  targetH: number,
+  inputLabel: string,
+  outputLabel: string,
+): string {
+  switch (fit) {
+    case "crop":
+      // Scale up to cover, then crop center
+      return `[${inputLabel}]scale=${targetW}:${targetH}:force_original_aspect_ratio=increase,crop=${targetW}:${targetH}[${outputLabel}]`;
+
+    case "stretch":
+      // Simple stretch (distorts aspect ratio)
+      return `[${inputLabel}]scale=${targetW}:${targetH}[${outputLabel}]`;
+
+    case "blur":
+      // Blur background fill (like TikTok/Instagram)
+      // 1. Create blurred scaled background
+      // 2. Overlay scaled video on top
+      return `[${inputLabel}]split[bg][fg];[bg]scale=${targetW}:${targetH},boxblur=20:20[bgblur];[fg]scale=${targetW}:${targetH}:force_original_aspect_ratio=decrease[fgscaled];[bgblur][fgscaled]overlay=(W-w)/2:(H-h)/2[${outputLabel}]`;
+
+    case "pad":
+    default:
+      // Add black bars (letterbox/pillarbox)
+      return `[${inputLabel}]scale=${targetW}:${targetH}:force_original_aspect_ratio=decrease,pad=${targetW}:${targetH}:(ow-iw)/2:(oh-ih)/2[${outputLabel}]`;
+  }
+}
+
+/**
+ * crossfade transition between two videos using xfade filter
+ * automatically scales second video to match first if resolutions differ
+ * @param fit - how to handle resolution differences: pad (black bars), crop, blur (TikTok style), stretch
+ */
+export async function xfadeVideos(options: XfadeOptions): Promise<string> {
+  const { input1, input2, output, transition, duration, fit = "pad" } = options;
+
+  if (!input1 || !input2 || !output) {
+    throw new Error("input1, input2, and output are required");
+  }
+
+  if (!existsSync(input1)) {
+    throw new Error(`input file not found: ${input1}`);
+  }
+  if (!existsSync(input2)) {
+    throw new Error(`input file not found: ${input2}`);
+  }
+
+  console.log(`[ffmpeg] applying ${transition} transition...`);
+
+  // Get info for both videos
+  const [info1, info2] = await Promise.all([probe(input1), probe(input2)]);
+
+  const video1Duration = info1.duration;
+  const offset = video1Duration - duration;
+
+  // Check if videos have audio
+  const [hasAudio1, hasAudio2] = await Promise.all([
+    hasAudioTrack(input1),
+    hasAudioTrack(input2),
+  ]);
+  const hasAudio = hasAudio1 && hasAudio2;
+
+  // Check if resolutions differ
+  const needsScale =
+    info1.width !== info2.width || info1.height !== info2.height;
+
+  if (needsScale) {
+    console.log(
+      `[ffmpeg] fitting video2 (${info2.width}x${info2.height}) to (${info1.width}x${info1.height}) using "${fit}" mode`,
+    );
+  }
+
+  // Map transition names to ffmpeg xfade transition names
+  const transitionMap: Record<string, string> = {
+    crossfade: "fade",
+    dissolve: "dissolve",
+    wipeleft: "wipeleft",
+    wiperight: "wiperight",
+    slideup: "slideup",
+    slidedown: "slidedown",
+  };
+
+  const xfadeTransition = transitionMap[transition] || "fade";
+
+  return new Promise((resolve, reject) => {
+    const command = ffmpeg().input(input1).input(input2);
+
+    // Build filter complex based on audio and scale requirements
+    const filters: string[] = [];
+
+    if (needsScale) {
+      // Scale second video to match first using specified fit mode
+      filters.push(
+        buildScaleFilter(fit, info1.width, info1.height, "1:v", "v1scaled"),
+      );
+      filters.push(
+        `[0:v][v1scaled]xfade=transition=${xfadeTransition}:duration=${duration}:offset=${offset}[vout]`,
+      );
+    } else {
+      filters.push(
+        `[0:v][1:v]xfade=transition=${xfadeTransition}:duration=${duration}:offset=${offset}[vout]`,
+      );
+    }
+
+    // Common output options for proper codec compatibility
+    const codecOptions = [
+      "-c:v",
+      "libx264",
+      "-preset",
+      "fast",
+      "-crf",
+      "22",
+      "-pix_fmt",
+      "yuv420p", // Ensures compatibility with most players
+    ];
+
+    if (hasAudio) {
+      filters.push(`[0:a][1:a]acrossfade=d=${duration}[aout]`);
+      command
+        .complexFilter(filters)
+        .outputOptions([
+          "-map",
+          "[vout]",
+          "-map",
+          "[aout]",
+          ...codecOptions,
+          "-c:a",
+          "aac",
+          "-b:a",
+          "192k",
+        ]);
+    } else {
+      command
+        .complexFilter(filters)
+        .outputOptions(["-map", "[vout]", ...codecOptions]);
+    }
+
+    command
+      .output(output)
+      .on("end", () => {
+        console.log(`[ffmpeg] saved to ${output}`);
+        resolve(output);
+      })
+      .on("error", (err) => {
+        console.error(`[ffmpeg] error:`, err);
+        reject(err);
+      })
+      .run();
+  });
+}
+
+/**
+ * split video at specific timestamps into multiple files
+ */
+export async function splitAtTimestamps(
+  options: SplitAtTimestampsOptions,
+): Promise<string[]> {
+  const { input, timestamps, outputPrefix } = options;
+
+  if (!input || !outputPrefix) {
+    throw new Error("input and outputPrefix are required");
+  }
+
+  if (!existsSync(input)) {
+    throw new Error(`input file not found: ${input}`);
+  }
+
+  if (!timestamps || timestamps.length === 0) {
+    throw new Error("at least one timestamp is required");
+  }
+
+  console.log(`[ffmpeg] splitting video at ${timestamps.length} timestamps...`);
+
+  const videoDuration = await getVideoDuration(input);
+  const sortedTimestamps = [0, ...timestamps.sort((a, b) => a - b)];
+
+  // Add video duration as the last point if not already included
+  const lastTimestamp = sortedTimestamps[sortedTimestamps.length - 1];
+  if (lastTimestamp !== undefined && lastTimestamp < videoDuration) {
+    sortedTimestamps.push(videoDuration);
+  }
+
+  const outputs: string[] = [];
+
+  for (let i = 0; i < sortedTimestamps.length - 1; i++) {
+    const start = sortedTimestamps[i];
+    const end = sortedTimestamps[i + 1];
+    if (start === undefined || end === undefined) continue;
+
+    const duration = end - start;
+    const partNumber = String(i + 1).padStart(3, "0");
+    const outputPath = `${outputPrefix}_${partNumber}.mp4`;
+
+    console.log(
+      `[ffmpeg] extracting part ${i + 1}: ${start}s - ${end}s (${duration}s)`,
+    );
+
+    await trimVideo({
+      input,
+      output: outputPath,
+      start,
+      duration,
+    });
+
+    outputs.push(outputPath);
+  }
+
+  console.log(`[ffmpeg] created ${outputs.length} parts`);
+  return outputs;
+}
+
+/**
+ * concatenate videos using a file list (safer for many files)
+ */
+export async function concatWithFileList(
+  inputs: string[],
+  output: string,
+): Promise<string> {
+  if (!inputs || inputs.length === 0) {
+    throw new Error("at least one input is required");
+  }
+  if (!output) {
+    throw new Error("output is required");
+  }
+
+  // validate all inputs exist
+  for (const input of inputs) {
+    if (!existsSync(input)) {
+      throw new Error(`input file not found: ${input}`);
+    }
+  }
+
+  console.log(
+    `[ffmpeg] concatenating ${inputs.length} videos with file list...`,
+  );
+
+  // Create a temporary file list
+  const { writeFileSync, unlinkSync } = await import("node:fs");
+  const { join } = await import("node:path");
+  const { tmpdir } = await import("node:os");
+
+  const listPath = join(tmpdir(), `concat-list-${Date.now()}.txt`);
+  const listContent = inputs.map((f) => `file '${f}'`).join("\n");
+  writeFileSync(listPath, listContent);
+
+  return new Promise((resolve, reject) => {
+    ffmpeg()
+      .input(listPath)
+      .inputOptions(["-f", "concat", "-safe", "0"])
+      .outputOptions(["-c", "copy"])
+      .output(output)
+      .on("end", () => {
+        // Cleanup temp file
+        try {
+          unlinkSync(listPath);
+        } catch {
+          // ignore cleanup errors
+        }
+        console.log(`[ffmpeg] saved to ${output}`);
+        resolve(output);
+      })
+      .on("error", (err) => {
+        // Cleanup temp file
+        try {
+          unlinkSync(listPath);
+        } catch {
+          // ignore cleanup errors
+        }
+        console.error(`[ffmpeg] error:`, err);
+        reject(err);
+      })
+      .run();
+  });
+}
+
 // cli
 async function cli() {
   const args = process.argv.slice(2);
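Putting the new ffmpeg helpers together: a minimal sketch with hypothetical file names that splits a clip, crossfades two parts, concatenates, and fades the result. Note the overlap arithmetic in xfadeVideos: the xfade offset is duration(input1) minus the transition duration, so a 1s crossfade on a 12s first clip begins at 11s.

// usage sketch (hypothetical file names) chaining the helpers above
import {
  concatWithFileList,
  fadeVideo,
  splitAtTimestamps,
  xfadeVideos,
} from "./lib/ffmpeg";

// split a long take at 10s and 25s -> scene_001.mp4 ... scene_003.mp4
const parts = await splitAtTimestamps({
  input: "long-take.mp4",
  timestamps: [10, 25],
  outputPrefix: "scene",
});

// 1s crossfade; the overlap starts at duration(parts[0]) - 1s per the offset math above
await xfadeVideos({
  input1: parts[0]!,
  input2: parts[1]!,
  output: "joined.mp4",
  transition: "crossfade",
  duration: 1,
  fit: "blur", // blurred background fill if the two resolutions differ
});

// concat demuxer with -c copy assumes the inputs share codecs/parameters,
// so this step works best when all parts came from the same source encode
await concatWithFileList(["joined.mp4", ...parts.slice(2)], "full.mp4");

// finally, fade the assembled video (and its audio) in and out
await fadeVideo({
  input: "full.mp4",
  output: "final.mp4",
  type: "both",
  duration: 0.5,
});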
package/lib/higgsfield/example.ts CHANGED
@@ -4,12 +4,12 @@
  */
 
 import {
+  BatchSize,
   generateSoul,
   listSoulStyles,
   SoulClient,
-  SoulSize,
   SoulQuality,
-  BatchSize,
+  SoulSize,
 } from "./soul";
 
 // Example 1: Simple generation
@@ -23,7 +23,7 @@ async function simpleGeneration() {
     batch_size: BatchSize.SINGLE,
   });
 
-  if (result.status === "completed" && result.images) {
+  if (result.status === "completed" && result.images && result.images[0]) {
     console.log("✓ Generation successful!");
     console.log(`Image URL: ${result.images[0].url}`);
   } else {
@@ -41,17 +41,19 @@ async function generationWithStyle() {
 
   if (styles.length > 0) {
     const firstStyle = styles[0];
-    console.log(`Using style: ${firstStyle.name} (${firstStyle.id})`);
-
-    const result = await generateSoul({
-      prompt: "portrait of a wise old wizard",
-      style_id: firstStyle.id,
-      quality: SoulQuality.HD,
-    });
-
-    if (result.status === "completed" && result.images) {
-      console.log(" Generation with style successful!");
-      console.log(`Image URL: ${result.images[0].url}`);
+    if (firstStyle) {
+      console.log(`Using style: ${firstStyle.name} (${firstStyle.id})`);
+
+      const result = await generateSoul({
+        prompt: "portrait of a wise old wizard",
+        style_id: firstStyle.id,
+        quality: SoulQuality.HD,
+      });
+
+      if (result.status === "completed" && result.images && result.images[0]) {
+        console.log("✓ Generation with style successful!");
+        console.log(`Image URL: ${result.images[0].url}`);
+      }
     }
   }
 }
@@ -80,7 +82,7 @@ async function manualQueueManagement() {
     },
   });
 
-  if (result.status === "completed" && result.images) {
+  if (result.status === "completed" && result.images && result.images[0]) {
     console.log("✓ Generation complete!");
     console.log(`Image URL: ${result.images[0].url}`);
   }
@@ -90,19 +92,12 @@ async function manualQueueManagement() {
 async function generationWithWebhook() {
   console.log("\n=== Example 4: Generation with Webhook ===\n");
 
-  const result = await generateSoul(
-    {
-      prompt: "abstract art with vibrant colors",
-      quality: SoulQuality.HD,
-    },
-    {
-      webhook: "https://your-webhook.url/higgsfield",
-    },
-  );
+  const result = await generateSoul({
+    prompt: "abstract art with vibrant colors",
+    quality: SoulQuality.HD,
+  });
 
-  console.log(
-    "Request submitted with webhook - will receive results at webhook URL",
-  );
+  console.log("Request submitted - check status for completion");
   console.log(`Request ID: ${result.request_id}`);
   console.log(`Status URL: ${result.status_url}`);
 }
@@ -224,5 +219,3 @@ Examples:
     process.exit(1);
   }
 }
-
-
package/lib/higgsfield/index.ts CHANGED
@@ -155,7 +155,9 @@ export class HiggsfieldClient {
     }
 
     if (response.status === 400) {
-      console.log(`[higgsfield] request cannot be canceled (already processing)`);
+      console.log(
+        `[higgsfield] request cannot be canceled (already processing)`,
+      );
       return false;
     }
 
@@ -238,4 +240,3 @@ export class HiggsfieldClient {
 }
 
 export default HiggsfieldClient;
-
package/lib/higgsfield/soul.ts CHANGED
@@ -55,10 +55,6 @@ export interface SoulStyle {
 export class SoulClient extends HiggsfieldClient {
   private static readonly MODEL_ID = "soul";
 
-  constructor(config?: HiggsfieldConfig) {
-    super(config);
-  }
-
   /**
    * Generate Soul images
    */
@@ -259,4 +255,3 @@ examples:
     process.exit(1);
   }
 }
-