nextjs-hackathon-stack 0.1.12 → 0.1.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -20,7 +20,7 @@ npx nextjs-hackathon-stack my-app
20
20
  | Forms | React Hook Form + Zod resolver |
21
21
  | UI | shadcn/ui + Tailwind CSS v4 |
22
22
  | AI Streaming | Vercel AI SDK + AI Gateway |
23
- | LLM | MiniMax M2.7 (`minimax/minimax-m2.7`) |
23
+ | LLM | Google Gemini 2.0 Flash (`google/gemini-2.0-flash`) |
24
24
  | Testing | Vitest + React Testing Library + Playwright |
25
25
 
26
26
  ## Quick start
@@ -34,7 +34,6 @@ cd my-app
34
34
  # NEXT_PUBLIC_SUPABASE_URL → supabase.com > Project Settings > API
35
35
  # NEXT_PUBLIC_SUPABASE_ANON_KEY → supabase.com > Project Settings > API
36
36
  # DATABASE_URL → supabase.com > Project Settings > Database
37
- # MINIMAX_API_KEY → minimaxi.chat > API Keys
38
37
  # AI_GATEWAY_URL → vercel.com > AI > Gateways
39
38
 
40
39
  npm run dev
@@ -53,9 +52,7 @@ npx nextjs-hackathon-stack my-app --skip-install
53
52
  ## Features
54
53
 
55
54
  - **Auth** — Email/password login with Supabase Auth, Server Actions, protected routes
56
- - **AI Chat** — Streaming chat with MiniMax M2.7 via Vercel AI Gateway (Edge runtime)
57
- - **Video Generation** — MiniMax Video-01 via direct API
58
- - **Text-to-Speech** — MiniMax Speech 2.6 via direct API
55
+ - **AI Chat** — Streaming chat with Gemini 2.0 Flash via Vercel AI Gateway (Edge runtime)
59
56
  - **TDD-ready** — 100% coverage enforced, Vitest + Playwright preconfigured
60
57
  - **Cursor AI** — Rules, agents, and skills preconfigured for the full stack
61
58
 
@@ -68,11 +65,9 @@ src/
68
65
  ├── app/ # Next.js routing + layouts
69
66
  ├── features/
70
67
  │ ├── auth/ # Login form, server actions, session hook
71
- ├── chat/ # AI chat (streaming)
72
- │ ├── video/ # Video generation
73
- │ └── tts/ # Text-to-speech
68
+ └── chat/ # AI chat (streaming)
74
69
  ├── shared/
75
- │ ├── lib/ # Supabase clients, AI, MiniMax
70
+ │ ├── lib/ # Supabase clients, AI
76
71
  │ ├── db/ # Drizzle schema + migrations
77
72
  │ └── components/# Providers + shadcn/ui
78
73
  └── e2e/ # Playwright e2e tests
package/dist/index.js CHANGED
@@ -159,8 +159,7 @@ async function scaffold(projectName, skipInstall) {
159
159
  console.log(` ${pc2.dim("NEXT_PUBLIC_SUPABASE_URL")} \u2014 from supabase.com > Project Settings > API`);
160
160
  console.log(` ${pc2.dim("NEXT_PUBLIC_SUPABASE_ANON_KEY")} \u2014 from supabase.com > Project Settings > API`);
161
161
  console.log(` ${pc2.dim("DATABASE_URL")} \u2014 from supabase.com > Project Settings > Database`);
162
- console.log(` ${pc2.dim("AI_GATEWAY_URL")} \u2014 Vercel AI Gateway URL`);
163
- console.log(` ${pc2.dim("MINIMAX_API_KEY")} \u2014 from minimaxi.chat`);
162
+ console.log(` ${pc2.dim("AI_GATEWAY_URL")} \u2014 from vercel.com > AI > Gateways`);
164
163
  console.log(` ${pc2.cyan("pnpm dev")}
165
164
  `);
166
165
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "nextjs-hackathon-stack",
3
- "version": "0.1.12",
3
+ "version": "0.1.13",
4
4
  "description": "Scaffold a full-stack Next.js hackathon starter",
5
5
  "type": "module",
6
6
  "bin": {
@@ -22,7 +22,7 @@
22
22
  "drizzle",
23
23
  "tanstack-query",
24
24
  "shadcn",
25
- "minimax",
25
+ "gemini",
26
26
  "starter"
27
27
  ],
28
28
  "repository": {
@@ -1,6 +1,6 @@
1
1
  ---
2
2
  description: AI SDK + Edge runtime rules. RISK 3 (cold starts).
3
- globs: ["src/features/chat/**", "src/features/video/**", "src/features/tts/**", "src/shared/lib/ai*"]
3
+ globs: ["src/features/chat/**", "src/shared/lib/ai*"]
4
4
  ---
5
5
 
6
6
  # AI Rules (Risk 3: Vercel Cold Starts)
@@ -30,12 +30,12 @@ If you need DB data in an AI route:
30
30
  ```typescript
31
31
  import { createOpenAI } from "@ai-sdk/openai";
32
32
 
33
- const gateway = createOpenAI({
33
+ const provider = createOpenAI({
34
34
  baseURL: process.env.AI_GATEWAY_URL,
35
- apiKey: process.env.MINIMAX_API_KEY,
35
+ apiKey: process.env.AI_API_KEY,
36
36
  });
37
37
 
38
- export const aiModel = gateway("MiniMax-Text-01");
38
+ export const aiModel = provider(process.env.AI_MODEL ?? "google/gemini-2.0-flash");
39
39
  ```
40
40
 
41
41
  ## Streaming Pattern
@@ -15,14 +15,14 @@ alwaysApply: true
15
15
  - **Validation**: Zod (auto-generated via `drizzle-zod`)
16
16
  - **Forms**: React Hook Form + Zod resolver
17
17
  - **UI**: shadcn/ui + Tailwind CSS v4
18
- - **AI**: Vercel AI SDK + MiniMax M2.7 via AI Gateway
18
+ - **AI**: Vercel AI SDK + Google Gemini 2.0 Flash via AI Gateway
19
19
  - **Testing**: Vitest + React Testing Library + Playwright
20
20
 
21
21
  ## Project Structure
22
22
  ```
23
23
  src/
24
24
  ├── app/ # Next.js App Router pages
25
- ├── features/ # Feature modules (auth, chat, video, tts)
25
+ ├── features/ # Feature modules (auth, chat)
26
26
  │ └── <feature>/
27
27
  │ ├── components/
28
28
  │ ├── actions/
@@ -14,7 +14,7 @@ Full-stack Next.js 15 hackathon starter.
14
14
  | State | TanStack Query v5 |
15
15
  | Forms | React Hook Form + Zod |
16
16
  | UI | shadcn/ui + Tailwind CSS v4 |
17
- | AI | Vercel AI SDK + MiniMax |
17
+ | AI | Vercel AI SDK + Gemini 2.0 Flash |
18
18
  | Testing | Vitest + Playwright |
19
19
 
20
20
  ## Getting Started
@@ -29,11 +29,10 @@ NEXT_PUBLIC_SUPABASE_URL=https://your-project-id.supabase.co
29
29
  NEXT_PUBLIC_SUPABASE_ANON_KEY=your-anon-key
30
30
  DATABASE_URL=postgresql://postgres:[password]@db.your-project-id.supabase.co:5432/postgres
31
31
 
32
- # MiniMax — https://www.minimaxi.chat > API Keys
33
- MINIMAX_API_KEY=your-minimax-api-key
34
-
35
- # Vercel AI Gateway — https://vercel.com > AI > Gateways
32
+ # AI — Vercel AI Gateway (default: Google Gemini 2.0 Flash — free tier)
33
+ # Create gateway at: https://vercel.com > AI > Gateways
36
34
  AI_GATEWAY_URL=https://gateway.ai.vercel.app/v1/your-team-id/your-gateway-id
35
+ AI_API_KEY=
37
36
  ```
38
37
 
39
38
  ### 2. Run the dev server
@@ -70,8 +69,8 @@ pnpm db:migrate # Apply migrations
70
69
  | `NEXT_PUBLIC_SUPABASE_URL` | Supabase > Project Settings > API |
71
70
  | `NEXT_PUBLIC_SUPABASE_ANON_KEY` | Supabase > Project Settings > API |
72
71
  | `DATABASE_URL` | Supabase > Project Settings > Database > URI |
73
- | `MINIMAX_API_KEY` | minimaxi.chat > API Keys |
74
72
  | `AI_GATEWAY_URL` | Vercel > AI > Gateways |
73
+ | `AI_API_KEY` | Your AI provider API key |
75
74
  | `NEXT_PUBLIC_APP_URL` | Your deployment URL (default: `http://localhost:3000`) |
76
75
 
77
76
  See `.env.example` for all required variables with comments.
@@ -82,7 +81,7 @@ Feature-based structure:
82
81
  ```
83
82
  src/
84
83
  ├── app/ # Next.js routing + layouts
85
- ├── features/ # auth | chat | video | tts
84
+ ├── features/ # auth | chat
86
85
  ├── shared/ # lib | db | components/ui
87
86
  └── e2e/ # Playwright tests
88
87
  ```
@@ -9,13 +9,13 @@ NEXT_PUBLIC_SUPABASE_ANON_KEY=your-anon-key
9
9
  # Supabase DB — https://supabase.com > Project Settings > Database > Connection string (URI)
10
10
  DATABASE_URL=postgresql://postgres:[password]@db.your-project-id.supabase.co:5432/postgres
11
11
 
12
- # AI Provider defaults to MiniMax direct API
13
- AI_API_KEY=your-api-key
14
- # Optional: override base URL (default: https://api.minimax.io/v1)
15
- # For Vercel Gateway: https://ai-gateway.vercel.sh/v1
16
- # AI_BASE_URL=https://api.minimax.io/v1
17
- # Optional: override model (default: MiniMax-M2.7)
18
- # AI_MODEL=MiniMax-M2.7
12
+ # AI — Vercel AI Gateway (default: Google Gemini 2.0 Flash — free tier)
13
+ # Create gateway at: https://vercel.com > AI > Gateways
14
+ AI_GATEWAY_URL=https://gateway.ai.vercel.app/v1/your-team-id/your-gateway-id
15
+ AI_API_KEY=
16
+ # Optional: override model (default: google/gemini-2.0-flash)
17
+ # To use MiniMax: AI_MODEL=minimax/minimax-m2.7
18
+ # AI_MODEL=google/gemini-2.0-flash
19
19
 
20
20
  # =============================================================================
21
21
  # OPTIONAL
@@ -1,8 +1,8 @@
1
1
  import { createOpenAI } from "@ai-sdk/openai";
2
2
 
3
3
  const provider = createOpenAI({
4
- baseURL: process.env.AI_BASE_URL ?? "https://api.minimax.io/v1",
4
+ baseURL: process.env.AI_GATEWAY_URL ?? "https://gateway.ai.vercel.app/v1",
5
5
  apiKey: process.env.AI_API_KEY ?? "",
6
6
  });
7
7
 
8
- export const aiModel = provider(process.env.AI_MODEL ?? "MiniMax-M2.7");
8
+ export const aiModel = provider(process.env.AI_MODEL ?? "google/gemini-2.0-flash");
@@ -1,88 +0,0 @@
1
- import { describe, it, expect, vi, beforeEach } from "vitest";
2
-
3
- const mockSynthesizeSpeech = vi.fn();
4
-
5
- vi.mock("@/shared/lib/minimax-media", () => ({
6
- synthesizeSpeech: (...args: unknown[]) => mockSynthesizeSpeech(...args),
7
- }));
8
-
9
- describe("tts route", () => {
10
- beforeEach(() => {
11
- vi.clearAllMocks();
12
- });
13
-
14
- it("exports runtime as edge and POST handler", async () => {
15
- // Arrange + Act
16
- const mod = await import("../api/route");
17
-
18
- // Assert
19
- expect(mod.runtime).toBe("edge");
20
- expect(typeof mod.POST).toBe("function");
21
- });
22
-
23
- it("returns 400 when text is missing", async () => {
24
- // Arrange
25
- const mod = await import("../api/route");
26
- const req = new Request("http://localhost/api/tts", {
27
- method: "POST",
28
- body: JSON.stringify({}),
29
- });
30
-
31
- // Act
32
- const response = await mod.POST(req);
33
-
34
- // Assert
35
- expect(response.status).toBe(400);
36
- const body = await response.json() as { error: string };
37
- expect(body.error).toBe("text is required");
38
- });
39
-
40
- it("returns 400 when text is not a string", async () => {
41
- // Arrange
42
- const mod = await import("../api/route");
43
- const req = new Request("http://localhost/api/tts", {
44
- method: "POST",
45
- body: JSON.stringify({ text: 123 }),
46
- });
47
-
48
- // Act
49
- const response = await mod.POST(req);
50
-
51
- // Assert
52
- expect(response.status).toBe(400);
53
- });
54
-
55
- it("returns audioFile on success", async () => {
56
- // Arrange
57
- mockSynthesizeSpeech.mockResolvedValue("base64audiodata");
58
- const mod = await import("../api/route");
59
- const req = new Request("http://localhost/api/tts", {
60
- method: "POST",
61
- body: JSON.stringify({ text: "Hello world" }),
62
- });
63
-
64
- // Act
65
- const response = await mod.POST(req);
66
-
67
- // Assert
68
- expect(response.status).toBe(200);
69
- const body = await response.json() as { audioFile: string };
70
- expect(body.audioFile).toBe("base64audiodata");
71
- });
72
-
73
- it("passes voiceId to synthesizeSpeech when provided", async () => {
74
- // Arrange
75
- mockSynthesizeSpeech.mockResolvedValue("audio");
76
- const mod = await import("../api/route");
77
- const req = new Request("http://localhost/api/tts", {
78
- method: "POST",
79
- body: JSON.stringify({ text: "Hello", voiceId: "custom-voice" }),
80
- });
81
-
82
- // Act
83
- await mod.POST(req);
84
-
85
- // Assert
86
- expect(mockSynthesizeSpeech).toHaveBeenCalledWith("Hello", "custom-voice");
87
- });
88
- });
@@ -1,83 +0,0 @@
1
- import { render, screen, waitFor } from "@testing-library/react";
2
- import userEvent from "@testing-library/user-event";
3
- import { describe, it, expect, vi, beforeEach } from "vitest";
4
-
5
- import { TtsPlayer } from "../components/tts-player";
6
-
7
- describe("TtsPlayer", () => {
8
- beforeEach(() => {
9
- vi.restoreAllMocks();
10
- });
11
-
12
- it("renders text input", () => {
13
- // Arrange + Act
14
- render(<TtsPlayer />);
15
-
16
- // Assert
17
- expect(screen.getByPlaceholderText(/enter text to speak/i)).toBeInTheDocument();
18
- });
19
-
20
- it("renders speak button", () => {
21
- // Arrange + Act
22
- render(<TtsPlayer />);
23
-
24
- // Assert
25
- expect(screen.getByRole("button", { name: /speak/i })).toBeInTheDocument();
26
- });
27
-
28
- it("button is disabled when input is empty", () => {
29
- // Arrange + Act
30
- render(<TtsPlayer />);
31
-
32
- // Assert
33
- expect(screen.getByRole("button", { name: /speak/i })).toBeDisabled();
34
- });
35
-
36
- it("button is enabled when text has content", async () => {
37
- // Arrange
38
- const user = userEvent.setup();
39
- render(<TtsPlayer />);
40
-
41
- // Act
42
- await user.type(screen.getByPlaceholderText(/enter text to speak/i), "Hello world");
43
-
44
- // Assert
45
- expect(screen.getByRole("button", { name: /speak/i })).not.toBeDisabled();
46
- });
47
-
48
- it("shows audio player after successful synthesis", async () => {
49
- // Arrange
50
- vi.spyOn(global, "fetch").mockResolvedValue(
51
- new Response(JSON.stringify({ audioFile: "data:audio/mp3;base64,abc" }), { status: 200 })
52
- );
53
- const user = userEvent.setup();
54
- render(<TtsPlayer />);
55
-
56
- // Act
57
- await user.type(screen.getByPlaceholderText(/enter text to speak/i), "Say something");
58
- await user.click(screen.getByRole("button", { name: /speak/i }));
59
-
60
- // Assert
61
- await waitFor(() => {
62
- expect(screen.getByTestId("tts-player").querySelector("audio")).toBeInTheDocument();
63
- });
64
- });
65
-
66
- it("shows error message when synthesis fails", async () => {
67
- // Arrange
68
- vi.spyOn(global, "fetch").mockResolvedValue(
69
- new Response(null, { status: 500 })
70
- );
71
- const user = userEvent.setup();
72
- render(<TtsPlayer />);
73
-
74
- // Act
75
- await user.type(screen.getByPlaceholderText(/enter text to speak/i), "Say something");
76
- await user.click(screen.getByRole("button", { name: /speak/i }));
77
-
78
- // Assert
79
- await waitFor(() => {
80
- expect(screen.getByText(/failed to synthesize speech/i)).toBeInTheDocument();
81
- });
82
- });
83
- });
@@ -1,14 +0,0 @@
1
- import { synthesizeSpeech } from "@/shared/lib/minimax-media";
2
-
3
- export const runtime = "edge";
4
-
5
- export async function POST(req: Request) {
6
- const { text, voiceId } = (await req.json()) as { text: string; voiceId?: string };
7
-
8
- if (!text || typeof text !== "string") {
9
- return Response.json({ error: "text is required" }, { status: 400 });
10
- }
11
-
12
- const audioFile = await synthesizeSpeech(text, voiceId);
13
- return Response.json({ audioFile });
14
- }
@@ -1,59 +0,0 @@
1
- "use client";
2
-
3
- import { useState } from "react";
4
-
5
- export function TtsPlayer() {
6
- const [text, setText] = useState("");
7
- const [audioUrl, setAudioUrl] = useState<string | null>(null);
8
- const [isLoading, setIsLoading] = useState(false);
9
- const [error, setError] = useState<string | null>(null);
10
-
11
- const handleSubmit = async (e: React.SyntheticEvent) => {
12
- e.preventDefault();
13
- setIsLoading(true);
14
- setError(null);
15
-
16
- const response = await fetch("/features/tts/api", {
17
- method: "POST",
18
- headers: { "Content-Type": "application/json" },
19
- body: JSON.stringify({ text }),
20
- });
21
-
22
- if (!response.ok) {
23
- setError("Failed to synthesize speech");
24
- setIsLoading(false);
25
- return;
26
- }
27
-
28
- const data = (await response.json()) as { audioFile: string };
29
- setAudioUrl(data.audioFile);
30
- setIsLoading(false);
31
- };
32
-
33
- return (
34
- <div className="space-y-4" data-testid="tts-player">
35
- <form onSubmit={(e) => { void handleSubmit(e); }} className="flex gap-2">
36
- <textarea
37
- value={text}
38
- onChange={(e) => { setText(e.target.value); }}
39
- placeholder="Enter text to speak..."
40
- className="flex-1 rounded border px-3 py-2"
41
- rows={3}
42
- />
43
- <button
44
- type="submit"
45
- disabled={isLoading || !text.trim()}
46
- className="rounded bg-primary px-4 py-2 text-primary-foreground disabled:opacity-50"
47
- >
48
- {isLoading ? "Synthesizing..." : "Speak"}
49
- </button>
50
- </form>
51
- {error && <p className="text-sm text-red-600">{error}</p>}
52
- {audioUrl && (
53
- <audio controls src={audioUrl} className="w-full">
54
- <track kind="captions" />
55
- </audio>
56
- )}
57
- </div>
58
- );
59
- }
@@ -1,88 +0,0 @@
1
- import { describe, it, expect, vi, beforeEach } from "vitest";
2
-
3
- const mockGenerateVideo = vi.fn();
4
-
5
- vi.mock("@/shared/lib/minimax-media", () => ({
6
- generateVideo: (...args: unknown[]) => mockGenerateVideo(...args),
7
- }));
8
-
9
- describe("video route", () => {
10
- beforeEach(() => {
11
- vi.clearAllMocks();
12
- });
13
-
14
- it("exports runtime as edge and POST handler", async () => {
15
- // Arrange + Act
16
- const mod = await import("../api/route");
17
-
18
- // Assert
19
- expect(mod.runtime).toBe("edge");
20
- expect(typeof mod.POST).toBe("function");
21
- });
22
-
23
- it("returns 400 when prompt is missing", async () => {
24
- // Arrange
25
- const mod = await import("../api/route");
26
- const req = new Request("http://localhost/api/video", {
27
- method: "POST",
28
- body: JSON.stringify({}),
29
- });
30
-
31
- // Act
32
- const response = await mod.POST(req);
33
-
34
- // Assert
35
- expect(response.status).toBe(400);
36
- const body = await response.json() as { error: string };
37
- expect(body.error).toBe("prompt is required");
38
- });
39
-
40
- it("returns 400 when prompt is not a string", async () => {
41
- // Arrange
42
- const mod = await import("../api/route");
43
- const req = new Request("http://localhost/api/video", {
44
- method: "POST",
45
- body: JSON.stringify({ prompt: 42 }),
46
- });
47
-
48
- // Act
49
- const response = await mod.POST(req);
50
-
51
- // Assert
52
- expect(response.status).toBe(400);
53
- });
54
-
55
- it("returns taskId on success", async () => {
56
- // Arrange
57
- mockGenerateVideo.mockResolvedValue("task-123");
58
- const mod = await import("../api/route");
59
- const req = new Request("http://localhost/api/video", {
60
- method: "POST",
61
- body: JSON.stringify({ prompt: "A sunset over the ocean" }),
62
- });
63
-
64
- // Act
65
- const response = await mod.POST(req);
66
-
67
- // Assert
68
- expect(response.status).toBe(200);
69
- const body = await response.json() as { taskId: string };
70
- expect(body.taskId).toBe("task-123");
71
- });
72
-
73
- it("passes prompt to generateVideo", async () => {
74
- // Arrange
75
- mockGenerateVideo.mockResolvedValue("task-abc");
76
- const mod = await import("../api/route");
77
- const req = new Request("http://localhost/api/video", {
78
- method: "POST",
79
- body: JSON.stringify({ prompt: "Dancing robots" }),
80
- });
81
-
82
- // Act
83
- await mod.POST(req);
84
-
85
- // Assert
86
- expect(mockGenerateVideo).toHaveBeenCalledWith("Dancing robots");
87
- });
88
- });
@@ -1,83 +0,0 @@
1
- import { render, screen, waitFor } from "@testing-library/react";
2
- import userEvent from "@testing-library/user-event";
3
- import { describe, it, expect, vi, beforeEach } from "vitest";
4
-
5
- import { VideoGenerator } from "../components/video-generator";
6
-
7
- describe("VideoGenerator", () => {
8
- beforeEach(() => {
9
- vi.restoreAllMocks();
10
- });
11
-
12
- it("renders prompt input", () => {
13
- // Arrange + Act
14
- render(<VideoGenerator />);
15
-
16
- // Assert
17
- expect(screen.getByPlaceholderText(/describe a video/i)).toBeInTheDocument();
18
- });
19
-
20
- it("renders generate button", () => {
21
- // Arrange + Act
22
- render(<VideoGenerator />);
23
-
24
- // Assert
25
- expect(screen.getByRole("button", { name: /generate/i })).toBeInTheDocument();
26
- });
27
-
28
- it("button is disabled when input is empty", () => {
29
- // Arrange + Act
30
- render(<VideoGenerator />);
31
-
32
- // Assert
33
- expect(screen.getByRole("button", { name: /generate/i })).toBeDisabled();
34
- });
35
-
36
- it("button is enabled when prompt has content", async () => {
37
- // Arrange
38
- const user = userEvent.setup();
39
- render(<VideoGenerator />);
40
-
41
- // Act
42
- await user.type(screen.getByPlaceholderText(/describe a video/i), "A sunset");
43
-
44
- // Assert
45
- expect(screen.getByRole("button", { name: /generate/i })).not.toBeDisabled();
46
- });
47
-
48
- it("shows task ID after successful generation", async () => {
49
- // Arrange
50
- vi.spyOn(global, "fetch").mockResolvedValue(
51
- new Response(JSON.stringify({ taskId: "task-xyz" }), { status: 200 })
52
- );
53
- const user = userEvent.setup();
54
- render(<VideoGenerator />);
55
-
56
- // Act
57
- await user.type(screen.getByPlaceholderText(/describe a video/i), "A dancing robot");
58
- await user.click(screen.getByRole("button", { name: /generate/i }));
59
-
60
- // Assert
61
- await waitFor(() => {
62
- expect(screen.getByText(/task id: task-xyz/i)).toBeInTheDocument();
63
- });
64
- });
65
-
66
- it("shows error message when fetch fails", async () => {
67
- // Arrange
68
- vi.spyOn(global, "fetch").mockResolvedValue(
69
- new Response(null, { status: 500 })
70
- );
71
- const user = userEvent.setup();
72
- render(<VideoGenerator />);
73
-
74
- // Act
75
- await user.type(screen.getByPlaceholderText(/describe a video/i), "Something");
76
- await user.click(screen.getByRole("button", { name: /generate/i }));
77
-
78
- // Assert
79
- await waitFor(() => {
80
- expect(screen.getByText(/failed to generate video/i)).toBeInTheDocument();
81
- });
82
- });
83
- });
@@ -1,14 +0,0 @@
1
- import { generateVideo } from "@/shared/lib/minimax-media";
2
-
3
- export const runtime = "edge";
4
-
5
- export async function POST(req: Request) {
6
- const { prompt } = (await req.json()) as { prompt: string };
7
-
8
- if (!prompt || typeof prompt !== "string") {
9
- return Response.json({ error: "prompt is required" }, { status: 400 });
10
- }
11
-
12
- const taskId = await generateVideo(prompt);
13
- return Response.json({ taskId });
14
- }
@@ -1,56 +0,0 @@
1
- "use client";
2
-
3
- import { useState } from "react";
4
-
5
- export function VideoGenerator() {
6
- const [prompt, setPrompt] = useState("");
7
- const [taskId, setTaskId] = useState<string | null>(null);
8
- const [isLoading, setIsLoading] = useState(false);
9
- const [error, setError] = useState<string | null>(null);
10
-
11
- const handleSubmit = async (e: React.SyntheticEvent) => {
12
- e.preventDefault();
13
- setIsLoading(true);
14
- setError(null);
15
-
16
- const response = await fetch("/features/video/api", {
17
- method: "POST",
18
- headers: { "Content-Type": "application/json" },
19
- body: JSON.stringify({ prompt }),
20
- });
21
-
22
- if (!response.ok) {
23
- setError("Failed to generate video");
24
- setIsLoading(false);
25
- return;
26
- }
27
-
28
- const data = (await response.json()) as { taskId: string };
29
- setTaskId(data.taskId);
30
- setIsLoading(false);
31
- };
32
-
33
- return (
34
- <div className="space-y-4" data-testid="video-generator">
35
- <form onSubmit={(e) => { void handleSubmit(e); }} className="flex gap-2">
36
- <input
37
- value={prompt}
38
- onChange={(e) => { setPrompt(e.target.value); }}
39
- placeholder="Describe a video..."
40
- className="flex-1 rounded border px-3 py-2"
41
- />
42
- <button
43
- type="submit"
44
- disabled={isLoading || !prompt.trim()}
45
- className="rounded bg-primary px-4 py-2 text-primary-foreground disabled:opacity-50"
46
- >
47
- {isLoading ? "Generating..." : "Generate"}
48
- </button>
49
- </form>
50
- {error && <p className="text-sm text-red-600">{error}</p>}
51
- {taskId && (
52
- <p className="text-sm text-muted-foreground">Task ID: {taskId}</p>
53
- )}
54
- </div>
55
- );
56
- }
@@ -1,58 +0,0 @@
1
- import { describe, it, expect, vi, beforeEach } from "vitest";
2
-
3
- import { generateVideo, synthesizeSpeech } from "../lib/minimax-media";
4
-
5
- const mockFetch = vi.fn();
6
- global.fetch = mockFetch;
7
-
8
- describe("generateVideo", () => {
9
- beforeEach(() => {
10
- vi.clearAllMocks();
11
- });
12
-
13
- it("returns task_id when API responds successfully", async () => {
14
- // Arrange
15
- mockFetch.mockResolvedValue({
16
- ok: true,
17
- json: () => Promise.resolve({ task_id: "abc123" }),
18
- });
19
-
20
- // Act
21
- const taskId = await generateVideo("A cat on the moon");
22
-
23
- // Assert
24
- expect(taskId).toBe("abc123");
25
- });
26
-
27
- it("throws when API returns non-ok response", async () => {
28
- // Arrange
29
- mockFetch.mockResolvedValue({ ok: false, statusText: "Bad Request" });
30
-
31
- // Act + Assert
32
- await expect(generateVideo("test")).rejects.toThrow("Video generation failed");
33
- });
34
- });
35
-
36
- describe("synthesizeSpeech", () => {
37
- it("returns audio_file URL when API responds successfully", async () => {
38
- // Arrange
39
- mockFetch.mockResolvedValue({
40
- ok: true,
41
- json: () => Promise.resolve({ audio_file: "https://example.com/audio.mp3" }),
42
- });
43
-
44
- // Act
45
- const url = await synthesizeSpeech("Hello world");
46
-
47
- // Assert
48
- expect(url).toBe("https://example.com/audio.mp3");
49
- });
50
-
51
- it("throws when API returns non-ok response", async () => {
52
- // Arrange
53
- mockFetch.mockResolvedValue({ ok: false, statusText: "Server Error" });
54
-
55
- // Act + Assert
56
- await expect(synthesizeSpeech("test")).rejects.toThrow("TTS failed");
57
- });
58
- });
@@ -1,63 +0,0 @@
1
- const MINIMAX_API_BASE = "https://api.minimaxi.chat/v1";
2
-
3
- interface VideoGenerationRequest {
4
- model: string;
5
- prompt: string;
6
- }
7
-
8
- interface VideoGenerationResponse {
9
- task_id: string;
10
- }
11
-
12
- interface TtsRequest {
13
- model: string;
14
- text: string;
15
- voice_id: string;
16
- }
17
-
18
- interface TtsResponse {
19
- audio_file: string;
20
- }
21
-
22
- export async function generateVideo(prompt: string): Promise<string> {
23
- const response = await fetch(`${MINIMAX_API_BASE}/video_generation`, {
24
- method: "POST",
25
- headers: {
26
- Authorization: `Bearer ${process.env.MINIMAX_API_KEY ?? ""}`,
27
- "Content-Type": "application/json",
28
- },
29
- body: JSON.stringify({
30
- model: "video-01",
31
- prompt,
32
- } satisfies VideoGenerationRequest),
33
- });
34
-
35
- if (!response.ok) {
36
- throw new Error(`Video generation failed: ${response.statusText}`);
37
- }
38
-
39
- const data = (await response.json()) as VideoGenerationResponse;
40
- return data.task_id;
41
- }
42
-
43
- export async function synthesizeSpeech(text: string, voiceId = "female-shaonv"): Promise<string> {
44
- const response = await fetch(`${MINIMAX_API_BASE}/t2a_v2`, {
45
- method: "POST",
46
- headers: {
47
- Authorization: `Bearer ${process.env.MINIMAX_API_KEY ?? ""}`,
48
- "Content-Type": "application/json",
49
- },
50
- body: JSON.stringify({
51
- model: "speech-02-hd",
52
- text,
53
- voice_id: voiceId,
54
- } satisfies TtsRequest),
55
- });
56
-
57
- if (!response.ok) {
58
- throw new Error(`TTS failed: ${response.statusText}`);
59
- }
60
-
61
- const data = (await response.json()) as TtsResponse;
62
- return data.audio_file;
63
- }