@tanstack/cta-framework-react-cra 0.43.1 → 0.44.0

This diff shows the changes between package versions as published to their respective public registries. It is provided for informational purposes only.
Files changed (41)
  1. package/add-ons/apollo-client/README.md +150 -0
  2. package/add-ons/apollo-client/assets/src/routes/demo.apollo-client.tsx +75 -0
  3. package/add-ons/apollo-client/info.json +19 -0
  4. package/add-ons/apollo-client/package.json +8 -0
  5. package/add-ons/apollo-client/small-logo.svg +11 -0
  6. package/add-ons/convex/package.json +2 -2
  7. package/add-ons/db/assets/src/hooks/demo.useChat.ts +1 -1
  8. package/add-ons/db/assets/src/routes/demo/db-chat-api.ts +4 -1
  9. package/add-ons/db/package.json +1 -1
  10. package/add-ons/mcp/package.json +1 -1
  11. package/add-ons/neon/package.json +1 -1
  12. package/add-ons/sentry/assets/instrument.server.mjs +16 -9
  13. package/add-ons/sentry/assets/src/routes/demo/sentry.testing.tsx +42 -2
  14. package/add-ons/shadcn/package.json +1 -1
  15. package/add-ons/start/assets/src/router.tsx.ejs +34 -10
  16. package/add-ons/start/package.json +2 -2
  17. package/add-ons/store/package.json +3 -3
  18. package/add-ons/storybook/package.json +2 -2
  19. package/examples/tanchat/assets/src/hooks/useAudioRecorder.ts +85 -0
  20. package/examples/tanchat/assets/src/hooks/useTTS.ts +78 -0
  21. package/examples/tanchat/assets/src/lib/model-selection.ts +78 -0
  22. package/examples/tanchat/assets/src/lib/vendor-capabilities.ts +55 -0
  23. package/examples/tanchat/assets/src/routes/demo/api.available-providers.ts +35 -0
  24. package/examples/tanchat/assets/src/routes/demo/api.image.ts +74 -0
  25. package/examples/tanchat/assets/src/routes/demo/api.structured.ts +168 -0
  26. package/examples/tanchat/assets/src/routes/demo/api.tanchat.ts +89 -0
  27. package/examples/tanchat/assets/src/routes/demo/api.transcription.ts +89 -0
  28. package/examples/tanchat/assets/src/routes/demo/api.tts.ts +81 -0
  29. package/examples/tanchat/assets/src/routes/demo/image.tsx +257 -0
  30. package/examples/tanchat/assets/src/routes/demo/structured.tsx +460 -0
  31. package/examples/tanchat/assets/src/routes/demo/tanchat.css +14 -7
  32. package/examples/tanchat/assets/src/routes/demo/tanchat.tsx +301 -81
  33. package/examples/tanchat/info.json +10 -7
  34. package/examples/tanchat/package.json +8 -5
  35. package/package.json +2 -2
  36. package/project/base/src/routes/__root.tsx.ejs +14 -6
  37. package/tests/react-cra.test.ts +14 -0
  38. package/tests/snapshots/react-cra/cr-ts-start-apollo-client-npm.json +31 -0
  39. package/tests/snapshots/react-cra/cr-ts-start-npm.json +2 -2
  40. package/tests/snapshots/react-cra/cr-ts-start-tanstack-query-npm.json +2 -2
  41. package/examples/tanchat/assets/src/routes/demo/api.tanchat.ts.ejs +0 -72
package/examples/tanchat/assets/src/hooks/useTTS.ts
@@ -0,0 +1,78 @@
+import { useCallback, useRef, useState } from 'react'
+
+/**
+ * Hook for text-to-speech playback via the TTS API.
+ */
+export function useTTS() {
+  const [playingId, setPlayingId] = useState<string | null>(null)
+  const audioRef = useRef<HTMLAudioElement | null>(null)
+
+  const speak = useCallback(async (text: string, id: string) => {
+    // Stop any currently playing audio
+    if (audioRef.current) {
+      audioRef.current.pause()
+      audioRef.current = null
+    }
+
+    setPlayingId(id)
+
+    try {
+      const response = await fetch('/demo/api/tts', {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({
+          text,
+          voice: 'nova',
+          model: 'tts-1',
+          format: 'mp3',
+        }),
+      })
+
+      if (!response.ok) {
+        const errorData = await response.json()
+        throw new Error(errorData.error || 'TTS failed')
+      }
+
+      const result = await response.json()
+
+      // Convert base64 to audio and play
+      const audioData = atob(result.audio)
+      const bytes = new Uint8Array(audioData.length)
+      for (let i = 0; i < audioData.length; i++) {
+        bytes[i] = audioData.charCodeAt(i)
+      }
+      const blob = new Blob([bytes], { type: result.contentType })
+      const url = URL.createObjectURL(blob)
+
+      const audio = new Audio(url)
+      audioRef.current = audio
+
+      audio.onended = () => {
+        URL.revokeObjectURL(url)
+        setPlayingId(null)
+        audioRef.current = null
+      }
+
+      audio.onerror = () => {
+        URL.revokeObjectURL(url)
+        setPlayingId(null)
+        audioRef.current = null
+      }
+
+      await audio.play()
+    } catch (error) {
+      console.error('TTS error:', error)
+      setPlayingId(null)
+    }
+  }, [])
+
+  const stop = useCallback(() => {
+    if (audioRef.current) {
+      audioRef.current.pause()
+      audioRef.current = null
+    }
+    setPlayingId(null)
+  }, [])
+
+  return { playingId, speak, stop }
+}
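For orientation, a minimal sketch of how a component might consume this hook. The handler name and message shape are hypothetical; the import path assumes the file lands at `src/hooks/useTTS.ts` as listed above.

```ts
import { useTTS } from '@/hooks/useTTS'

// Hypothetical click handler: play a chat message aloud, or stop playback
// if that message is already playing.
function handleListenClick(
  tts: ReturnType<typeof useTTS>,
  message: { id: string; content: string },
) {
  if (tts.playingId === message.id) {
    tts.stop()
  } else {
    void tts.speak(message.content, message.id)
  }
}
```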
package/examples/tanchat/assets/src/lib/model-selection.ts
@@ -0,0 +1,78 @@
+export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama'
+
+export interface ModelOption {
+  provider: Provider
+  model: string
+  label: string
+}
+
+export const MODEL_OPTIONS: Array<ModelOption> = [
+  // OpenAI models
+  { provider: 'openai', model: 'gpt-4o', label: 'OpenAI - GPT-4o' },
+  { provider: 'openai', model: 'gpt-4o-mini', label: 'OpenAI - GPT-4o Mini' },
+
+  // Anthropic models
+  {
+    provider: 'anthropic',
+    model: 'claude-haiku-4-5',
+    label: 'Anthropic - Claude Haiku 4.5',
+  },
+  {
+    provider: 'anthropic',
+    model: 'claude-sonnet-4-5-20250929',
+    label: 'Anthropic - Claude Sonnet 4.5',
+  },
+
+  // Gemini models
+  {
+    provider: 'gemini',
+    model: 'gemini-2.0-flash-exp',
+    label: 'Gemini - 2.0 Flash',
+  },
+
+  // Ollama models
+  { provider: 'ollama', model: 'mistral:7b', label: 'Ollama - Mistral 7B' },
+]
+
+const STORAGE_KEY = 'tanstack-ai-model-preference'
+
+export function getStoredModelPreference(): ModelOption | null {
+  if (typeof window === 'undefined') return null
+  try {
+    const stored = localStorage.getItem(STORAGE_KEY)
+    if (stored) {
+      const parsed = JSON.parse(stored)
+      // Validate that the stored option still exists in MODEL_OPTIONS
+      const found = MODEL_OPTIONS.find(
+        (o) => o.provider === parsed.provider && o.model === parsed.model,
+      )
+      if (found) return found
+    }
+  } catch {
+    // Ignore storage errors
+  }
+  return null
+}
+
+export function setStoredModelPreference(option: ModelOption): void {
+  if (typeof window === 'undefined') return
+  try {
+    localStorage.setItem(STORAGE_KEY, JSON.stringify(option))
+  } catch {
+    // Ignore storage errors
+  }
+}
+
+export function getDefaultModelOption(): ModelOption {
+  return getStoredModelPreference() || MODEL_OPTIONS[0]
+}
+
+export function getModelOptionsForProvider(provider: Provider): ModelOption[] {
+  return MODEL_OPTIONS.filter((o) => o.provider === provider)
+}
+
+export function getAvailableModelOptions(
+  availableProviders: Provider[],
+): ModelOption[] {
+  return MODEL_OPTIONS.filter((o) => availableProviders.includes(o.provider))
+}
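A minimal sketch of the intended round trip, using only the functions defined above (the `onModelChange` handler and the provider list are hypothetical):

```ts
import {
  getAvailableModelOptions,
  getDefaultModelOption,
  setStoredModelPreference,
} from '@/lib/model-selection'

// Start from the persisted preference, or the first option if none is stored.
let selected = getDefaultModelOption()

// Hypothetical <select> handler: limit choices to configured providers,
// then persist whatever the user picks to localStorage.
const choices = getAvailableModelOptions(['openai', 'ollama'])
function onModelChange(index: number) {
  selected = choices[index]
  setStoredModelPreference(selected)
}
```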
package/examples/tanchat/assets/src/lib/vendor-capabilities.ts
@@ -0,0 +1,55 @@
+import type { Provider } from './model-selection'
+
+export interface VendorCapabilities {
+  chat: boolean
+  structured: boolean
+  image: boolean
+  transcription: boolean
+  tts: boolean
+}
+
+export const VENDOR_CAPABILITIES: Record<Provider, VendorCapabilities> = {
+  openai: {
+    chat: true,
+    structured: true,
+    image: true,
+    transcription: true,
+    tts: true,
+  },
+  anthropic: {
+    chat: true,
+    structured: true,
+    image: false,
+    transcription: false,
+    tts: false,
+  },
+  gemini: {
+    chat: true,
+    structured: true,
+    image: false,
+    transcription: false,
+    tts: false,
+  },
+  ollama: {
+    chat: true,
+    structured: true,
+    image: false,
+    transcription: false,
+    tts: false,
+  },
+}
+
+export function hasCapability(
+  provider: Provider,
+  capability: keyof VendorCapabilities,
+): boolean {
+  return VENDOR_CAPABILITIES[provider]?.[capability] ?? false
+}
+
+export function getProvidersWithCapability(
+  capability: keyof VendorCapabilities,
+): Provider[] {
+  return (Object.keys(VENDOR_CAPABILITIES) as Provider[]).filter(
+    (provider) => VENDOR_CAPABILITIES[provider][capability],
+  )
+}
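Given the capability table above, usage reduces to simple lookups, e.g.:

```ts
import {
  getProvidersWithCapability,
  hasCapability,
} from '@/lib/vendor-capabilities'

// Only OpenAI declares tts/image/transcription in the table above.
const canSpeak = hasCapability('anthropic', 'tts') // false
const imageProviders = getProvidersWithCapability('image') // ['openai']
```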
package/examples/tanchat/assets/src/routes/demo/api.available-providers.ts
@@ -0,0 +1,35 @@
+import { createFileRoute } from '@tanstack/react-router'
+import type { Provider } from '@/lib/model-selection'
+
+export const Route = createFileRoute('/demo/api/available-providers')({
+  server: {
+    handlers: {
+      GET: async () => {
+        const available: Provider[] = []
+
+        if (process.env.OPENAI_API_KEY) {
+          available.push('openai')
+        }
+        if (process.env.ANTHROPIC_API_KEY) {
+          available.push('anthropic')
+        }
+        if (process.env.GEMINI_API_KEY) {
+          available.push('gemini')
+        }
+        // Ollama is always available (local, no key needed)
+        available.push('ollama')
+
+        return new Response(
+          JSON.stringify({
+            providers: available,
+            hasOpenAI: available.includes('openai'),
+          }),
+          {
+            status: 200,
+            headers: { 'Content-Type': 'application/json' },
+          },
+        )
+      },
+    },
+  },
+})
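A hedged sketch of the client side of this endpoint, combining it with `getAvailableModelOptions` from the new lib file (the `loadModelOptions` helper is hypothetical):

```ts
import type { Provider } from '@/lib/model-selection'
import { getAvailableModelOptions } from '@/lib/model-selection'

// Ask the server which providers have API keys configured, then narrow
// the model picker to options from those providers.
async function loadModelOptions() {
  const res = await fetch('/demo/api/available-providers')
  const { providers } = (await res.json()) as { providers: Array<Provider> }
  return getAvailableModelOptions(providers)
}
```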
package/examples/tanchat/assets/src/routes/demo/api.image.ts
@@ -0,0 +1,74 @@
+import { createFileRoute } from '@tanstack/react-router'
+import { generateImage, createImageOptions } from '@tanstack/ai'
+import { openaiImage } from '@tanstack/ai-openai'
+
+export const Route = createFileRoute('/demo/api/image')({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        const body = await request.json()
+        const { prompt, numberOfImages = 1, size = '1024x1024' } = body
+        const data = body.data || {}
+        const model: string = data.model || body.model || 'gpt-image-1'
+
+        if (!prompt || prompt.trim().length === 0) {
+          return new Response(
+            JSON.stringify({
+              error: 'Prompt is required',
+            }),
+            {
+              status: 400,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+
+        if (!process.env.OPENAI_API_KEY) {
+          return new Response(
+            JSON.stringify({
+              error: 'OPENAI_API_KEY is not configured',
+            }),
+            {
+              status: 500,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+
+        try {
+          const options = createImageOptions({
+            adapter: openaiImage((model || 'gpt-image-1') as any),
+          })
+
+          const result = await generateImage({
+            ...options,
+            prompt,
+            numberOfImages,
+            size,
+          })
+
+          return new Response(
+            JSON.stringify({
+              images: result.images,
+              model,
+            }),
+            {
+              status: 200,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        } catch (error: any) {
+          return new Response(
+            JSON.stringify({
+              error: error.message || 'An error occurred',
+            }),
+            {
+              status: 500,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+      },
+    },
+  },
+})
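A hypothetical client call matching this handler's request and response shape (the `createImage` helper is not part of the package):

```ts
// POST a prompt; the handler answers { images, model } or { error }.
async function createImage(prompt: string) {
  const res = await fetch('/demo/api/image', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt, numberOfImages: 1, size: '1024x1024' }),
  })
  const json = await res.json()
  if (!res.ok) throw new Error(json.error) // 'Prompt is required', missing key, etc.
  return json.images // image payloads from @tanstack/ai's generateImage result
}
```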
package/examples/tanchat/assets/src/routes/demo/api.structured.ts
@@ -0,0 +1,168 @@
+import { createFileRoute } from "@tanstack/react-router";
+import { chat, createChatOptions } from "@tanstack/ai";
+import { anthropicText } from "@tanstack/ai-anthropic";
+import { geminiText } from "@tanstack/ai-gemini";
+import { openaiText } from "@tanstack/ai-openai";
+import { ollamaText } from "@tanstack/ai-ollama";
+import { z } from "zod";
+
+import type { Provider } from "@/lib/model-selection";
+
+// Schema for structured recipe output
+const RecipeSchema = z.object({
+  name: z.string().describe("The name of the recipe"),
+  description: z.string().describe("A brief description of the dish"),
+  prepTime: z.string().describe('Preparation time (e.g., "15 minutes")'),
+  cookTime: z.string().describe('Cooking time (e.g., "30 minutes")'),
+  servings: z.number().describe("Number of servings"),
+  difficulty: z.enum(["easy", "medium", "hard"]).describe("Difficulty level"),
+  ingredients: z
+    .array(
+      z.object({
+        item: z.string().describe("Ingredient name"),
+        amount: z.string().describe('Amount needed (e.g., "2 cups")'),
+        notes: z.string().optional().describe("Optional preparation notes"),
+      })
+    )
+    .describe("List of ingredients"),
+  instructions: z
+    .array(z.string())
+    .describe("Step-by-step cooking instructions"),
+  tips: z.array(z.string()).optional().describe("Optional cooking tips"),
+  nutritionPerServing: z
+    .object({
+      calories: z.number().optional(),
+      protein: z.string().optional(),
+      carbs: z.string().optional(),
+      fat: z.string().optional(),
+    })
+    .optional()
+    .describe("Nutritional information per serving"),
+});
+
+export type Recipe = z.infer<typeof RecipeSchema>;
+
+export const Route = createFileRoute("/demo/api/structured")({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        const body = await request.json();
+        const { recipeName, mode = "structured" } = body;
+        const data = body.data || {};
+        const provider: Provider = data.provider || body.provider || "openai";
+        const model: string = data.model || body.model || "gpt-4o";
+
+        if (!recipeName || recipeName.trim().length === 0) {
+          return new Response(
+            JSON.stringify({
+              error: "Recipe name is required",
+            }),
+            {
+              status: 400,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+
+        try {
+          // Pre-define typed adapter configurations
+          const adapterConfig = {
+            anthropic: () =>
+              createChatOptions({
+                adapter: anthropicText(
+                  (model || "claude-sonnet-4-5-20250929") as any
+                ),
+              }),
+            gemini: () =>
+              createChatOptions({
+                adapter: geminiText((model || "gemini-2.0-flash-exp") as any),
+              }),
+            ollama: () =>
+              createChatOptions({
+                adapter: ollamaText((model || "mistral:7b") as any),
+              }),
+            openai: () =>
+              createChatOptions({
+                adapter: openaiText((model || "gpt-4o") as any),
+              }),
+          };
+
+          const options = adapterConfig[provider]();
+
+          if (mode === "structured") {
+            // Structured output mode - returns validated object
+            const result = await chat({
+              ...options,
+              messages: [
+                {
+                  role: "user",
+                  content: `Generate a complete recipe for: ${recipeName}. Include all ingredients with amounts, step-by-step instructions, prep/cook times, and difficulty level.`,
+                },
+              ],
+              outputSchema: RecipeSchema,
+            } as any);
+
+            return new Response(
+              JSON.stringify({
+                mode: "structured",
+                recipe: result,
+                provider,
+                model,
+              }),
+              {
+                status: 200,
+                headers: { "Content-Type": "application/json" },
+              }
+            );
+          } else {
+            // One-shot markdown mode - returns text
+            const markdown = await chat({
+              ...options,
+              stream: false,
+              messages: [
+                {
+                  role: "user",
+                  content: `Generate a complete recipe for: ${recipeName}.
+
+Format the recipe in beautiful markdown with:
+- A title with the recipe name
+- A brief description
+- Prep time, cook time, and servings
+- Ingredients list with amounts
+- Numbered step-by-step instructions
+- Optional tips section
+- Nutritional info if applicable
+
+Make it detailed and easy to follow.`,
+                },
+              ],
+            } as any);
+
+            return new Response(
+              JSON.stringify({
+                mode: "oneshot",
+                markdown,
+                provider,
+                model,
+              }),
+              {
+                status: 200,
+                headers: { "Content-Type": "application/json" },
+              }
+            );
+          }
+        } catch (error: any) {
+          return new Response(
+            JSON.stringify({
+              error: error.message || "An error occurred",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+      },
+    },
+  },
+});
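A hypothetical client call for the structured mode; sending `mode: 'oneshot'` instead returns `{ markdown }` rather than `{ recipe }`. The `fetchRecipe` helper is illustrative only:

```ts
import type { Recipe } from '@/routes/demo/api.structured'

// Request a schema-validated recipe object from the route above.
async function fetchRecipe(recipeName: string): Promise<Recipe> {
  const res = await fetch('/demo/api/structured', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      recipeName,
      mode: 'structured',
      data: { provider: 'openai', model: 'gpt-4o' },
    }),
  })
  const json = await res.json()
  if (!res.ok) throw new Error(json.error)
  return json.recipe as Recipe
}
```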
package/examples/tanchat/assets/src/routes/demo/api.tanchat.ts
@@ -0,0 +1,89 @@
+import { createFileRoute } from "@tanstack/react-router";
+import { chat, maxIterations, toStreamResponse } from "@tanstack/ai";
+import { anthropicText } from "@tanstack/ai-anthropic";
+import { openaiText } from "@tanstack/ai-openai";
+import { geminiText } from "@tanstack/ai-gemini";
+import { ollamaText } from "@tanstack/ai-ollama";
+
+import { getGuitars, recommendGuitarToolDef } from "@/lib/example.guitar-tools";
+import type { Provider } from "@/lib/model-selection";
+
+const SYSTEM_PROMPT = `You are a helpful assistant for a store that sells guitars.
+
+CRITICAL INSTRUCTIONS - YOU MUST FOLLOW THIS EXACT WORKFLOW:
+
+When a user asks for a guitar recommendation:
+1. FIRST: Use the getGuitars tool (no parameters needed)
+2. SECOND: Use the recommendGuitar tool with the ID of the guitar you want to recommend
+3. NEVER write a recommendation directly - ALWAYS use the recommendGuitar tool
+
+IMPORTANT:
+- The recommendGuitar tool will display the guitar in a special, appealing format
+- You MUST use recommendGuitar for ANY guitar recommendation
+- ONLY recommend guitars from our inventory (use getGuitars first)
+- The recommendGuitar tool has a buy button - this is how customers purchase
+- Do NOT describe the guitar yourself - let the recommendGuitar tool do it
+`;
+
+export const Route = createFileRoute("/demo/api/tanchat")({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        // Capture request signal before reading body (it may be aborted after body is consumed)
+        const requestSignal = request.signal;
+
+        // If request is already aborted, return early
+        if (requestSignal.aborted) {
+          return new Response(null, { status: 499 }); // 499 = Client Closed Request
+        }
+
+        const abortController = new AbortController();
+
+        try {
+          const body = await request.json();
+          const { messages } = body;
+          const data = body.data || {};
+          const provider: Provider = data.provider || "anthropic";
+          const model: string = data.model || "claude-haiku-4-5";
+
+          // Adapter factory pattern for multi-vendor support
+          const adapterConfig = {
+            anthropic: () =>
+              anthropicText((model || "claude-haiku-4-5") as any),
+            openai: () => openaiText((model || "gpt-4o") as any),
+            gemini: () => geminiText((model || "gemini-2.0-flash-exp") as any),
+            ollama: () => ollamaText((model || "mistral:7b") as any),
+          };
+
+          const adapter = adapterConfig[provider]();
+
+          const stream = chat({
+            adapter,
+            tools: [
+              getGuitars, // Server tool
+              recommendGuitarToolDef, // No server execute - client will handle
+            ],
+            systemPrompts: [SYSTEM_PROMPT],
+            agentLoopStrategy: maxIterations(5),
+            messages,
+            abortController,
+          });
+
+          return toStreamResponse(stream, { abortController });
+        } catch (error: any) {
+          // If request was aborted, return early (don't send error response)
+          if (error.name === "AbortError" || abortController.signal.aborted) {
+            return new Response(null, { status: 499 }); // 499 = Client Closed Request
+          }
+          return new Response(
+            JSON.stringify({ error: "Failed to process chat request" }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+      },
+    },
+  },
+});
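A speculative raw-fetch sketch of the wire contract (the shipped UI presumably uses @tanstack/ai's client helpers instead); aborting the controller mid-stream is what exercises the 499 path above:

```ts
// POST messages and read the streamed reply incrementally.
async function askTanchat(controller: AbortController) {
  const res = await fetch('/demo/api/tanchat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      messages: [{ role: 'user', content: 'Recommend me a guitar' }],
      data: { provider: 'anthropic', model: 'claude-haiku-4-5' },
    }),
    signal: controller.signal,
  })
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  for (let c = await reader.read(); !c.done; c = await reader.read()) {
    console.log(decoder.decode(c.value, { stream: true }))
  }
}
```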
package/examples/tanchat/assets/src/routes/demo/api.transcription.ts
@@ -0,0 +1,89 @@
+import { createFileRoute } from "@tanstack/react-router";
+import { generateTranscription } from "@tanstack/ai";
+import { openaiTranscription } from "@tanstack/ai-openai";
+
+export const Route = createFileRoute("/demo/api/transcription")({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        const formData = await request.formData();
+        const audioFile = formData.get("audio") as File | null;
+        const audioBase64 = formData.get("audioBase64") as string | null;
+        const model = (formData.get("model") as string) || "whisper-1";
+        const language = formData.get("language") as string | null;
+        const responseFormat = formData.get("responseFormat") as string | null;
+
+        if (!audioFile && !audioBase64) {
+          return new Response(
+            JSON.stringify({
+              error: "Audio file or base64 data is required",
+            }),
+            {
+              status: 400,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+
+        if (!process.env.OPENAI_API_KEY) {
+          return new Response(
+            JSON.stringify({
+              error: "OPENAI_API_KEY is not configured",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+
+        try {
+          const adapter = openaiTranscription(model as any);
+
+          // Prepare audio data
+          let audioData: string | File;
+          if (audioFile) {
+            audioData = audioFile;
+          } else if (audioBase64) {
+            audioData = audioBase64;
+          } else {
+            throw new Error("No audio data provided");
+          }
+
+          const result = await generateTranscription({
+            adapter,
+            audio: audioData,
+            language: language || undefined,
+            responseFormat: (responseFormat as any) || "verbose_json",
+          });
+
+          return new Response(
+            JSON.stringify({
+              id: result.id,
+              model: result.model,
+              text: result.text,
+              language: result.language,
+              duration: result.duration,
+              segments: result.segments,
+              words: result.words,
+            }),
+            {
+              status: 200,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        } catch (error: any) {
+          return new Response(
+            JSON.stringify({
+              error: error.message || "An error occurred",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+      },
+    },
+  },
+});
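A hypothetical client call uploading a recorded blob as multipart form data, matching the fields this handler reads (`transcribe` is illustrative, not from the package):

```ts
// Upload audio for transcription and return the recognized text.
async function transcribe(blob: Blob): Promise<string> {
  const form = new FormData()
  form.append('audio', new File([blob], 'recording.webm', { type: blob.type }))
  form.append('model', 'whisper-1')
  form.append('responseFormat', 'verbose_json')

  const res = await fetch('/demo/api/transcription', {
    method: 'POST',
    body: form, // the browser sets the multipart boundary automatically
  })
  const json = await res.json()
  if (!res.ok) throw new Error(json.error)
  return json.text
}
```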