@tanstack/cta-framework-react-cra 0.43.0 → 0.44.0
This diff shows the changes between publicly released versions of the package, as published to a supported registry, and is provided for informational purposes only.
- package/add-ons/apollo-client/README.md +150 -0
- package/add-ons/apollo-client/assets/src/routes/demo.apollo-client.tsx +75 -0
- package/add-ons/apollo-client/info.json +19 -0
- package/add-ons/apollo-client/package.json +8 -0
- package/add-ons/apollo-client/small-logo.svg +11 -0
- package/add-ons/convex/package.json +2 -2
- package/add-ons/db/assets/src/hooks/demo.useChat.ts +1 -1
- package/add-ons/db/assets/src/routes/demo/db-chat-api.ts +4 -1
- package/add-ons/db/package.json +1 -1
- package/add-ons/mcp/package.json +1 -1
- package/add-ons/neon/package.json +1 -1
- package/add-ons/prisma/package.json.ejs +1 -1
- package/add-ons/sentry/assets/instrument.server.mjs +16 -9
- package/add-ons/sentry/assets/src/routes/demo/sentry.testing.tsx +42 -2
- package/add-ons/shadcn/package.json +1 -1
- package/add-ons/start/assets/src/router.tsx.ejs +34 -10
- package/add-ons/start/package.json +2 -2
- package/add-ons/store/package.json +3 -3
- package/add-ons/storybook/package.json +2 -2
- package/dist/index.js +0 -3
- package/dist/types/index.d.ts +0 -2
- package/examples/tanchat/assets/src/hooks/useAudioRecorder.ts +85 -0
- package/examples/tanchat/assets/src/hooks/useTTS.ts +78 -0
- package/examples/tanchat/assets/src/lib/model-selection.ts +78 -0
- package/examples/tanchat/assets/src/lib/vendor-capabilities.ts +55 -0
- package/examples/tanchat/assets/src/routes/demo/api.available-providers.ts +35 -0
- package/examples/tanchat/assets/src/routes/demo/api.image.ts +74 -0
- package/examples/tanchat/assets/src/routes/demo/api.structured.ts +168 -0
- package/examples/tanchat/assets/src/routes/demo/api.tanchat.ts +89 -0
- package/examples/tanchat/assets/src/routes/demo/api.transcription.ts +89 -0
- package/examples/tanchat/assets/src/routes/demo/api.tts.ts +81 -0
- package/examples/tanchat/assets/src/routes/demo/image.tsx +257 -0
- package/examples/tanchat/assets/src/routes/demo/structured.tsx +460 -0
- package/examples/tanchat/assets/src/routes/demo/tanchat.css +14 -7
- package/examples/tanchat/assets/src/routes/demo/tanchat.tsx +301 -81
- package/examples/tanchat/info.json +10 -7
- package/examples/tanchat/package.json +8 -5
- package/package.json +2 -3
- package/project/base/src/routes/__root.tsx.ejs +14 -6
- package/src/index.ts +0 -5
- package/tests/react-cra.test.ts +14 -0
- package/tests/snapshots/react-cra/cr-ts-start-apollo-client-npm.json +31 -0
- package/tests/snapshots/react-cra/cr-ts-start-npm.json +2 -2
- package/tests/snapshots/react-cra/cr-ts-start-tanstack-query-npm.json +2 -2
- package/dist/checksum.js +0 -3
- package/dist/types/checksum.d.ts +0 -1
- package/examples/tanchat/assets/src/routes/demo/api.tanchat.ts.ejs +0 -72
- package/src/checksum.ts +0 -3
package/examples/tanchat/assets/src/routes/demo/api.transcription.ts

@@ -0,0 +1,89 @@
+import { createFileRoute } from "@tanstack/react-router";
+import { generateTranscription } from "@tanstack/ai";
+import { openaiTranscription } from "@tanstack/ai-openai";
+
+export const Route = createFileRoute("/demo/api/transcription")({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        const formData = await request.formData();
+        const audioFile = formData.get("audio") as File | null;
+        const audioBase64 = formData.get("audioBase64") as string | null;
+        const model = (formData.get("model") as string) || "whisper-1";
+        const language = formData.get("language") as string | null;
+        const responseFormat = formData.get("responseFormat") as string | null;
+
+        if (!audioFile && !audioBase64) {
+          return new Response(
+            JSON.stringify({
+              error: "Audio file or base64 data is required",
+            }),
+            {
+              status: 400,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+
+        if (!process.env.OPENAI_API_KEY) {
+          return new Response(
+            JSON.stringify({
+              error: "OPENAI_API_KEY is not configured",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+
+        try {
+          const adapter = openaiTranscription(model as any);
+
+          // Prepare audio data
+          let audioData: string | File;
+          if (audioFile) {
+            audioData = audioFile;
+          } else if (audioBase64) {
+            audioData = audioBase64;
+          } else {
+            throw new Error("No audio data provided");
+          }
+
+          const result = await generateTranscription({
+            adapter,
+            audio: audioData,
+            language: language || undefined,
+            responseFormat: (responseFormat as any) || "verbose_json",
+          });
+
+          return new Response(
+            JSON.stringify({
+              id: result.id,
+              model: result.model,
+              text: result.text,
+              language: result.language,
+              duration: result.duration,
+              segments: result.segments,
+              words: result.words,
+            }),
+            {
+              status: 200,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        } catch (error: any) {
+          return new Response(
+            JSON.stringify({
+              error: error.message || "An error occurred",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            }
+          );
+        }
+      },
+    },
+  },
+});
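
A minimal client-side sketch (not part of the diff) of how this transcription route might be called, assuming it is served at /demo/api/transcription as registered above. The form-data keys mirror the ones read by the POST handler; the `recording` Blob is a hypothetical input, e.g. from the new useAudioRecorder hook.

  // Hypothetical caller for the /demo/api/transcription endpoint (sketch).
  async function transcribe(recording: Blob): Promise<string> {
    const formData = new FormData()
    // Keys match those read by the handler: "audio", "model", "responseFormat".
    formData.append('audio', recording, 'recording.webm')
    formData.append('model', 'whisper-1')
    formData.append('responseFormat', 'verbose_json')

    const res = await fetch('/demo/api/transcription', {
      method: 'POST',
      body: formData,
    })
    const data = await res.json()
    if (!res.ok) throw new Error(data.error || 'Transcription failed')
    return data.text
  }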

package/examples/tanchat/assets/src/routes/demo/api.tts.ts

@@ -0,0 +1,81 @@
+import { createFileRoute } from '@tanstack/react-router'
+import { generateSpeech } from '@tanstack/ai'
+import { openaiSpeech } from '@tanstack/ai-openai'
+
+export const Route = createFileRoute('/demo/api/tts')({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        const body = await request.json()
+        const {
+          text,
+          voice = 'alloy',
+          model = 'tts-1',
+          format = 'mp3',
+          speed = 1.0,
+        } = body
+
+        if (!text || text.trim().length === 0) {
+          return new Response(
+            JSON.stringify({
+              error: 'Text is required',
+            }),
+            {
+              status: 400,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+
+        if (!process.env.OPENAI_API_KEY) {
+          return new Response(
+            JSON.stringify({
+              error: 'OPENAI_API_KEY is not configured',
+            }),
+            {
+              status: 500,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+
+        try {
+          const adapter = openaiSpeech(model)
+
+          const result = await generateSpeech({
+            adapter,
+            text,
+            voice,
+            format,
+            speed,
+          })
+
+          return new Response(
+            JSON.stringify({
+              id: result.id,
+              model: result.model,
+              audio: result.audio,
+              format: result.format,
+              contentType: result.contentType,
+              duration: result.duration,
+            }),
+            {
+              status: 200,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        } catch (error: any) {
+          return new Response(
+            JSON.stringify({
+              error: error.message || 'An error occurred',
+            }),
+            {
+              status: 500,
+              headers: { 'Content-Type': 'application/json' },
+            },
+          )
+        }
+      },
+    },
+  },
+})
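
A similar sketch (not part of the diff) of calling the new TTS route from the browser, assuming it is served at /demo/api/tts; the JSON body fields match the defaults destructured by the handler, and the returned `audio` field is assumed to be base64-encoded, as the JSON response shape suggests.

  // Hypothetical caller for the /demo/api/tts endpoint (sketch).
  async function speak(text: string): Promise<void> {
    const res = await fetch('/demo/api/tts', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      // Field names and defaults mirror the handler's destructuring.
      body: JSON.stringify({ text, voice: 'alloy', model: 'tts-1', format: 'mp3', speed: 1.0 }),
    })
    const data = await res.json()
    if (!res.ok) throw new Error(data.error || 'Speech generation failed')
    // Assumes data.audio is base64 audio and data.contentType is its MIME type.
    const audio = new Audio(`data:${data.contentType};base64,${data.audio}`)
    await audio.play()
  }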

package/examples/tanchat/assets/src/routes/demo/image.tsx

@@ -0,0 +1,257 @@
+import { useState, useEffect } from 'react'
+import { createFileRoute } from '@tanstack/react-router'
+import { ImageIcon, Loader2, Download } from 'lucide-react'
+
+const SIZES = ['1024x1024', '1536x1024', '1024x1536', 'auto']
+
+interface GeneratedImage {
+  url?: string
+  b64Json?: string
+  revisedPrompt?: string
+}
+
+function ImagePage() {
+  const [prompt, setPrompt] = useState(
+    'A cute baby sea otter wearing a beret and glasses, sitting at a small cafe table, sipping a cappuccino',
+  )
+  const [size, setSize] = useState('1024x1024')
+  const [numberOfImages, setNumberOfImages] = useState(1)
+  const [images, setImages] = useState<Array<GeneratedImage>>([])
+  const [isLoading, setIsLoading] = useState(false)
+  const [error, setError] = useState<string | null>(null)
+  const [usedModel, setUsedModel] = useState<string | null>(null)
+  const [hasOpenAI, setHasOpenAI] = useState<boolean | null>(null)
+
+  // Check if OpenAI is available
+  useEffect(() => {
+    fetch('/demo/api/available-providers')
+      .then((res) => res.json())
+      .then((data) => setHasOpenAI(data.hasOpenAI))
+      .catch(() => setHasOpenAI(false))
+  }, [])
+
+  const handleGenerate = async () => {
+    setIsLoading(true)
+    setError(null)
+    setImages([])
+
+    try {
+      const response = await fetch('/demo/api/image', {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({ prompt, size, numberOfImages }),
+      })
+
+      const data = await response.json()
+
+      if (!response.ok) {
+        throw new Error(data.error || 'Failed to generate image')
+      }
+
+      setImages(data.images)
+      setUsedModel(data.model)
+    } catch (err: any) {
+      setError(err.message)
+    } finally {
+      setIsLoading(false)
+    }
+  }
+
+  const getImageSrc = (image: GeneratedImage) => {
+    if (image.url) return image.url
+    if (image.b64Json) return `data:image/png;base64,${image.b64Json}`
+    return ''
+  }
+
+  const handleDownload = async (image: GeneratedImage, index: number) => {
+    const src = getImageSrc(image)
+    if (!src) return
+
+    try {
+      const response = await fetch(src)
+      const blob = await response.blob()
+      const url = URL.createObjectURL(blob)
+      const a = document.createElement('a')
+      a.href = url
+      a.download = `generated-image-${index + 1}.png`
+      document.body.appendChild(a)
+      a.click()
+      document.body.removeChild(a)
+      URL.revokeObjectURL(url)
+    } catch (err) {
+      // Failed to download image
+    }
+  }
+
+  // Show loading state while checking for OpenAI
+  if (hasOpenAI === null) {
+    return (
+      <div className="min-h-[calc(100vh-80px)] bg-gray-900 p-6 flex items-center justify-center">
+        <Loader2 className="w-8 h-8 text-orange-500 animate-spin" />
+      </div>
+    )
+  }
+
+  // Show message if OpenAI is not available
+  if (!hasOpenAI) {
+    return (
+      <div className="min-h-[calc(100vh-80px)] bg-gray-900 p-6">
+        <div className="max-w-2xl mx-auto text-center py-16">
+          <ImageIcon className="w-16 h-16 text-gray-600 mx-auto mb-4" />
+          <h1 className="text-2xl font-bold text-white mb-4">
+            Image Generation Unavailable
+          </h1>
+          <p className="text-gray-400 mb-4">
+            Image generation requires an OpenAI API key. Please add your{' '}
+            <code className="text-orange-400">OPENAI_API_KEY</code> to your{' '}
+            <code className="text-orange-400">.env.local</code> file.
+          </p>
+        </div>
+      </div>
+    )
+  }
+
+  return (
+    <div className="min-h-[calc(100vh-80px)] bg-gray-900 p-6">
+      <div className="max-w-6xl mx-auto">
+        <div className="flex items-center gap-3 mb-6">
+          <ImageIcon className="w-8 h-8 text-orange-500" />
+          <h1 className="text-2xl font-bold text-white">Image Generation</h1>
+        </div>
+
+        <div className="grid grid-cols-1 lg:grid-cols-3 gap-6">
+          {/* Input Panel */}
+          <div className="space-y-4">
+            <div className="grid grid-cols-2 gap-4">
+              <div>
+                <label className="block text-sm font-medium text-gray-300 mb-2">
+                  Size
+                </label>
+                <select
+                  value={size}
+                  onChange={(e) => setSize(e.target.value)}
+                  disabled={isLoading}
+                  className="w-full rounded-lg border border-orange-500/20 bg-gray-800 px-3 py-2 text-sm text-white focus:outline-none focus:ring-2 focus:ring-orange-500/50"
+                >
+                  {SIZES.map((s) => (
+                    <option key={s} value={s}>
+                      {s}
+                    </option>
+                  ))}
+                </select>
+              </div>
+              <div>
+                <label className="block text-sm font-medium text-gray-300 mb-2">
+                  Count
+                </label>
+                <input
+                  type="number"
+                  value={numberOfImages}
+                  onChange={(e) =>
+                    setNumberOfImages(
+                      Math.max(1, Math.min(4, parseInt(e.target.value) || 1)),
+                    )
+                  }
+                  min={1}
+                  max={4}
+                  disabled={isLoading}
+                  className="w-full rounded-lg border border-orange-500/20 bg-gray-800 px-3 py-2 text-sm text-white focus:outline-none focus:ring-2 focus:ring-orange-500/50"
+                />
+              </div>
+            </div>
+
+            <div>
+              <label className="block text-sm font-medium text-gray-300 mb-2">
+                Prompt
+              </label>
+              <textarea
+                value={prompt}
+                onChange={(e) => setPrompt(e.target.value)}
+                disabled={isLoading}
+                rows={6}
+                className="w-full rounded-lg border border-orange-500/20 bg-gray-800 px-3 py-2 text-sm text-white focus:outline-none focus:ring-2 focus:ring-orange-500/50 resize-none"
+                placeholder="Describe the image you want to generate..."
+              />
+            </div>
+
+            <button
+              onClick={handleGenerate}
+              disabled={isLoading || !prompt.trim()}
+              className="w-full px-4 py-3 bg-orange-600 hover:bg-orange-700 disabled:bg-gray-600 text-white rounded-lg font-medium transition-colors flex items-center justify-center gap-2"
+            >
+              {isLoading ? (
+                <>
+                  <Loader2 className="w-5 h-5 animate-spin" />
+                  Generating...
+                </>
+              ) : (
+                'Generate Image'
+              )}
+            </button>
+          </div>

+          {/* Output Panel */}
+          <div className="lg:col-span-2 bg-gray-800 rounded-lg p-6 border border-orange-500/20">
+            <h2 className="text-lg font-semibold text-white mb-4">
+              Generated Images
+            </h2>
+
+            {error && (
+              <div className="p-4 bg-red-500/10 border border-red-500/20 rounded-lg text-red-400 mb-4">
+                {error}
+              </div>
+            )}
+
+            {images.length > 0 ? (
+              <div className="space-y-4">
+                <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
+                  {images.map((image, index) => (
+                    <div key={index} className="relative group">
+                      <img
+                        src={getImageSrc(image)}
+                        alt={`Generated image ${index + 1}`}
+                        className="w-full rounded-lg border border-gray-700"
+                      />
+                      <button
+                        onClick={() => handleDownload(image, index)}
+                        className="absolute top-2 right-2 p-2 bg-gray-900/80 hover:bg-gray-900 rounded-lg opacity-0 group-hover:opacity-100 transition-opacity"
+                        title="Download image"
+                      >
+                        <Download className="w-4 h-4 text-white" />
+                      </button>
+                      {image.revisedPrompt && (
+                        <p className="mt-2 text-xs text-gray-400 italic">
+                          Revised: {image.revisedPrompt}
+                        </p>
+                      )}
+                    </div>
+                  ))}
+                </div>
+                <div className="pt-4 border-t border-gray-700 text-sm text-gray-400">
+                  <p>
+                    Provider:{' '}
+                    <span className="text-orange-400">OpenAI</span>
+                  </p>
+                  <p>
+                    Model: <span className="text-orange-400">{usedModel}</span>
+                  </p>
+                </div>
+              </div>
+            ) : !error && !isLoading ? (
+              <div className="flex flex-col items-center justify-center h-64 text-gray-500">
+                <ImageIcon className="w-16 h-16 mb-4 opacity-50" />
+                <p>
+                  Enter a prompt and click "Generate Image" to create an image.
+                </p>
+              </div>
+            ) : null}
+          </div>
+        </div>
+      </div>
+    </div>
+  )
+}
+
+export const Route = createFileRoute('/demo/image')({
+  component: ImagePage,
+})