@kood/claude-code 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dist/index.js +81 -38
  2. package/package.json +2 -2
  3. package/templates/hono/CLAUDE.md +20 -2
  4. package/templates/hono/docs/architecture/architecture.md +909 -0
  5. package/templates/hono/docs/deployment/cloudflare.md +537 -190
  6. package/templates/hono/docs/deployment/docker.md +517 -0
  7. package/templates/hono/docs/deployment/index.md +181 -213
  8. package/templates/hono/docs/deployment/railway.md +416 -0
  9. package/templates/hono/docs/deployment/vercel.md +572 -0
  10. package/templates/hono/docs/git/git.md +285 -0
  11. package/templates/hono/docs/library/ai-sdk/index.md +427 -0
  12. package/templates/hono/docs/library/ai-sdk/openrouter.md +479 -0
  13. package/templates/hono/docs/library/ai-sdk/providers.md +468 -0
  14. package/templates/hono/docs/library/ai-sdk/streaming.md +447 -0
  15. package/templates/hono/docs/library/ai-sdk/structured-output.md +493 -0
  16. package/templates/hono/docs/library/ai-sdk/tools.md +513 -0
  17. package/templates/hono/docs/library/hono/env-setup.md +458 -0
  18. package/templates/hono/docs/library/hono/index.md +1 -0
  19. package/templates/hono/docs/library/pino/index.md +437 -0
  20. package/templates/hono/docs/library/prisma/cloudflare-d1.md +503 -0
  21. package/templates/hono/docs/library/prisma/config.md +362 -0
  22. package/templates/hono/docs/library/prisma/index.md +86 -13
  23. package/templates/hono/docs/skills/gemini-review/SKILL.md +116 -116
  24. package/templates/hono/docs/skills/gemini-review/references/checklists.md +125 -125
  25. package/templates/hono/docs/skills/gemini-review/references/prompt-templates.md +191 -191
  26. package/templates/npx/CLAUDE.md +309 -0
  27. package/templates/npx/docs/git/git.md +307 -0
  28. package/templates/npx/docs/library/commander/index.md +164 -0
  29. package/templates/npx/docs/library/fs-extra/index.md +171 -0
  30. package/templates/npx/docs/library/prompts/index.md +253 -0
  31. package/templates/npx/docs/mcp/index.md +60 -0
  32. package/templates/npx/docs/skills/gemini-review/SKILL.md +220 -0
  33. package/templates/npx/docs/skills/gemini-review/references/checklists.md +134 -0
  34. package/templates/npx/docs/skills/gemini-review/references/prompt-templates.md +301 -0
  35. package/templates/tanstack-start/CLAUDE.md +43 -5
  36. package/templates/tanstack-start/docs/architecture/architecture.md +134 -4
  37. package/templates/tanstack-start/docs/deployment/cloudflare.md +234 -51
  38. package/templates/tanstack-start/docs/deployment/index.md +322 -32
  39. package/templates/tanstack-start/docs/deployment/nitro.md +201 -20
  40. package/templates/tanstack-start/docs/deployment/railway.md +305 -153
  41. package/templates/tanstack-start/docs/deployment/vercel.md +353 -78
  42. package/templates/tanstack-start/docs/git/{index.md → git.md} +81 -7
  43. package/templates/tanstack-start/docs/guides/best-practices.md +203 -1
  44. package/templates/tanstack-start/docs/guides/env-setup.md +450 -0
  45. package/templates/tanstack-start/docs/library/ai-sdk/hooks.md +472 -0
  46. package/templates/tanstack-start/docs/library/ai-sdk/index.md +264 -0
  47. package/templates/tanstack-start/docs/library/ai-sdk/openrouter.md +371 -0
  48. package/templates/tanstack-start/docs/library/ai-sdk/providers.md +403 -0
  49. package/templates/tanstack-start/docs/library/ai-sdk/streaming.md +320 -0
  50. package/templates/tanstack-start/docs/library/ai-sdk/structured-output.md +454 -0
  51. package/templates/tanstack-start/docs/library/ai-sdk/tools.md +473 -0
  52. package/templates/tanstack-start/docs/library/pino/index.md +320 -0
  53. package/templates/tanstack-start/docs/library/prisma/cloudflare-d1.md +404 -0
  54. package/templates/tanstack-start/docs/library/prisma/config.md +377 -0
  55. package/templates/tanstack-start/docs/library/prisma/index.md +3 -1
  56. package/templates/tanstack-start/docs/library/prisma/schema.md +123 -25
  57. package/templates/tanstack-start/docs/library/tanstack-start/server-functions.md +80 -2
  58. package/templates/tanstack-start/docs/skills/gemini-review/SKILL.md +116 -116
  59. package/templates/tanstack-start/docs/skills/gemini-review/references/checklists.md +138 -144
  60. package/templates/tanstack-start/docs/skills/gemini-review/references/prompt-templates.md +186 -187
  61. package/templates/hono/docs/git/index.md +0 -180
@@ -0,0 +1,403 @@
+ # AI SDK - Providers
+
+ > **Parent document**: [AI SDK](./index.md)
+
+ ---
+
+ ## Overview
+
+ AI SDK providers standardize how you connect to different AI model services behind a single, consistent interface.
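+
+ Because every provider plugs into the same core functions such as `generateText`, switching services is usually a one-line change. A minimal sketch, combining the two providers documented below:
+
+ ```typescript
+ import { generateText } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+ import { anthropic } from '@ai-sdk/anthropic'
+
+ // The call shape is identical for every provider; only the model instance changes.
+ const { text: fromOpenAI } = await generateText({ model: openai('gpt-4o'), prompt: 'Hello!' })
+ const { text: fromClaude } = await generateText({ model: anthropic('claude-3-5-sonnet-20241022'), prompt: 'Hello!' })
+ ```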
+
+ ---
+ ## Installation
+
+ ```bash
+ # Install only the providers you need
+ npm install @ai-sdk/openai      # OpenAI (GPT-4, GPT-4o)
+ npm install @ai-sdk/anthropic   # Anthropic (Claude)
+ npm install @ai-sdk/google      # Google (Gemini)
+ npm install @ai-sdk/mistral     # Mistral
+ npm install @ai-sdk/groq        # Groq
+ npm install @ai-sdk/cohere      # Cohere
+ ```
+
+ ---
+
+ ## OpenAI
+
+ ### Basic usage
+
+ ```typescript
+ import { openai } from '@ai-sdk/openai'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Custom configuration
+
+ ```typescript
+ import { createOpenAI } from '@ai-sdk/openai'
+
+ const openai = createOpenAI({
+   apiKey: process.env.OPENAI_API_KEY,
+   baseURL: 'https://api.openai.com/v1', // custom endpoint
+   organization: 'org-xxx',              // organization ID
+ })
+
+ const { text } = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Available models
+
+ | Model | Description |
+ |------|------|
+ | `gpt-4o` | Latest GPT-4 Omni |
+ | `gpt-4o-mini` | Lightweight GPT-4o |
+ | `gpt-4-turbo` | GPT-4 Turbo |
+ | `gpt-3.5-turbo` | GPT-3.5 |
+ | `o1` | Reasoning-optimized model |
+ | `o1-mini` | Lightweight reasoning model |
+
+ ### Environment variables
+
+ ```bash
+ OPENAI_API_KEY=sk-...
+ ```
+
+ ---
+
+ ## Anthropic (Claude)
+
+ ### Basic usage
+
+ ```typescript
+ import { anthropic } from '@ai-sdk/anthropic'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: anthropic('claude-3-5-sonnet-20241022'),
+   prompt: 'Explain TypeScript.',
+ })
+ ```
+
+ ### Custom configuration
+
+ ```typescript
+ import { createAnthropic } from '@ai-sdk/anthropic'
+
+ const anthropic = createAnthropic({
+   apiKey: process.env.ANTHROPIC_API_KEY,
+   baseURL: 'https://api.anthropic.com', // custom endpoint
+ })
+
+ const { text } = await generateText({
+   model: anthropic('claude-3-5-sonnet-20241022'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Available models
+
+ | Model | Description |
+ |------|------|
+ | `claude-3-5-sonnet-20241022` | Claude 3.5 Sonnet (latest) |
+ | `claude-3-5-haiku-20241022` | Claude 3.5 Haiku (fast) |
+ | `claude-3-opus-20240229` | Claude 3 Opus (most capable) |
+ | `claude-3-sonnet-20240229` | Claude 3 Sonnet |
+ | `claude-3-haiku-20240307` | Claude 3 Haiku |
+
+ ### Environment variables
+
+ ```bash
+ ANTHROPIC_API_KEY=sk-ant-...
+ ```
+
+ ---
+
+ ## Google (Gemini)
+
+ ### Basic usage
+
+ ```typescript
+ import { google } from '@ai-sdk/google'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: google('gemini-1.5-pro'),
+   prompt: 'What is machine learning?',
+ })
+ ```
+
+ ### Custom configuration
+
+ ```typescript
+ import { createGoogleGenerativeAI } from '@ai-sdk/google'
+
+ const google = createGoogleGenerativeAI({
+   apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
+ })
+
+ const { text } = await generateText({
+   model: google('gemini-1.5-pro'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Available models
+
+ | Model | Description |
+ |------|------|
+ | `gemini-1.5-pro` | Gemini 1.5 Pro |
+ | `gemini-1.5-flash` | Gemini 1.5 Flash (fast) |
+ | `gemini-2.0-flash-exp` | Gemini 2.0 Flash (experimental) |
+
+ ### Environment variables
+
+ ```bash
+ GOOGLE_GENERATIVE_AI_API_KEY=...
+ ```
+
+ ---
+
+ ## Google Vertex AI
+
+ Vertex AI is used in Google Cloud environments.
+
+ ### Installation
+
+ ```bash
+ npm install @ai-sdk/google-vertex
+ ```
+
+ ### Basic usage
+
+ ```typescript
+ import { vertex } from '@ai-sdk/google-vertex'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: vertex('gemini-1.5-pro'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Vertex AI + Anthropic
+
+ ```typescript
+ import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: vertexAnthropic('claude-3-5-sonnet@20240620'),
+   prompt: 'Write a poem.',
+ })
+ ```
+
+ ### Custom configuration
+
+ ```typescript
+ import { createVertexAnthropic } from '@ai-sdk/google-vertex/anthropic'
+
+ const vertexAnthropic = createVertexAnthropic({
+   project: 'my-project-id',
+   location: 'us-central1',
+ })
+
+ // For the Edge runtime, import from the edge entry point instead:
+ // import { createVertexAnthropic } from '@ai-sdk/google-vertex/anthropic/edge'
+ ```
+
+ ### Environment variables
+
+ ```bash
+ GOOGLE_VERTEX_PROJECT=my-project-id
+ GOOGLE_VERTEX_LOCATION=us-central1
+ ```
+
+ ---
+
+ ## Groq
+
+ ### Installation
+
+ ```bash
+ npm install @ai-sdk/groq
+ ```
+
+ ### Basic usage
+
+ ```typescript
+ import { groq } from '@ai-sdk/groq'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: groq('llama-3.1-70b-versatile'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Environment variables
+
+ ```bash
+ GROQ_API_KEY=gsk_...
+ ```
+
+ ---
+
+ ## Mistral
+
+ ### Installation
+
+ ```bash
+ npm install @ai-sdk/mistral
+ ```
+
+ ### Basic usage
+
+ ```typescript
+ import { mistral } from '@ai-sdk/mistral'
+ import { generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: mistral('mistral-large-latest'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Environment variables
+
+ ```bash
+ MISTRAL_API_KEY=...
+ ```
+
+ ---
+
+ ## AI Gateway (unified provider)
+
+ AI Gateway exposes multiple providers through a single interface.
+
+ ```typescript
+ import { gateway, generateText } from 'ai'
+
+ const { text } = await generateText({
+   model: gateway('openai/gpt-4o'), // 'provider/model' format
+   prompt: 'Hello!',
+ })
+
+ // Other providers work the same way
+ const { text: text2 } = await generateText({
+   model: gateway('anthropic/claude-3-5-sonnet'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ---
+
+ ## Provider comparison
+
+ | Provider | Strengths | Weaknesses |
+ |-----------|------|------|
+ | **OpenAI** | Widest range of models, reliable | Can be costly |
+ | **Anthropic** | Long context, strong safety | Fewer model choices |
+ | **Google** | Strong multimodal support, free tier | API limits |
+ | **Groq** | Very fast inference | Limited model selection |
+ | **Mistral** | European data residency, open-source models | Relatively new service |
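+
+ When the tradeoff depends on the deployment environment, one option is to choose the provider at runtime. A minimal sketch, assuming a hypothetical `MODEL_PROVIDER` environment variable and `pickModel` helper (neither is part of this template):
+
+ ```typescript
+ import { openai } from '@ai-sdk/openai'
+ import { anthropic } from '@ai-sdk/anthropic'
+ import { groq } from '@ai-sdk/groq'
+
+ // Hypothetical helper: select a model based on an environment variable.
+ export function pickModel() {
+   switch (process.env.MODEL_PROVIDER) {
+     case 'anthropic':
+       return anthropic('claude-3-5-sonnet-20241022')
+     case 'groq':
+       return groq('llama-3.1-70b-versatile')
+     default:
+       return openai('gpt-4o')
+   }
+ }
+ ```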
+
+ ---
+ ## Model parameters
+
+ Parameters available across all providers:
+
+ ```typescript
+ const { text } = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+
+   // Common parameters
+   temperature: 0.7,        // creativity (0-2, default 1)
+   maxTokens: 1000,         // maximum number of tokens
+   topP: 0.9,               // nucleus sampling
+   frequencyPenalty: 0.5,   // discourage repetition
+   presencePenalty: 0.5,    // encourage new topics
+   stopSequences: ['\n\n'], // sequences that stop generation
+ })
+ ```
+
+ ---
+
+ ## OpenRouter (unified gateway)
+
+ A unified gateway that provides access to hundreds of models with a single API key.
+
+ ### Installation
+
+ ```bash
+ npm install @openrouter/ai-sdk-provider
+ ```
+
+ ### Basic usage
+
+ ```typescript
+ import { createOpenRouter } from '@openrouter/ai-sdk-provider'
+ import { generateText } from 'ai'
+
+ const openrouter = createOpenRouter({
+   apiKey: process.env.OPENROUTER_API_KEY,
+ })
+
+ const { text } = await generateText({
+   model: openrouter.chat('anthropic/claude-3.5-sonnet'),
+   prompt: 'Hello!',
+ })
+ ```
+
+ ### Popular models
+
+ | Model ID | Description |
+ |---------|------|
+ | `anthropic/claude-3.5-sonnet` | Claude 3.5 Sonnet |
+ | `openai/gpt-4o` | GPT-4o |
+ | `google/gemini-pro-1.5` | Gemini Pro 1.5 |
+ | `meta-llama/llama-3.1-70b-instruct` | Llama 3.1 70B |
+
+ For details, see the [OpenRouter guide](./openrouter.md).
+
+ ---
+
+ ## Environment variable summary
+
+ ```bash
+ # .env
+
+ # OpenAI
+ OPENAI_API_KEY=sk-...
+
+ # Anthropic
+ ANTHROPIC_API_KEY=sk-ant-...
+
+ # Google
+ GOOGLE_GENERATIVE_AI_API_KEY=...
+
+ # Google Vertex AI
+ GOOGLE_VERTEX_PROJECT=my-project
+ GOOGLE_VERTEX_LOCATION=us-central1
+
+ # Groq
+ GROQ_API_KEY=gsk_...
+
+ # Mistral
+ MISTRAL_API_KEY=...
+
+ # OpenRouter
+ OPENROUTER_API_KEY=sk-or-v1-...
+ ```
@@ -0,0 +1,320 @@
+ # AI SDK - Text Generation
+
+ > **Parent document**: [AI SDK](./index.md)
+
+ ---
+
+ ## Overview
+
+ The AI SDK provides two main ways to generate text:
+ - `generateText`: returns the complete response at once
+ - `streamText`: streams the response in real time
+
+ ---
+
+ ## generateText
+
+ Waits for the full response to finish, then returns the result.
+
+ ### Basic usage
+
+ ```typescript
+ import { generateText } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+
+ const { text, usage, finishReason } = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Explain quantum computing.',
+ })
+
+ console.log(text)
+ console.log(`Tokens: ${usage.totalTokens}`)
+ console.log(`Finish reason: ${finishReason}`)
+ ```
+
+ ### Message-based (chat)
+
+ ```typescript
+ import { generateText } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+
+ const { text } = await generateText({
+   model: openai('gpt-4o'),
+   system: 'You are a helpful assistant.',
+   messages: [
+     { role: 'user', content: 'What is TypeScript?' },
+     { role: 'assistant', content: 'TypeScript is a typed superset of JavaScript.' },
+     { role: 'user', content: 'How do I get started?' },
+   ],
+ })
+ ```
+
+ ### Return value
+
+ ```typescript
+ const result = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+ })
+
+ // Key properties
+ result.text         // generated text
+ result.usage        // { promptTokens, completionTokens, totalTokens }
+ result.finishReason // 'stop' | 'length' | 'content-filter' | 'tool-calls' | ...
+ result.toolCalls    // tool calls made by the model
+ result.toolResults  // tool execution results
+ result.response     // raw response object
+ result.steps        // per-step details for multi-step runs
+ ```
+
+ ---
+
+ ## streamText
+
+ Streams the response in real time. Well suited to chat applications where responsiveness matters.
+
+ ### Basic usage
+
+ ```typescript
+ import { streamText } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+
+ const result = streamText({
+   model: openai('gpt-4o'),
+   prompt: 'Write a haiku about programming.',
+ })
+
+ // Stream as an AsyncIterable
+ for await (const chunk of result.textStream) {
+   process.stdout.write(chunk)
+ }
+ ```
+
+ ### Message-based streaming
+
+ ```typescript
+ import { streamText, convertToModelMessages } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+
+ const result = streamText({
+   model: openai('gpt-4o'),
+   system: 'You are a helpful assistant.',
+   messages: convertToModelMessages([
+     { id: '1', role: 'user', content: 'Hello!' },
+     { id: '2', role: 'assistant', content: 'Hi there!' },
+     { id: '3', role: 'user', content: 'How are you?' },
+   ]),
+ })
+
+ for await (const chunk of result.textStream) {
+   console.log(chunk)
+ }
+ ```
+
+ ### Stream properties
+
+ ```typescript
+ const result = streamText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+ })
+
+ // Different ways to consume the stream
+ result.textStream // text-only stream
+ result.fullStream // full event stream (including tool calls)
+
+ // Final results (Promises)
+ const finalText = await result.text
+ const finalUsage = await result.usage
+ const finalFinishReason = await result.finishReason
+ ```
+
+ ### fullStream events
+
+ ```typescript
+ for await (const event of result.fullStream) {
+   switch (event.type) {
+     case 'text-delta':
+       console.log('Text:', event.textDelta)
+       break
+     case 'tool-call':
+       console.log('Tool call:', event.toolName, event.input)
+       break
+     case 'tool-result':
+       console.log('Tool result:', event.output)
+       break
+     case 'finish':
+       console.log('Finished:', event.finishReason)
+       break
+     case 'error':
+       console.error('Error:', event.error)
+       break
+   }
+ }
+ ```
+
+ ---
+
+ ## Using in an API Route
+
+ ### TanStack Start / Next.js
+
+ ```typescript
+ // app/api/chat/route.ts
+ import { streamText, convertToModelMessages } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+
+ export async function POST(req: Request) {
+   const { messages } = await req.json()
+
+   const result = streamText({
+     model: openai('gpt-4o'),
+     messages: convertToModelMessages(messages),
+   })
+
+   // Convert to a UI message stream
+   return result.toUIMessageStreamResponse()
+ }
+ ```
+
+ ### Returning a plain text stream
+
+ ```typescript
+ export async function POST(req: Request) {
+   const { prompt } = await req.json()
+
+   const result = streamText({
+     model: openai('gpt-4o'),
+     prompt,
+   })
+
+   // Plain text stream
+   return result.toTextStreamResponse()
+ }
+ ```
+
+ ### Error handling
+
+ ```typescript
+ export async function POST(req: Request) {
+   const { messages } = await req.json()
+
+   const result = streamText({
+     model: openai('gpt-4o'),
+     messages: convertToModelMessages(messages),
+   })
+
+   // Forward error messages to the UI
+   return result.toUIMessageStreamResponse({
+     getErrorMessage: (error) => {
+       if (error instanceof Error) {
+         return error.message
+       }
+       return 'An error occurred'
+     },
+   })
+ }
+ ```
+
+ ---
+
+ ## Advanced options
+
+ ### Temperature and token limits
+
+ ```typescript
+ const result = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Write a creative story.',
+   temperature: 0.9, // higher = more creative (0-2)
+   maxTokens: 2000,  // maximum output tokens
+   topP: 0.95,       // nucleus sampling
+ })
+ ```
+
+ ### Stop conditions
+
+ ```typescript
+ import { generateText, stepCountIs } from 'ai'
+
+ const result = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Solve this problem step by step.',
+   stopWhen: stepCountIs(5), // at most 5 steps
+ })
+ ```
+
+ ### Accessing response messages
+
+ ```typescript
+ const { text, response } = await generateText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+ })
+
+ // How to access responseMessages in v5
+ const responseMessages = response.messages
+ ```
+
+ ---
+
+ ## onFinish callback
+
+ A callback that runs when streaming completes:
+
+ ```typescript
+ const result = streamText({
+   model: openai('gpt-4o'),
+   prompt: 'Hello!',
+   onFinish: async ({ text, usage, finishReason }) => {
+     console.log('Completed:', text)
+     console.log('Tokens used:', usage.totalTokens)
+
+     // e.g. persist to the database
+     await saveToDatabase({ text, usage })
+   },
+ })
+ ```
+
+ ---
+
+ ## TanStack Start server function integration
+
+ ```typescript
+ // lib/ai.ts
+ import { createServerFn } from '@tanstack/react-start'
+ import { generateText, streamText } from 'ai'
+ import { openai } from '@ai-sdk/openai'
+
+ // Non-streaming version
+ export const generateResponse = createServerFn({ method: 'POST' })
+   .inputValidator((data: { prompt: string }) => data)
+   .handler(async ({ data }) => {
+     const { text } = await generateText({
+       model: openai('gpt-4o'),
+       prompt: data.prompt,
+     })
+     return { text }
+   })
+
+ // For streaming, using an API Route is recommended
+ ```
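+
+ A minimal usage sketch for the server function above; the `{ data }` calling convention follows TanStack Start server functions, and the prompt string is illustrative:
+
+ ```typescript
+ // Invoke the server function from a route or component; the handler runs on the server.
+ const { text } = await generateResponse({ data: { prompt: 'Summarize this project.' } })
+ console.log(text)
+ ```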
+
+ ---
+
+ ## Parameter summary
+
+ | Parameter | Type | Description |
+ |---------|------|------|
+ | `model` | Model | AI model instance |
+ | `prompt` | string | Single prompt |
+ | `messages` | Message[] | Message array (chat) |
+ | `system` | string | System prompt |
+ | `temperature` | number | Creativity (0-2) |
+ | `maxTokens` | number | Maximum output tokens |
+ | `topP` | number | Nucleus sampling (0-1) |
+ | `frequencyPenalty` | number | Repetition penalty (-2 to 2) |
+ | `presencePenalty` | number | Encourages new topics (-2 to 2) |
+ | `stopSequences` | string[] | Sequences that stop generation |
+ | `tools` | ToolSet | Tool definitions |
+ | `onFinish` | Function | Completion callback |