@kood/claude-code 0.1.2 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +12 -3
- package/package.json +2 -2
- package/templates/hono/CLAUDE.md +20 -2
- package/templates/hono/docs/architecture/architecture.md +909 -0
- package/templates/hono/docs/deployment/cloudflare.md +537 -190
- package/templates/hono/docs/deployment/docker.md +517 -0
- package/templates/hono/docs/deployment/index.md +181 -213
- package/templates/hono/docs/deployment/railway.md +416 -0
- package/templates/hono/docs/deployment/vercel.md +572 -0
- package/templates/hono/docs/git/git.md +285 -0
- package/templates/hono/docs/library/ai-sdk/index.md +427 -0
- package/templates/hono/docs/library/ai-sdk/openrouter.md +479 -0
- package/templates/hono/docs/library/ai-sdk/providers.md +468 -0
- package/templates/hono/docs/library/ai-sdk/streaming.md +447 -0
- package/templates/hono/docs/library/ai-sdk/structured-output.md +493 -0
- package/templates/hono/docs/library/ai-sdk/tools.md +513 -0
- package/templates/hono/docs/library/hono/env-setup.md +458 -0
- package/templates/hono/docs/library/hono/index.md +1 -0
- package/templates/hono/docs/library/pino/index.md +437 -0
- package/templates/hono/docs/library/prisma/cloudflare-d1.md +503 -0
- package/templates/hono/docs/library/prisma/config.md +362 -0
- package/templates/hono/docs/library/prisma/index.md +86 -13
- package/templates/hono/docs/skills/gemini-review/SKILL.md +116 -116
- package/templates/hono/docs/skills/gemini-review/references/checklists.md +125 -125
- package/templates/hono/docs/skills/gemini-review/references/prompt-templates.md +191 -191
- package/templates/npx/CLAUDE.md +309 -0
- package/templates/npx/docs/git/git.md +307 -0
- package/templates/npx/docs/library/commander/index.md +164 -0
- package/templates/npx/docs/library/fs-extra/index.md +171 -0
- package/templates/npx/docs/library/prompts/index.md +253 -0
- package/templates/npx/docs/mcp/index.md +60 -0
- package/templates/npx/docs/skills/gemini-review/SKILL.md +220 -0
- package/templates/npx/docs/skills/gemini-review/references/checklists.md +134 -0
- package/templates/npx/docs/skills/gemini-review/references/prompt-templates.md +301 -0
- package/templates/tanstack-start/CLAUDE.md +43 -5
- package/templates/tanstack-start/docs/architecture/architecture.md +134 -4
- package/templates/tanstack-start/docs/deployment/cloudflare.md +234 -51
- package/templates/tanstack-start/docs/deployment/index.md +322 -32
- package/templates/tanstack-start/docs/deployment/nitro.md +201 -20
- package/templates/tanstack-start/docs/deployment/railway.md +305 -153
- package/templates/tanstack-start/docs/deployment/vercel.md +353 -78
- package/templates/tanstack-start/docs/git/{index.md → git.md} +81 -7
- package/templates/tanstack-start/docs/guides/best-practices.md +203 -1
- package/templates/tanstack-start/docs/guides/env-setup.md +450 -0
- package/templates/tanstack-start/docs/library/ai-sdk/hooks.md +472 -0
- package/templates/tanstack-start/docs/library/ai-sdk/index.md +264 -0
- package/templates/tanstack-start/docs/library/ai-sdk/openrouter.md +371 -0
- package/templates/tanstack-start/docs/library/ai-sdk/providers.md +403 -0
- package/templates/tanstack-start/docs/library/ai-sdk/streaming.md +320 -0
- package/templates/tanstack-start/docs/library/ai-sdk/structured-output.md +454 -0
- package/templates/tanstack-start/docs/library/ai-sdk/tools.md +473 -0
- package/templates/tanstack-start/docs/library/pino/index.md +320 -0
- package/templates/tanstack-start/docs/library/prisma/cloudflare-d1.md +404 -0
- package/templates/tanstack-start/docs/library/prisma/config.md +377 -0
- package/templates/tanstack-start/docs/library/prisma/index.md +3 -1
- package/templates/tanstack-start/docs/library/prisma/schema.md +123 -25
- package/templates/tanstack-start/docs/library/tanstack-start/server-functions.md +80 -2
- package/templates/tanstack-start/docs/skills/gemini-review/SKILL.md +116 -116
- package/templates/tanstack-start/docs/skills/gemini-review/references/checklists.md +138 -144
- package/templates/tanstack-start/docs/skills/gemini-review/references/prompt-templates.md +186 -187
- package/templates/hono/docs/git/index.md +0 -180
|
@@ -0,0 +1,447 @@
|
|
|
1
|
+
# AI SDK - 텍스트 생성 & 스트리밍 (Hono)
|
|
2
|
+
|
|
3
|
+
> **상위 문서**: [AI SDK](./index.md)
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## 개요
|
|
8
|
+
|
|
9
|
+
AI SDK의 `generateText`와 `streamText`를 Hono에서 사용하는 방법입니다.
|
|
10
|
+
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
## generateText
|
|
14
|
+
|
|
15
|
+
텍스트를 한 번에 생성합니다. 짧은 응답이나 후처리가 필요한 경우에 적합합니다.
|
|
16
|
+
|
|
17
|
+
### 기본 사용
|
|
18
|
+
|
|
19
|
+
```typescript
|
|
20
|
+
import { Hono } from 'hono'
|
|
21
|
+
import { generateText } from 'ai'
|
|
22
|
+
import { openai } from '@ai-sdk/openai'
|
|
23
|
+
|
|
24
|
+
const app = new Hono()
|
|
25
|
+
|
|
26
|
+
app.post('/api/generate', async (c) => {
|
|
27
|
+
const { prompt } = await c.req.json()
|
|
28
|
+
|
|
29
|
+
const { text } = await generateText({
|
|
30
|
+
model: openai('gpt-4o'),
|
|
31
|
+
prompt,
|
|
32
|
+
})
|
|
33
|
+
|
|
34
|
+
return c.json({ text })
|
|
35
|
+
})
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
### 메시지 형식
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
import { Hono } from 'hono'
|
|
42
|
+
import { generateText } from 'ai'
|
|
43
|
+
import { openai } from '@ai-sdk/openai'
|
|
44
|
+
|
|
45
|
+
const app = new Hono()
|
|
46
|
+
|
|
47
|
+
app.post('/api/chat', async (c) => {
|
|
48
|
+
const { messages } = await c.req.json()
|
|
49
|
+
|
|
50
|
+
const { text } = await generateText({
|
|
51
|
+
model: openai('gpt-4o'),
|
|
52
|
+
messages,
|
|
53
|
+
})
|
|
54
|
+
|
|
55
|
+
return c.json({ response: text })
|
|
56
|
+
})
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
### 시스템 프롬프트
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
app.post('/api/assistant', async (c) => {
|
|
63
|
+
const { prompt } = await c.req.json()
|
|
64
|
+
|
|
65
|
+
const { text } = await generateText({
|
|
66
|
+
model: openai('gpt-4o'),
|
|
67
|
+
system: 'You are a helpful coding assistant.',
|
|
68
|
+
prompt,
|
|
69
|
+
})
|
|
70
|
+
|
|
71
|
+
return c.json({ text })
|
|
72
|
+
})
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
### 반환값 활용
|
|
76
|
+
|
|
77
|
+
```typescript
|
|
78
|
+
app.post('/api/analyze', async (c) => {
|
|
79
|
+
const { prompt } = await c.req.json()
|
|
80
|
+
|
|
81
|
+
const result = await generateText({
|
|
82
|
+
model: openai('gpt-4o'),
|
|
83
|
+
prompt,
|
|
84
|
+
})
|
|
85
|
+
|
|
86
|
+
return c.json({
|
|
87
|
+
text: result.text,
|
|
88
|
+
usage: result.usage, // 토큰 사용량
|
|
89
|
+
finishReason: result.finishReason, // 완료 이유
|
|
90
|
+
})
|
|
91
|
+
})
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
---
|
|
95
|
+
|
|
96
|
+
## streamText
|
|
97
|
+
|
|
98
|
+
텍스트를 실시간으로 스트리밍합니다. 긴 응답이나 실시간 피드백이 필요한 경우에 적합합니다.
|
|
99
|
+
|
|
100
|
+
### 기본 스트리밍
|
|
101
|
+
|
|
102
|
+
```typescript
|
|
103
|
+
import { Hono } from 'hono'
|
|
104
|
+
import { streamText } from 'ai'
|
|
105
|
+
import { openai } from '@ai-sdk/openai'
|
|
106
|
+
|
|
107
|
+
const app = new Hono()
|
|
108
|
+
|
|
109
|
+
app.post('/api/stream', async (c) => {
|
|
110
|
+
const { prompt } = await c.req.json()
|
|
111
|
+
|
|
112
|
+
const result = streamText({
|
|
113
|
+
model: openai('gpt-4o'),
|
|
114
|
+
prompt,
|
|
115
|
+
})
|
|
116
|
+
|
|
117
|
+
return result.toTextStreamResponse()
|
|
118
|
+
})
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
### UI 메시지 스트림 (프론트엔드용)
|
|
122
|
+
|
|
123
|
+
```typescript
|
|
124
|
+
import { Hono } from 'hono'
|
|
125
|
+
import { streamText, convertToModelMessages } from 'ai'
|
|
126
|
+
import { openai } from '@ai-sdk/openai'
|
|
127
|
+
|
|
128
|
+
const app = new Hono()
|
|
129
|
+
|
|
130
|
+
app.post('/api/chat', async (c) => {
|
|
131
|
+
const { messages } = await c.req.json()
|
|
132
|
+
|
|
133
|
+
const result = streamText({
|
|
134
|
+
model: openai('gpt-4o'),
|
|
135
|
+
messages: convertToModelMessages(messages),
|
|
136
|
+
})
|
|
137
|
+
|
|
138
|
+
// @ai-sdk/react의 useChat과 호환
|
|
139
|
+
return result.toUIMessageStreamResponse()
|
|
140
|
+
})
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
### Hono 스트리밍 헬퍼 사용
|
|
144
|
+
|
|
145
|
+
```typescript
|
|
146
|
+
import { Hono } from 'hono'
|
|
147
|
+
import { streamText } from 'ai'
|
|
148
|
+
import { openai } from '@ai-sdk/openai'
|
|
149
|
+
import { stream } from 'hono/streaming'
|
|
150
|
+
|
|
151
|
+
const app = new Hono()
|
|
152
|
+
|
|
153
|
+
app.post('/api/custom-stream', async (c) => {
|
|
154
|
+
const { prompt } = await c.req.json()
|
|
155
|
+
|
|
156
|
+
const result = streamText({
|
|
157
|
+
model: openai('gpt-4o'),
|
|
158
|
+
prompt,
|
|
159
|
+
})
|
|
160
|
+
|
|
161
|
+
return stream(c, async (stream) => {
|
|
162
|
+
for await (const chunk of result.textStream) {
|
|
163
|
+
await stream.write(chunk)
|
|
164
|
+
}
|
|
165
|
+
})
|
|
166
|
+
})
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
### SSE (Server-Sent Events)
|
|
170
|
+
|
|
171
|
+
```typescript
|
|
172
|
+
import { Hono } from 'hono'
|
|
173
|
+
import { streamText } from 'ai'
|
|
174
|
+
import { openai } from '@ai-sdk/openai'
|
|
175
|
+
import { streamSSE } from 'hono/streaming'
|
|
176
|
+
|
|
177
|
+
const app = new Hono()
|
|
178
|
+
|
|
179
|
+
app.post('/api/sse', async (c) => {
|
|
180
|
+
const { prompt } = await c.req.json()
|
|
181
|
+
|
|
182
|
+
const result = streamText({
|
|
183
|
+
model: openai('gpt-4o'),
|
|
184
|
+
prompt,
|
|
185
|
+
})
|
|
186
|
+
|
|
187
|
+
return streamSSE(c, async (stream) => {
|
|
188
|
+
for await (const chunk of result.textStream) {
|
|
189
|
+
await stream.writeSSE({
|
|
190
|
+
data: chunk,
|
|
191
|
+
event: 'message',
|
|
192
|
+
})
|
|
193
|
+
}
|
|
194
|
+
await stream.writeSSE({
|
|
195
|
+
data: '[DONE]',
|
|
196
|
+
event: 'done',
|
|
197
|
+
})
|
|
198
|
+
})
|
|
199
|
+
})
|
|
200
|
+
```
|
|
201
|
+
|
|
202
|
+
---
|
|
203
|
+
|
|
204
|
+
## 스트림 이벤트 처리
|
|
205
|
+
|
|
206
|
+
### fullStream
|
|
207
|
+
|
|
208
|
+
모든 이벤트에 접근:
|
|
209
|
+
|
|
210
|
+
```typescript
|
|
211
|
+
app.post('/api/full-stream', async (c) => {
|
|
212
|
+
const { prompt } = await c.req.json()
|
|
213
|
+
|
|
214
|
+
const result = streamText({
|
|
215
|
+
model: openai('gpt-4o'),
|
|
216
|
+
prompt,
|
|
217
|
+
})
|
|
218
|
+
|
|
219
|
+
return stream(c, async (stream) => {
|
|
220
|
+
for await (const event of result.fullStream) {
|
|
221
|
+
switch (event.type) {
|
|
222
|
+
case 'text-delta':
|
|
223
|
+
await stream.write(
|
|
224
|
+
JSON.stringify({ type: 'text', content: event.text }) + '\n'
|
|
225
|
+
)
|
|
226
|
+
break
|
|
227
|
+
case 'tool-call':
|
|
228
|
+
await stream.write(
|
|
229
|
+
JSON.stringify({ type: 'tool', name: event.toolName }) + '\n'
|
|
230
|
+
)
|
|
231
|
+
break
|
|
232
|
+
case 'finish':
|
|
233
|
+
await stream.write(
|
|
234
|
+
JSON.stringify({
|
|
235
|
+
type: 'finish',
|
|
236
|
+
usage: event.usage,
|
|
237
|
+
}) + '\n'
|
|
238
|
+
)
|
|
239
|
+
break
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
})
|
|
243
|
+
})
|
|
244
|
+
```
|
|
245
|
+
|
|
246
|
+
---
|
|
247
|
+
|
|
248
|
+
## 시스템 프롬프트 패턴
|
|
249
|
+
|
|
250
|
+
### 역할 기반 시스템 프롬프트
|
|
251
|
+
|
|
252
|
+
```typescript
|
|
253
|
+
const systemPrompts = {
|
|
254
|
+
coder: 'You are an expert programmer. Provide clean, efficient code.',
|
|
255
|
+
writer: 'You are a professional writer. Create engaging content.',
|
|
256
|
+
analyst: 'You are a data analyst. Provide insights based on data.',
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
app.post('/api/chat/:role', async (c) => {
|
|
260
|
+
const role = c.req.param('role') as keyof typeof systemPrompts
|
|
261
|
+
const { messages } = await c.req.json()
|
|
262
|
+
|
|
263
|
+
const result = streamText({
|
|
264
|
+
model: openai('gpt-4o'),
|
|
265
|
+
system: systemPrompts[role] ?? systemPrompts.coder,
|
|
266
|
+
messages,
|
|
267
|
+
})
|
|
268
|
+
|
|
269
|
+
return result.toUIMessageStreamResponse()
|
|
270
|
+
})
|
|
271
|
+
```
|
|
272
|
+
|
|
273
|
+
### 컨텍스트 주입
|
|
274
|
+
|
|
275
|
+
```typescript
|
|
276
|
+
app.post('/api/chat-with-context', async (c) => {
|
|
277
|
+
const { messages, context } = await c.req.json()
|
|
278
|
+
|
|
279
|
+
const result = streamText({
|
|
280
|
+
model: openai('gpt-4o'),
|
|
281
|
+
system: `You are a helpful assistant.
|
|
282
|
+
|
|
283
|
+
Use the following context to answer questions:
|
|
284
|
+
${context}
|
|
285
|
+
|
|
286
|
+
If the context doesn't contain relevant information, say so.`,
|
|
287
|
+
messages,
|
|
288
|
+
})
|
|
289
|
+
|
|
290
|
+
return result.toUIMessageStreamResponse()
|
|
291
|
+
})
|
|
292
|
+
```
|
|
293
|
+
|
|
294
|
+
---
|
|
295
|
+
|
|
296
|
+
## 에러 처리
|
|
297
|
+
|
|
298
|
+
```typescript
|
|
299
|
+
import { Hono } from 'hono'
|
|
300
|
+
import { HTTPException } from 'hono/http-exception'
|
|
301
|
+
import { streamText } from 'ai'
|
|
302
|
+
import { openai } from '@ai-sdk/openai'
|
|
303
|
+
|
|
304
|
+
const app = new Hono()
|
|
305
|
+
|
|
306
|
+
app.post('/api/chat', async (c) => {
|
|
307
|
+
try {
|
|
308
|
+
const body = await c.req.json()
|
|
309
|
+
|
|
310
|
+
if (!body.messages) {
|
|
311
|
+
throw new HTTPException(400, { message: 'Messages required' })
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
const result = streamText({
|
|
315
|
+
model: openai('gpt-4o'),
|
|
316
|
+
messages: body.messages,
|
|
317
|
+
})
|
|
318
|
+
|
|
319
|
+
return result.toUIMessageStreamResponse()
|
|
320
|
+
} catch (error) {
|
|
321
|
+
if (error instanceof HTTPException) {
|
|
322
|
+
throw error
|
|
323
|
+
}
|
|
324
|
+
|
|
325
|
+
console.error('Stream error:', error)
|
|
326
|
+
throw new HTTPException(500, { message: 'AI processing failed' })
|
|
327
|
+
}
|
|
328
|
+
})
|
|
329
|
+
```
|
|
330
|
+
|
|
331
|
+
---
|
|
332
|
+
|
|
333
|
+
## Abort 처리
|
|
334
|
+
|
|
335
|
+
```typescript
|
|
336
|
+
app.post('/api/stream', async (c) => {
|
|
337
|
+
const { prompt } = await c.req.json()
|
|
338
|
+
|
|
339
|
+
const abortController = new AbortController()
|
|
340
|
+
|
|
341
|
+
// 클라이언트 연결 종료 시 abort
|
|
342
|
+
c.req.raw.signal.addEventListener('abort', () => {
|
|
343
|
+
abortController.abort()
|
|
344
|
+
})
|
|
345
|
+
|
|
346
|
+
const result = streamText({
|
|
347
|
+
model: openai('gpt-4o'),
|
|
348
|
+
prompt,
|
|
349
|
+
abortSignal: abortController.signal,
|
|
350
|
+
})
|
|
351
|
+
|
|
352
|
+
return result.toTextStreamResponse()
|
|
353
|
+
})
|
|
354
|
+
```
|
|
355
|
+
|
|
356
|
+
---
|
|
357
|
+
|
|
358
|
+
## 캐싱 패턴
|
|
359
|
+
|
|
360
|
+
```typescript
|
|
361
|
+
import { Hono } from 'hono'
|
|
362
|
+
import { generateText } from 'ai'
|
|
363
|
+
import { openai } from '@ai-sdk/openai'
|
|
364
|
+
|
|
365
|
+
type Bindings = {
|
|
366
|
+
AI_CACHE: KVNamespace // Cloudflare KV
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
const app = new Hono<{ Bindings: Bindings }>()
|
|
370
|
+
|
|
371
|
+
app.post('/api/generate', async (c) => {
|
|
372
|
+
const { prompt } = await c.req.json()
|
|
373
|
+
|
|
374
|
+
// 캐시 키 생성
|
|
375
|
+
const cacheKey = `ai:${btoa(encodeURIComponent(prompt))}` // btoa는 Latin-1만 지원 — 한글 프롬프트는 먼저 인코딩
|
|
376
|
+
|
|
377
|
+
// 캐시 확인
|
|
378
|
+
const cached = await c.env.AI_CACHE.get(cacheKey)
|
|
379
|
+
if (cached) {
|
|
380
|
+
return c.json({ text: cached, cached: true })
|
|
381
|
+
}
|
|
382
|
+
|
|
383
|
+
// 새로 생성
|
|
384
|
+
const { text } = await generateText({
|
|
385
|
+
model: openai('gpt-4o'),
|
|
386
|
+
prompt,
|
|
387
|
+
})
|
|
388
|
+
|
|
389
|
+
// 캐시 저장 (1시간)
|
|
390
|
+
await c.env.AI_CACHE.put(cacheKey, text, { expirationTtl: 3600 })
|
|
391
|
+
|
|
392
|
+
return c.json({ text, cached: false })
|
|
393
|
+
})
|
|
394
|
+
```
|
|
395
|
+
|
|
396
|
+
---
|
|
397
|
+
|
|
398
|
+
## 타임아웃 설정
|
|
399
|
+
|
|
400
|
+
```typescript
|
|
401
|
+
app.post('/api/generate', async (c) => {
|
|
402
|
+
const { prompt } = await c.req.json()
|
|
403
|
+
|
|
404
|
+
const controller = new AbortController()
|
|
405
|
+
const timeout = setTimeout(() => controller.abort(), 30000) // 30초
|
|
406
|
+
|
|
407
|
+
try {
|
|
408
|
+
const { text } = await generateText({
|
|
409
|
+
model: openai('gpt-4o'),
|
|
410
|
+
prompt,
|
|
411
|
+
abortSignal: controller.signal,
|
|
412
|
+
})
|
|
413
|
+
|
|
414
|
+
return c.json({ text })
|
|
415
|
+
} catch (error) {
|
|
416
|
+
if (error instanceof Error && error.name === 'AbortError') {
|
|
417
|
+
return c.json({ error: 'Request timed out' }, 408)
|
|
418
|
+
}
|
|
419
|
+
throw error
|
|
420
|
+
} finally {
|
|
421
|
+
clearTimeout(timeout)
|
|
422
|
+
}
|
|
423
|
+
})
|
|
424
|
+
```
|
|
425
|
+
|
|
426
|
+
---
|
|
427
|
+
|
|
428
|
+
## 병렬 생성
|
|
429
|
+
|
|
430
|
+
```typescript
|
|
431
|
+
app.post('/api/multi-generate', async (c) => {
|
|
432
|
+
const { prompts } = await c.req.json()
|
|
433
|
+
|
|
434
|
+
const results = await Promise.all(
|
|
435
|
+
prompts.map((prompt: string) =>
|
|
436
|
+
generateText({
|
|
437
|
+
model: openai('gpt-4o'),
|
|
438
|
+
prompt,
|
|
439
|
+
})
|
|
440
|
+
)
|
|
441
|
+
)
|
|
442
|
+
|
|
443
|
+
return c.json({
|
|
444
|
+
texts: results.map((r) => r.text),
|
|
445
|
+
})
|
|
446
|
+
})
|
|
447
|
+
```
|