@tryhamster/gerbil 1.0.0-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. package/LICENSE +23 -0
  2. package/README.md +253 -0
  3. package/bin/cli.js +2 -0
  4. package/dist/auto-update-BbNHbSU1.mjs +3 -0
  5. package/dist/browser/index.d.mts +262 -0
  6. package/dist/browser/index.d.mts.map +1 -0
  7. package/dist/browser/index.mjs +755 -0
  8. package/dist/browser/index.mjs.map +1 -0
  9. package/dist/chrome-backend-C5Un08O4.mjs +771 -0
  10. package/dist/chrome-backend-C5Un08O4.mjs.map +1 -0
  11. package/dist/chrome-backend-CtwPENIW.mjs +3 -0
  12. package/dist/chunk-Ct1HF2bE.mjs +7 -0
  13. package/dist/cli.d.mts +1 -0
  14. package/dist/cli.mjs +7078 -0
  15. package/dist/cli.mjs.map +1 -0
  16. package/dist/frameworks/express.d.mts +22 -0
  17. package/dist/frameworks/express.d.mts.map +1 -0
  18. package/dist/frameworks/express.mjs +123 -0
  19. package/dist/frameworks/express.mjs.map +1 -0
  20. package/dist/frameworks/fastify.d.mts +11 -0
  21. package/dist/frameworks/fastify.d.mts.map +1 -0
  22. package/dist/frameworks/fastify.mjs +73 -0
  23. package/dist/frameworks/fastify.mjs.map +1 -0
  24. package/dist/frameworks/hono.d.mts +14 -0
  25. package/dist/frameworks/hono.d.mts.map +1 -0
  26. package/dist/frameworks/hono.mjs +82 -0
  27. package/dist/frameworks/hono.mjs.map +1 -0
  28. package/dist/frameworks/next.d.mts +31 -0
  29. package/dist/frameworks/next.d.mts.map +1 -0
  30. package/dist/frameworks/next.mjs +116 -0
  31. package/dist/frameworks/next.mjs.map +1 -0
  32. package/dist/frameworks/react.d.mts +56 -0
  33. package/dist/frameworks/react.d.mts.map +1 -0
  34. package/dist/frameworks/react.mjs +172 -0
  35. package/dist/frameworks/react.mjs.map +1 -0
  36. package/dist/frameworks/trpc.d.mts +12 -0
  37. package/dist/frameworks/trpc.d.mts.map +1 -0
  38. package/dist/frameworks/trpc.mjs +80 -0
  39. package/dist/frameworks/trpc.mjs.map +1 -0
  40. package/dist/gerbil-BfnsFWRE.mjs +644 -0
  41. package/dist/gerbil-BfnsFWRE.mjs.map +1 -0
  42. package/dist/gerbil-BjW-z7Fq.mjs +5 -0
  43. package/dist/gerbil-DZ1k3ChC.d.mts +138 -0
  44. package/dist/gerbil-DZ1k3ChC.d.mts.map +1 -0
  45. package/dist/index.d.mts +223 -0
  46. package/dist/index.d.mts.map +1 -0
  47. package/dist/index.mjs +13 -0
  48. package/dist/index.mjs.map +1 -0
  49. package/dist/integrations/ai-sdk.d.mts +78 -0
  50. package/dist/integrations/ai-sdk.d.mts.map +1 -0
  51. package/dist/integrations/ai-sdk.mjs +199 -0
  52. package/dist/integrations/ai-sdk.mjs.map +1 -0
  53. package/dist/integrations/langchain.d.mts +41 -0
  54. package/dist/integrations/langchain.d.mts.map +1 -0
  55. package/dist/integrations/langchain.mjs +93 -0
  56. package/dist/integrations/langchain.mjs.map +1 -0
  57. package/dist/integrations/llamaindex.d.mts +45 -0
  58. package/dist/integrations/llamaindex.d.mts.map +1 -0
  59. package/dist/integrations/llamaindex.mjs +86 -0
  60. package/dist/integrations/llamaindex.mjs.map +1 -0
  61. package/dist/integrations/mcp-client.d.mts +206 -0
  62. package/dist/integrations/mcp-client.d.mts.map +1 -0
  63. package/dist/integrations/mcp-client.mjs +507 -0
  64. package/dist/integrations/mcp-client.mjs.map +1 -0
  65. package/dist/integrations/mcp.d.mts +177 -0
  66. package/dist/integrations/mcp.d.mts.map +1 -0
  67. package/dist/integrations/mcp.mjs +8 -0
  68. package/dist/mcp-R8kRLIKb.mjs +348 -0
  69. package/dist/mcp-R8kRLIKb.mjs.map +1 -0
  70. package/dist/models-DKULvhOr.mjs +136 -0
  71. package/dist/models-DKULvhOr.mjs.map +1 -0
  72. package/dist/models-De2-_GmQ.d.mts +22 -0
  73. package/dist/models-De2-_GmQ.d.mts.map +1 -0
  74. package/dist/one-liner-BUQR0nqq.mjs +98 -0
  75. package/dist/one-liner-BUQR0nqq.mjs.map +1 -0
  76. package/dist/skills/index.d.mts +390 -0
  77. package/dist/skills/index.d.mts.map +1 -0
  78. package/dist/skills/index.mjs +7 -0
  79. package/dist/skills-D3CEpgDc.mjs +630 -0
  80. package/dist/skills-D3CEpgDc.mjs.map +1 -0
  81. package/dist/tools-BsiEE6f2.mjs +567 -0
  82. package/dist/tools-BsiEE6f2.mjs.map +1 -0
  83. package/dist/types-BS1N92Jt.d.mts +183 -0
  84. package/dist/types-BS1N92Jt.d.mts.map +1 -0
  85. package/dist/utils-7vXqtq2Q.mjs +63 -0
  86. package/dist/utils-7vXqtq2Q.mjs.map +1 -0
  87. package/docs/ai-sdk.md +80 -0
  88. package/docs/architecture/README.md +84 -0
  89. package/docs/architecture/caching.md +227 -0
  90. package/docs/architecture/inference.md +176 -0
  91. package/docs/architecture/overview.md +179 -0
  92. package/docs/architecture/streaming.md +261 -0
  93. package/docs/architecture/webgpu.md +213 -0
  94. package/docs/browser.md +328 -0
  95. package/docs/cli.md +155 -0
  96. package/docs/frameworks.md +90 -0
  97. package/docs/mcp-client.md +224 -0
  98. package/docs/mcp.md +109 -0
  99. package/docs/memory.md +229 -0
  100. package/docs/repl.md +473 -0
  101. package/docs/skills.md +261 -0
  102. package/docs/tools.md +304 -0
  103. package/package.json +207 -0
package/docs/browser.md ADDED
@@ -0,0 +1,328 @@
# Browser Usage

Run LLMs directly in the browser with WebGPU acceleration. No server required.

## Quick Start (React)

```tsx
import { useChat } from "@tryhamster/gerbil/browser";

function Chat() {
  const { messages, input, setInput, handleSubmit, isLoading, isGenerating } = useChat();

  if (isLoading) return <div>Loading model...</div>;

  return (
    <div>
      {messages.map(m => (
        <div key={m.id}>
          <strong>{m.role}:</strong> {m.content}
        </div>
      ))}
      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={e => setInput(e.target.value)}
          placeholder="Say something..."
        />
        <button disabled={isGenerating}>Send</button>
      </form>
    </div>
  );
}
```

That's it! The hook handles model loading, streaming, and state management.

## React Hooks

### `useChat`

Full-featured chat hook with message history, streaming, and thinking mode.

```tsx
import { useChat } from "@tryhamster/gerbil/browser";

function Chat() {
  const {
    messages,        // Message[] - chat history
    input,           // string - current input
    setInput,        // (value: string) => void
    handleSubmit,    // (e?) => void - submit handler
    isLoading,       // boolean - model loading
    loadingProgress, // { status, message?, file?, progress?, downloadCount? }
    isGenerating,    // boolean - generating response
    thinking,        // string - current thinking (streaming)
    stop,            // () => void - stop generation
    clear,           // () => void - clear messages
    tps,             // number - tokens per second
    isReady,         // boolean - model ready
    error,           // string | null
    load,            // () => void - manually load model
  } = useChat({
    model: "qwen3-0.6b",        // Model ID
    system: "You are helpful.", // System prompt
    thinking: true,             // Enable thinking mode
    maxTokens: 512,             // Max tokens per response
    temperature: 0.7,           // Sampling temperature
    autoLoad: false,            // Auto-load on mount (default: false)
  });
}
```

#### Message Type

```typescript
interface Message {
  id: string;
  role: "user" | "assistant";
  content: string;
  thinking?: string; // Thinking content (if enabled)
}
```

#### Auto-Loading Behavior

By default, hooks **don't** auto-load on mount (to avoid unexpected downloads). The model loads when:

1. You call `handleSubmit()` / `complete()` - triggers load automatically
2. You call `load()` manually
3. You set `autoLoad: true` - loads on mount

```tsx
// Default: loads when user submits first message
function Chat() {
  const { handleSubmit, isLoading } = useChat();
  // Model loads on first handleSubmit()
}

// Explicit preload on mount
function ChatPreloaded() {
  const { handleSubmit, isLoading } = useChat({ autoLoad: true });
  // Model starts loading immediately
}

// Manual load control
function ChatManual() {
  const { load, isLoading, isReady } = useChat();
  return <button onClick={load} disabled={isLoading || isReady}>Load Model</button>;
}
```

#### With Thinking Mode

```tsx
function ChatWithThinking() {
  const { messages, input, setInput, handleSubmit, isLoading } = useChat({
    thinking: true,
  });

  if (isLoading) return <div>Loading...</div>;

  return (
    <div>
      {messages.map(m => (
        <div key={m.id}>
          {m.thinking && (
            <details>
              <summary>Thinking...</summary>
              <pre>{m.thinking}</pre>
            </details>
          )}
          <p>{m.content}</p>
        </div>
      ))}
      <form onSubmit={handleSubmit}>
        <input value={input} onChange={e => setInput(e.target.value)} />
      </form>
    </div>
  );
}
```

#### With Loading Progress

The `loadingProgress` object tells you what's happening:

- `status: "loading"` - Initial load / compiling shaders
- `status: "downloading"` - Downloading model files from network
- `status: "ready"` - Model ready

When loading from **cache**, you'll only see `"loading"` → `"ready"` (no `"downloading"`).
When **downloading**, you'll see `"downloading"` with progress info.

```tsx
function ChatWithProgress() {
  const { messages, handleSubmit, isLoading, loadingProgress } = useChat();

  if (isLoading) {
    const p = loadingProgress;

    if (p?.status === "downloading") {
      // Downloading from network - show progress
      return <div>Downloading {p.file}: {p.progress}%</div>;
    }

    // Loading from cache or compiling shaders
    return <div>{p?.message || "Loading model..."}</div>;
  }

  // ... rest of chat UI
}
```

### `useCompletion`

Simple completion hook for one-off generations.

```tsx
import { useCompletion } from "@tryhamster/gerbil/browser";

function App() {
  const { complete, completion, isLoading, isGenerating, tps } = useCompletion();

  if (isLoading) return <div>Loading model...</div>;

  return (
    <div>
      <button
        onClick={() => complete("Write a haiku about coding")}
        disabled={isGenerating}
      >
        Generate
      </button>
      <p>{completion}</p>
      {isGenerating && <span>{tps.toFixed(0)} tok/s</span>}
    </div>
  );
}
```

#### API

```typescript
const {
  completion,      // string - generated text
  thinking,        // string - thinking content
  complete,        // (prompt: string) => Promise<string>
  isLoading,       // boolean - model loading
  loadingProgress, // { status, message?, file?, progress? }
  isGenerating,    // boolean - generating
  stop,            // () => void - stop generation
  tps,             // number - tokens per second
  isReady,         // boolean - model ready
  error,           // string | null
} = useCompletion({
  model: "qwen3-0.6b",
  thinking: false,
  maxTokens: 512,
});
```
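
To surface thinking content with `useCompletion`, enable `thinking` and render the `thinking` string alongside the completion. A minimal sketch using only the fields documented above:

```tsx
function HaikuWithThinking() {
  const { complete, completion, thinking, isGenerating } = useCompletion({
    thinking: true,
  });

  return (
    <div>
      <button onClick={() => complete("Write a haiku about coding")} disabled={isGenerating}>
        Generate
      </button>
      {thinking && <pre>{thinking}</pre>} {/* streamed thinking content */}
      <p>{completion}</p>
    </div>
  );
}
```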

## Low-Level API

For full control, use `createGerbilWorker` directly:

```typescript
import { createGerbilWorker, isWebGPUSupported } from "@tryhamster/gerbil/browser";

if (!isWebGPUSupported()) {
  throw new Error("WebGPU not supported");
}

const gerbil = await createGerbilWorker({
  modelId: "qwen3-0.6b",
  onProgress: (p) => console.log(p.status),
  onToken: (token) => console.log(token.text),
  onComplete: (result) => console.log(`${result.tps} tok/s`),
});

await gerbil.generate("Hello!", { thinking: true });
gerbil.terminate();
```

### `createGerbilWorker(options)`

```typescript
interface GerbilWorkerOptions {
  modelId?: string; // Default: "qwen3-0.6b"
  onProgress?: (progress: WorkerProgress) => void;
  onToken?: (token: WorkerToken) => void;
  onComplete?: (result: WorkerComplete) => void;
  onError?: (error: string) => void;
}

interface GerbilWorker {
  generate: (prompt: string, options?: GenerateOptions) => Promise<string>;
  interrupt: () => void;
  reset: () => void;
  terminate: () => void;
  isReady: () => boolean;
}
```
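
`interrupt()` and `reset()` make cancel/retry flows possible without tearing down the worker. A sketch, assuming `interrupt()` stops the in-flight generation and `reset()` clears conversation state (their exact semantics aren't spelled out on this page):

```typescript
// Hedged sketch: cancel an in-flight generation from a UI control.
// `cancelButton` is a hypothetical DOM element.
const pending = gerbil.generate("Write a long story.", { maxTokens: 2048 });

cancelButton.addEventListener("click", () => {
  gerbil.interrupt(); // assumed to stop the current generation
});

await pending;
gerbil.reset(); // assumed to clear state before an unrelated prompt
```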

### Generation Options

```typescript
interface GenerateOptions {
  maxTokens?: number;   // Default: 256
  temperature?: number; // Default: 0.7
  topP?: number;        // Default: 0.9
  topK?: number;        // Default: 20
  thinking?: boolean;   // Enable thinking mode
  system?: string;      // System prompt
}
```
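
Putting the options together, a short sketch (reusing the `gerbil` worker created in the Low-Level API example above):

```typescript
const text = await gerbil.generate("Summarize WebGPU in one sentence.", {
  maxTokens: 128,
  temperature: 0.3, // lower temperature -> more deterministic output
  system: "You are terse.",
});
console.log(text);
```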

## Utilities

### `isWebGPUSupported()`

```typescript
import { isWebGPUSupported } from "@tryhamster/gerbil/browser";

if (!isWebGPUSupported()) {
  // Show fallback or error
}
```

### `getWebGPUInfo()`

```typescript
import { getWebGPUInfo } from "@tryhamster/gerbil/browser";

const info = await getWebGPUInfo();
// { supported: true, adapter: "Apple", device: "Apple M4 Max" }
```
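
The two utilities combine naturally into a startup guard. A minimal sketch, using only the APIs documented on this page:

```typescript
import { isWebGPUSupported, getWebGPUInfo, createGerbilWorker } from "@tryhamster/gerbil/browser";

async function boot() {
  if (!isWebGPUSupported()) {
    // Fall back to a server-side endpoint or show an unsupported notice.
    return null;
  }
  const info = await getWebGPUInfo();
  console.log(`WebGPU via ${info.adapter} (${info.device})`);
  return createGerbilWorker({ modelId: "qwen3-0.6b" });
}
```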

## Models

| Model | Size | Best For |
|-------|------|----------|
| `qwen3-0.6b` | ~400MB | General use, thinking mode |
| `smollm2-360m` | ~250MB | Faster, smaller |
| `smollm2-135m` | ~100MB | Fastest, basic tasks |

Models are cached in IndexedDB after first download.
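
If you want to pick a model per device, one option is the `navigator.deviceMemory` hint (a non-standard, Chromium-only API that may be undefined). A hypothetical helper, not part of the gerbil API:

```typescript
// Hypothetical: choose a smaller built-in model on low-memory devices.
// navigator.deviceMemory is non-standard and Chromium-only.
function pickModel(): string {
  const mem = (navigator as { deviceMemory?: number }).deviceMemory ?? 8;
  if (mem <= 2) return "smollm2-135m";
  if (mem <= 4) return "smollm2-360m";
  return "qwen3-0.6b";
}

// e.g. useChat({ model: pickModel() })
```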

## Browser Support

- **Chrome/Edge 113+** — Full WebGPU support
- **Safari 18+** — WebGPU support (may have quirks)
- **Firefox** — WebGPU behind a flag, not recommended

## Troubleshooting

### "WebGPU not supported"

- Update to Chrome/Edge 113+
- Check `chrome://gpu` for WebGPU status
- Try `chrome://flags/#enable-unsafe-webgpu`

### Slow first load

First load downloads the model (~400MB for qwen3-0.6b) and compiles WebGPU shaders. Subsequent loads use the IndexedDB cache (~2-5s).

### Out of memory

Use smaller models like `smollm2-135m`. Close other GPU-intensive tabs.
package/docs/cli.md ADDED
@@ -0,0 +1,155 @@
# Gerbil CLI

## Quick Start

```bash
# Without installing (one-off usage)
npx @tryhamster/gerbil                # Opens interactive REPL (default)
npx @tryhamster/gerbil "Your prompt"  # Quick text generation

# After installing globally
npm install -g @tryhamster/gerbil
gerbil                                # Opens interactive REPL
gerbil "Your prompt"                  # Quick text generation
```

> All examples below use `gerbil` (assumes a global install). If using `npx`, replace `gerbil` with `npx @tryhamster/gerbil`.

## Generate Text

```bash
gerbil "Write a haiku about code"
gerbil "Explain monads" --thinking
gerbil "Hello" -m qwen3-0.6b --stream
```

Options:
- `-m, --model <id>` - Model to use (default: qwen3-0.6b)
- `-n, --max-tokens <n>` - Max tokens (default: 256)
- `-t, --temperature <t>` - Temperature (default: 0.7)
- `-s, --system <text>` - System prompt
- `--thinking` - Enable thinking mode
- `--stream` - Stream output
- `--json` - Output as JSON

## Skills

```bash
gerbil commit                      # Commit message from staged changes
gerbil commit --type conventional  # Conventional commit format
gerbil commit --write              # Write to .git/COMMIT_EDITMSG

gerbil summarize README.md         # Summarize file
gerbil summarize README.md -l short

gerbil explain src/index.ts        # Explain code
gerbil explain src/index.ts -l beginner

gerbil review src/                 # Code review
gerbil review src/ -f security,performance
```

## Interactive REPL

```bash
gerbil             # Opens REPL (default when run with no args)
gerbil repl        # Same as above
gerbil repl --gpu  # Force WebGPU mode
gerbil repl --cpu  # Force CPU mode
```

Launches the interactive terminal dashboard with:
- Chat with multiple modes (Assistant, Coder, Teacher, Agent)
- Skills execution and creation
- Model management
- Benchmark testing (compare CPU vs GPU performance)
- Server management

### REPL Shortcuts

| Key | Action |
|-----|--------|
| `t` | Toggle thinking mode (Qwen3) |
| `a` | Toggle agent mode (tool calling) |
| `m` | Switch between CPU and GPU mode |
| `q` | Quit |
| `1-8` | Jump to menu item |

## Chat

Opens the REPL directly in chat view:

```bash
gerbil chat
gerbil chat --gpu  # Chat with WebGPU acceleration
```

This is equivalent to running `gerbil` (or `gerbil repl`) and then pressing `1` for Chat.
89
+ ## Server
90
+
91
+ ```bash
92
+ gerbil serve # HTTP server on :3000
93
+ gerbil serve -p 8080 # Custom port
94
+ gerbil serve --mcp # MCP server (stdio)
95
+ ```
96
+
97
+ HTTP endpoints:
98
+ - `POST /generate` - Generate text
99
+ - `GET /info` - Server info
100
+
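
A quick way to exercise the server from a script; the JSON field name (`prompt`) is an assumption, since this page doesn't document the request body shape:

```typescript
// Hedged sketch: call the server started by `gerbil serve`.
const res = await fetch("http://localhost:3000/generate", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ prompt: "Write a haiku about code" }), // field name assumed
});
console.log(await res.json());
```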

## Model Management

```bash
gerbil models                  # List built-in models
gerbil models --search "code"  # Search models
gerbil info                    # System info
gerbil info -m qwen3-0.6b      # Model info
```

## Updates

Gerbil checks for updates automatically but **never installs without your permission**.

```bash
gerbil update  # Check for and install updates
```

### Update Notifications

After running any command, if an update is available you'll see:

```
💡 Update available: v1.0.0 → v1.0.1
   Run 'gerbil update' to install
```

### In REPL

When using the REPL, available updates appear in the header:

```
gerbil 1.0.0 / Chat | Update v1.0.1 → Press u to update
```

Press **`u`** to install the update.

## Use Any Model

```bash
gerbil -m hf:microsoft/Phi-3-mini "Hello"
gerbil -m hf:Qwen/Qwen2.5-Coder-0.5B "Write a function"
```

## CI/CD Integration

```yaml
# .github/workflows/commit.yml
- name: Generate Commit Message
  run: npx @tryhamster/gerbil commit --write
```

Pre-commit hook:

```bash
npx husky add .husky/prepare-commit-msg "npx @tryhamster/gerbil commit --write"
```
package/docs/frameworks.md ADDED
@@ -0,0 +1,90 @@
# Framework Integrations

## Next.js

```typescript
// app/api/chat/route.ts
import { gerbil } from "@tryhamster/gerbil/next";

export const POST = gerbil.handler({ model: "qwen3-0.6b" });
```

## Express

```typescript
import express from "express";
import { gerbil } from "@tryhamster/gerbil/express";

const app = express();
app.use("/ai", gerbil());
app.listen(3000); // start the server

// Endpoints:
// POST /ai/generate
// POST /ai/stream
// POST /ai/json
// POST /ai/embed
```

## React Hooks

```typescript
import { useGerbil, useChat } from "@tryhamster/gerbil/react";

function Chat() {
  const { generate, isLoading } = useGerbil();

  const handleSubmit = async (prompt: string) => {
    const result = await generate(prompt);
    // ... render or store result
  };
}
```

## Fastify

```typescript
import Fastify from "fastify";
import { gerbilPlugin } from "@tryhamster/gerbil/fastify";

const app = Fastify();
app.register(gerbilPlugin, { prefix: "/ai" });
```

## Hono

```typescript
import { Hono } from "hono";
import { gerbil } from "@tryhamster/gerbil/hono";

const app = new Hono();
app.route("/ai", gerbil());
```

## tRPC

```typescript
import { initTRPC } from "@trpc/server";
import { gerbilRouter } from "@tryhamster/gerbil/trpc";

const t = initTRPC.create();

const appRouter = t.router({
  ai: gerbilRouter(t),
});
```

## LangChain

```typescript
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";
import { GerbilLLM, GerbilEmbeddings } from "@tryhamster/gerbil/langchain";

const llm = new GerbilLLM({ model: "qwen3-0.6b" });
const embeddings = new GerbilEmbeddings();

// Use with chains (classic LangChain JS API; import paths vary by version)
const template = PromptTemplate.fromTemplate("Answer briefly: {question}");
const chain = new LLMChain({ llm, prompt: template });
```

## LlamaIndex

```typescript
import { GerbilLLM } from "@tryhamster/gerbil/llamaindex";

const llm = new GerbilLLM({ model: "qwen3-0.6b" });
```