agentick 0.1.9 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +523 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +2 -0
- package/dist/index.js.map +1 -1
- package/package.json +4 -2
package/README.md
ADDED
|
@@ -0,0 +1,523 @@
|
|
|
1
|
+
# agentick
|
|
2
|
+
|
|
3
|
+
**React for AI agents.**
|
|
4
|
+
|
|
5
|
+
A React reconciler where the render target is a language model. No prompt templates, no YAML chains, no Jinja. You build the context window with JSX — the same components, hooks, and composition you already know — and the framework compiles it into what the model sees.
|
|
6
|
+
|
|
7
|
+
You're not configuring a chatbot. You're building the application through which the model sees and experiences the world.
|
|
8
|
+
|
|
9
|
+
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
|
|
10
|
+
|
|
11
|
+
```tsx
|
|
12
|
+
import { createApp, System, Timeline, Message, Section,
|
|
13
|
+
createTool, useContinuation } from "@agentick/core";
|
|
14
|
+
import { openai } from "@agentick/openai";
|
|
15
|
+
import { z } from "zod";
|
|
16
|
+
|
|
17
|
+
// Tools are components — they render state into model context
|
|
18
|
+
const Search = createTool({
|
|
19
|
+
name: "search",
|
|
20
|
+
description: "Search the knowledge base",
|
|
21
|
+
input: z.object({ query: z.string() }),
|
|
22
|
+
handler: async ({ query }, com) => {
|
|
23
|
+
const results = await knowledgeBase.search(query);
|
|
24
|
+
const sources = com.getState("sources") ?? [];
|
|
25
|
+
com.setState("sources", [...sources, ...results.map((r) => r.title)]);
|
|
26
|
+
return [{ type: "text", text: JSON.stringify(results) }];
|
|
27
|
+
},
|
|
28
|
+
// render() injects live state into the context window every tick
|
|
29
|
+
render: (tickState, com) => {
|
|
30
|
+
const sources = com.getState("sources");
|
|
31
|
+
return sources?.length ? (
|
|
32
|
+
<Section id="sources" audience="model">
|
|
33
|
+
Sources found so far: {sources.join(", ")}
|
|
34
|
+
</Section>
|
|
35
|
+
) : null;
|
|
36
|
+
},
|
|
37
|
+
});
|
|
38
|
+
|
|
39
|
+
// Agents are functions that return JSX
|
|
40
|
+
function ResearchAgent({ topic }: { topic: string }) {
|
|
41
|
+
// The model auto-continues when it makes tool calls.
|
|
42
|
+
// Hooks add your own stop conditions.
|
|
43
|
+
useContinuation((result) => {
|
|
44
|
+
if (result.tick >= 20) result.stop("too-many-ticks");
|
|
45
|
+
});
|
|
46
|
+
|
|
47
|
+
return (
|
|
48
|
+
<>
|
|
49
|
+
<System>
|
|
50
|
+
You are a research agent. Search thoroughly, then write a summary.
|
|
51
|
+
</System>
|
|
52
|
+
|
|
53
|
+
{/* You control exactly how conversation history renders */}
|
|
54
|
+
<Timeline>
|
|
55
|
+
{(history, pending) => <>
|
|
56
|
+
{history.map((entry, i) =>
|
|
57
|
+
i < history.length - 4
|
|
58
|
+
? <CompactMessage key={i} entry={entry} />
|
|
59
|
+
: <Message key={i} {...entry.message} />
|
|
60
|
+
)}
|
|
61
|
+
{pending.map((msg, i) => <Message key={`p-${i}`} {...msg.message} />)}
|
|
62
|
+
</>}
|
|
63
|
+
</Timeline>
|
|
64
|
+
|
|
65
|
+
<Search />
|
|
66
|
+
</>
|
|
67
|
+
);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
const model = openai({ model: "gpt-4o" });
|
|
71
|
+
const app = createApp(ResearchAgent, { model });
|
|
72
|
+
const result = await app.run({
|
|
73
|
+
props: { topic: "quantum computing" },
|
|
74
|
+
messages: [{ role: "user", content: [{ type: "text", text: "What's new in quantum computing?" }] }],
|
|
75
|
+
});
|
|
76
|
+
|
|
77
|
+
console.log(result.response);
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
## Why Agentick
|
|
81
|
+
|
|
82
|
+
Every other AI framework gives you a pipeline. A chain. A graph. You slot your prompt into a template, bolt on some tools, and hope the model figures it out.
|
|
83
|
+
|
|
84
|
+
Agentick gives you a **programming language for AI applications.** The context window is your canvas. Components compose into it. Tools render their state back into it. Hooks run arbitrary code between ticks — verify output, summarize history, gate continuation. The model's entire world is JSX that you control, down to how individual content blocks render.
|
|
85
|
+
|
|
86
|
+
There are no prompt templates because JSX _is_ the template language. There are no special abstractions between you and what the model sees — you build it, the framework compiles it, the model reads it. When the model calls a tool, your component re-renders. When you want older messages compressed, you write a component. When you need to verify output before continuing, you write a hook.
|
|
87
|
+
|
|
88
|
+
This is application development, not chatbot configuration.
|
|
89
|
+
|
|
90
|
+
## The Context Is Yours
|
|
91
|
+
|
|
92
|
+
The core insight: **only what you render gets sent to the model.** `<Timeline>` isn't a magic black box — it accepts a render function with `(history, pending)`, and you decide exactly how every message appears in the context window. Skip a message? The model never sees it. Rewrite it? That's what the model reads.
|
|
93
|
+
|
|
94
|
+
### Default — Just Works
|
|
95
|
+
|
|
96
|
+
```tsx
|
|
97
|
+
function SimpleAgent() {
|
|
98
|
+
return (
|
|
99
|
+
<>
|
|
100
|
+
<System>You are helpful.</System>
|
|
101
|
+
<Timeline />
|
|
102
|
+
</>
|
|
103
|
+
);
|
|
104
|
+
}
|
|
105
|
+
```
|
|
106
|
+
|
|
107
|
+
`<Timeline />` with no children renders conversation history with sensible defaults.
|
|
108
|
+
|
|
109
|
+
### Custom Rendering — Control What the Model Sees
|
|
110
|
+
|
|
111
|
+
The render function receives `history` (completed entries) and `pending` (messages queued this tick). Only what you return from this function enters the model's context:
|
|
112
|
+
|
|
113
|
+
```tsx
|
|
114
|
+
<Timeline>
|
|
115
|
+
{(history, pending) => <>
|
|
116
|
+
{history.map((entry, i) => {
|
|
117
|
+
const msg = entry.message;
|
|
118
|
+
const isOld = i < history.length - 6;
|
|
119
|
+
|
|
120
|
+
// Old user messages — drop images, keep text summaries
|
|
121
|
+
if (isOld && msg.role === "user") {
|
|
122
|
+
const textOnly = msg.content
|
|
123
|
+
.filter((b) => b.type === "text")
|
|
124
|
+
.map((b) => b.text)
|
|
125
|
+
.join(" ");
|
|
126
|
+
return <Message key={i} role="user">[Earlier: {textOnly.slice(0, 100)}...]</Message>;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
// Old assistant messages — collapse
|
|
130
|
+
if (isOld && msg.role === "assistant") {
|
|
131
|
+
return <Message key={i} role="assistant">[Previous response]</Message>;
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
// Recent messages — full fidelity
|
|
135
|
+
return <Message key={i} {...msg} />;
|
|
136
|
+
})}
|
|
137
|
+
{pending.map((msg, i) => <Message key={`p-${i}`} {...msg.message} />)}
|
|
138
|
+
</>}
|
|
139
|
+
</Timeline>
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
Images from 20 messages ago eating your context window? Render them as `[Image: beach sunset]`. Tool results from early in the conversation? Collapse them. Recent messages? Full detail. You write the function, you decide.
|
|
143
|
+
|
|
144
|
+
### Composability — It's React
|
|
145
|
+
|
|
146
|
+
That render logic getting complex? Extract it into a component. It's React — components compose:
|
|
147
|
+
|
|
148
|
+
```tsx
|
|
149
|
+
// A reusable component for rendering older messages compactly
|
|
150
|
+
function CompactMessage({ entry }: { entry: COMTimelineEntry }) {
|
|
151
|
+
const msg = entry.message;
|
|
152
|
+
|
|
153
|
+
// Walk content blocks — handle each type differently
|
|
154
|
+
const summary = msg.content.map((block) => {
|
|
155
|
+
switch (block.type) {
|
|
156
|
+
case "text": return block.text.slice(0, 80);
|
|
157
|
+
case "image": return `[Image: ${block.source?.description ?? "image"}]`;
|
|
158
|
+
case "tool_use": return `[Called ${block.name}]`;
|
|
159
|
+
case "tool_result": return `[Result from ${block.name}]`;
|
|
160
|
+
default: return "";
|
|
161
|
+
}
|
|
162
|
+
}).filter(Boolean).join(" | ");
|
|
163
|
+
|
|
164
|
+
return <Message role={msg.role}>{summary}</Message>;
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
// Use it in your Timeline
|
|
168
|
+
function Agent() {
|
|
169
|
+
return (
|
|
170
|
+
<>
|
|
171
|
+
<System>You are helpful.</System>
|
|
172
|
+
<Timeline>
|
|
173
|
+
{(history, pending) => <>
|
|
174
|
+
{history.map((entry, i) =>
|
|
175
|
+
i < history.length - 4
|
|
176
|
+
? <CompactMessage key={i} entry={entry} />
|
|
177
|
+
: <Message key={i} {...entry.message} />
|
|
178
|
+
)}
|
|
179
|
+
{pending.map((msg, i) => <Message key={`p-${i}`} {...msg.message} />)}
|
|
180
|
+
</>}
|
|
181
|
+
</Timeline>
|
|
182
|
+
</>
|
|
183
|
+
);
|
|
184
|
+
}
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
Or go further — you don't even need `<Timeline>`. Render the entire conversation as a single user message:
|
|
188
|
+
|
|
189
|
+
```tsx
|
|
190
|
+
function NarrativeAgent() {
|
|
191
|
+
return (
|
|
192
|
+
<>
|
|
193
|
+
<System>Continue the conversation.</System>
|
|
194
|
+
<Timeline>
|
|
195
|
+
{(history) => (
|
|
196
|
+
<User>
|
|
197
|
+
Here's what happened so far:{"\n"}
|
|
198
|
+
{history.map((e) => `${e.message.role}: ${extractText(e)}`).join("\n")}
|
|
199
|
+
</User>
|
|
200
|
+
)}
|
|
201
|
+
</Timeline>
|
|
202
|
+
</>
|
|
203
|
+
);
|
|
204
|
+
}
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
The framework doesn't care how you structure the context. Multiple messages, one message, XML, prose — anything that compiles to content blocks gets sent.
|
|
208
|
+
|
|
209
|
+
### Sections — Structured Context for the Model
|
|
210
|
+
|
|
211
|
+
```tsx
|
|
212
|
+
function AgentWithContext({ userId }: { userId: string }) {
|
|
213
|
+
const profile = useData("profile", () => fetchProfile(userId), [userId]);
|
|
214
|
+
|
|
215
|
+
return (
|
|
216
|
+
<>
|
|
217
|
+
<System>You are a support agent.</System>
|
|
218
|
+
|
|
219
|
+
<Section id="user-context" audience="model">
|
|
220
|
+
Customer: {profile?.name}, Plan: {profile?.plan}, Since: {profile?.joinDate}
|
|
221
|
+
</Section>
|
|
222
|
+
|
|
223
|
+
<Timeline />
|
|
224
|
+
<TicketTool />
|
|
225
|
+
</>
|
|
226
|
+
);
|
|
227
|
+
}
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
`<Section>` injects structured context that the model sees every tick — live data, computed state, whatever you need. The `audience` prop controls visibility (`"model"`, `"user"`, or `"all"`).
|
|
231
|
+
|
|
232
|
+
## Hooks Control Everything
|
|
233
|
+
|
|
234
|
+
Hooks are where the real power lives. They're real React hooks — `useState`, `useEffect`, `useMemo` — plus lifecycle hooks that fire at each phase of execution.
|
|
235
|
+
|
|
236
|
+
### `useContinuation` — Add Stop Conditions
|
|
237
|
+
|
|
238
|
+
The agent loop auto-continues when the model makes tool calls. `useContinuation` lets you add your own stop conditions:
|
|
239
|
+
|
|
240
|
+
```tsx
|
|
241
|
+
// Stop after a done marker
|
|
242
|
+
useContinuation((result) => !result.text?.includes("<DONE>"));
|
|
243
|
+
|
|
244
|
+
// Stop after too many ticks or too many tokens
|
|
245
|
+
useContinuation((result) => {
|
|
246
|
+
if (result.tick >= 10) { result.stop("max-ticks"); return false; }
|
|
247
|
+
if (result.usage && result.usage.totalTokens > 100_000) {
|
|
248
|
+
result.stop("token-budget"); return false;
|
|
249
|
+
}
|
|
250
|
+
});
|
|
251
|
+
```
|
|
252
|
+
|
|
253
|
+
### `useOnTickEnd` — Run Code After Every Model Response
|
|
254
|
+
|
|
255
|
+
`useContinuation` is sugar for `useOnTickEnd`. Use the full version when you need to do real work between ticks:
|
|
256
|
+
|
|
257
|
+
```tsx
|
|
258
|
+
function VerifiedAgent() {
|
|
259
|
+
useOnTickEnd(async (result) => {
|
|
260
|
+
// Log every tick
|
|
261
|
+
analytics.track("tick", { tokens: result.usage?.totalTokens });
|
|
262
|
+
|
|
263
|
+
// When the model is done (no more tool calls), verify before accepting
|
|
264
|
+
if (result.text && !result.toolCalls.length) {
|
|
265
|
+
const quality = await verifyWithModel(result.text);
|
|
266
|
+
if (!quality.acceptable) {
|
|
267
|
+
result.continue("failed-verification"); // force another tick
|
|
268
|
+
}
|
|
269
|
+
}
|
|
270
|
+
});
|
|
271
|
+
|
|
272
|
+
return (
|
|
273
|
+
<>
|
|
274
|
+
<System>Be accurate. Your responses will be verified.</System>
|
|
275
|
+
<Timeline />
|
|
276
|
+
</>
|
|
277
|
+
);
|
|
278
|
+
}
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
### Build Your Own Hooks
|
|
282
|
+
|
|
283
|
+
Custom hooks work exactly like React — they're just functions that call other hooks:
|
|
284
|
+
|
|
285
|
+
```tsx
|
|
286
|
+
// Reusable hook: stop after a token budget
|
|
287
|
+
function useTokenBudget(maxTokens: number) {
|
|
288
|
+
const [spent, setSpent] = useState(0);
|
|
289
|
+
|
|
290
|
+
useOnTickEnd((result) => {
|
|
291
|
+
const total = spent + (result.usage?.totalTokens ?? 0);
|
|
292
|
+
setSpent(total);
|
|
293
|
+
if (total > maxTokens) result.stop("budget-exceeded");
|
|
294
|
+
});
|
|
295
|
+
|
|
296
|
+
return spent;
|
|
297
|
+
}
|
|
298
|
+
|
|
299
|
+
// Reusable hook: verify output before finishing
|
|
300
|
+
function useVerifiedOutput(verifier: (text: string) => Promise<boolean>) {
|
|
301
|
+
useOnTickEnd(async (result) => {
|
|
302
|
+
if (!result.text || result.toolCalls.length > 0) return;
|
|
303
|
+
const ok = await verifier(result.text);
|
|
304
|
+
if (!ok) result.continue("failed-verification");
|
|
305
|
+
});
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
// Compose them — it's just functions
|
|
309
|
+
function CarefulAgent() {
|
|
310
|
+
const spent = useTokenBudget(50_000);
|
|
311
|
+
useVerifiedOutput(myVerifier);
|
|
312
|
+
|
|
313
|
+
return (
|
|
314
|
+
<>
|
|
315
|
+
<System>You have a token budget. Be concise.</System>
|
|
316
|
+
<Section id="budget" audience="model">Tokens used: {spent}</Section>
|
|
317
|
+
<Timeline />
|
|
318
|
+
</>
|
|
319
|
+
);
|
|
320
|
+
}
|
|
321
|
+
```
|
|
322
|
+
|
|
323
|
+
## Everything Is Dual-Use
|
|
324
|
+
|
|
325
|
+
`createTool` and `createAdapter` (used under the hood by `openai()`, `google()`, etc.) return objects that work both as JSX components and as direct function calls:
|
|
326
|
+
|
|
327
|
+
```tsx
|
|
328
|
+
const Search = createTool({ name: "search", ... });
|
|
329
|
+
const model = openai({ model: "gpt-4o" });
|
|
330
|
+
|
|
331
|
+
// As JSX — self-closing tags in the component tree
|
|
332
|
+
<model temperature={0.2} />
|
|
333
|
+
<Search />
|
|
334
|
+
|
|
335
|
+
// As direct calls — use programmatically
|
|
336
|
+
const handle = await model.generate(input);
|
|
337
|
+
const output = await Search.run({ query: "test" });
|
|
338
|
+
```
|
|
339
|
+
|
|
340
|
+
Context is maintained with AsyncLocalStorage, so tools and hooks can access session state from anywhere — no prop drilling required.
|
|
341
|
+
|
|
342
|
+
## More Examples
|
|
343
|
+
|
|
344
|
+
### One-Shot Run
|
|
345
|
+
|
|
346
|
+
```tsx
|
|
347
|
+
import { run, System, Timeline } from "@agentick/core";
|
|
348
|
+
import { openai } from "@agentick/openai";
|
|
349
|
+
|
|
350
|
+
const result = await run(
|
|
351
|
+
<><System>You are helpful.</System><Timeline /></>,
|
|
352
|
+
{ model: openai({ model: "gpt-4o" }), messages: [{ role: "user", content: [{ type: "text", text: "Hello!" }] }] },
|
|
353
|
+
);
|
|
354
|
+
console.log(result.response);
|
|
355
|
+
```
|
|
356
|
+
|
|
357
|
+
### Stateful Tool with Render
|
|
358
|
+
|
|
359
|
+
```tsx
|
|
360
|
+
const TodoTool = createTool({
|
|
361
|
+
name: "manage_todos",
|
|
362
|
+
description: "Add, complete, or list todos",
|
|
363
|
+
input: z.object({
|
|
364
|
+
action: z.enum(["add", "complete", "list"]),
|
|
365
|
+
text: z.string().optional(),
|
|
366
|
+
id: z.number().optional(),
|
|
367
|
+
}),
|
|
368
|
+
handler: async ({ action, text, id }) => {
|
|
369
|
+
if (action === "add") todos.push({ id: todos.length, text, done: false });
|
|
370
|
+
if (action === "complete") todos[id!].done = true;
|
|
371
|
+
return [{ type: "text", text: "Done." }];
|
|
372
|
+
},
|
|
373
|
+
// render() injects live state into the model's context every tick
|
|
374
|
+
render: () => (
|
|
375
|
+
<Section id="todos" audience="model">
|
|
376
|
+
Current todos: {JSON.stringify(todos)}
|
|
377
|
+
</Section>
|
|
378
|
+
),
|
|
379
|
+
});
|
|
380
|
+
```
|
|
381
|
+
|
|
382
|
+
The model sees the current todo list _every time it thinks_ — not just in the tool response, but as persistent context. When it decides what to do next, the state is right there.
|
|
383
|
+
|
|
384
|
+
### Multi-Turn Session
|
|
385
|
+
|
|
386
|
+
```tsx
|
|
387
|
+
const app = createApp(Agent, { model: openai({ model: "gpt-4o" }) });
|
|
388
|
+
const session = await app.session("conv-1");
|
|
389
|
+
|
|
390
|
+
const msg = (text: string) => ({ role: "user" as const, content: [{ type: "text" as const, text }] });
|
|
391
|
+
|
|
392
|
+
await session.send({ messages: [msg("Hi there!")] });
|
|
393
|
+
await session.send({ messages: [msg("Tell me a joke")] });
|
|
394
|
+
|
|
395
|
+
// Stream responses
|
|
396
|
+
for await (const event of session.send({ messages: [msg("Another one")] })) {
|
|
397
|
+
if (event.type === "content_delta") process.stdout.write(event.delta);
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
session.close();
|
|
401
|
+
```
|
|
402
|
+
|
|
403
|
+
### Dynamic Model Selection
|
|
404
|
+
|
|
405
|
+
Models are JSX components — conditionally render them to switch models mid-session:
|
|
406
|
+
|
|
407
|
+
```tsx
|
|
408
|
+
const gpt = openai({ model: "gpt-4o" });
|
|
409
|
+
const gemini = google({ model: "gemini-2.5-pro" });
|
|
410
|
+
|
|
411
|
+
function AdaptiveAgent({ task }: { task: string }) {
|
|
412
|
+
const needsCreativity = task.includes("creative");
|
|
413
|
+
|
|
414
|
+
return (
|
|
415
|
+
<>
|
|
416
|
+
{needsCreativity ? <gemini temperature={0.9} /> : <gpt temperature={0.2} />}
|
|
417
|
+
<System>Handle this task: {task}</System>
|
|
418
|
+
<Timeline />
|
|
419
|
+
</>
|
|
420
|
+
);
|
|
421
|
+
}
|
|
422
|
+
```
|
|
423
|
+
|
|
424
|
+
## Packages
|
|
425
|
+
|
|
426
|
+
| Package | Description |
|
|
427
|
+
| --------------------- | ------------------------------------------------------------ |
|
|
428
|
+
| `@agentick/core` | Reconciler, components, hooks, tools, sessions |
|
|
429
|
+
| `@agentick/kernel` | Execution kernel — procedures, context, middleware, channels |
|
|
430
|
+
| `@agentick/shared` | Platform-independent types and utilities |
|
|
431
|
+
| `@agentick/openai` | OpenAI adapter (GPT-4o, o1, etc.) |
|
|
432
|
+
| `@agentick/google` | Google AI adapter (Gemini) |
|
|
433
|
+
| `@agentick/ai-sdk` | Vercel AI SDK adapter (any provider) |
|
|
434
|
+
| `@agentick/gateway` | Multi-app server with auth, routing, and channels |
|
|
435
|
+
| `@agentick/express` | Express.js integration |
|
|
436
|
+
| `@agentick/nestjs` | NestJS integration |
|
|
437
|
+
| `@agentick/client` | TypeScript client for gateway connections |
|
|
438
|
+
| `@agentick/react` | React hooks for building UIs over sessions |
|
|
439
|
+
| `@agentick/devtools` | Fiber tree inspector, tick scrubber, token tracker |
|
|
440
|
+
| `@agentick/cli` | CLI for running agents |
|
|
441
|
+
| `@agentick/server` | Server utilities |
|
|
442
|
+
| `@agentick/socket.io` | Socket.IO transport |
|
|
443
|
+
|
|
444
|
+
```
|
|
445
|
+
┌─────────────────────────────────────────────────────────────────┐
|
|
446
|
+
│ Applications │
|
|
447
|
+
│ (express, nestjs, cli, user apps) │
|
|
448
|
+
└──────────────────────────┬──────────────────────────────────────┘
|
|
449
|
+
│
|
|
450
|
+
┌──────────────────────────┴──────────────────────────────────────┐
|
|
451
|
+
│ Framework Layer │
|
|
452
|
+
│ @agentick/core @agentick/gateway @agentick/client │
|
|
453
|
+
│ @agentick/express @agentick/devtools │
|
|
454
|
+
└──────────────────────────┬──────────────────────────────────────┘
|
|
455
|
+
│
|
|
456
|
+
┌──────────────────────────┴──────────────────────────────────────┐
|
|
457
|
+
│ Adapter Layer │
|
|
458
|
+
│ @agentick/openai @agentick/google @agentick/ai-sdk │
|
|
459
|
+
└──────────────────────────┬──────────────────────────────────────┘
|
|
460
|
+
│
|
|
461
|
+
┌──────────────────────────┴──────────────────────────────────────┐
|
|
462
|
+
│ Foundation Layer │
|
|
463
|
+
│ @agentick/kernel @agentick/shared │
|
|
464
|
+
│ (Node.js only) (Platform-independent) │
|
|
465
|
+
└─────────────────────────────────────────────────────────────────┘
|
|
466
|
+
```
|
|
467
|
+
|
|
468
|
+
## Adapters
|
|
469
|
+
|
|
470
|
+
Three built-in, same interface. Or build your own — implement `prepareInput`, `mapChunk`, `execute`, and `executeStream`. See [`packages/adapters/README.md`](packages/adapters/README.md).
|
|
471
|
+
|
|
472
|
+
```tsx
|
|
473
|
+
import { openai } from "@agentick/openai";
|
|
474
|
+
import { google } from "@agentick/google";
|
|
475
|
+
import { aiSdk } from "@agentick/ai-sdk";
|
|
476
|
+
|
|
477
|
+
const gpt = openai({ model: "gpt-4o" });
|
|
478
|
+
const gemini = google({ model: "gemini-2.5-pro" });
|
|
479
|
+
const sdk = aiSdk({ model: yourAiSdkModel });
|
|
480
|
+
```
|
|
481
|
+
|
|
482
|
+
## DevTools
|
|
483
|
+
|
|
484
|
+
```tsx
|
|
485
|
+
const app = createApp(Agent, { model, devTools: true });
|
|
486
|
+
```
|
|
487
|
+
|
|
488
|
+
Fiber tree inspector, tick-by-tick scrubber, token usage tracking, real-time execution timeline. Record full sessions for replay with `session({ recording: 'full' })`.
|
|
489
|
+
|
|
490
|
+
## Gateway
|
|
491
|
+
|
|
492
|
+
Deploy multiple apps behind a single server with auth, routing, and channel adapters:
|
|
493
|
+
|
|
494
|
+
```tsx
|
|
495
|
+
import { createGateway } from "@agentick/gateway";
|
|
496
|
+
|
|
497
|
+
const gateway = createGateway({
|
|
498
|
+
apps: { support: supportApp, sales: salesApp },
|
|
499
|
+
defaultApp: "support",
|
|
500
|
+
auth: { type: "token", token: process.env.API_TOKEN! },
|
|
501
|
+
});
|
|
502
|
+
```
|
|
503
|
+
|
|
504
|
+
## Quick Start
|
|
505
|
+
|
|
506
|
+
```bash
|
|
507
|
+
npm install agentick @agentick/openai zod
|
|
508
|
+
```
|
|
509
|
+
|
|
510
|
+
**TypeScript config** — add to `tsconfig.json`:
|
|
511
|
+
|
|
512
|
+
```json
|
|
513
|
+
{
|
|
514
|
+
"compilerOptions": {
|
|
515
|
+
"jsx": "react-jsx",
|
|
516
|
+
"jsxImportSource": "react"
|
|
517
|
+
}
|
|
518
|
+
}
|
|
519
|
+
```
|
|
520
|
+
|
|
521
|
+
## License
|
|
522
|
+
|
|
523
|
+
MIT
|
package/dist/index.d.ts
CHANGED
package/dist/index.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,gBAAgB,CAAC"}
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,gBAAgB,CAAC;AAC/B,cAAc,iBAAiB,CAAC;AAChC,cAAc,sBAAsB,CAAC"}
|
package/dist/index.js
CHANGED
package/dist/index.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,gBAAgB,CAAC"}
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,cAAc,gBAAgB,CAAC;AAC/B,cAAc,iBAAiB,CAAC;AAChC,cAAc,sBAAsB,CAAC"}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "agentick",
|
|
3
|
-
"version": "0.1.9",
|
|
3
|
+
"version": "0.2.0",
|
|
4
4
|
"description": "Build agents like you build apps.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"agent",
|
|
@@ -31,7 +31,9 @@
|
|
|
31
31
|
"access": "public"
|
|
32
32
|
},
|
|
33
33
|
"dependencies": {
|
|
34
|
-
"@agentick/core": "0.1.9"
|
|
34
|
+
"@agentick/core": "0.2.0",
|
|
35
|
+
"@agentick/agent": "0.2.0",
|
|
36
|
+
"@agentick/guardrails": "0.2.0"
|
|
35
37
|
},
|
|
36
38
|
"scripts": {
|
|
37
39
|
"build": "tsc -p tsconfig.build.json",
|