agentxjs 0.0.0-dev-20260312143810

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,293 @@
1
+ # agentxjs
2
+
3
+ Client SDK for building AI agent applications. Supports local, remote, and server modes through a unified fluent API.
4
+
5
+ ## Quick Start
6
+
7
+ ### Local Mode (Embedded)
8
+
9
+ Runs agents directly in your process. No server required.
10
+
11
+ ```typescript
12
+ import { createAgentX } from "agentxjs";
13
+ import { nodePlatform } from "@agentxjs/node-platform";
14
+ import { createMonoDriver } from "@agentxjs/mono-driver";
15
+
16
+ const createDriver = (config) => createMonoDriver({
17
+ ...config,
18
+ apiKey: process.env.ANTHROPIC_API_KEY,
19
+ options: { provider: "anthropic" },
20
+ });
21
+
22
+ const ax = createAgentX(nodePlatform({ createDriver }));
23
+
24
+ await ax.container.create("my-app");
25
+
26
+ const { record: image } = await ax.image.create({
27
+ containerId: "my-app",
28
+ systemPrompt: "You are a helpful assistant.",
29
+ });
30
+
31
+ const { agentId } = await ax.agent.create({ imageId: image.imageId });
32
+
33
+ ax.on("text_delta", (e) => process.stdout.write(e.data.text));
34
+ await ax.session.send(agentId, "Hello!");
35
+ ```
36
+
37
+ ### Remote Mode (WebSocket Client)
38
+
39
+ Connects to a running AgentX server. Same API surface.
40
+
41
+ ```typescript
42
+ import { createAgentX } from "agentxjs";
43
+
44
+ const ax = createAgentX();
45
+ const client = await ax.connect("ws://localhost:5200");
46
+
47
+ await client.container.create("my-app");
48
+ const { record: image } = await client.image.create({
49
+ containerId: "my-app",
50
+ systemPrompt: "You are a helpful assistant.",
51
+ });
52
+ const { agentId } = await client.agent.create({ imageId: image.imageId });
53
+
54
+ client.on("text_delta", (e) => process.stdout.write(e.data.text));
55
+ await client.session.send(agentId, "Hello!");
56
+ ```
57
+
58
+ ### Server Mode
59
+
60
+ Start an AgentX WebSocket server for remote clients.
61
+
62
+ ```typescript
63
+ import { createAgentX } from "agentxjs";
64
+ import { nodePlatform } from "@agentxjs/node-platform";
65
+
66
+ const ax = createAgentX(nodePlatform({ createDriver })); // createDriver: same factory as in the Local Mode example
67
+ const server = await ax.serve({ port: 5200 });
68
+ ```
69
+
70
+ ## API Reference
71
+
72
+ ### `createAgentX(config?): AgentXBuilder`
73
+
74
+ Creates an AgentX builder. Synchronous — returns immediately.
75
+
76
+ - **With config** (PlatformConfig): Local mode + `connect()` + `serve()`
77
+ - **Without config**: Only `connect()` available
78
+
79
+ ### AgentX Interface
80
+
81
+ ```typescript
82
+ interface AgentX {
83
+ readonly connected: boolean;
84
+ readonly events: EventBus;
85
+
86
+ // Namespaced operations
87
+ readonly container: ContainerNamespace;
88
+ readonly image: ImageNamespace;
89
+ readonly agent: AgentNamespace;
90
+ readonly session: SessionNamespace;
91
+ readonly presentation: PresentationNamespace;
92
+ readonly llm: LLMNamespace;
93
+
94
+ // Universal RPC
95
+ rpc<T = unknown>(method: string, params?: unknown): Promise<T>;
96
+
97
+ // Event subscription
98
+ on<T extends string>(type: T, handler: BusEventHandler): Unsubscribe;
99
+ onAny(handler: BusEventHandler): Unsubscribe;
100
+ subscribe(sessionId: string): void;
101
+
102
+ // Error handling
103
+ onError(handler: (error: AgentXError) => void): Unsubscribe;
104
+
105
+ // Lifecycle
106
+ disconnect(): Promise<void>;
107
+ dispose(): Promise<void>;
108
+ }
109
+
110
+ interface AgentXBuilder extends AgentX {
111
+ connect(serverUrl: string, options?: ConnectOptions): Promise<AgentX>;
112
+ serve(config?: ServeConfig): Promise<AgentXServer>;
113
+ }
114
+ ```
115
+
116
+ ### Namespace Operations
117
+
118
+ **container**:
119
+
120
+ - `create(containerId: string): Promise<ContainerCreateResponse>`
121
+ - `get(containerId: string): Promise<ContainerGetResponse>`
122
+ - `list(): Promise<ContainerListResponse>`
123
+
124
+ **image**:
125
+
126
+ - `create(params: { containerId, name?, description?, systemPrompt?, mcpServers?, customData? }): Promise<ImageCreateResponse>`
127
+ - `get(imageId: string): Promise<ImageGetResponse>`
128
+ - `list(containerId?: string): Promise<ImageListResponse>`
129
+ - `update(imageId: string, updates: { name?, description?, customData? }): Promise<ImageUpdateResponse>`
130
+ - `delete(imageId: string): Promise<BaseResponse>`
131
+ - `getMessages(imageId: string): Promise<Message[]>`
132
+
133
+ **agent**:
134
+
135
+ - `create(params: { imageId, agentId? }): Promise<AgentCreateResponse>`
136
+ - `get(agentId: string): Promise<AgentGetResponse>`
137
+ - `list(containerId?: string): Promise<AgentListResponse>`
138
+ - `destroy(agentId: string): Promise<BaseResponse>`
139
+
140
+ **session**:
141
+
142
+ - `send(agentId: string, content: string | unknown[]): Promise<MessageSendResponse>`
143
+ - `interrupt(agentId: string): Promise<BaseResponse>`
144
+ - `getMessages(agentId: string): Promise<Message[]>`
145
+
146
+ **presentation**:
147
+
148
+ - `create(agentId: string, options?: PresentationOptions): Promise<Presentation>`
149
+
150
+ **llm**:
151
+
152
+ - `create(params: { containerId, name, vendor, protocol, apiKey, baseUrl?, model? }): Promise<LLMProviderCreateResponse>`
153
+ - `get(id: string): Promise<LLMProviderGetResponse>`
154
+ - `list(containerId: string): Promise<LLMProviderListResponse>`
155
+ - `update(id: string, updates: { name?, apiKey?, baseUrl?, model? }): Promise<LLMProviderUpdateResponse>`
156
+ - `delete(id: string): Promise<BaseResponse>`
157
+ - `setDefault(id: string): Promise<BaseResponse>`
158
+ - `getDefault(containerId: string): Promise<LLMProviderDefaultResponse>`
159
+
160
+ Each LLM provider has a **vendor** (who provides the service — `anthropic`, `openai`, `deepseek`, `ollama`) and a **protocol** (API format — `anthropic` or `openai`). These are separate dimensions: e.g., DeepSeek uses vendor `"deepseek"` with protocol `"openai"`.
161
+
162
+ When creating an agent, the runtime validates that the container's default LLM provider protocol is supported by the driver.
163
+
164
+ ### Universal RPC
165
+
166
+ Transport-agnostic JSON-RPC entry point. Works in all modes — local dispatches to CommandHandler, remote forwards via WebSocket.
167
+
168
+ ```typescript
169
+ // Equivalent to ax.container.create("default")
170
+ await ax.rpc("container.create", { containerId: "default" });
171
+
172
+ // Equivalent to ax.image.list()
173
+ const { records } = await ax.rpc<{ records: ImageRecord[] }>("image.list");
174
+
175
+ // Useful for custom transport (e.g. Cloudflare Workers/DO)
176
+ const response = await ax.rpc(request.method, request.params);
177
+ ```
178
+
179
+ ### Error Handling
180
+
181
+ AgentX has two layers of error handling, serving different purposes:
182
+
183
+ | Layer | Purpose | Who uses it | How errors arrive |
184
+ | ----- | ------- | ----------- | ----------------- |
185
+ | **Presentation** | Show errors to end users in chat | UI developers | `ErrorConversation` in `state.conversations` |
186
+ | **`ax.onError`** | Programmatic monitoring & alerting | Platform operators | `AgentXError` callback |
187
+
188
+ Most applications only need the Presentation layer. `ax.onError` is for advanced scenarios like Sentry integration or custom circuit-breaker logic.
189
+
190
+ #### Presentation Errors (recommended)
191
+
192
+ When an LLM call fails (e.g., 403 Forbidden, network timeout), the error automatically appears in `state.conversations` as an `ErrorConversation`:
193
+
194
+ ```typescript
195
+ const presentation = await ax.presentation.create(agentId, {
196
+ onUpdate: (state) => {
197
+ for (const conv of state.conversations) {
198
+ if (conv.role === "user") {
199
+ renderUserMessage(conv);
200
+ } else if (conv.role === "assistant") {
201
+ renderAssistantMessage(conv);
202
+ } else if (conv.role === "error") {
203
+ // LLM errors show up here automatically
204
+ renderErrorMessage(conv.message);
205
+ // e.g. "403 Forbidden: Invalid API key"
206
+ }
207
+ }
208
+ },
209
+ });
210
+ ```
211
+
212
+ The flow is fully automatic — no extra code needed:
213
+
214
+ ```
215
+ LLM API fails → Driver emits error → Engine creates ErrorConversation
216
+ → Presentation state updates → onUpdate fires → UI renders error
217
+ ```
218
+
219
+ `state.streaming` resets to `null` and `state.status` returns to `"idle"`, so the UI naturally stops showing loading indicators.
220
+
221
+ #### `ax.onError` (advanced)
222
+
223
+ For monitoring, logging, or custom recovery logic. Receives structured `AgentXError` from all layers (driver, persistence, connection). Independent of Presentation — fires even without a Presentation instance.
224
+
225
+ ```typescript
226
+ ax.onError((error) => {
227
+ reportToSentry(error);
228
+ console.error(`[${error.category}] ${error.code}: ${error.message}`);
229
+ });
230
+ ```
231
+
232
+ **AgentXError properties:**
233
+
234
+ | Property | Type | Description |
235
+ | ------------- | -------- | ------------------------------------ |
236
+ | `code` | string | `DRIVER_ERROR`, `CIRCUIT_OPEN`, `PERSISTENCE_FAILED`, `CONNECTION_FAILED` |
237
+ | `category` | string | `"driver"` \| `"persistence"` \| `"connection"` \| `"runtime"` |
238
+ | `recoverable` | boolean | Whether the caller should retry |
239
+ | `context` | object | `{ agentId?, sessionId?, imageId? }` |
240
+ | `cause` | Error? | Original error |
241
+
242
+ **Built-in circuit breaker:** After 5 consecutive driver failures, the circuit opens and rejects new requests for 30s. This is automatic — no code required.
243
+
244
+ ### Stream Events
245
+
246
+ | Event | Data | Description |
247
+ | ------------------ | -------------------------- | ---------------------- |
248
+ | `message_start` | `{ messageId, model }` | Response begins |
249
+ | `text_delta` | `{ text }` | Incremental text chunk |
250
+ | `tool_use_start` | `{ toolCallId, toolName }` | Tool call begins |
251
+ | `input_json_delta` | `{ partialJson }` | Incremental tool input |
252
+ | `tool_result` | `{ toolCallId, result }` | Tool execution result |
253
+ | `message_stop` | `{ stopReason }` | Response complete |
254
+ | `error` | `{ message }` | Error during streaming |
255
+
256
+ > **Note:** If you use the Presentation API, you don't need to handle the `error` stream event — it is automatically converted to an `ErrorConversation` in `state.conversations`.
257
+
258
+ ### Presentation API
259
+
260
+ High-level UI state management. Aggregates raw stream events into structured conversation state — the recommended way to build chat UIs.
261
+
262
+ ```typescript
263
+ const presentation = await ax.presentation.create(agentId, {
264
+ onUpdate: (state) => {
265
+ // state.conversations — completed messages (user, assistant, and error)
266
+ // state.streaming — current streaming response (or null)
267
+ // state.status — "idle" | "thinking" | "responding" | "executing"
268
+ renderUI(state);
269
+ },
270
+ });
271
+
272
+ await presentation.send("What is the weather?");
273
+ const state = presentation.getState();
274
+ presentation.dispose();
275
+ ```
276
+
277
+ **Conversation types in `state.conversations`:**
278
+
279
+ | `role` | Type | Content |
280
+ | ------ | ---- | ------- |
281
+ | `"user"` | `UserConversation` | `blocks: [{ type: "text", content }]` |
282
+ | `"assistant"` | `AssistantConversation` | `blocks: [{ type: "text", content }, { type: "tool_use", ... }]` |
283
+ | `"error"` | `ErrorConversation` | `message: string` — the error description |
284
+
285
+ For custom state management, use the exported reducer:
286
+
287
+ ```typescript
288
+ import { presentationReducer, createInitialState, addUserConversation } from "agentxjs";
289
+
290
+ let state = createInitialState();
291
+ state = addUserConversation(state, "Hello");
292
+ state = presentationReducer(state, event); // pure function
293
+ ```