@directive-run/knowledge 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +63 -0
- package/ai/ai-adapters.md +250 -0
- package/ai/ai-agents-streaming.md +269 -0
- package/ai/ai-budget-resilience.md +235 -0
- package/ai/ai-communication.md +281 -0
- package/ai/ai-debug-observability.md +243 -0
- package/ai/ai-guardrails-memory.md +332 -0
- package/ai/ai-mcp-rag.md +288 -0
- package/ai/ai-multi-agent.md +274 -0
- package/ai/ai-orchestrator.md +227 -0
- package/ai/ai-security.md +293 -0
- package/ai/ai-tasks.md +261 -0
- package/ai/ai-testing-evals.md +378 -0
- package/api-skeleton.md +5 -0
- package/core/anti-patterns.md +382 -0
- package/core/constraints.md +263 -0
- package/core/core-patterns.md +228 -0
- package/core/error-boundaries.md +322 -0
- package/core/multi-module.md +315 -0
- package/core/naming.md +283 -0
- package/core/plugins.md +344 -0
- package/core/react-adapter.md +262 -0
- package/core/resolvers.md +357 -0
- package/core/schema-types.md +262 -0
- package/core/system-api.md +271 -0
- package/core/testing.md +257 -0
- package/core/time-travel.md +238 -0
- package/dist/index.cjs +111 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +10 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.js +102 -0
- package/dist/index.js.map +1 -0
- package/examples/ab-testing.ts +385 -0
- package/examples/ai-checkpoint.ts +509 -0
- package/examples/ai-guardrails.ts +319 -0
- package/examples/ai-orchestrator.ts +589 -0
- package/examples/async-chains.ts +287 -0
- package/examples/auth-flow.ts +371 -0
- package/examples/batch-resolver.ts +341 -0
- package/examples/checkers.ts +589 -0
- package/examples/contact-form.ts +176 -0
- package/examples/counter.ts +393 -0
- package/examples/dashboard-loader.ts +512 -0
- package/examples/debounce-constraints.ts +105 -0
- package/examples/dynamic-modules.ts +293 -0
- package/examples/error-boundaries.ts +430 -0
- package/examples/feature-flags.ts +220 -0
- package/examples/form-wizard.ts +347 -0
- package/examples/fraud-analysis.ts +663 -0
- package/examples/goal-heist.ts +341 -0
- package/examples/multi-module.ts +57 -0
- package/examples/newsletter.ts +241 -0
- package/examples/notifications.ts +210 -0
- package/examples/optimistic-updates.ts +317 -0
- package/examples/pagination.ts +260 -0
- package/examples/permissions.ts +337 -0
- package/examples/provider-routing.ts +403 -0
- package/examples/server.ts +316 -0
- package/examples/shopping-cart.ts +422 -0
- package/examples/sudoku.ts +630 -0
- package/examples/theme-locale.ts +204 -0
- package/examples/time-machine.ts +225 -0
- package/examples/topic-guard.ts +306 -0
- package/examples/url-sync.ts +333 -0
- package/examples/websocket.ts +404 -0
- package/package.json +65 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Jason Comes
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# @directive-run/knowledge
|
|
2
|
+
|
|
3
|
+
Knowledge files, examples, and validation scripts for the [Directive](https://directive.run) runtime.
|
|
4
|
+
|
|
5
|
+
This package is the **source of truth** for all Directive coding knowledge used by:
|
|
6
|
+
- `@directive-run/cli` — generates AI rules files (`.cursorrules`, `CLAUDE.md`, etc.)
|
|
7
|
+
- `@directive-run/claude-plugin` — builds Claude Code plugin skills
|
|
8
|
+
- `directive.run/llms.txt` — website LLM reference
|
|
9
|
+
|
|
10
|
+
## Contents
|
|
11
|
+
|
|
12
|
+
| Directory | Count | Description |
|
|
13
|
+
|-----------|-------|-------------|
|
|
14
|
+
| `core/` | 13 | Core Directive knowledge (modules, constraints, resolvers, etc.) |
|
|
15
|
+
| `ai/` | 12 | AI orchestrator knowledge (agents, streaming, guardrails, etc.) |
|
|
16
|
+
| `examples/` | 33 | Extracted examples (auto-generated, DOM wiring stripped) |
|
|
17
|
+
| `api-skeleton.md` | 1 | Auto-generated API reference skeleton |
|
|
18
|
+
|
|
19
|
+
## Programmatic API
|
|
20
|
+
|
|
21
|
+
```typescript
|
|
22
|
+
import {
|
|
23
|
+
getKnowledge,
|
|
24
|
+
getAllKnowledge,
|
|
25
|
+
getExample,
|
|
26
|
+
getAllExamples,
|
|
27
|
+
getKnowledgeFiles,
|
|
28
|
+
getExampleFiles,
|
|
29
|
+
clearCache,
|
|
30
|
+
} from "@directive-run/knowledge";
|
|
31
|
+
|
|
32
|
+
// Get a single knowledge file
|
|
33
|
+
const patterns = getKnowledge("core-patterns");
|
|
34
|
+
|
|
35
|
+
// Get multiple files joined with --- separator
|
|
36
|
+
const combined = getKnowledgeFiles(["constraints", "resolvers"]);
|
|
37
|
+
|
|
38
|
+
// Get all examples as a Map<name, content>
|
|
39
|
+
const examples = getAllExamples();
|
|
40
|
+
|
|
41
|
+
// Clear cached knowledge and examples (useful for dev/watch mode)
|
|
42
|
+
clearCache();
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## Scripts
|
|
46
|
+
|
|
47
|
+
```bash
|
|
48
|
+
pnpm --filter @directive-run/knowledge generate # Regenerate api-skeleton.md
|
|
49
|
+
pnpm --filter @directive-run/knowledge extract-examples # Re-extract examples
|
|
50
|
+
pnpm --filter @directive-run/knowledge validate # Validate symbol references
|
|
51
|
+
pnpm --filter @directive-run/knowledge test # Run all tests
|
|
52
|
+
pnpm --filter @directive-run/knowledge build # Full build (generate + extract + tsup)
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
## Adding Examples
|
|
56
|
+
|
|
57
|
+
Examples are **auto-discovered** from `examples/*/` in the repo root. The `extract-examples.ts` script:
|
|
58
|
+
1. Scans all example directories
|
|
59
|
+
2. Finds the best source file (prefers `<name>.ts` > `module.ts` > `main.ts`)
|
|
60
|
+
3. Strips DOM wiring code
|
|
61
|
+
4. Outputs clean TypeScript
|
|
62
|
+
|
|
63
|
+
To exclude an example, add it to `EXCLUDED_EXAMPLES` in `scripts/extract-examples.ts`.
|
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
# AI Adapters
|
|
2
|
+
|
|
3
|
+
Adapters connect the orchestrator to LLM providers. Each adapter normalizes provider-specific APIs into Directive's `AgentRunner` interface.
|
|
4
|
+
|
|
5
|
+
## Decision Tree: "Which adapter do I need?"
|
|
6
|
+
|
|
7
|
+
```
|
|
8
|
+
Which LLM provider?
|
|
9
|
+
├── Anthropic (Claude) → createAnthropicRunner from '@directive-run/ai/anthropic'
|
|
10
|
+
├── OpenAI (GPT) → createOpenAIRunner from '@directive-run/ai/openai'
|
|
11
|
+
├── Google (Gemini) → createGeminiRunner from '@directive-run/ai/gemini'
|
|
12
|
+
└── Ollama (local) → createOllamaRunner from '@directive-run/ai/ollama'
|
|
13
|
+
│
|
|
14
|
+
Need streaming?
|
|
15
|
+
├── Yes → create*StreamingRunner from the same subpath
|
|
16
|
+
└── No → create*Runner is sufficient
|
|
17
|
+
│
|
|
18
|
+
Need a proxy/self-hosted URL?
|
|
19
|
+
└── Yes → pass baseURL option
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## CRITICAL: Subpath Imports
|
|
23
|
+
|
|
24
|
+
Every adapter MUST be imported from its subpath. The main `@directive-run/ai` entry does NOT export adapters.
|
|
25
|
+
|
|
26
|
+
```typescript
|
|
27
|
+
// CORRECT — subpath imports
|
|
28
|
+
import { createAnthropicRunner } from "@directive-run/ai/anthropic";
|
|
29
|
+
import { createOpenAIRunner } from "@directive-run/ai/openai";
|
|
30
|
+
import { createOllamaRunner } from "@directive-run/ai/ollama";
|
|
31
|
+
import { createGeminiRunner } from "@directive-run/ai/gemini";
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
### Anti-Pattern #26: Importing adapters from the main entry
|
|
35
|
+
|
|
36
|
+
```typescript
|
|
37
|
+
// WRONG — adapters are NOT exported from the main package
|
|
38
|
+
import { createOpenAIRunner } from "@directive-run/ai";
|
|
39
|
+
import { createAnthropicRunner } from "@directive-run/ai";
|
|
40
|
+
|
|
41
|
+
// CORRECT — use subpath imports
|
|
42
|
+
import { createOpenAIRunner } from "@directive-run/ai/openai";
|
|
43
|
+
import { createAnthropicRunner } from "@directive-run/ai/anthropic";
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## Anthropic Adapter
|
|
47
|
+
|
|
48
|
+
```typescript
|
|
49
|
+
import {
|
|
50
|
+
createAnthropicRunner,
|
|
51
|
+
createAnthropicStreamingRunner,
|
|
52
|
+
} from "@directive-run/ai/anthropic";
|
|
53
|
+
|
|
54
|
+
// Standard runner
|
|
55
|
+
const runner = createAnthropicRunner({
|
|
56
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
57
|
+
defaultModel: "claude-sonnet-4-5",
|
|
58
|
+
maxTokens: 4096,
|
|
59
|
+
});
|
|
60
|
+
|
|
61
|
+
// Streaming runner
|
|
62
|
+
const streamingRunner = createAnthropicStreamingRunner({
|
|
63
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
64
|
+
defaultModel: "claude-sonnet-4-5",
|
|
65
|
+
maxTokens: 4096,
|
|
66
|
+
});
|
|
67
|
+
|
|
68
|
+
// With proxy URL
|
|
69
|
+
const proxiedRunner = createAnthropicRunner({
|
|
70
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
71
|
+
baseURL: "https://my-proxy.example.com/v1",
|
|
72
|
+
});
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
## OpenAI Adapter
|
|
76
|
+
|
|
77
|
+
```typescript
|
|
78
|
+
import {
|
|
79
|
+
createOpenAIRunner,
|
|
80
|
+
createOpenAIStreamingRunner,
|
|
81
|
+
} from "@directive-run/ai/openai";
|
|
82
|
+
|
|
83
|
+
const runner = createOpenAIRunner({
|
|
84
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
85
|
+
defaultModel: "gpt-4o",
|
|
86
|
+
organization: "org-xxx", // Optional
|
|
87
|
+
});
|
|
88
|
+
|
|
89
|
+
// Azure OpenAI
|
|
90
|
+
const azureRunner = createOpenAIRunner({
|
|
91
|
+
apiKey: process.env.AZURE_OPENAI_KEY,
|
|
92
|
+
baseURL: "https://my-instance.openai.azure.com/openai/deployments/gpt-4o",
|
|
93
|
+
defaultHeaders: {
|
|
94
|
+
"api-version": "2024-02-01",
|
|
95
|
+
},
|
|
96
|
+
});
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
## Ollama Adapter (Local Models)
|
|
100
|
+
|
|
101
|
+
```typescript
|
|
102
|
+
import {
|
|
103
|
+
createOllamaRunner,
|
|
104
|
+
createOllamaStreamingRunner,
|
|
105
|
+
} from "@directive-run/ai/ollama";
|
|
106
|
+
|
|
107
|
+
const runner = createOllamaRunner({
|
|
108
|
+
// Default: http://localhost:11434
|
|
109
|
+
baseURL: "http://localhost:11434",
|
|
110
|
+
defaultModel: "llama3.1",
|
|
111
|
+
});
|
|
112
|
+
|
|
113
|
+
// Remote Ollama instance
|
|
114
|
+
const remoteRunner = createOllamaRunner({
|
|
115
|
+
baseURL: "https://ollama.my-server.com",
|
|
116
|
+
defaultModel: "mistral",
|
|
117
|
+
});
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
## Gemini Adapter
|
|
121
|
+
|
|
122
|
+
```typescript
|
|
123
|
+
import {
|
|
124
|
+
createGeminiRunner,
|
|
125
|
+
createGeminiStreamingRunner,
|
|
126
|
+
} from "@directive-run/ai/gemini";
|
|
127
|
+
|
|
128
|
+
const runner = createGeminiRunner({
|
|
129
|
+
apiKey: process.env.GOOGLE_API_KEY,
|
|
130
|
+
defaultModel: "gemini-2.0-flash",
|
|
131
|
+
});
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
## Token Normalization
|
|
135
|
+
|
|
136
|
+
### Anti-Pattern #27: Assuming provider-specific token structure
|
|
137
|
+
|
|
138
|
+
```typescript
|
|
139
|
+
// WRONG — Anthropic returns { input_tokens, output_tokens } natively
|
|
140
|
+
// but adapters normalize this
|
|
141
|
+
const result = await runner.run(agent, prompt);
|
|
142
|
+
console.log(result.tokenUsage.input_tokens); // undefined!
|
|
143
|
+
|
|
144
|
+
// CORRECT — adapters normalize to camelCase
|
|
145
|
+
const result = await runner.run(agent, prompt);
|
|
146
|
+
console.log(result.tokenUsage.inputTokens); // number
|
|
147
|
+
console.log(result.tokenUsage.outputTokens); // number
|
|
148
|
+
console.log(result.totalTokens); // inputTokens + outputTokens
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
All adapters normalize token usage to the same shape regardless of provider:
|
|
152
|
+
|
|
153
|
+
```typescript
|
|
154
|
+
interface NormalizedTokenUsage {
|
|
155
|
+
inputTokens: number;
|
|
156
|
+
outputTokens: number;
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
// result.totalTokens = inputTokens + outputTokens
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
## Adapter Hooks
|
|
163
|
+
|
|
164
|
+
Every adapter supports lifecycle hooks for logging, metrics, or request modification:
|
|
165
|
+
|
|
166
|
+
```typescript
|
|
167
|
+
const runner = createAnthropicRunner({
|
|
168
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
169
|
+
hooks: {
|
|
170
|
+
// Called before every LLM API call
|
|
171
|
+
onBeforeCall: (agent, prompt, options) => {
|
|
172
|
+
console.log(`Calling ${agent.model} for ${agent.name}`);
|
|
173
|
+
metrics.increment("llm.calls");
|
|
174
|
+
},
|
|
175
|
+
|
|
176
|
+
// Called after every successful LLM API call
|
|
177
|
+
onAfterCall: (agent, result) => {
|
|
178
|
+
console.log(`${agent.name}: ${result.totalTokens} tokens`);
|
|
179
|
+
metrics.histogram("llm.tokens", result.totalTokens);
|
|
180
|
+
},
|
|
181
|
+
|
|
182
|
+
// Called on LLM API errors
|
|
183
|
+
onError: (agent, error) => {
|
|
184
|
+
console.error(`${agent.name} failed:`, error.message);
|
|
185
|
+
metrics.increment("llm.errors");
|
|
186
|
+
},
|
|
187
|
+
},
|
|
188
|
+
});
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
## Using Adapters with Orchestrators
|
|
192
|
+
|
|
193
|
+
```typescript
|
|
194
|
+
import { createAgentOrchestrator, createMultiAgentOrchestrator } from "@directive-run/ai";
|
|
195
|
+
import { createAnthropicRunner } from "@directive-run/ai/anthropic";
|
|
196
|
+
|
|
197
|
+
const runner = createAnthropicRunner({
|
|
198
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
199
|
+
});
|
|
200
|
+
|
|
201
|
+
// Single-agent
|
|
202
|
+
const orchestrator = createAgentOrchestrator({ runner });
|
|
203
|
+
|
|
204
|
+
// Multi-agent — same runner shared across all agents
|
|
205
|
+
const multiOrchestrator = createMultiAgentOrchestrator({
|
|
206
|
+
agents: { /* ... */ },
|
|
207
|
+
runner,
|
|
208
|
+
});
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
## Switching Adapters
|
|
212
|
+
|
|
213
|
+
Adapters are interchangeable. Switch providers by changing the import and config:
|
|
214
|
+
|
|
215
|
+
```typescript
|
|
216
|
+
// Before: Anthropic
|
|
217
|
+
import { createAnthropicRunner } from "@directive-run/ai/anthropic";
|
|
218
|
+
const runner = createAnthropicRunner({
|
|
219
|
+
apiKey: process.env.ANTHROPIC_API_KEY,
|
|
220
|
+
});
|
|
221
|
+
|
|
222
|
+
// After: OpenAI — same orchestrator, different runner
|
|
223
|
+
import { createOpenAIRunner } from "@directive-run/ai/openai";
|
|
224
|
+
const runner = createOpenAIRunner({
|
|
225
|
+
apiKey: process.env.OPENAI_API_KEY,
|
|
226
|
+
});
|
|
227
|
+
|
|
228
|
+
// Orchestrator code remains unchanged
|
|
229
|
+
const orchestrator = createAgentOrchestrator({ runner });
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
## Custom BaseURL Patterns
|
|
233
|
+
|
|
234
|
+
| Provider | Default BaseURL | Custom Use Case |
|
|
235
|
+
|---|---|---|
|
|
236
|
+
| Anthropic | `https://api.anthropic.com` | Corporate proxy, VPN relay |
|
|
237
|
+
| OpenAI | `https://api.openai.com/v1` | Azure OpenAI, LiteLLM proxy |
|
|
238
|
+
| Ollama | `http://localhost:11434` | Remote GPU server |
|
|
239
|
+
| Gemini | `https://generativelanguage.googleapis.com` | Regional endpoint |
|
|
240
|
+
|
|
241
|
+
## Quick Reference
|
|
242
|
+
|
|
243
|
+
| Adapter | Import Path | Key Options |
|
|
244
|
+
|---|---|---|
|
|
245
|
+
| Anthropic | `@directive-run/ai/anthropic` | `apiKey`, `defaultModel`, `maxTokens` |
|
|
246
|
+
| OpenAI | `@directive-run/ai/openai` | `apiKey`, `defaultModel`, `organization` |
|
|
247
|
+
| Ollama | `@directive-run/ai/ollama` | `baseURL`, `defaultModel` |
|
|
248
|
+
| Gemini | `@directive-run/ai/gemini` | `apiKey`, `defaultModel` |
|
|
249
|
+
|
|
250
|
+
All adapters support: `baseURL`, `defaultHeaders`, `hooks`, streaming variant.
|
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
# AI Agents and Streaming
|
|
2
|
+
|
|
3
|
+
Defines the AgentLike interface (what runners receive), RunResult (what runners return), StreamChunk union types, backpressure strategies, and SSE transport.
|
|
4
|
+
|
|
5
|
+
## Decision Tree: "How do I get output from an agent?"
|
|
6
|
+
|
|
7
|
+
```
|
|
8
|
+
Need the complete result?
|
|
9
|
+
├── Yes → orchestrator.run(agent, prompt) → RunResult
|
|
10
|
+
└── No, need incremental output
|
|
11
|
+
├── AsyncIterable → orchestrator.runStream(agent, prompt)
|
|
12
|
+
├── Callback-based → StreamingCallbackRunner
|
|
13
|
+
└── Server-Sent Events → createSSEResponse()
|
|
14
|
+
│
|
|
15
|
+
Backpressure concern?
|
|
16
|
+
├── Consumer is slow → strategy: "block"
|
|
17
|
+
├── Can drop tokens → strategy: "drop"
|
|
18
|
+
└── Default → strategy: "buffer"
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## AgentLike — What the Runner Receives
|
|
22
|
+
|
|
23
|
+
```typescript
|
|
24
|
+
interface AgentLike {
|
|
25
|
+
// Required — unique identifier
|
|
26
|
+
name: string;
|
|
27
|
+
|
|
28
|
+
// System prompt / instructions
|
|
29
|
+
instructions?: string;
|
|
30
|
+
|
|
31
|
+
// Model identifier (adapter-specific)
|
|
32
|
+
model?: string;
|
|
33
|
+
|
|
34
|
+
// Tools the agent can use
|
|
35
|
+
tools?: unknown[];
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
// Usage
|
|
39
|
+
const agent: AgentLike = {
|
|
40
|
+
name: "analyst",
|
|
41
|
+
instructions: "You analyze data and provide insights.",
|
|
42
|
+
model: "claude-sonnet-4-5",
|
|
43
|
+
tools: [searchTool, calculatorTool],
|
|
44
|
+
};
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## RunResult — What the Runner Returns
|
|
48
|
+
|
|
49
|
+
```typescript
|
|
50
|
+
interface RunResult<T = unknown> {
|
|
51
|
+
// The agent's final output
|
|
52
|
+
output: T;
|
|
53
|
+
|
|
54
|
+
// Full message history from this run
|
|
55
|
+
messages: Message[];
|
|
56
|
+
|
|
57
|
+
// Tool calls made during this run
|
|
58
|
+
toolCalls: ToolCall[];
|
|
59
|
+
|
|
60
|
+
// Total tokens consumed (input + output)
|
|
61
|
+
totalTokens: number;
|
|
62
|
+
|
|
63
|
+
// Detailed token breakdown
|
|
64
|
+
tokenUsage?: {
|
|
65
|
+
inputTokens: number;
|
|
66
|
+
outputTokens: number;
|
|
67
|
+
};
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// Usage
|
|
71
|
+
const result = await orchestrator.run(agent, "Analyze sales data");
|
|
72
|
+
console.log(result.output);
|
|
73
|
+
console.log(`Tokens used: ${result.totalTokens}`);
|
|
74
|
+
console.log(`Tool calls: ${result.toolCalls.length}`);
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
## StreamChunk Union Types
|
|
78
|
+
|
|
79
|
+
Each chunk from `runStream` is one of these discriminated union types:
|
|
80
|
+
|
|
81
|
+
```typescript
|
|
82
|
+
type StreamChunk =
|
|
83
|
+
// Text token from the model
|
|
84
|
+
| { type: "token"; data: string; tokenCount: number }
|
|
85
|
+
|
|
86
|
+
// Tool execution started
|
|
87
|
+
| { type: "tool_start"; tool: string; toolCallId: string; arguments: string }
|
|
88
|
+
|
|
89
|
+
// Tool execution completed
|
|
90
|
+
| { type: "tool_end"; tool: string; toolCallId: string; result: string }
|
|
91
|
+
|
|
92
|
+
// Complete message added to history
|
|
93
|
+
| { type: "message"; message: Message }
|
|
94
|
+
|
|
95
|
+
// Guardrail was triggered during streaming
|
|
96
|
+
| { type: "guardrail_triggered"; guardrailName: string; reason: string; stopped: boolean }
|
|
97
|
+
|
|
98
|
+
// Progress indicator
|
|
99
|
+
| { type: "progress"; phase: "starting" | "generating" | "tool_calling" | "finishing" }
|
|
100
|
+
|
|
101
|
+
// Stream completed
|
|
102
|
+
| { type: "done"; totalTokens: number; duration: number; droppedTokens: number }
|
|
103
|
+
|
|
104
|
+
// Error during streaming
|
|
105
|
+
| { type: "error"; error: Error };
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
## Consuming a Stream
|
|
109
|
+
|
|
110
|
+
```typescript
|
|
111
|
+
const stream = orchestrator.runStream(agent, "Write a report");
|
|
112
|
+
|
|
113
|
+
for await (const chunk of stream) {
|
|
114
|
+
switch (chunk.type) {
|
|
115
|
+
case "token":
|
|
116
|
+
process.stdout.write(chunk.data);
|
|
117
|
+
break;
|
|
118
|
+
case "tool_start":
|
|
119
|
+
console.log(`\nCalling tool: ${chunk.tool}`);
|
|
120
|
+
break;
|
|
121
|
+
case "tool_end":
|
|
122
|
+
console.log(`Tool result: ${chunk.result.slice(0, 100)}`);
|
|
123
|
+
break;
|
|
124
|
+
case "guardrail_triggered":
|
|
125
|
+
console.warn(`Guardrail ${chunk.guardrailName}: ${chunk.reason}`);
|
|
126
|
+
if (chunk.stopped) {
|
|
127
|
+
console.error("Stream stopped by guardrail");
|
|
128
|
+
}
|
|
129
|
+
break;
|
|
130
|
+
case "done":
|
|
131
|
+
console.log(`\nTokens: ${chunk.totalTokens}, Duration: ${chunk.duration}ms`);
|
|
132
|
+
break;
|
|
133
|
+
case "error":
|
|
134
|
+
console.error("Stream error:", chunk.error);
|
|
135
|
+
break;
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
## Backpressure Strategies
|
|
141
|
+
|
|
142
|
+
Control behavior when the consumer cannot keep up with token production:
|
|
143
|
+
|
|
144
|
+
```typescript
|
|
145
|
+
const bufferedStream = orchestrator.runStream(agent, "Generate long report", {
  backpressure: "buffer", // default — buffer all tokens in memory
});

const blockingStream = orchestrator.runStream(agent, "Generate long report", {
  backpressure: "block", // pause generation until consumer catches up
});

const droppingStream = orchestrator.runStream(agent, "Generate long report", {
  backpressure: "drop", // drop tokens consumer cannot process in time
});
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
| Strategy | Behavior | Use When |
|
|
159
|
+
|---|---|---|
|
|
160
|
+
| `"buffer"` | Buffers all tokens in memory | Consumer is slightly slow, memory is available |
|
|
161
|
+
| `"block"` | Pauses model generation | Consumer must process every token |
|
|
162
|
+
| `"drop"` | Drops unprocessed tokens | Real-time display, some loss acceptable |
|
|
163
|
+
|
|
164
|
+
When using `"drop"`, the `done` chunk reports `droppedTokens` count.
|
|
165
|
+
|
|
166
|
+
## StreamingCallbackRunner
|
|
167
|
+
|
|
168
|
+
For callback-based streaming (instead of AsyncIterable):
|
|
169
|
+
|
|
170
|
+
```typescript
|
|
171
|
+
import { createStreamingCallbackRunner } from "@directive-run/ai";
|
|
172
|
+
|
|
173
|
+
const callbackRunner = createStreamingCallbackRunner(runner, {
|
|
174
|
+
onToken: (token) => process.stdout.write(token),
|
|
175
|
+
onToolStart: (tool, id) => console.log(`Tool: ${tool}`),
|
|
176
|
+
onToolEnd: (tool, id, result) => console.log(`Result: ${result}`),
|
|
177
|
+
onComplete: (result) => console.log("Done:", result.totalTokens),
|
|
178
|
+
onError: (error) => console.error(error),
|
|
179
|
+
});
|
|
180
|
+
|
|
181
|
+
// Use in orchestrator
|
|
182
|
+
const orchestrator = createAgentOrchestrator({
|
|
183
|
+
runner: callbackRunner,
|
|
184
|
+
});
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
## SSE Transport (Server-Sent Events)
|
|
188
|
+
|
|
189
|
+
Pipe agent streaming to HTTP responses for web servers:
|
|
190
|
+
|
|
191
|
+
```typescript
|
|
192
|
+
import { createSSEResponse } from "@directive-run/ai";
|
|
193
|
+
|
|
194
|
+
// Express / Node HTTP handler
|
|
195
|
+
app.post("/api/chat", async (req, res) => {
|
|
196
|
+
const stream = orchestrator.runStream(agent, req.body.prompt);
|
|
197
|
+
|
|
198
|
+
// Creates a ReadableStream of SSE-formatted events
|
|
199
|
+
const sseResponse = createSSEResponse(stream);
|
|
200
|
+
|
|
201
|
+
res.setHeader("Content-Type", "text/event-stream");
|
|
202
|
+
res.setHeader("Cache-Control", "no-cache");
|
|
203
|
+
res.setHeader("Connection", "keep-alive");
|
|
204
|
+
|
|
205
|
+
for await (const event of sseResponse) {
|
|
206
|
+
res.write(event);
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
res.end();
|
|
210
|
+
});
|
|
211
|
+
```
|
|
212
|
+
|
|
213
|
+
Client-side consumption:
|
|
214
|
+
|
|
215
|
+
```typescript
|
|
216
|
+
// Note: EventSource only issues GET requests, so it cannot reach the POST
// endpoint above — use fetch with a streaming body reader instead.
const response = await fetch("/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ prompt }),
});

const reader = response.body.getReader();
const decoder = new TextDecoder();

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  for (const line of decoder.decode(value).split("\n")) {
    if (!line.startsWith("data: ")) continue;
    const chunk = JSON.parse(line.slice("data: ".length));
    if (chunk.type === "token") {
      appendToDisplay(chunk.data);
    }
  }
}
|
|
224
|
+
```
|
|
225
|
+
|
|
226
|
+
## Common Mistakes
|
|
227
|
+
|
|
228
|
+
### Not checking chunk.type before accessing fields
|
|
229
|
+
|
|
230
|
+
```typescript
|
|
231
|
+
// WRONG — not all chunks have .data
|
|
232
|
+
for await (const chunk of stream) {
|
|
233
|
+
console.log(chunk.data); // undefined for non-token chunks
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
// CORRECT — switch on chunk.type
|
|
237
|
+
for await (const chunk of stream) {
|
|
238
|
+
if (chunk.type === "token") {
|
|
239
|
+
console.log(chunk.data);
|
|
240
|
+
}
|
|
241
|
+
}
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
### Ignoring the stopped flag on guardrail chunks
|
|
245
|
+
|
|
246
|
+
```typescript
|
|
247
|
+
// WRONG — continuing after a stopping guardrail
|
|
248
|
+
case "guardrail_triggered":
|
|
249
|
+
console.log("Guardrail triggered, continuing...");
|
|
250
|
+
break;
|
|
251
|
+
|
|
252
|
+
// CORRECT — check if the stream was stopped
|
|
253
|
+
case "guardrail_triggered":
|
|
254
|
+
if (chunk.stopped) {
|
|
255
|
+
console.error(`Stopped by ${chunk.guardrailName}: ${chunk.reason}`);
|
|
256
|
+
// Handle stream termination
|
|
257
|
+
}
|
|
258
|
+
break;
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
## Quick Reference
|
|
262
|
+
|
|
263
|
+
| Type | Interface | Purpose |
|
|
264
|
+
|---|---|---|
|
|
265
|
+
| `AgentLike` | `{ name, instructions?, model?, tools? }` | Agent definition |
|
|
266
|
+
| `RunResult` | `{ output, messages, toolCalls, totalTokens }` | Complete run result |
|
|
267
|
+
| `StreamChunk` | Discriminated union (8 types) | Incremental output |
|
|
268
|
+
| `StreamingCallbackRunner` | Callback-based adapter | Alternative to AsyncIterable |
|
|
269
|
+
| `createSSEResponse` | SSE formatter | Web server streaming |
|