@agentic-eng/agent 0.1.0-beta.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/README.md +294 -14
  2. package/package.json +1 -1
package/README.md CHANGED
@@ -1,6 +1,11 @@
1
1
  # @agentic-eng/agent
2
2
 
3
- > Core agent primitives and runtime for EASA — Easy Agent System Architecture.
3
+ > Core agent primitives and runtime for [EASA](https://github.com/easa-framework/easa) — Easy Agent System Architecture.
4
+
5
+ [![npm](https://img.shields.io/npm/v/@agentic-eng/agent)](https://www.npmjs.com/package/@agentic-eng/agent)
6
+ [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
7
+
8
+ > **Beta** — API may change before 1.0. Feedback welcome!
4
9
 
5
10
  ## Installation
6
11
 
@@ -10,35 +15,310 @@ npm install @agentic-eng/agent
10
15
  pnpm add @agentic-eng/agent
11
16
  ```
12
17
 
13
- ## Usage
18
+ ## Quick Start
19
+
20
+ ### 1. Implement an LLM Provider
21
+
22
+ EASA ships with zero LLM dependencies — you bring your own backend:
23
+
24
+ ```typescript
25
+ import type { LLMProvider, Message, ChatResponse, ChatChunk } from '@agentic-eng/agent';
26
+
27
+ const myProvider: LLMProvider = {
28
+ async chat(messages: Message[]): Promise<ChatResponse> {
29
+ // Call OpenAI, Anthropic, local model, etc.
30
+ const response = await callYourLLM(messages);
31
+ return { message: { role: 'assistant', content: response.text } };
32
+ },
33
+
34
+ async *chatStream(messages: Message[]): AsyncIterable<ChatChunk> {
35
+ for await (const chunk of streamYourLLM(messages)) {
36
+ yield { delta: chunk.text, done: chunk.finished };
37
+ }
38
+ },
39
+ };
40
+ ```
41
+
42
+ ### 2. Create an Agent
14
43
 
15
44
  ```typescript
16
45
  import { Agent } from '@agentic-eng/agent';
17
46
 
18
47
  const agent = new Agent({
19
- name: 'my-agent',
20
- description: 'An example agent',
48
+ name: 'assistant',
49
+ provider: myProvider,
50
+ systemPrompt: 'You are a helpful assistant.',
51
+ });
52
+ ```
53
+
54
+ ### 3. Invoke
55
+
56
+ ```typescript
57
+ const result = await agent.invoke('What is the capital of Thailand?');
58
+ console.log(result.content); // "Bangkok is the capital of Thailand."
59
+ console.log(result.trace.totalIterations); // 1
60
+ ```
61
+
62
+ ## Reasoning Loop
63
+
64
+ The agent uses a JSON-based reasoning loop. The LLM responds with structured JSON at every step:
65
+
66
+ ```json
67
+ {
68
+ "action": "done | continue | tool_call",
69
+ "reasoning": "Internal chain-of-thought",
70
+ "content": "Output for the user",
71
+ "tool_call": { "name": "tool_name", "input": { ... } },
72
+ "memory": { "label": "Title", "content": "Knowledge to persist" }
73
+ }
74
+ ```
75
+
76
+ | Action | Behavior |
77
+ | --- | --- |
78
+ | `done` | Return final answer |
79
+ | `continue` | Next iteration with intermediate output fed back |
80
+ | `tool_call` | Execute tool, feed result back, next iteration |
81
+
82
+ Simple prompts resolve in 1 iteration. Complex tasks iterate (up to `maxIterations`, default 5) with the LLM deciding when it's done.
83
+
84
+ ## Tools
85
+
86
+ ```typescript
87
+ import { ToolRegistry } from '@agentic-eng/agent';
88
+ import type { Tool } from '@agentic-eng/agent';
89
+
90
+ const calculator: Tool = {
91
+ definition: {
92
+ name: 'calculator',
93
+ description: 'Evaluates arithmetic expressions.',
94
+ inputSchema: {
95
+ type: 'object',
96
+ properties: {
97
+ expression: { type: 'string', description: 'Math expression to evaluate' },
98
+ },
99
+ required: ['expression'],
100
+ },
101
+ },
102
+ async execute(input) {
103
+ const result = evaluate(input.expression as string);
104
+ return { toolName: 'calculator', success: true, output: String(result) };
105
+ },
106
+ };
107
+
108
+ const tools = new ToolRegistry();
109
+ tools.register(calculator);
110
+
111
+ const agent = new Agent({
112
+ name: 'math-agent',
113
+ provider: myProvider,
114
+ tools,
115
+ });
116
+
117
+ const result = await agent.invoke('What is 42 × 17?');
118
+ // Agent calls calculator tool, gets 714, returns formatted answer
119
+ ```
120
+
121
+ The agent uses a **hybrid schema approach** to save tokens:
122
+ 1. Every LLM call sends only tool names + descriptions (~10 tokens each)
123
+ 2. When a tool is needed, the full JSON Schema is injected on demand
124
+
125
+ ## Memory
126
+
127
+ ```typescript
128
+ import { FlatFileMemoryProvider } from '@agentic-eng/agent';
129
+
130
+ const agent = new Agent({
131
+ name: 'assistant',
132
+ provider: myProvider,
133
+ memory: new FlatFileMemoryProvider('./agent-memory'),
21
134
  });
22
135
  ```
23
136
 
24
- ## API
137
+ The LLM decides when to persist knowledge. Memories are stored as [KNL](https://github.com/knl-lang/knl) DATA blocks. Implement `MemoryProvider` for custom backends (vector DB, Redis, Postgres, etc.):
25
138
 
26
- ### `Agent`
139
+ ```typescript
140
+ import type { MemoryProvider, MemoryEntry } from '@agentic-eng/agent';
27
141
 
28
- The core agent class.
142
+ const customMemory: MemoryProvider = {
143
+ async store(agentName: string, entry: MemoryEntry) { /* ... */ },
144
+ async retrieve(agentName: string): Promise<MemoryEntry[]> { /* ... */ },
145
+ };
146
+ ```
147
+
148
+ ## Event Emission (Observability)
29
149
 
30
- #### Constructor
150
+ OTEL-aligned lifecycle events for debugging and monitoring:
31
151
 
32
152
  ```typescript
33
- new Agent(config: AgentConfig)
153
+ import { ConsoleEventEmitter } from '@agentic-eng/agent';
154
+
155
+ const agent = new Agent({
156
+ name: 'assistant',
157
+ provider: myProvider,
158
+ emitter: new ConsoleEventEmitter(),
159
+ });
34
160
  ```
35
161
 
36
- #### `AgentConfig`
162
+ Console output:
37
163
 
38
- | Property | Type | Required | Description |
39
- | --- | --- | --- | --- |
40
- | `name` | `string` | Yes | Unique name identifying the agent |
41
- | `description` | `string` | No | Description of what the agent does |
164
+ ```
165
+ [EASA] 14:23:05.123Z INVOKE agent="assistant" prompt="What is 42 × 17?"
166
+ [EASA] 14:23:05.124Z ITER iteration=1/5
167
+ [EASA] 14:23:05.125Z → LLM messages=3
168
+ [EASA] 14:23:05.830Z ← LLM tokens=142
169
+ [EASA] 14:23:05.831Z ⚙ TOOL tool="calculator"
170
+ [EASA] 14:23:05.832Z ⚙ TOOL✓ tool="calculator" success=true
171
+ [EASA] 14:23:06.201Z ✓ ITER iteration=2 action="done"
172
+ [EASA] 14:23:06.202Z ■ INVOKE agent="assistant" iterations=2 completed=true
173
+ ```
174
+
175
+ Implement `AgentEventEmitter` for OTEL, Datadog, or any custom backend:
176
+
177
+ ```typescript
178
+ import type { AgentEventEmitter, AgentEvent } from '@agentic-eng/agent';
179
+
180
+ const otelEmitter: AgentEventEmitter = {
181
+ emit(event: AgentEvent) {
182
+ tracer.startSpan(event.type, { attributes: event.data });
183
+ },
184
+ };
185
+ ```
186
+
187
+ ### Event Types
188
+
189
+ | Event | When |
190
+ | --- | --- |
191
+ | `agent.invoke.start` / `end` | Invoke lifecycle |
192
+ | `agent.invoke_stream.start` / `end` | Stream lifecycle |
193
+ | `agent.iteration.start` / `end` | Each reasoning iteration |
194
+ | `llm.call.start` / `end` | Each LLM API call |
195
+ | `tool.call.start` / `end` | Tool execution |
196
+ | `tool.schema.inject` | Full schema injected for a tool |
197
+ | `tool.not_found` | LLM requested unknown tool |
198
+ | `memory.store` | Knowledge persisted |
199
+ | `agent.error` | Any error during execution |
200
+
201
+ ## Streaming
202
+
203
+ For conversational responses without the reasoning loop:
204
+
205
+ ```typescript
206
+ for await (const chunk of agent.invokeStream('Tell me a story.')) {
207
+ process.stdout.write(chunk.delta);
208
+ }
209
+ ```
210
+
211
+ ## Error Handling
212
+
213
+ All errors extend `EasaError`:
214
+
215
+ ```typescript
216
+ import { MaxIterationsError, ProviderError } from '@agentic-eng/agent';
217
+
218
+ try {
219
+ await agent.invoke('Complex task');
220
+ } catch (error) {
221
+ if (error instanceof MaxIterationsError) {
222
+ console.log(`Gave up after ${error.iterationsCompleted} iterations`);
223
+ } else if (error instanceof ProviderError) {
224
+ console.log('LLM call failed:', error.cause);
225
+ }
226
+ }
227
+ ```
228
+
229
+ | Error | When |
230
+ | --- | --- |
231
+ | `AgentConfigError` | Invalid agent configuration |
232
+ | `ProviderError` | LLM provider call fails |
233
+ | `MaxIterationsError` | Reasoning loop exceeds limit |
234
+ | `ReasoningParseError` | LLM returns invalid JSON |
235
+ | `ToolExecutionError` | Tool execution fails |
236
+
237
+ ## API Reference
238
+
239
+ ### `AgentConfig`
240
+
241
+ ```typescript
242
+ interface AgentConfig {
243
+ name: string; // Required — unique agent name
244
+ provider: LLMProvider; // Required — your LLM backend
245
+ description?: string; // What this agent does
246
+ systemPrompt?: string; // Custom system prompt
247
+ defaultOptions?: ChatOptions; // Default LLM options (temperature, maxTokens, etc.)
248
+ maxIterations?: number; // Max reasoning iterations (default: 5)
249
+ memory?: MemoryProvider; // Knowledge persistence
250
+ tools?: ToolRegistry; // Available tools
251
+ emitter?: AgentEventEmitter; // Event emitter for observability
252
+ }
253
+ ```
254
+
255
+ ### `InvokeResult`
256
+
257
+ ```typescript
258
+ interface InvokeResult {
259
+ content: string; // Final answer
260
+ trace: ReasoningTrace; // Full reasoning trace (iterations, completed, totalIterations)
261
+ }
262
+ ```
263
+
264
+ ### `Agent` Methods
265
+
266
+ | Method | Returns | Description |
267
+ | --- | --- | --- |
268
+ | `invoke(prompt, options?)` | `Promise<InvokeResult>` | Run reasoning loop to completion |
269
+ | `invokeStream(prompt, options?)` | `AsyncIterable<ChatChunk>` | Stream a single-pass response |
270
+ | `getMessages()` | `Message[]` | Copy of conversation history |
271
+ | `clearHistory()` | `void` | Reset conversation |
272
+
273
+ ## Full Example
274
+
275
+ ```typescript
276
+ import {
277
+ Agent,
278
+ ToolRegistry,
279
+ FlatFileMemoryProvider,
280
+ ConsoleEventEmitter,
281
+ } from '@agentic-eng/agent';
282
+ import type { Tool, LLMProvider } from '@agentic-eng/agent';
283
+
284
+ // 1. Provider
285
+ const provider: LLMProvider = { /* your implementation */ };
286
+
287
+ // 2. Tools
288
+ const weatherTool: Tool = {
289
+ definition: {
290
+ name: 'weather',
291
+ description: 'Gets current weather for a city.',
292
+ inputSchema: {
293
+ type: 'object',
294
+ properties: { city: { type: 'string', description: 'City name' } },
295
+ required: ['city'],
296
+ },
297
+ },
298
+ async execute(input) {
299
+ const data = await fetchWeather(input.city as string);
300
+ return { toolName: 'weather', success: true, output: JSON.stringify(data) };
301
+ },
302
+ };
303
+
304
+ const tools = new ToolRegistry();
305
+ tools.register(weatherTool);
306
+
307
+ // 3. Agent
308
+ const agent = new Agent({
309
+ name: 'travel-assistant',
310
+ provider,
311
+ systemPrompt: 'You are a helpful travel planning assistant.',
312
+ tools,
313
+ memory: new FlatFileMemoryProvider('./memory'),
314
+ emitter: new ConsoleEventEmitter(),
315
+ maxIterations: 10,
316
+ });
317
+
318
+ // 4. Use
319
+ const result = await agent.invoke('What should I pack for Bangkok next week?');
320
+ console.log(result.content);
321
+ ```
42
322
 
43
323
  ## License
44
324
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@agentic-eng/agent",
3
- "version": "0.1.0-beta.1",
3
+ "version": "0.1.0",
4
4
  "description": "Core agent primitives and runtime for EASA — Easy Agent System Architecture.",
5
5
  "keywords": [
6
6
  "agent",