agent-pulse 1.2.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +69 -17
- package/dist/agent.js +4 -0
- package/dist/types.d.ts +1 -0
- package/package.json +4 -5
package/README.md
CHANGED
|
@@ -99,19 +99,19 @@ import { Agent, openAI, google, grok } from 'agent-pulse';
|
|
|
99
99
|
// OpenAI
|
|
100
100
|
const bot1 = new Agent({
|
|
101
101
|
name: 'gpt-bot',
|
|
102
|
-
provider: new openAI('gpt-
|
|
102
|
+
provider: new openAI('gpt-5.2')
|
|
103
103
|
});
|
|
104
104
|
|
|
105
105
|
// Google Gemini
|
|
106
106
|
const bot2 = new Agent({
|
|
107
107
|
name: 'gemini-bot',
|
|
108
|
-
provider: new google('gemini-
|
|
108
|
+
provider: new google('gemini-3-pro')
|
|
109
109
|
});
|
|
110
110
|
|
|
111
111
|
// xAI / Grok
|
|
112
112
|
const bot3 = new Agent({
|
|
113
113
|
name: 'grok-bot',
|
|
114
|
-
provider: new grok('grok-
|
|
114
|
+
provider: new grok('grok-4.2')
|
|
115
115
|
});
|
|
116
116
|
```
|
|
117
117
|
|
|
@@ -154,7 +154,8 @@ The `response` event and the `agent.run()` promise resolve to a standardized `Ag
|
|
|
154
154
|
|
|
155
155
|
```typescript
|
|
156
156
|
{
|
|
157
|
-
content: string | object, // The Markdown text or
|
|
157
|
+
content: string | object, // The Markdown text, parsed JSON, or tool result (if iterations=1)
|
|
158
|
+
message?: string, // The original LLM text response (useful when a tool is also called)
|
|
158
159
|
usage: {
|
|
159
160
|
input_tokens: number,
|
|
160
161
|
output_tokens: number,
|
|
@@ -167,6 +168,9 @@ The `response` event and the `agent.run()` promise resolve to a standardized `Ag
|
|
|
167
168
|
}
|
|
168
169
|
```
|
|
169
170
|
|
|
171
|
+
> [!NOTE]
|
|
172
|
+
> If an LLM responds with both text and a tool call (common in Gemini), `content` stays consistent with legacy behavior (holding the tool result), while the new `message` field preserves the original LLM text.
|
|
173
|
+
|
|
170
174
|
You can access token usage stats from the `usage` property.
|
|
171
175
|
|
|
172
176
|
## Error Codes
|
|
@@ -213,7 +217,7 @@ const summaryTool = {
|
|
|
213
217
|
|
|
214
218
|
const agent = new Agent({
|
|
215
219
|
name: 'intake',
|
|
216
|
-
provider: new google('gemini-
|
|
220
|
+
provider: new google('gemini-3-pro'),
|
|
217
221
|
tools: [summaryTool]
|
|
218
222
|
});
|
|
219
223
|
|
|
@@ -226,7 +230,18 @@ if (result.content?.type === 'INTENT_COMPLETE') {
|
|
|
226
230
|
}
|
|
227
231
|
```
|
|
228
232
|
|
|
229
|
-
#### Option B:
|
|
233
|
+
#### Option B: Handling Text + Tool (Gemini Style)
|
|
234
|
+
When using models like Gemini that often provide a text explanation *and* a tool call in one turn, use the `message` field to access the text.
|
|
235
|
+
|
|
236
|
+
```typescript
|
|
237
|
+
const result = await agent.run("Tell me a joke and then get the weather.");
|
|
238
|
+
|
|
239
|
+
// If weatherTool was called:
|
|
240
|
+
console.log(result.message); // "Sure! Here's a joke: ... Now, let me get the weather for you."
|
|
241
|
+
console.log(result.content); // { temp: 20, unit: 'celsius' } (The tool result)
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
#### Option C: Events (Side Effects)
|
|
230
245
|
Best for logging, UI updates, or real-time monitoring.
|
|
231
246
|
|
|
232
247
|
```typescript
|
|
@@ -248,7 +263,7 @@ By setting `max_tool_iterations`, the agent can autonomously call tools, receive
|
|
|
248
263
|
```typescript
|
|
249
264
|
const agent = new Agent({
|
|
250
265
|
name: 'researcher',
|
|
251
|
-
provider: new openAI('gpt-
|
|
266
|
+
provider: new openAI('gpt-5.2'),
|
|
252
267
|
tools: [weatherTool, searchTool],
|
|
253
268
|
max_tool_iterations: 5 // Allow up to 5 loop turns
|
|
254
269
|
});
|
|
@@ -265,7 +280,7 @@ If your agent is running on a server but needs the **client** to perform an acti
|
|
|
265
280
|
```typescript
|
|
266
281
|
const agent = new Agent({
|
|
267
282
|
name: 'account-mgr',
|
|
268
|
-
provider: new openAI('gpt-
|
|
283
|
+
provider: new openAI('gpt-5.2'),
|
|
269
284
|
tools: [requestConfirmationTool]
|
|
270
285
|
});
|
|
271
286
|
|
|
@@ -297,7 +312,7 @@ The same pattern works for Gemini! While Google's API uses a different internal
|
|
|
297
312
|
```typescript
|
|
298
313
|
const agent = new Agent({
|
|
299
314
|
name: 'gemini-agent',
|
|
300
|
-
provider: new google('gemini-
|
|
315
|
+
provider: new google('gemini-3-flash')
|
|
301
316
|
});
|
|
302
317
|
|
|
303
318
|
const final = await agent.run([
|
|
@@ -325,7 +340,7 @@ import { Agent, openAI } from 'agent-pulse';
|
|
|
325
340
|
|
|
326
341
|
const agent = new Agent({
|
|
327
342
|
name: 'analyst',
|
|
328
|
-
provider: new openAI('gpt-
|
|
343
|
+
provider: new openAI('gpt-5.2'),
|
|
329
344
|
// You can pass file paths (if handled by environment) or load content yourself
|
|
330
345
|
files: ['/path/to/data.txt']
|
|
331
346
|
});
|
|
@@ -347,7 +362,7 @@ const recipeSchema = z.object({
|
|
|
347
362
|
|
|
348
363
|
const agent = new Agent({
|
|
349
364
|
name: 'chef',
|
|
350
|
-
provider: new google('gemini-
|
|
365
|
+
provider: new google('gemini-3-pro'),
|
|
351
366
|
output_schema: recipeSchema
|
|
352
367
|
});
|
|
353
368
|
|
|
@@ -374,7 +389,7 @@ app.get('/chat', async (req, res) => {
|
|
|
374
389
|
|
|
375
390
|
const agent = new Agent({
|
|
376
391
|
name: 'web-bot',
|
|
377
|
-
provider: new openAI('gpt-
|
|
392
|
+
provider: new openAI('gpt-5.2')
|
|
378
393
|
});
|
|
379
394
|
|
|
380
395
|
// Connect agent events to the response stream
|
|
@@ -394,7 +409,7 @@ import { Agent, google } from 'agent-pulse';
|
|
|
394
409
|
|
|
395
410
|
const agent = new Agent({
|
|
396
411
|
name: 'researcher',
|
|
397
|
-
provider: new google('gemini-
|
|
412
|
+
provider: new google('gemini-3-flash'),
|
|
398
413
|
config: {
|
|
399
414
|
googleSearch: true
|
|
400
415
|
}
|
|
@@ -410,6 +425,47 @@ agent.on('response', (result) => {
|
|
|
410
425
|
await agent.run("Who won the Super Bowl in 2024?");
|
|
411
426
|
```
|
|
412
427
|
|
|
428
|
+
### 7. Image Generation
|
|
429
|
+
|
|
430
|
+
Generate images by setting the agent's model to an image generation model (e.g., `grok-imagine-image` for xAI or models like `gemini-3-pro` for Google).
|
|
431
|
+
|
|
432
|
+
#### xAI (Grok Imagine)
|
|
433
|
+
xAI uses `aspect_ratio` instead of `size`.
|
|
434
|
+
|
|
435
|
+
```typescript
|
|
436
|
+
import { Agent, grok } from 'agent-pulse';
|
|
437
|
+
|
|
438
|
+
const agent = new Agent({
|
|
439
|
+
name: 'artist',
|
|
440
|
+
provider: new grok('grok-imagine-image'),
|
|
441
|
+
config: {
|
|
442
|
+
aspect_ratio: '16:9', // Supported: "1:1", "16:9", "9:16", "4:3", "3:2", etc.
|
|
443
|
+
response_format: 'b64_json' // Get base64 data instead of temporary URLs
|
|
444
|
+
}
|
|
445
|
+
});
|
|
446
|
+
|
|
447
|
+
const result = await agent.run("A futuristic city skyline in neon colors.");
|
|
448
|
+
|
|
449
|
+
// result.content will contain markdown image strings:
|
|
450
|
+
// "![image](https://...)" or "![image](data:image/png;base64,...)"
|
|
451
|
+
```
|
|
452
|
+
|
|
453
|
+
#### Google (Gemini)
|
|
454
|
+
Gemini models can generate images as part of their response.
|
|
455
|
+
|
|
456
|
+
```typescript
|
|
457
|
+
import { Agent, google } from 'agent-pulse';
|
|
458
|
+
|
|
459
|
+
const agent = new Agent({
|
|
460
|
+
name: 'painter',
|
|
461
|
+
provider: new google('gemini-3-pro')
|
|
462
|
+
});
|
|
463
|
+
|
|
464
|
+
const result = await agent.run("Generate an image of a serene mountain lake.");
|
|
465
|
+
|
|
466
|
+
// result.content will contain the markdown image string.
|
|
467
|
+
```
|
|
468
|
+
|
|
413
469
|
## Extensibility: Custom Providers
|
|
414
470
|
|
|
415
471
|
To add a new provider (e.g. Anthropic, Mistral), create a class that implements the `LLMProvider` interface.
|
|
@@ -433,10 +489,6 @@ const agent = new Agent({
|
|
|
433
489
|
provider: new MyProvider('my-model')
|
|
434
490
|
});
|
|
435
491
|
```
|
|
436
|
-
## To locally link the package
|
|
437
|
-
|
|
438
|
-
1. Run `npm link` in the agent-pulse directory
|
|
439
|
-
2. Run `npm link agent-pulse --legacy-peer-deps` in your project directory
|
|
440
492
|
|
|
441
493
|
## License
|
|
442
494
|
|
package/dist/agent.js
CHANGED
|
@@ -39,6 +39,10 @@ class Agent extends events_1.EventEmitter {
|
|
|
39
39
|
iterations++;
|
|
40
40
|
const response = await this.provider.generate(this.config.system, messages, this.config.files, this.config.tools, this.config.config, this.config.output_schema, (token) => this.emit('token', token));
|
|
41
41
|
lastResponse = response;
|
|
42
|
+
// Capture the original text as the "message" (LLM's primary text response)
|
|
43
|
+
if (typeof response.content === 'string') {
|
|
44
|
+
lastResponse.message = response.content;
|
|
45
|
+
}
|
|
42
46
|
// Handle Tool Execution
|
|
43
47
|
if (response.tool_calls && this.config.tools) {
|
|
44
48
|
// Add Assistant's tool call message to history
|
package/dist/types.d.ts
CHANGED
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "agent-pulse",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.4.0",
|
|
4
4
|
"description": "A lightweight, agentic AI framework for JavaScript/TypeScript",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"types": "dist/index.d.ts",
|
|
@@ -26,13 +26,12 @@
|
|
|
26
26
|
"license": "MIT",
|
|
27
27
|
"repository": {
|
|
28
28
|
"type": "git",
|
|
29
|
-
"url": "git+https://github.com/mehere14/
|
|
30
|
-
"directory": "agent-pulse"
|
|
29
|
+
"url": "git+https://github.com/mehere14/agent-pulse-public.git"
|
|
31
30
|
},
|
|
32
31
|
"bugs": {
|
|
33
|
-
"url": "https://github.com/mehere14/
|
|
32
|
+
"url": "https://github.com/mehere14/agent-pulse-public/issues"
|
|
34
33
|
},
|
|
35
|
-
"homepage": "https://github.com/mehere14/
|
|
34
|
+
"homepage": "https://github.com/mehere14/agent-pulse-public#readme",
|
|
36
35
|
"dependencies": {
|
|
37
36
|
"@google/genai": "^1.33.0",
|
|
38
37
|
"dotenv": "^16.4.5",
|