@deepagents/agent 0.3.1 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,257 @@
1
+ # @deepagents/agent
2
+
3
+ A framework for building multi-agent AI systems with TypeScript. Create agents that use tools, coordinate through handoffs, and work together to solve complex tasks.
4
+
5
+ ## Features
6
+
7
+ - **Agent Composition** - Build modular agents with specific roles and capabilities
8
+ - **Tool Integration** - Compatible with Vercel AI SDK tools
9
+ - **Handoffs** - Agents can delegate to specialized agents automatically
10
+ - **Structured Output** - Type-safe responses with Zod schemas
11
+ - **Streaming** - Real-time streaming responses
12
+ - **Context Sharing** - Type-safe state passed between agents
13
+
14
+ ## Installation
15
+
16
+ ```bash
17
+ npm install @deepagents/agent
18
+ ```
19
+
20
+ Requires Node.js LTS (20+) and a `zod` peer dependency.
21
+
22
+ ## Quick Start
23
+
24
+ ### Simple Agent
25
+
26
+ ```typescript
27
+ import { agent, execute, user } from '@deepagents/agent';
28
+ import { openai } from '@ai-sdk/openai';
29
+
30
+ const assistant = agent({
31
+ name: 'assistant',
32
+ model: openai('gpt-4o'),
33
+ prompt: 'You are a helpful assistant.',
34
+ });
35
+
36
+ const stream = execute(assistant, 'Hello!', {});
37
+
38
+ for await (const chunk of stream.textStream) {
39
+ process.stdout.write(chunk);
40
+ }
41
+ ```
42
+
43
+ ### Agent with Tools
44
+
45
+ ```typescript
46
+ import { agent, execute } from '@deepagents/agent';
47
+ import { openai } from '@ai-sdk/openai';
48
+ import { tool } from 'ai';
49
+ import { z } from 'zod';
50
+
51
+ const weatherTool = tool({
52
+ description: 'Get weather for a location',
53
+ parameters: z.object({
54
+ location: z.string(),
55
+ }),
56
+ execute: async ({ location }) => {
57
+ return { temperature: 72, condition: 'sunny', location };
58
+ },
59
+ });
60
+
61
+ const weatherAgent = agent({
62
+ name: 'weather_agent',
63
+ model: openai('gpt-4o'),
64
+ prompt: 'You help users check the weather.',
65
+ tools: { weather: weatherTool },
66
+ });
67
+
68
+ const stream = execute(weatherAgent, 'What is the weather in Tokyo?', {});
69
+ console.log(await stream.text);
70
+ ```
71
+
72
+ ### Multi-Agent with Handoffs
73
+
74
+ ```typescript
75
+ import { agent, instructions, swarm } from '@deepagents/agent';
76
+ import { openai } from '@ai-sdk/openai';
77
+
78
+ const researcher = agent({
79
+ name: 'researcher',
80
+ model: openai('gpt-4o'),
81
+ prompt: 'You research topics and provide detailed information.',
82
+ handoffDescription: 'Handles research and fact-finding tasks',
83
+ tools: { /* research tools */ },
84
+ });
85
+
86
+ const writer = agent({
87
+ name: 'writer',
88
+ model: openai('gpt-4o'),
89
+ prompt: 'You write clear, engaging content based on research.',
90
+ handoffDescription: 'Handles writing and content creation',
91
+ });
92
+
93
+ const coordinator = agent({
94
+ name: 'coordinator',
95
+ model: openai('gpt-4o'),
96
+ prompt: instructions({
97
+ purpose: ['Coordinate research and writing tasks'],
98
+ routine: [
99
+ 'Analyze the request',
100
+ 'Use transfer_to_researcher for fact-finding',
101
+ 'Use transfer_to_writer for content creation',
102
+ ],
103
+ }),
104
+ handoffs: [researcher, writer],
105
+ });
106
+
107
+ // Agents automatically transfer control via transfer_to_<agent_name> tools
108
+ const stream = swarm(coordinator, 'Write a blog post about AI agents', {});
109
+ ```
110
+
111
+ ### Structured Output
112
+
113
+ ```typescript
114
+ import { agent, generate } from '@deepagents/agent';
115
+ import { openai } from '@ai-sdk/openai';
116
+ import { z } from 'zod';
117
+
118
+ const analyzer = agent({
119
+ name: 'analyzer',
120
+ model: openai('gpt-4o'),
121
+ prompt: 'Analyze the sentiment of the given text.',
122
+ output: z.object({
123
+ sentiment: z.enum(['positive', 'negative', 'neutral']),
124
+ confidence: z.number(),
125
+ keywords: z.array(z.string()),
126
+ }),
127
+ });
128
+
129
+ const result = await generate(analyzer, 'I love this product!', {});
130
+ console.log(result.experimental_output);
131
+ // { sentiment: 'positive', confidence: 0.95, keywords: ['love', 'product'] }
132
+ ```
133
+
134
+ ### Context Variables
135
+
136
+ Share state between agents and tools:
137
+
138
+ ```typescript
139
+ import { agent, execute, toState } from '@deepagents/agent';
140
+ import { tool } from 'ai';
141
+ import { z } from 'zod';
142
+
143
+ interface AppContext {
144
+ userId: string;
145
+ preferences: Record<string, string>;
146
+ }
147
+
148
+ const preferenceTool = tool({
149
+ description: 'Save user preference',
150
+ parameters: z.object({
151
+ key: z.string(),
152
+ value: z.string(),
153
+ }),
154
+ execute: async ({ key, value }, options) => {
155
+ const ctx = toState<AppContext>(options);
156
+ ctx.preferences[key] = value;
157
+ return `Saved ${key}=${value}`;
158
+ },
159
+ });
160
+
161
+ const assistant = agent<unknown, AppContext>({
162
+ name: 'assistant',
163
+ model: openai('gpt-4o'),
164
+ prompt: (ctx) => `Help user ${ctx?.userId} manage their preferences.`,
165
+ tools: { savePreference: preferenceTool },
166
+ });
167
+
168
+ const context: AppContext = { userId: 'user123', preferences: {} };
169
+ const stream = execute(assistant, 'Set my theme to dark', context);
170
+ await stream.text;
171
+
172
+ console.log(context.preferences); // { theme: 'dark' }
173
+ ```
174
+
175
+ ## API Reference
176
+
177
+ ### `agent(config)`
178
+
179
+ Creates a new agent:
180
+
181
+ ```typescript
182
+ agent<Output, ContextIn, ContextOut>({
183
+ name: string; // Agent identifier (required)
184
+ model: LanguageModel; // AI SDK model (required)
185
+ prompt: string | string[] | ((ctx?) => string); // Instructions
186
+ tools?: ToolSet; // Available tools
187
+ handoffs?: Agent[]; // Agents to delegate to
188
+ handoffDescription?: string; // When to use this agent
189
+ output?: z.Schema<Output>; // Structured output schema
190
+ temperature?: number; // LLM temperature
191
+ toolChoice?: ToolChoice; // 'auto' | 'none' | 'required'
192
+ })
193
+ ```
194
+
195
+ ### `instructions({ purpose, routine })`
196
+
197
+ Helper for structured prompts:
198
+
199
+ ```typescript
200
+ instructions({
201
+ purpose: string | string[], // Agent's role
202
+ routine: string[], // Step-by-step workflow
203
+ })
204
+
205
+ // For swarm coordination
206
+ instructions.swarm({ purpose, routine })
207
+ ```
208
+
209
+ ### Execution Functions
210
+
211
+ ```typescript
212
+ // Streaming execution
213
+ execute(agent, messages, context, config?)
214
+ stream(agent, messages, context, config?) // alias
215
+
216
+ // Non-streaming execution
217
+ generate(agent, messages, context, config?)
218
+
219
+ // High-level UI streaming with handoff support
220
+ swarm(agent, messages, context, abortSignal?)
221
+ ```
222
+
223
+ ### Utilities
224
+
225
+ ```typescript
226
+ // Create user message
227
+ user(message: string): UIMessage
228
+
229
+ // Access context in tools
230
+ toState<T>(options: ToolCallOptions): T
231
+
232
+ // Extract structured output
233
+ toOutput<T>(result): Promise<T>
234
+ ```
235
+
236
+ ## AI Model Providers
237
+
238
+ Works with any model provider supported by the [Vercel AI SDK](https://sdk.vercel.ai/docs):
239
+
240
+ ```typescript
241
+ import { openai } from '@ai-sdk/openai';
242
+ import { anthropic } from '@ai-sdk/anthropic';
243
+ import { google } from '@ai-sdk/google';
244
+ import { groq } from '@ai-sdk/groq';
245
+
246
+ const agent1 = agent({ model: openai('gpt-4o'), /* ... */ });
247
+ const agent2 = agent({ model: anthropic('claude-sonnet-4-20250514'), /* ... */ });
248
+ const agent3 = agent({ model: google('gemini-1.5-pro'), /* ... */ });
249
+ ```
250
+
251
+ ## Documentation
252
+
253
+ Full documentation available at [januarylabs.github.io/deepagents](https://januarylabs.github.io/deepagents/docs/agent).
254
+
255
+ ## Repository
256
+
257
+ [github.com/JanuaryLabs/deepagents](https://github.com/JanuaryLabs/deepagents)
package/dist/index.js CHANGED
@@ -406,6 +406,100 @@ function toOutput(result) {
406
406
  function toState(options) {
407
407
  return options.experimental_context;
408
408
  }
409
/**
 * Builds a chunking callback (for smoothStream's `chunking` option) that
 * holds back streamed text until a complete HTML element has arrived, and
 * otherwise releases text one whitespace-terminated word at a time.
 *
 * The returned function takes the current text buffer and returns the
 * prefix that is safe to emit, or `null` to wait for more input.
 */
function htmlElementChunking() {
  const WORD_REGEX = /\S+\s+/m;
  // Returns the first whitespace-terminated word of `text`, or null when
  // no complete word (word + trailing whitespace) is present yet.
  const nextWord = (text) => {
    const match = WORD_REGEX.exec(text);
    return match ? text.slice(0, match.index + match[0].length) : null;
  };
  return (buffer) => {
    if (buffer.startsWith("<") && /^<[a-z]/i.test(buffer)) {
      // Tag name may still be streaming in ("<di…"): wait for more input.
      if (/^<[a-z][a-z0-9-]*$/i.test(buffer)) {
        return null;
      }
      // Tag name is complete (followed by whitespace, "/", or ">").
      const tagMatch = /^<([a-z][a-z0-9]*(?:-[a-z0-9]+)*)([\s/>])/i.exec(buffer);
      if (tagMatch) {
        const end = findElementEnd(buffer, tagMatch[1]);
        // Element not closed yet -> hold; otherwise emit it whole.
        return end === -1 ? null : buffer.slice(0, end);
      }
    }
    const tagStart = buffer.indexOf("<");
    if (tagStart > 0) {
      // Plain text precedes a tag: emit the next word of it, or all of it
      // when it holds no complete word, so the tag starts a fresh buffer.
      const leadingText = buffer.slice(0, tagStart);
      return nextWord(leadingText) ?? leadingText;
    }
    // Pure text buffer: word-by-word emission.
    return nextWord(buffer);
  };
}
446
/**
 * Scans `buffer` (which starts with an opening `<elementName…` tag) for the
 * end of that element and returns the index one past it, or -1 when the
 * element is not yet complete.
 *
 * Handles: self-closing tags (`<br/>`), quoted attribute values (", ', `)
 * so `>` inside attributes is ignored, backslash escapes inside the tag
 * (unusual for HTML, preserved from the original behavior), and nested
 * same-name elements via a depth counter. Matching is case-insensitive.
 */
function findElementEnd(buffer, elementName) {
  const nameEndIndex = buffer.indexOf(elementName) + elementName.length;
  // Hoisted loop invariants: the closing/opening tag forms we compare against.
  const lowerName = elementName.toLowerCase();
  const closingTag = `</${lowerName}>`;
  const closingTagLength = elementName.length + 3;
  const openingTagLength = elementName.length + 1;
  let inQuote = null;
  let escaped = false;
  let openTagClosed = false;
  let depth = 0;
  for (let i = nameEndIndex; i < buffer.length; i++) {
    const char = buffer[i];
    const prevChar = i > 0 ? buffer[i - 1] : "";
    if (escaped) {
      escaped = false;
      continue;
    }
    if (char === "\\") {
      escaped = true;
      continue;
    }
    if (inQuote) {
      if (char === inQuote) {
        inQuote = null;
      }
      continue;
    }
    if (char === '"' || char === "'" || char === "`") {
      inQuote = char;
      continue;
    }
    if (!openTagClosed) {
      // Self-closing tag ends the element immediately.
      if (char === ">" && prevChar === "/") {
        return i + 1;
      }
      if (char === ">") {
        openTagClosed = true;
      }
      continue;
    }
    const potentialClosingTag = buffer.slice(i, i + closingTagLength);
    if (potentialClosingTag.toLowerCase() === closingTag) {
      if (depth === 0) {
        return i + closingTagLength;
      }
      // Closing a nested element: pop one level and skip past the tag.
      depth--;
      i += closingTagLength - 1;
      continue;
    }
    const potentialOpeningTag = buffer.slice(i, i + openingTagLength);
    if (potentialOpeningTag.toLowerCase() === `<${lowerName}`) {
      const afterName = buffer[i + openingTagLength];
      // FIX: the original compared against " " twice and so missed tab/CR
      // terminators; any whitespace, "/", or ">" ends the tag name, meaning
      // this is a nested same-name element we must depth-track.
      if (afterName !== undefined && /[\s/>]/.test(afterName)) {
        depth++;
      }
    }
  }
  // Ran out of buffer before the element closed.
  return -1;
}
409
503
 
410
504
  // packages/agent/src/lib/swarm.ts
411
505
  import { groq } from "@ai-sdk/groq";
@@ -425,7 +519,7 @@ import chalk from "chalk";
425
519
  import dedent2 from "dedent";
426
520
  import { zodToJsonSchema } from "zod-to-json-schema";
427
521
  function generate(agent2, messages, contextVariables, config) {
428
- const result = generateText({
522
+ return generateText({
429
523
  abortSignal: config?.abortSignal,
430
524
  providerOptions: agent2.providerOptions ?? config?.providerOptions,
431
525
  model: agent2.model,
@@ -433,7 +527,7 @@ function generate(agent2, messages, contextVariables, config) {
433
527
  messages: convertToModelMessages(
434
528
  Array.isArray(messages) ? messages : [user(messages)]
435
529
  ),
436
- temperature: agent2.temperature,
530
+ experimental_repairToolCall: repairToolCall,
437
531
  stopWhen: stepCountIs(25),
438
532
  tools: agent2.toToolset(),
439
533
  activeTools: agent2.toolsNames,
@@ -454,7 +548,6 @@ function generate(agent2, messages, contextVariables, config) {
454
548
  // (contextVariables as any).content = result.content;
455
549
  // },
456
550
  });
457
- return Object.assign(result, { state: contextVariables });
458
551
  }
459
552
  function execute(agent2, messages, contextVariables, config) {
460
553
  const runId = generateId2();
@@ -466,9 +559,10 @@ function execute(agent2, messages, contextVariables, config) {
466
559
  messages: convertToModelMessages(
467
560
  Array.isArray(messages) ? messages : [user(messages)]
468
561
  ),
469
- temperature: agent2.temperature,
470
562
  stopWhen: stepCountIs(25),
471
- experimental_transform: smoothStream(),
563
+ experimental_transform: smoothStream({
564
+ chunking: htmlElementChunking()
565
+ }),
472
566
  tools: agent2.toToolset(),
473
567
  activeTools: agent2.toolsNames,
474
568
  experimental_context: contextVariables,
@@ -665,8 +759,8 @@ var Agent = class _Agent {
665
759
  handoffToolName;
666
760
  handoffTool;
667
761
  output;
668
- temperature;
669
762
  providerOptions;
763
+ logging;
670
764
  constructor(config) {
671
765
  this.model = config.model;
672
766
  this.toolChoice = config.toolChoice || "auto";
@@ -674,9 +768,9 @@ var Agent = class _Agent {
674
768
  this.prepareHandoff = config.prepareHandoff;
675
769
  this.prepareEnd = config.prepareEnd;
676
770
  this.output = config.output;
677
- this.temperature = config.temperature;
678
771
  this.internalName = snakecase(config.name);
679
772
  this.providerOptions = config.providerOptions;
773
+ this.logging = config.logging;
680
774
  this.handoff = {
681
775
  name: this.internalName,
682
776
  instructions: config.prompt,
@@ -781,7 +875,7 @@ ${output}
781
875
  experimental_output: this.output ? Output2.object({ schema: this.output }) : void 0,
782
876
  onStepFinish: (step) => {
783
877
  const toolCall = step.toolCalls.at(-1);
784
- if (toolCall) {
878
+ if (toolCall && this.logging) {
785
879
  console.log(
786
880
  `Debug: ${chalk2.yellow("ToolCalled")}: ${toolCall.toolName}(${JSON.stringify(toolCall.input)})`
787
881
  );
@@ -810,7 +904,10 @@ ${JSON.stringify(error)}
810
904
  toTool(props) {
811
905
  return { [this.handoffToolName]: this.asTool(props) };
812
906
  }
813
- debug(prefix = "") {
907
+ debug() {
908
+ if (!this.logging) {
909
+ return;
910
+ }
814
911
  console.log(
815
912
  `Debug: ${chalk2.bgMagenta("Agent")}: ${chalk2.dim.black(this.handoff.name)}`
816
913
  );
@@ -850,7 +947,6 @@ ${JSON.stringify(error)}
850
947
  handoffDescription: agent2?.handoffDescription ?? this.handoff.handoffDescription,
851
948
  handoffs: [...this.handoffs],
852
949
  output: agent2?.output ?? this.output,
853
- temperature: agent2?.temperature ?? this.temperature,
854
950
  providerOptions: agent2?.providerOptions ?? this.providerOptions
855
951
  });
856
952
  }
@@ -1916,11 +2012,30 @@ var lmstudio = createOpenAICompatible({
1916
2012
  supportsStructuredOutputs: true,
1917
2013
  includeUsage: true
1918
2014
  });
2015
+ var ollama = createOpenAICompatible({
2016
+ name: "ollama",
2017
+ baseURL: process.env.OLLAMA_BASE_URL ?? "http://127.0.0.1:11434/v1",
2018
+ supportsStructuredOutputs: true
2019
+ });
1919
2020
  var glm = createOpenAICompatible({
1920
2021
  name: "z.ai",
1921
2022
  baseURL: "https://api.z.ai/api/paas/v4/",
1922
2023
  apiKey: process.env.ZAI_API_KEY
1923
2024
  });
2025
+ var cerebras = createOpenAICompatible({
2026
+ name: "cerebras",
2027
+ baseURL: "https://api.cerebras.ai/v1",
2028
+ apiKey: process.env.CEREBRAS_API_KEY,
2029
+ includeUsage: true,
2030
+ supportsStructuredOutputs: true
2031
+ });
2032
+ var nebius = createOpenAICompatible({
2033
+ name: "nebius",
2034
+ baseURL: "https://api.tokenfactory.nebius.com/v1/",
2035
+ apiKey: process.env.NEBIUS_API_KEY,
2036
+ includeUsage: true,
2037
+ supportsStructuredOutputs: true
2038
+ });
1924
2039
  async function embed(documents) {
1925
2040
  const dimensions = 1024;
1926
2041
  const { embeddings } = await embedMany({
@@ -1995,12 +2110,14 @@ export {
1995
2110
  STEM_STEP_BACK_EXAMPLES,
1996
2111
  SUPERVISOR_PROMPT_PREFIX,
1997
2112
  agent,
2113
+ cerebras,
1998
2114
  confirm,
1999
2115
  embed,
2000
2116
  execute,
2001
2117
  finished,
2002
2118
  generate,
2003
2119
  glm,
2120
+ htmlElementChunking,
2004
2121
  input,
2005
2122
  instructions,
2006
2123
  isTransferToolResult,
@@ -2016,6 +2133,8 @@ export {
2016
2133
  memoryTools,
2017
2134
  memoryWrite,
2018
2135
  messageToUiMessage,
2136
+ nebius,
2137
+ ollama,
2019
2138
  pipe,
2020
2139
  prepareAgent,
2021
2140
  prepareStep,