beddel 1.0.3 → 1.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +41 -29
  2. package/dist/agents/assistant-bedrock.yaml +1 -2
  3. package/dist/agents/assistant-gitmcp.yaml +38 -0
  4. package/dist/agents/assistant-openrouter.yaml +1 -2
  5. package/dist/agents/assistant.yaml +1 -2
  6. package/dist/agents/index.d.ts +1 -1
  7. package/dist/agents/index.d.ts.map +1 -1
  8. package/dist/agents/index.js +3 -0
  9. package/dist/agents/multi-step-assistant.yaml +67 -0
  10. package/dist/agents/text-generator.yaml +27 -0
  11. package/dist/core/variable-resolver.d.ts.map +1 -1
  12. package/dist/core/variable-resolver.js +57 -10
  13. package/dist/primitives/call-agent.d.ts +28 -0
  14. package/dist/primitives/call-agent.d.ts.map +1 -0
  15. package/dist/primitives/call-agent.js +79 -0
  16. package/dist/primitives/chat.d.ts +31 -0
  17. package/dist/primitives/chat.d.ts.map +1 -0
  18. package/dist/primitives/chat.js +70 -0
  19. package/dist/primitives/index.d.ts +11 -15
  20. package/dist/primitives/index.d.ts.map +1 -1
  21. package/dist/primitives/index.js +33 -34
  22. package/dist/primitives/llm-core.d.ts +45 -0
  23. package/dist/primitives/llm-core.d.ts.map +1 -0
  24. package/dist/primitives/llm-core.js +43 -0
  25. package/dist/primitives/llm.d.ts +12 -41
  26. package/dist/primitives/llm.d.ts.map +1 -1
  27. package/dist/primitives/llm.js +29 -133
  28. package/dist/primitives/mcp-tool.d.ts +22 -0
  29. package/dist/primitives/mcp-tool.d.ts.map +1 -0
  30. package/dist/primitives/mcp-tool.js +130 -0
  31. package/docs/architecture/api-reference.md +196 -101
  32. package/docs/architecture/components.md +165 -132
  33. package/docs/architecture/core-workflows.md +146 -129
  34. package/docs/architecture/high-level-architecture.md +19 -12
  35. package/docs/architecture/source-tree.md +65 -34
  36. package/package.json +2 -1
  37. package/src/agents/assistant-bedrock.yaml +1 -2
  38. package/src/agents/assistant-gitmcp.yaml +38 -0
  39. package/src/agents/assistant-openrouter.yaml +1 -2
  40. package/src/agents/assistant.yaml +1 -2
  41. package/src/agents/index.ts +3 -0
  42. package/src/agents/multi-step-assistant.yaml +67 -0
  43. package/src/agents/text-generator.yaml +27 -0
  44. package/src/core/variable-resolver.ts +65 -10
  45. package/src/primitives/call-agent.ts +108 -0
  46. package/src/primitives/chat.ts +87 -0
  47. package/src/primitives/index.ts +35 -38
  48. package/src/primitives/llm-core.ts +77 -0
  49. package/src/primitives/llm.ts +30 -177
  50. package/src/primitives/mcp-tool.ts +190 -0
package/README.md CHANGED
@@ -1,7 +1,7 @@
1
1
  # Beddel Protocol
2
2
 
3
3
  [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
4
- [![npm version](https://img.shields.io/badge/npm-1.0.0-brightgreen.svg)](https://www.npmjs.com/package/beddel)
4
+ [![npm version](https://img.shields.io/badge/npm-1.0.4-brightgreen.svg)](https://www.npmjs.com/package/beddel)
5
5
  [![TypeScript](https://img.shields.io/badge/TypeScript-5.x-blue.svg)](https://www.typescriptlang.org/)
6
6
  [![AI SDK](https://img.shields.io/badge/AI%20SDK-v6-purple.svg)](https://sdk.vercel.ai/)
7
7
 
@@ -10,11 +10,12 @@
10
10
  ## Features
11
11
 
12
12
  - 🔄 **Sequential Pipeline Execution** — Define workflows as YAML, execute steps in order
13
- - 🌊 **Native Streaming** — First-class `streamText` support with `useChat` compatibility
13
+ - 🌊 **Native Streaming** — First-class `streamText` support via `chat` primitive with `useChat` compatibility
14
14
  - 🔌 **Extensible Primitives** — Register custom step types, tools, and callbacks
15
15
  - 🔒 **Security First** — YAML parsing with `FAILSAFE_SCHEMA` prevents code execution
16
16
  - 📦 **Bundle Separation** — Three entry points for server, client, and full API access
17
17
  - 🌐 **Multi-Provider** — Built-in support for Google Gemini, Amazon Bedrock, and OpenRouter (400+ models)
18
+ - 🔀 **Semantic Primitives** — `chat` for streaming frontend, `llm` for blocking workflows
18
19
 
19
20
  ## Installation
20
21
 
@@ -51,11 +52,10 @@ metadata:
51
52
 
52
53
  workflow:
53
54
  - id: "chat-interaction"
54
- type: "llm"
55
+ type: "chat"
55
56
  config:
56
57
  provider: "google"
57
58
  model: "gemini-2.0-flash-exp"
58
- stream: true
59
59
  system: "You are a helpful assistant."
60
60
  messages: "$input.messages"
61
61
  ```
@@ -71,17 +71,34 @@ metadata:
71
71
 
72
72
  workflow:
73
73
  - id: "chat"
74
- type: "llm"
74
+ type: "chat"
75
75
  config:
76
76
  provider: "bedrock"
77
77
  model: "us.meta.llama3-2-1b-instruct-v1:0"
78
- stream: true
79
78
  system: |
80
79
  You are a helpful, friendly assistant. Be concise and direct.
81
80
  Answer in the same language the user writes to you.
82
81
  messages: "$input.messages"
83
82
  ```
84
83
 
84
+ #### Example 3: OpenRouter (400+ Models)
85
+
86
+ ```yaml
87
+ # src/agents/assistant-openrouter.yaml
88
+ metadata:
89
+ name: "OpenRouter Assistant"
90
+ version: "1.0.0"
91
+
92
+ workflow:
93
+ - id: "chat"
94
+ type: "chat"
95
+ config:
96
+ provider: "openrouter"
97
+ model: "qwen/qwen3-14b:free" # or any model from openrouter.ai/models
98
+ system: "You are a helpful assistant."
99
+ messages: "$input.messages"
100
+ ```
101
+
85
102
  ### 3. Set Environment Variables
86
103
 
87
104
  ```bash
@@ -94,6 +111,9 @@ AWS_BEARER_TOKEN_BEDROCK=your_bedrock_api_key
94
111
  # Or use standard AWS credentials:
95
112
  # AWS_ACCESS_KEY_ID=your_access_key
96
113
  # AWS_SECRET_ACCESS_KEY=your_secret_key
114
+
115
+ # For OpenRouter
116
+ OPENROUTER_API_KEY=your_openrouter_api_key
97
117
  ```
98
118
 
99
119
  ### 4. Use with React (useChat)
@@ -132,25 +152,6 @@ export default function Chat() {
132
152
 
133
153
  > **Note:** The Bedrock provider requires `AWS_REGION` to be set (defaults to `us-east-1` if not provided).
134
154
 
135
- ### Example: OpenRouter (400+ Models)
136
-
137
- ```yaml
138
- # src/agents/assistant-openrouter.yaml
139
- metadata:
140
- name: "OpenRouter Assistant"
141
- version: "1.0.0"
142
-
143
- workflow:
144
- - id: "chat"
145
- type: "llm"
146
- config:
147
- provider: "openrouter"
148
- model: "qwen/qwen3-14b:free" # or any model from openrouter.ai/models
149
- stream: true
150
- system: "You are a helpful assistant."
151
- messages: "$input.messages"
152
- ```
153
-
154
155
  ## Entry Points
155
156
 
156
157
  | Import Path | Purpose | Environment |
@@ -208,10 +209,9 @@ metadata:
208
209
 
209
210
  workflow:
210
211
  - id: "step-1"
211
- type: "llm"
212
+ type: "chat" # or "llm" for non-streaming workflows
212
213
  config:
213
214
  model: "gemini-2.0-flash-exp"
214
- stream: true
215
215
  system: "System prompt"
216
216
  messages: "$input.messages"
217
217
  tools:
@@ -220,6 +220,16 @@ workflow:
220
220
  result: "stepOutput"
221
221
  ```
222
222
 
223
+ ### Primitive Types
224
+
225
+ | Type | Behavior | Use Case |
226
+ |------|----------|----------|
227
+ | `chat` | Always streaming, converts UIMessage | Frontend chat interfaces (`useChat`) |
228
+ | `llm` | Never streaming, returns complete result | Multi-step workflows, variable passing |
229
+ | `call-agent` | Invokes another agent | Sub-agent orchestration |
230
+ | `output-generator` | JSON template transform | Structured output generation |
231
+ | `mcp-tool` | Connects to MCP servers via SSE | External tool integration (GitMCP, Context7) |
232
+
223
233
  ### Variable Resolution
224
234
 
225
235
  | Pattern | Description | Example |
@@ -240,8 +250,9 @@ Beddel is fully compatible with Vercel AI SDK v6:
240
250
 
241
251
  - **Frontend:** `useChat` sends `UIMessage[]` with `{ parts: [...] }` format
242
252
  - **Backend:** `streamText`/`generateText` expects `ModelMessage[]` with `{ content: ... }`
243
- - **Automatic Conversion:** `convertToModelMessages()` bridges the gap
244
- - **Streaming:** `toUIMessageStreamResponse()` returns the correct format for `useChat`
253
+ - **Automatic Conversion:** `chat` primitive uses `convertToModelMessages()` to bridge the gap
254
+ - **Streaming:** `chat` primitive returns `toUIMessageStreamResponse()` for `useChat`
255
+ - **Blocking:** `llm` primitive uses `generateText()` for workflow steps
245
256
 
246
257
  ## Technology Stack
247
258
 
@@ -253,6 +264,7 @@ Beddel is fully compatible with Vercel AI SDK v6:
253
264
  | AI Provider | `@ai-sdk/google` | 3.x |
254
265
  | AI Provider | `@ai-sdk/amazon-bedrock` | 4.x |
255
266
  | AI Provider | `@ai-sdk/openai` | 1.x |
267
+ | MCP Client | `@modelcontextprotocol/sdk` | 1.x |
256
268
  | Validation | `zod` | 3.x |
257
269
  | YAML Parser | `js-yaml` | 4.x |
258
270
 
@@ -9,11 +9,10 @@ metadata:
9
9
 
10
10
  workflow:
11
11
  - id: "chat"
12
- type: "llm"
12
+ type: "chat"
13
13
  config:
14
14
  provider: "bedrock"
15
15
  model: "us.meta.llama3-2-1b-instruct-v1:0"
16
- stream: true
17
16
  system: |
18
17
  You are a helpful, friendly assistant. Be concise and direct.
19
18
  Answer in the same language the user writes to you.
@@ -0,0 +1,38 @@
1
+ # GitMCP Documentation Assistant
2
+ # Fetches documentation from GitHub repos via GitMCP and answers questions
3
+
4
+ metadata:
5
+ name: "GitMCP Documentation Assistant"
6
+ version: "1.0.0"
7
+ builtin: true
8
+ description: "Assistant that fetches documentation from GitHub repos via GitMCP MCP server"
9
+
10
+ workflow:
11
+ # Step 1: Fetch documentation from GitMCP
12
+ # GitMCP URL format: https://gitmcp.io/{owner}/{repo}
13
+ # Default: Beddel Protocol docs
14
+ - id: "fetch-docs"
15
+ type: "mcp-tool"
16
+ config:
17
+ url: "https://gitmcp.io/botanarede/beddel"
18
+ tool: "fetch_beddel_documentation"
19
+ arguments: {}
20
+ result: "mcpDocs"
21
+
22
+ # Step 2: Respond using the fetched documentation as context
23
+ - id: "respond"
24
+ type: "chat"
25
+ config:
26
+ provider: "google"
27
+ model: "gemini-2.0-flash-exp"
28
+ system: |
29
+ You are a helpful assistant specialized in the Beddel Protocol.
30
+ You have access to the official documentation below.
31
+
32
+ Use this documentation to answer questions accurately.
33
+ If the documentation doesn't cover the topic, say so clearly.
34
+
35
+ ## Documentation
36
+
37
+ $stepResult.mcpDocs.data
38
+ messages: "$input.messages"
@@ -8,10 +8,9 @@ metadata:
8
8
 
9
9
  workflow:
10
10
  - id: "chat-interaction"
11
- type: "llm"
11
+ type: "chat"
12
12
  config:
13
13
  provider: "openrouter"
14
14
  model: "qwen/qwen3-coder:free"
15
- stream: true
16
15
  system: "You are a helpful assistant."
17
16
  messages: "$input.messages"
@@ -8,10 +8,9 @@ metadata:
8
8
 
9
9
  workflow:
10
10
  - id: "chat-interaction"
11
- type: "llm"
11
+ type: "chat"
12
12
  config:
13
13
  provider: "google"
14
14
  model: "gemini-2.0-flash-exp"
15
- stream: true
16
15
  system: "You are a helpful assistant."
17
16
  messages: "$input.messages"
@@ -7,7 +7,7 @@
7
7
  /**
8
8
  * List of built-in agent IDs available in the package
9
9
  */
10
- export declare const BUILTIN_AGENTS: readonly ["assistant", "assistant-bedrock", "assistant-openrouter"];
10
+ export declare const BUILTIN_AGENTS: readonly ["assistant", "assistant-bedrock", "assistant-openrouter", "assistant-gitmcp", "text-generator", "multi-step-assistant"];
11
11
  export type BuiltinAgentId = typeof BUILTIN_AGENTS[number];
12
12
  /**
13
13
  * Get the absolute path to the built-in agents directory
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/agents/index.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AASH;;GAEG;AACH,eAAO,MAAM,cAAc,qEAIjB,CAAC;AAEX,MAAM,MAAM,cAAc,GAAG,OAAO,cAAc,CAAC,MAAM,CAAC,CAAC;AAE3D;;GAEG;AACH,wBAAgB,oBAAoB,IAAI,MAAM,CAE7C;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,IAAI,cAAc,CAEzE;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,OAAO,EAAE,cAAc,GAAG,MAAM,CAEnE"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/agents/index.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AASH;;GAEG;AACH,eAAO,MAAM,cAAc,mIAOjB,CAAC;AAEX,MAAM,MAAM,cAAc,GAAG,OAAO,cAAc,CAAC,MAAM,CAAC,CAAC;AAE3D;;GAEG;AACH,wBAAgB,oBAAoB,IAAI,MAAM,CAE7C;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,OAAO,EAAE,MAAM,GAAG,OAAO,IAAI,cAAc,CAEzE;AAED;;GAEG;AACH,wBAAgB,mBAAmB,CAAC,OAAO,EAAE,cAAc,GAAG,MAAM,CAEnE"}
@@ -16,6 +16,9 @@ export const BUILTIN_AGENTS = [
16
16
  'assistant',
17
17
  'assistant-bedrock',
18
18
  'assistant-openrouter',
19
+ 'assistant-gitmcp',
20
+ 'text-generator',
21
+ 'multi-step-assistant',
19
22
  ];
20
23
  /**
21
24
  * Get the absolute path to the built-in agents directory
@@ -0,0 +1,67 @@
1
+ # Multi-Step Text Analyzer
2
+ # Pipeline: Generate Text → Extract Entities → Classify Sentiment → Structured Summary
3
+
4
+ metadata:
5
+ name: "Multi-Step Text Analyzer"
6
+ version: "1.0.0"
7
+ builtin: true
8
+ description: "4-step pipeline demonstrating variable passing between steps"
9
+
10
+ workflow:
11
+ # Step 1: Generate sample text using text-generator agent
12
+ # Input: { messages: [{ role: "user", content: "Generate 200 chars about space exploration" }] }
13
+ - id: "generate-text"
14
+ type: "call-agent"
15
+ config:
16
+ agentId: "text-generator"
17
+ input:
18
+ messages: "$input.messages"
19
+ result: "generatedText"
20
+
21
+ # Step 2: Extract entities and topics from the generated text
22
+ - id: "extract-entities"
23
+ type: "llm"
24
+ config:
25
+ provider: "google"
26
+ model: "gemini-2.0-flash-exp"
27
+ system: |
28
+ You are an entity extraction specialist. Analyze the given text and extract:
29
+ - Named entities (people, places, organizations, dates)
30
+ - Main topics/themes
31
+ - Key concepts
32
+
33
+ Output ONLY valid JSON (no markdown, no explanation):
34
+ {"entities":{"people":[],"places":[],"organizations":[],"dates":[]},"topics":[],"concepts":[]}
35
+ messages:
36
+ - role: "user"
37
+ content: "$stepResult.generatedText.generatedText.text"
38
+ result: "entities"
39
+
40
+ # Step 3: Classify sentiment of the text
41
+ - id: "classify-sentiment"
42
+ type: "llm"
43
+ config:
44
+ provider: "google"
45
+ model: "gemini-2.0-flash-exp"
46
+ system: |
47
+ You are a sentiment analysis expert. Analyze the emotional tone of the text.
48
+
49
+ Output ONLY valid JSON (no markdown, no explanation):
50
+ {"overall":"positive|negative|neutral|mixed","confidence":0.8,"emotions":["joy"],"tone":"formal"}
51
+ messages:
52
+ - role: "user"
53
+ content: "$stepResult.generatedText.generatedText.text"
54
+ result: "sentiment"
55
+
56
+ # Step 4: Generate structured summary combining all analysis
57
+ - id: "final-summary"
58
+ type: "output-generator"
59
+ config:
60
+ template:
61
+ originalText: "$stepResult.generatedText.generatedText.text"
62
+ entities: "$stepResult.entities.text"
63
+ sentiment: "$stepResult.sentiment.text"
64
+ pipeline:
65
+ steps: 4
66
+ status: "completed"
67
+ result: "finalReport"
@@ -0,0 +1,27 @@
1
+ # Text Generator Agent
2
+ # Generates text based on length and subject parameters
3
+ # Uses lightweight model for efficiency
4
+
5
+ metadata:
6
+ name: "Text Generator"
7
+ version: "1.0.0"
8
+ builtin: true
9
+ description: "Generates text with specified length and subject"
10
+
11
+ workflow:
12
+ - id: "generate-text"
13
+ type: "llm"
14
+ config:
15
+ provider: "google"
16
+ model: "gemini-2.0-flash-exp"
17
+ system: |
18
+ You are a text generator. Generate creative, coherent text based on the given parameters.
19
+
20
+ RULES:
21
+ - Generate approximately the requested number of characters (can vary ±20%)
22
+ - Stay focused on the given subject/theme
23
+ - Write in a natural, engaging style
24
+ - Output ONLY the generated text, no explanations or meta-commentary
25
+ - Do not use markdown formatting unless specifically requested
26
+ messages: "$input.messages"
27
+ result: "generatedText"
@@ -1 +1 @@
1
- {"version":3,"file":"variable-resolver.d.ts","sourceRoot":"","sources":["../../src/core/variable-resolver.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,UAAU,CAAC;AAwBjD;;;;;;;;;;;GAWG;AACH,wBAAgB,gBAAgB,CAAC,QAAQ,EAAE,OAAO,EAAE,OAAO,EAAE,gBAAgB,GAAG,OAAO,CAoEtF"}
1
+ {"version":3,"file":"variable-resolver.d.ts","sourceRoot":"","sources":["../../src/core/variable-resolver.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG;AAEH,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,UAAU,CAAC;AA6EjD;;;;;;;;;;;GAWG;AACH,wBAAgB,gBAAgB,CAAC,QAAQ,EAAE,OAAO,EAAE,OAAO,EAAE,gBAAgB,GAAG,OAAO,CAsEtF"}
@@ -26,6 +26,52 @@ function resolvePath(obj, path) {
26
26
  }
27
27
  return current;
28
28
  }
29
+ /**
30
+ * Interpolate variable references within a string.
31
+ * Replaces $input.*, $stepResult.*, and $varName.* patterns.
32
+ *
33
+ * @param template - String containing variable references
34
+ * @param context - Execution context
35
+ * @returns String with variables replaced by their values
36
+ */
37
+ function interpolateVariables(template, context) {
38
+ // Pattern to match $input.path, $stepResult.varName.path, or $varName.path
39
+ // Matches: $word.word.word... (stops at whitespace, newline, or end)
40
+ const variablePattern = /\$([a-zA-Z_][a-zA-Z0-9_]*(?:\.[a-zA-Z_][a-zA-Z0-9_]*)*)/g;
41
+ return template.replace(variablePattern, (match, fullPath) => {
42
+ // Handle $input.* pattern
43
+ if (fullPath.startsWith('input.')) {
44
+ const path = fullPath.slice(6); // Remove "input."
45
+ const value = resolvePath(context.input, path);
46
+ return value !== undefined ? String(value) : match;
47
+ }
48
+ // Handle $stepResult.* pattern
49
+ if (fullPath.startsWith('stepResult.')) {
50
+ const restPath = fullPath.slice(11); // Remove "stepResult."
51
+ const dotIndex = restPath.indexOf('.');
52
+ if (dotIndex === -1) {
53
+ const value = context.variables.get(restPath);
54
+ return value !== undefined ? String(value) : match;
55
+ }
56
+ const varName = restPath.slice(0, dotIndex);
57
+ const valuePath = restPath.slice(dotIndex + 1);
58
+ const varValue = context.variables.get(varName);
59
+ const value = resolvePath(varValue, valuePath);
60
+ return value !== undefined ? String(value) : match;
61
+ }
62
+ // Handle legacy $varName.* pattern
63
+ const dotIndex = fullPath.indexOf('.');
64
+ if (dotIndex === -1) {
65
+ const value = context.variables.get(fullPath);
66
+ return value !== undefined ? String(value) : match;
67
+ }
68
+ const varName = fullPath.slice(0, dotIndex);
69
+ const valuePath = fullPath.slice(dotIndex + 1);
70
+ const varValue = context.variables.get(varName);
71
+ const value = resolvePath(varValue, valuePath);
72
+ return value !== undefined ? String(value) : match;
73
+ });
74
+ }
29
75
  /**
30
76
  * Resolve variable references in a template value.
31
77
  *
@@ -45,39 +91,40 @@ export function resolveVariables(template, context) {
45
91
  }
46
92
  // Handle string patterns
47
93
  if (typeof template === 'string') {
48
- // Check for $input.* pattern
49
- if (template.startsWith('$input.')) {
94
+ // Check if entire string is a single variable reference
95
+ // Check for $input.* pattern (entire string)
96
+ if (template.startsWith('$input.') && !template.includes(' ') && !template.includes('\n')) {
50
97
  const path = template.slice(7); // Remove "$input."
51
98
  return resolvePath(context.input, path);
52
99
  }
53
- // Check for $stepResult.* pattern (references step.result names)
54
- if (template.startsWith('$stepResult.')) {
100
+ // Check for $stepResult.* pattern (entire string)
101
+ if (template.startsWith('$stepResult.') && !template.includes(' ') && !template.includes('\n')) {
55
102
  const fullPath = template.slice(12); // Remove "$stepResult."
56
103
  const dotIndex = fullPath.indexOf('.');
57
104
  if (dotIndex === -1) {
58
- // Just the variable name, e.g., "$stepResult.llmOutput"
59
105
  return context.variables.get(fullPath);
60
106
  }
61
- // Variable name + path, e.g., "$stepResult.llmOutput.text"
62
107
  const varName = fullPath.slice(0, dotIndex);
63
108
  const restPath = fullPath.slice(dotIndex + 1);
64
109
  const varValue = context.variables.get(varName);
65
110
  return resolvePath(varValue, restPath);
66
111
  }
67
- // Check for legacy $varName.* pattern (direct variable reference)
68
- if (template.startsWith('$') && !template.startsWith('$$')) {
112
+ // Check for legacy $varName.* pattern (entire string)
113
+ if (template.startsWith('$') && !template.startsWith('$$') && !template.includes(' ') && !template.includes('\n')) {
69
114
  const fullPath = template.slice(1); // Remove "$"
70
115
  const dotIndex = fullPath.indexOf('.');
71
116
  if (dotIndex === -1) {
72
- // Just the variable name, e.g., "$llmOutput"
73
117
  return context.variables.get(fullPath);
74
118
  }
75
- // Variable name + path, e.g., "$llmOutput.text"
76
119
  const varName = fullPath.slice(0, dotIndex);
77
120
  const restPath = fullPath.slice(dotIndex + 1);
78
121
  const varValue = context.variables.get(varName);
79
122
  return resolvePath(varValue, restPath);
80
123
  }
124
+ // String interpolation: replace $variable.path patterns within larger strings
125
+ if (template.includes('$')) {
126
+ return interpolateVariables(template, context);
127
+ }
81
128
  // No pattern match, return as-is
82
129
  return template;
83
130
  }
@@ -0,0 +1,28 @@
1
+ /**
2
+ * Beddel Protocol - Call Agent Primitive
3
+ *
4
+ * Enables recursive workflow execution by calling other agents.
5
+ * This primitive loads and executes another agent's workflow,
6
+ * passing input and returning the result.
7
+ *
8
+ * Server-only: Uses loadYaml and WorkflowExecutor.
9
+ */
10
+ import type { PrimitiveHandler } from '../types';
11
+ /**
12
+ * Call Agent Primitive Handler
13
+ *
14
+ * Loads another agent's YAML and executes its workflow.
15
+ *
16
+ * IMPORTANT: If the called agent returns a Response (streaming),
17
+ * this primitive will return that Response, causing the parent
18
+ * workflow to also return immediately.
19
+ *
20
+ * For multi-step workflows, ensure called agents use stream: false
21
+ * so their results can be captured and passed to subsequent steps.
22
+ *
23
+ * @param config - Step configuration (agentId, input)
24
+ * @param context - Execution context with input and variables
25
+ * @returns Result from called agent (Response or Record)
26
+ */
27
+ export declare const callAgentPrimitive: PrimitiveHandler;
28
+ //# sourceMappingURL=call-agent.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"call-agent.d.ts","sourceRoot":"","sources":["../../src/primitives/call-agent.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH,OAAO,KAAK,EAAgC,gBAAgB,EAAE,MAAM,UAAU,CAAC;AAwD/E;;;;;;;;;;;;;;;GAeG;AACH,eAAO,MAAM,kBAAkB,EAAE,gBAyBhC,CAAC"}
@@ -0,0 +1,79 @@
1
+ /**
2
+ * Beddel Protocol - Call Agent Primitive
3
+ *
4
+ * Enables recursive workflow execution by calling other agents.
5
+ * This primitive loads and executes another agent's workflow,
6
+ * passing input and returning the result.
7
+ *
8
+ * Server-only: Uses loadYaml and WorkflowExecutor.
9
+ */
10
+ import { loadYaml } from '../core/parser';
11
+ import { WorkflowExecutor } from '../core/workflow';
12
+ import { resolveVariables } from '../core/variable-resolver';
13
+ import { join } from 'path';
14
+ import { access } from 'fs/promises';
15
+ import { getBuiltinAgentsPath } from '../agents';
16
+ /**
17
+ * Check if a file exists at the given path
18
+ */
19
+ async function fileExists(path) {
20
+ try {
21
+ await access(path);
22
+ return true;
23
+ }
24
+ catch {
25
+ return false;
26
+ }
27
+ }
28
+ /**
29
+ * Resolve agent path with fallback chain:
30
+ * 1. User agents (agentsPath) - allows override
31
+ * 2. Built-in agents (package) - fallback
32
+ */
33
+ async function resolveAgentPath(agentId, agentsPath) {
34
+ // 1. First: try user agents
35
+ const userPath = join(process.cwd(), agentsPath, `${agentId}.yaml`);
36
+ if (await fileExists(userPath)) {
37
+ return userPath;
38
+ }
39
+ // 2. Fallback: built-in agents from package
40
+ const builtinPath = join(getBuiltinAgentsPath(), `${agentId}.yaml`);
41
+ if (await fileExists(builtinPath)) {
42
+ return builtinPath;
43
+ }
44
+ throw new Error(`[Beddel] Agent not found: ${agentId}`);
45
+ }
46
+ /**
47
+ * Call Agent Primitive Handler
48
+ *
49
+ * Loads another agent's YAML and executes its workflow.
50
+ *
51
+ * IMPORTANT: If the called agent returns a Response (streaming),
52
+ * this primitive will return that Response, causing the parent
53
+ * workflow to also return immediately.
54
+ *
55
+ * For multi-step workflows, ensure called agents use stream: false
56
+ * so their results can be captured and passed to subsequent steps.
57
+ *
58
+ * @param config - Step configuration (agentId, input)
59
+ * @param context - Execution context with input and variables
60
+ * @returns Result from called agent (Response or Record)
61
+ */
62
+ export const callAgentPrimitive = async (config, context) => {
63
+ const callConfig = config;
64
+ if (!callConfig.agentId) {
65
+ throw new Error('[Beddel] call-agent requires agentId in config');
66
+ }
67
+ // Resolve the input to pass to the agent
68
+ const agentInput = callConfig.input
69
+ ? resolveVariables(callConfig.input, context)
70
+ : context.input;
71
+ // Resolve agent path
72
+ const agentsPath = callConfig.agentsPath || 'src/agents';
73
+ const agentPath = await resolveAgentPath(callConfig.agentId, agentsPath);
74
+ // Load and execute the agent
75
+ const yaml = await loadYaml(agentPath);
76
+ const executor = new WorkflowExecutor(yaml);
77
+ const result = await executor.execute(agentInput);
78
+ return result;
79
+ };
@@ -0,0 +1,31 @@
1
+ /**
2
+ * Beddel Protocol - Chat Primitive
3
+ *
4
+ * DESIGN DECISION: This primitive ALWAYS streams. There is no `stream` config option.
5
+ * Streaming behavior is semantically determined by primitive type:
6
+ * - `chat` → Always streams (frontend UX)
7
+ * - `llm` → Never streams (workflow chaining)
8
+ *
9
+ * MESSAGE CONVERSION: Uses `convertToModelMessages()` because input comes from
10
+ * `useChat` hook which sends `UIMessage[]` format. The AI SDK v6 `streamText`
11
+ * expects `ModelMessage[]` format.
12
+ *
13
+ * Use this primitive when:
14
+ * - Input comes from useChat frontend hook
15
+ * - You need streaming responses to the client
16
+ *
17
+ * Server-only: Uses Vercel AI SDK Core.
18
+ */
19
+ import type { PrimitiveHandler } from '../types';
20
+ /**
21
+ * Chat Primitive Handler
22
+ *
23
+ * Converts UIMessage[] (from useChat) to ModelMessage[] and streams response.
24
+ * Always uses streaming mode for responsive frontend UX.
25
+ *
26
+ * @param config - Step configuration from YAML
27
+ * @param context - Execution context with input and variables
28
+ * @returns Response (always streaming)
29
+ */
30
+ export declare const chatPrimitive: PrimitiveHandler;
31
+ //# sourceMappingURL=chat.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"chat.d.ts","sourceRoot":"","sources":["../../src/primitives/chat.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;GAiBG;AAQH,OAAO,KAAK,EAAgC,gBAAgB,EAAE,MAAM,UAAU,CAAC;AAK/E;;;;;;;;;GASG;AACH,eAAO,MAAM,aAAa,EAAE,gBA8C3B,CAAC"}