beddel 1.0.0 β†’ 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,6 +14,7 @@
14
14
  - πŸ”Œ **Extensible Primitives** β€” Register custom step types, tools, and callbacks
15
15
  - πŸ”’ **Security First** β€” YAML parsing with `FAILSAFE_SCHEMA` prevents code execution
16
16
  - πŸ“¦ **Bundle Separation** β€” Three entry points for server, client, and full API access
17
+ - 🌐 **Multi-Provider** β€” Built-in support for Google Gemini and Amazon Bedrock
17
18
 
18
19
  ## Installation
19
20
 
@@ -40,6 +41,8 @@ export const POST = createBeddelHandler({
40
41
 
41
42
  ### 2. Create YAML Agent
42
43
 
44
+ #### Example 1: Google Gemini (Default Provider)
45
+
43
46
  ```yaml
44
47
  # src/agents/assistant.yaml
45
48
  metadata:
@@ -50,16 +53,47 @@ workflow:
50
53
  - id: "chat-interaction"
51
54
  type: "llm"
52
55
  config:
56
+ provider: "google"
53
57
  model: "gemini-2.0-flash-exp"
54
58
  stream: true
55
59
  system: "You are a helpful assistant."
56
60
  messages: "$input.messages"
57
61
  ```
58
62
 
59
- ### 3. Set Environment Variable
63
+ #### Example 2: Amazon Bedrock (Llama 3.2)
64
+
65
+ ```yaml
66
+ # src/agents/assistant-bedrock.yaml
67
+ metadata:
68
+ name: "Bedrock Assistant"
69
+ version: "1.0.0"
70
+ description: "Simple assistant using Llama 3.2 1B (lightweight)"
71
+
72
+ workflow:
73
+ - id: "chat"
74
+ type: "llm"
75
+ config:
76
+ provider: "bedrock"
77
+ model: "us.meta.llama3-2-1b-instruct-v1:0"
78
+ stream: true
79
+ system: |
80
+ You are a helpful, friendly assistant. Be concise and direct.
81
+ Answer in the same language the user writes to you.
82
+ messages: "$input.messages"
83
+ ```
84
+
85
+ ### 3. Set Environment Variables
60
86
 
61
87
  ```bash
88
+ # For Google Gemini
62
89
  GEMINI_API_KEY=your_api_key_here
90
+
91
+ # For Amazon Bedrock
92
+ AWS_REGION=us-east-1
93
+ AWS_BEARER_TOKEN_BEDROCK=your_bedrock_api_key
94
+ # Or use standard AWS credentials:
95
+ # AWS_ACCESS_KEY_ID=your_access_key
96
+ # AWS_SECRET_ACCESS_KEY=your_secret_key
63
97
  ```
64
98
 
65
99
  ### 4. Use with React (useChat)
@@ -71,7 +105,7 @@ import { useChat } from '@ai-sdk/react';
71
105
  export default function Chat() {
72
106
  const { messages, input, handleInputChange, handleSubmit } = useChat({
73
107
  api: '/api/beddel/chat',
74
- body: { agentId: 'assistant' },
108
+ body: { agentId: 'assistant' }, // or 'assistant-bedrock'
75
109
  });
76
110
 
77
111
  return (
@@ -88,6 +122,15 @@ export default function Chat() {
88
122
  }
89
123
  ```
90
124
 
125
+ ## Built-in Providers
126
+
127
+ | Provider | Environment Variables | Default Model |
128
+ |----------|----------------------|---------------|
129
+ | `google` | `GEMINI_API_KEY` | `gemini-1.5-flash` |
130
+ | `bedrock` | `AWS_REGION`, `AWS_BEARER_TOKEN_BEDROCK` (or AWS credentials) | `anthropic.claude-3-haiku-20240307-v1:0` |
131
+
132
+ > **Note:** The Bedrock provider reads `AWS_REGION` from the environment, falling back to `us-east-1` when it is not set.
133
+
91
134
  ## Entry Points
92
135
 
93
136
  | Import Path | Purpose | Environment |
@@ -188,6 +231,7 @@ Beddel is fully compatible with Vercel AI SDK v6:
188
231
  | Runtime | Node.js / Edge | 20+ |
189
232
  | AI Core | `ai` | 6.x |
190
233
  | AI Provider | `@ai-sdk/google` | 3.x |
234
+ | AI Provider | `@ai-sdk/amazon-bedrock` | 4.x |
191
235
  | Validation | `zod` | 3.x |
192
236
  | YAML Parser | `js-yaml` | 4.x |
193
237
 
package/dist/index.d.ts CHANGED
@@ -14,5 +14,7 @@ export { createBeddelHandler } from './server/handler';
14
14
  export { handlerRegistry, registerPrimitive } from './primitives';
15
15
  export { toolRegistry, registerTool } from './tools';
16
16
  export type { ToolImplementation } from './tools';
17
+ export { providerRegistry, registerProvider, createModel } from './providers';
18
+ export type { ProviderImplementation, ProviderConfig } from './providers';
17
19
  export type { ParsedYaml, WorkflowStep, StepConfig, YamlMetadata, ExecutionContext, PrimitiveHandler, } from './types';
18
20
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAGH,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,gBAAgB,EAAE,MAAM,0BAA0B,CAAC;AAC5D,OAAO,EAAE,mBAAmB,EAAE,MAAM,kBAAkB,CAAC;AAIvD,OAAO,EAAE,eAAe,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AAGlE,OAAO,EAAE,YAAY,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AACrD,YAAY,EAAE,kBAAkB,EAAE,MAAM,SAAS,CAAC;AAGlD,YAAY,EACR,UAAU,EACV,YAAY,EACZ,UAAU,EACV,YAAY,EACZ,gBAAgB,EAChB,gBAAgB,GACnB,MAAM,SAAS,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAGH,OAAO,EAAE,QAAQ,EAAE,MAAM,eAAe,CAAC;AACzC,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,gBAAgB,EAAE,MAAM,0BAA0B,CAAC;AAC5D,OAAO,EAAE,mBAAmB,EAAE,MAAM,kBAAkB,CAAC;AAIvD,OAAO,EAAE,eAAe,EAAE,iBAAiB,EAAE,MAAM,cAAc,CAAC;AAGlE,OAAO,EAAE,YAAY,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AACrD,YAAY,EAAE,kBAAkB,EAAE,MAAM,SAAS,CAAC;AAGlD,OAAO,EAAE,gBAAgB,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAC9E,YAAY,EAAE,sBAAsB,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAG1E,YAAY,EACR,UAAU,EACV,YAAY,EACZ,UAAU,EACV,YAAY,EACZ,gBAAgB,EAChB,gBAAgB,GACnB,MAAM,SAAS,CAAC"}
package/dist/index.js CHANGED
@@ -16,3 +16,5 @@ export { createBeddelHandler } from './server/handler';
16
16
  export { handlerRegistry, registerPrimitive } from './primitives';
17
17
  // Tools registry (for custom tool registration)
18
18
  export { toolRegistry, registerTool } from './tools';
19
+ // Providers registry (for custom LLM provider registration)
20
+ export { providerRegistry, registerProvider, createModel } from './providers';
@@ -1 +1 @@
1
- {"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../../src/primitives/llm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAaH,OAAO,KAAK,EAAgC,gBAAgB,EAAE,MAAM,UAAU,CAAC;AAI/E;;GAEG;AACH,MAAM,MAAM,UAAU,GAAG,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;AAEpF;;;GAGG;AACH,eAAO,MAAM,gBAAgB,EAAE,MAAM,CAAC,MAAM,EAAE,UAAU,CAAM,CAAC;AAE/D;;;;;;;;;;GAUG;AACH,wBAAgB,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,UAAU,GAAG,IAAI,CAKzE;AA0DD;;;;;;;;;;;;;;GAcG;AACH,eAAO,MAAM,YAAY,EAAE,gBA4E1B,CAAC"}
1
+ {"version":3,"file":"llm.d.ts","sourceRoot":"","sources":["../../src/primitives/llm.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;GAgBG;AAYH,OAAO,KAAK,EAAgC,gBAAgB,EAAE,MAAM,UAAU,CAAC;AAK/E;;GAEG;AACH,MAAM,MAAM,UAAU,GAAG,CAAC,OAAO,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,KAAK,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;AAEpF;;;GAGG;AACH,eAAO,MAAM,gBAAgB,EAAE,MAAM,CAAC,MAAM,EAAE,UAAU,CAAM,CAAC;AAE/D;;;;;;;;;;GAUG;AACH,wBAAgB,gBAAgB,CAAC,IAAI,EAAE,MAAM,EAAE,QAAQ,EAAE,UAAU,GAAG,IAAI,CAKzE;AA2DD;;;;;;;;;;;;;;GAcG;AACH,eAAO,MAAM,YAAY,EAAE,gBA2E1B,CAAC"}
@@ -16,9 +16,9 @@
16
16
  * - convertToModelMessages() bridges this gap automatically
17
17
  */
18
18
  import { streamText, generateText, dynamicTool, stepCountIs, convertToModelMessages, } from 'ai';
19
- import { createGoogleGenerativeAI } from '@ai-sdk/google';
20
19
  import { toolRegistry } from '../tools';
21
20
  import { resolveVariables } from '../core/variable-resolver';
21
+ import { createModel } from '../providers';
22
22
  /**
23
23
  * Registry for consumer-registered callbacks.
24
24
  * Populated by the application using Beddel.
@@ -87,11 +87,10 @@ function mapTools(toolDefinitions) {
87
87
  */
88
88
  export const llmPrimitive = async (config, context) => {
89
89
  const llmConfig = config;
90
- // Initialize Google Generative AI provider
91
- const google = createGoogleGenerativeAI({
92
- apiKey: process.env.GEMINI_API_KEY,
90
+ // Create model from provider registry (defaults to 'google')
91
+ const model = createModel(llmConfig.provider || 'google', {
92
+ model: llmConfig.model || 'gemini-1.5-flash',
93
93
  });
94
- const model = google(llmConfig.model || 'gemini-1.5-flash');
95
94
  // Resolve messages from context (e.g., $input.messages)
96
95
  // AI SDK v6: Frontend sends UIMessage[], we convert to ModelMessage[]
97
96
  const rawMessages = resolveVariables(llmConfig.messages, context);
@@ -0,0 +1,63 @@
1
+ /**
2
+ * Beddel Protocol - Provider Registry
3
+ *
4
+ * This registry maps provider names to their implementations.
5
+ * Following Expansion Pack Pattern from BMAD-METHODβ„’ for extensibility.
6
+ * See: https://github.com/bmadcode/bmad-method
7
+ *
8
+ * Server-only: Providers may use Node.js APIs and external services.
9
+ */
10
+ import type { LanguageModel } from 'ai';
11
+ /**
12
+ * Configuration passed to provider's createModel method.
13
+ */
14
+ export interface ProviderConfig {
15
+ model: string;
16
+ [key: string]: unknown;
17
+ }
18
+ /**
19
+ * Provider implementation interface.
20
+ * Each provider must implement createModel to return an AI SDK LanguageModel.
21
+ */
22
+ export interface ProviderImplementation {
23
+ createModel: (config: ProviderConfig) => LanguageModel;
24
+ }
25
+ /**
26
+ * Registry of provider implementations keyed by provider name.
27
+ *
28
+ * Built-in Providers:
29
+ * - 'google': Google Gemini via @ai-sdk/google
30
+ * - 'bedrock': Amazon Bedrock via @ai-sdk/amazon-bedrock
31
+ */
32
+ export declare const providerRegistry: Record<string, ProviderImplementation>;
33
+ /**
34
+ * Register a custom provider implementation in the registry.
35
+ * Allows consumers to extend Beddel with additional LLM providers.
36
+ *
37
+ * @param name - Provider identifier (e.g., 'openai', 'anthropic')
38
+ * @param implementation - ProviderImplementation with createModel method
39
+ *
40
+ * @example
41
+ * import { registerProvider } from 'beddel';
42
+ *
43
+ * registerProvider('openai', {
44
+ * createModel: (config) => {
45
+ * const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
46
+ * return openai(config.model || 'gpt-4');
47
+ * },
48
+ * });
49
+ */
50
+ export declare function registerProvider(name: string, implementation: ProviderImplementation): void;
51
+ /**
52
+ * Create a LanguageModel instance from a registered provider.
53
+ *
54
+ * @param provider - Provider name (must be registered in providerRegistry)
55
+ * @param config - Configuration including model name and provider-specific options
56
+ * @returns LanguageModel instance from the AI SDK
57
+ * @throws Error if provider is not found, listing available providers
58
+ *
59
+ * @example
60
+ * const model = createModel('google', { model: 'gemini-1.5-flash' });
61
+ */
62
+ export declare function createModel(provider: string, config: ProviderConfig): LanguageModel;
63
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/providers/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;AAEH,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,IAAI,CAAC;AAIxC;;GAEG;AACH,MAAM,WAAW,cAAc;IAC3B,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CAC1B;AAED;;;GAGG;AACH,MAAM,WAAW,sBAAsB;IACnC,WAAW,EAAE,CAAC,MAAM,EAAE,cAAc,KAAK,aAAa,CAAC;CAC1D;AAED;;;;;;GAMG;AACH,eAAO,MAAM,gBAAgB,EAAE,MAAM,CAAC,MAAM,EAAE,sBAAsB,CAAM,CAAC;AAE3E;;;;;;;;;;;;;;;;GAgBG;AACH,wBAAgB,gBAAgB,CAC5B,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,sBAAsB,GACvC,IAAI,CAKN;AAED;;;;;;;;;;GAUG;AACH,wBAAgB,WAAW,CAAC,QAAQ,EAAE,MAAM,EAAE,MAAM,EAAE,cAAc,GAAG,aAAa,CAOnF"}
@@ -0,0 +1,94 @@
1
+ /**
2
+ * Beddel Protocol - Provider Registry
3
+ *
4
+ * This registry maps provider names to their implementations.
5
+ * Following Expansion Pack Pattern from BMAD-METHODβ„’ for extensibility.
6
+ * See: https://github.com/bmadcode/bmad-method
7
+ *
8
+ * Server-only: Providers may use Node.js APIs and external services.
9
+ */
10
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
11
+ import { bedrock } from '@ai-sdk/amazon-bedrock';
12
+ /**
13
+ * Registry of provider implementations keyed by provider name.
14
+ *
15
+ * Built-in Providers:
16
+ * - 'google': Google Gemini via @ai-sdk/google
17
+ * - 'bedrock': Amazon Bedrock via @ai-sdk/amazon-bedrock
18
+ */
19
+ export const providerRegistry = {};
20
+ /**
21
+ * Register a custom provider implementation in the registry.
22
+ * Allows consumers to extend Beddel with additional LLM providers.
23
+ *
24
+ * @param name - Provider identifier (e.g., 'openai', 'anthropic')
25
+ * @param implementation - ProviderImplementation with createModel method
26
+ *
27
+ * @example
28
+ * import { registerProvider } from 'beddel';
29
+ *
30
+ * registerProvider('openai', {
31
+ * createModel: (config) => {
32
+ * const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
33
+ * return openai(config.model || 'gpt-4');
34
+ * },
35
+ * });
36
+ */
37
+ export function registerProvider(name, implementation) {
38
+ if (providerRegistry[name]) {
39
+ console.warn(`[Beddel] Provider '${name}' already registered, overwriting.`);
40
+ }
41
+ providerRegistry[name] = implementation;
42
+ }
43
+ /**
44
+ * Create a LanguageModel instance from a registered provider.
45
+ *
46
+ * @param provider - Provider name (must be registered in providerRegistry)
47
+ * @param config - Configuration including model name and provider-specific options
48
+ * @returns LanguageModel instance from the AI SDK
49
+ * @throws Error if provider is not found, listing available providers
50
+ *
51
+ * @example
52
+ * const model = createModel('google', { model: 'gemini-1.5-flash' });
53
+ */
54
+ export function createModel(provider, config) {
55
+ const impl = providerRegistry[provider];
56
+ if (!impl) {
57
+ const available = Object.keys(providerRegistry).join(', ') || 'none';
58
+ throw new Error(`Unknown provider: '${provider}'. Available: ${available}`);
59
+ }
60
+ return impl.createModel(config);
61
+ }
62
+ // =============================================================================
63
+ // Built-in Providers
64
+ // =============================================================================
65
+ /**
66
+ * Google Gemini Provider (Built-in)
67
+ *
68
+ * Uses @ai-sdk/google with GEMINI_API_KEY environment variable.
69
+ * Default model: gemini-1.5-flash
70
+ *
71
+ * Requirements: 1.2, 4.1
72
+ */
73
+ registerProvider('google', {
74
+ createModel: (config) => {
75
+ const google = createGoogleGenerativeAI({
76
+ apiKey: process.env.GEMINI_API_KEY,
77
+ });
78
+ return google(config.model || 'gemini-1.5-flash');
79
+ },
80
+ });
81
+ /**
82
+ * Amazon Bedrock Provider (Built-in)
83
+ *
84
+ * Uses @ai-sdk/amazon-bedrock with AWS_BEARER_TOKEN_BEDROCK environment variable.
85
+ * SDK auto-detects credentials from environment.
86
+ * Default model: anthropic.claude-3-haiku-20240307-v1:0
87
+ *
88
+ * Requirements: 1.3, 4.2
89
+ */
90
+ registerProvider('bedrock', {
91
+ createModel: (config) => {
92
+ return bedrock(config.model || 'anthropic.claude-3-haiku-20240307-v1:0');
93
+ },
94
+ });
@@ -0,0 +1,33 @@
1
+ /**
2
+ * Beddel Protocol - Validation Module
3
+ * Schema validation using Zod with stream/output constraint enforcement
4
+ */
5
+ import { ZodSchema } from 'zod';
6
+ import type { SchemaSpec, ParsedYaml } from '../types';
7
+ /**
8
+ * Validation error with detailed issue list
9
+ */
10
+ export declare class ValidationError extends Error {
11
+ details: string[];
12
+ constructor(message: string, details: string[]);
13
+ }
14
+ /**
15
+ * Convert SchemaSpec to Zod schema
16
+ */
17
+ export declare function schemaSpecToZod(spec: SchemaSpec): ZodSchema;
18
+ /**
19
+ * Validate data against SchemaSpec
20
+ */
21
+ export declare function validateSchema(spec: SchemaSpec, data: unknown): {
22
+ success: true;
23
+ data: unknown;
24
+ } | {
25
+ success: false;
26
+ error: ValidationError;
27
+ };
28
+ /**
29
+ * Validate YAML structure for stream/output constraint
30
+ * @throws ValidationError if stream: true AND output schema is defined
31
+ */
32
+ export declare function validateYamlConstraints(yaml: ParsedYaml): void;
33
+ //# sourceMappingURL=index.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/validation/index.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,EAAK,SAAS,EAAE,MAAM,KAAK,CAAC;AACnC,OAAO,KAAK,EAAE,UAAU,EAAE,UAAU,EAAE,MAAM,UAAU,CAAC;AAEvD;;GAEG;AACH,qBAAa,eAAgB,SAAQ,KAAK;IACF,OAAO,EAAE,MAAM,EAAE;gBAAzC,OAAO,EAAE,MAAM,EAAS,OAAO,EAAE,MAAM,EAAE;CAIxD;AAED;;GAEG;AACH,wBAAgB,eAAe,CAAC,IAAI,EAAE,UAAU,GAAG,SAAS,CAqB3D;AAED;;GAEG;AACH,wBAAgB,cAAc,CAC1B,IAAI,EAAE,UAAU,EAChB,IAAI,EAAE,OAAO,GACd;IAAE,OAAO,EAAE,IAAI,CAAC;IAAC,IAAI,EAAE,OAAO,CAAA;CAAE,GAAG;IAAE,OAAO,EAAE,KAAK,CAAC;IAAC,KAAK,EAAE,eAAe,CAAA;CAAE,CAa/E;AAED;;;GAGG;AACH,wBAAgB,uBAAuB,CAAC,IAAI,EAAE,UAAU,GAAG,IAAI,CAiB9D"}
@@ -0,0 +1,74 @@
1
+ /**
2
+ * Beddel Protocol - Validation Module
3
+ * Schema validation using Zod with stream/output constraint enforcement
4
+ */
5
+ import { z } from 'zod';
6
+ /**
7
+ * Validation error with detailed issue list
8
+ */
9
+ export class ValidationError extends Error {
10
+ details;
11
+ constructor(message, details) {
12
+ super(message);
13
+ this.details = details;
14
+ this.name = 'ValidationError';
15
+ }
16
+ }
17
+ /**
18
+ * Convert SchemaSpec to Zod schema
19
+ */
20
+ export function schemaSpecToZod(spec) {
21
+ switch (spec.type) {
22
+ case 'string':
23
+ return z.string();
24
+ case 'number':
25
+ return z.number();
26
+ case 'boolean':
27
+ return z.boolean();
28
+ case 'array':
29
+ return z.array(spec.items ? schemaSpecToZod(spec.items) : z.unknown());
30
+ case 'object': {
31
+ const shape = {};
32
+ for (const [key, value] of Object.entries(spec.properties || {})) {
33
+ const fieldSchema = schemaSpecToZod(value);
34
+ shape[key] = spec.required?.includes(key) ? fieldSchema : fieldSchema.optional();
35
+ }
36
+ return z.object(shape);
37
+ }
38
+ default:
39
+ return z.unknown();
40
+ }
41
+ }
42
+ /**
43
+ * Validate data against SchemaSpec
44
+ */
45
+ export function validateSchema(spec, data) {
46
+ const schema = schemaSpecToZod(spec);
47
+ const result = schema.safeParse(data);
48
+ if (result.success) {
49
+ return { success: true, data: result.data };
50
+ }
51
+ const details = result.error.issues.map(i => `${i.path.join('.')}: ${i.message}`);
52
+ return {
53
+ success: false,
54
+ error: new ValidationError('Validation failed', details)
55
+ };
56
+ }
57
+ /**
58
+ * Validate YAML structure for stream/output constraint
59
+ * @throws ValidationError if stream: true AND output schema is defined
60
+ */
61
+ export function validateYamlConstraints(yaml) {
62
+ for (const step of yaml.workflow) {
63
+ const config = step.config;
64
+ if (step.type === 'llm' && config.stream === true && config.output) {
65
+ const error = new ValidationError(`Step '${step.id}' has incompatible config: stream=true with output schema`, [
66
+ 'Streaming mode does not support structured output validation.',
67
+ 'Either set stream: false, or remove the output schema.'
68
+ ]);
69
+ // Log to server before throwing
70
+ console.error('[Beddel] YAML Constraint Violation:', error.message, error.details);
71
+ throw error;
72
+ }
73
+ }
74
+ }
@@ -88,6 +88,14 @@ Map of tool names to their implementations for LLM function calling.
88
88
  - `calculator` β€” Evaluate mathematical expressions
89
89
  - `getCurrentTime` β€” Get current ISO timestamp
90
90
 
91
+ #### `providerRegistry: Record<string, ProviderImplementation>`
92
+
93
+ Map of provider names to their implementations for LLM model creation.
94
+
95
+ **Built-in providers:**
96
+ - `google` β€” Google Gemini via `@ai-sdk/google` (requires `GEMINI_API_KEY`)
97
+ - `bedrock` β€” Amazon Bedrock via `@ai-sdk/amazon-bedrock` (auto-detects AWS credentials)
98
+
91
99
  ---
92
100
 
93
101
  ### Extensibility Functions
@@ -132,6 +140,36 @@ registerCallback('persistConversation', async ({ text, usage }) => {
132
140
  });
133
141
  ```
134
142
 
143
+ #### `registerProvider(name: string, implementation: ProviderImplementation): void`
144
+
145
+ Register a custom LLM provider for dynamic model selection.
146
+
147
+ ```typescript
148
+ import { registerProvider } from 'beddel';
149
+ import { createOpenAI } from '@ai-sdk/openai';
150
+
151
+ registerProvider('openai', {
152
+ createModel: (config) => {
153
+ const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
154
+ return openai(config.model || 'gpt-4');
155
+ },
156
+ });
157
+ ```
158
+
159
+ #### `createModel(provider: string, config: ProviderConfig): LanguageModel`
160
+
161
+ Create a LanguageModel instance from a registered provider.
162
+
163
+ ```typescript
164
+ import { createModel } from 'beddel';
165
+
166
+ const model = createModel('google', { model: 'gemini-1.5-flash' });
167
+ // Or use bedrock
168
+ const bedrockModel = createModel('bedrock', { model: 'anthropic.claude-3-haiku-20240307-v1:0' });
169
+ ```
170
+
171
+ **Throws:** Error if provider is not found, listing available providers.
172
+
135
173
  ---
136
174
 
137
175
  ## `beddel/server`
@@ -267,6 +305,23 @@ interface ToolImplementation {
267
305
  type CallbackFn = (payload: Record<string, unknown>) => void | Promise<void>;
268
306
  ```
269
307
 
308
+ ### `ProviderImplementation`
309
+
310
+ ```typescript
311
+ interface ProviderImplementation {
312
+ createModel: (config: ProviderConfig) => LanguageModel;
313
+ }
314
+ ```
315
+
316
+ ### `ProviderConfig`
317
+
318
+ ```typescript
319
+ interface ProviderConfig {
320
+ model: string;
321
+ [key: string]: unknown;
322
+ }
323
+ ```
324
+
270
325
  ---
271
326
 
272
327
  ## AI SDK v6 Compatibility
@@ -305,3 +360,4 @@ return result.toUIMessageStreamResponse();
305
360
  |------|---------|-------------|
306
361
  | 2024-12-24 | 1.0.0 | Initial API reference |
307
362
  | 2024-12-24 | 1.0.1 | AI SDK v6 compatibility: UIMessage/ModelMessage conversion |
363
+ | 2024-12-25 | 1.0.2 | Provider registry: registerProvider, createModel, bedrock support |
@@ -66,7 +66,7 @@
66
66
  - `registerCallback(name: string, fn: CallbackFn): void` - Register lifecycle callbacks
67
67
  - `callbackRegistry: Record<string, CallbackFn>` - Stores registered callbacks
68
68
 
69
- **Dependencies:** `ai`, `@ai-sdk/google`, `toolRegistry`, `callbackRegistry`
69
+ **Dependencies:** `ai`, `providerRegistry`, `toolRegistry`, `callbackRegistry`
70
70
 
71
71
  **Technology Stack (AI SDK v6):**
72
72
  - `streamText()` for streaming mode β†’ `result.toUIMessageStreamResponse()`
@@ -75,6 +75,7 @@
75
75
  - `dynamicTool()` for registry-based tool creation
76
76
  - `stopWhen: stepCountIs(5)` for multi-step tool loops
77
77
  - `onFinish` / `onError` lifecycle callbacks
78
+ - `createModel()` from provider registry for dynamic provider selection
78
79
 
79
80
  **AI SDK v6 Message Format Compatibility:**
80
81
  - Frontend (`useChat`) sends `UIMessage[]` with `{ parts: [...] }` format
@@ -84,6 +85,35 @@
84
85
 
85
86
  ---
86
87
 
88
+ ### Provider Registry (`src/providers/index.ts`)
89
+
90
+ **Responsibility:** Register and provide LLM provider implementations for dynamic model creation.
91
+
92
+ **Key Interfaces:**
93
+ - `providerRegistry: Record<string, ProviderImplementation>`
94
+ - `registerProvider(name: string, implementation: ProviderImplementation): void`
95
+ - `createModel(provider: string, config: ProviderConfig): LanguageModel`
96
+ - `ProviderImplementation: { createModel: (config) => LanguageModel }`
97
+ - `ProviderConfig: { model: string, [key: string]: unknown }`
98
+
99
+ **Built-in Providers:**
100
+ - `google` - Google Gemini via `@ai-sdk/google` (requires `GEMINI_API_KEY`)
101
+ - `bedrock` - Amazon Bedrock via `@ai-sdk/amazon-bedrock` (reads `AWS_REGION`, falling back to `us-east-1` when unset)
102
+
103
+ **Environment Variables:**
104
+
105
+ | Provider | Variable | Description |
106
+ |----------|----------|-------------|
107
+ | `google` | `GEMINI_API_KEY` | Google Gemini API key |
108
+ | `bedrock` | `AWS_REGION` | AWS region (defaults to `us-east-1`) |
109
+ | `bedrock` | `AWS_BEARER_TOKEN_BEDROCK` | Bedrock API key (or use standard AWS credentials) |
110
+
111
+ **Dependencies:** `ai`, `@ai-sdk/google`, `@ai-sdk/amazon-bedrock`
112
+
113
+ **Technology Stack:** Registry pattern with Vercel AI SDK LanguageModel interface.
114
+
115
+ ---
116
+
87
117
  ### Output Primitive (`src/primitives/output.ts`)
88
118
 
89
119
  **Responsibility:** Deterministic JSON transform using variable resolution.
@@ -192,6 +222,22 @@ registerCallback('persistConversation', async ({ text, usage }) => {
192
222
  });
193
223
  ```
194
224
 
225
+ ### `registerProvider(name, implementation)`
226
+
227
+ Add custom LLM providers for dynamic model selection.
228
+
229
+ ```typescript
230
+ import { registerProvider } from 'beddel';
231
+ import { createOpenAI } from '@ai-sdk/openai';
232
+
233
+ registerProvider('openai', {
234
+ createModel: (config) => {
235
+ const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
236
+ return openai(config.model || 'gpt-4');
237
+ },
238
+ });
239
+ ```
240
+
195
241
  ---
196
242
 
197
243
  ## Component Diagram
@@ -217,6 +263,12 @@ graph TB
217
263
  CallAgent["call-agent (placeholder)"]
218
264
  end
219
265
 
266
+ subgraph "Providers"
267
+ ProvReg["index.ts (providerRegistry)"]
268
+ Google["google"]
269
+ Bedrock["bedrock"]
270
+ end
271
+
220
272
  subgraph "Tools"
221
273
  ToolReg["index.ts (toolRegistry)"]
222
274
  Calc["calculator"]
@@ -231,6 +283,7 @@ graph TB
231
283
  Index --> Executor
232
284
  Index --> Registry
233
285
  Index --> ToolReg
286
+ Index --> ProvReg
234
287
  Server --> Handler
235
288
  Handler --> Parser
236
289
  Handler --> Executor
@@ -240,6 +293,9 @@ graph TB
240
293
  Registry --> OutputPrim
241
294
  Registry --> CallAgent
242
295
  LLMPrim --> ToolReg
296
+ LLMPrim --> ProvReg
297
+ ProvReg --> Google
298
+ ProvReg --> Bedrock
243
299
  ToolReg --> Calc
244
300
  ToolReg --> Time
245
301
  ```
@@ -35,19 +35,48 @@ workflow:
35
35
  - id: "chat-interaction"
36
36
  type: "llm"
37
37
  config:
38
+ provider: "google" # Optional: 'google' (default) or 'bedrock'
38
39
  model: "gemini-2.0-flash-exp"
39
40
  stream: true
40
41
  system: "You are a helpful and concise assistant."
41
42
  messages: "$input.messages"
42
43
  ```
43
44
 
45
+ **Using Amazon Bedrock:**
46
+
47
+ ```yaml
48
+ # src/agents/bedrock-assistant.yaml
49
+ metadata:
50
+ name: "Bedrock Assistant"
51
+ version: "1.0.0"
52
+
53
+ workflow:
54
+ - id: "chat-interaction"
55
+ type: "llm"
56
+ config:
57
+ provider: "bedrock"
58
+ model: "anthropic.claude-3-haiku-20240307-v1:0"
59
+ stream: true
60
+ system: "You are a helpful assistant."
61
+ messages: "$input.messages"
62
+ ```
63
+
44
64
  ### 4. (Optional) Register Custom Extensions
45
65
 
46
66
  ```typescript
47
67
  // app/api/beddel/chat/route.ts
48
68
  import { createBeddelHandler } from 'beddel/server';
49
- import { registerTool, registerCallback } from 'beddel';
69
+ import { registerTool, registerCallback, registerProvider } from 'beddel';
50
70
  import { z } from 'zod';
71
+ import { createOpenAI } from '@ai-sdk/openai';
72
+
73
+ // Register custom LLM provider
74
+ registerProvider('openai', {
75
+ createModel: (config) => {
76
+ const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
77
+ return openai(config.model || 'gpt-4');
78
+ },
79
+ });
51
80
 
52
81
  // Register custom tool
53
82
  registerTool('myTool', {
@@ -79,6 +108,7 @@ workflow:
79
108
  - id: "step-1"
80
109
  type: "llm" # Primitive type
81
110
  config:
111
+ provider: "google" # Optional: 'google' (default) or 'bedrock' or custom
82
112
  model: "gemini-2.0-flash-exp"
83
113
  stream: true # true = streaming, false = blocking
84
114
  system: "System prompt"
@@ -43,18 +43,22 @@ graph TD
43
43
  Executor --> Output["output-generator Primitive"]
44
44
  Executor --> CallAgent["call-agent (placeholder)"]
45
45
  LLM --> SDK["Vercel AI SDK (streamText/generateText)"]
46
- SDK --> Provider["@ai-sdk/google (Gemini)"]
46
+ SDK --> Provider["Provider Registry"]
47
+ Provider --> Google["@ai-sdk/google (Gemini)"]
48
+ Provider --> Bedrock["@ai-sdk/amazon-bedrock"]
47
49
  LLM --> Tools["Tool Registry"]
48
50
 
49
51
  subgraph "Expansion Pack Pattern"
50
52
  PrimitiveRegistry["handlerRegistry"]
51
53
  ToolRegistry["toolRegistry"]
52
54
  CallbackRegistry["callbackRegistry"]
55
+ ProviderRegistry["providerRegistry"]
53
56
  end
54
57
 
55
58
  Executor --> PrimitiveRegistry
56
59
  LLM --> ToolRegistry
57
60
  LLM --> CallbackRegistry
61
+ LLM --> ProviderRegistry
58
62
  ```
59
63
 
60
64
  ---
@@ -63,10 +67,11 @@ graph TD
63
67
 
64
68
  - **Sequential Pipeline Pattern:** Workflow steps execute in order; first `Response` return breaks the loop β€” *Rationale:* Enables streaming without blocking subsequent steps
65
69
 
66
- - **Expansion Pack Pattern:** Primitives, tools, and callbacks are registered in extensible maps β€” *Rationale:* Inspired by BMAD-METHODβ„’, allows community extensions without core changes
70
+ - **Expansion Pack Pattern:** Primitives, tools, callbacks, and providers are registered in extensible maps β€” *Rationale:* Inspired by BMAD-METHODβ„’, allows community extensions without core changes
67
71
  - `registerPrimitive(type, handler)` β€” Add custom step types
68
72
  - `registerTool(name, impl)` β€” Add custom LLM tools
69
73
  - `registerCallback(name, fn)` β€” Add lifecycle hooks
74
+ - `registerProvider(name, impl)` β€” Add custom LLM providers
70
75
 
71
76
  - **Early Return Pattern:** When `llmPrimitive` returns `Response`, executor immediately returns to client β€” *Rationale:* Prevents buffering of streaming responses
72
77
 
@@ -14,6 +14,8 @@ packages/beddel/
14
14
  β”‚ β”‚ β”œβ”€β”€ index.ts # Handler registry (handlerRegistry)
15
15
  β”‚ β”‚ β”œβ”€β”€ llm.ts # streamText/generateText wrapper
16
16
  β”‚ β”‚ └── output.ts # JSON transform primitive
17
+ β”‚ β”œβ”€β”€ providers/
18
+ β”‚ β”‚ └── index.ts # Provider registry (google, bedrock)
17
19
  β”‚ β”œβ”€β”€ server/
18
20
  β”‚ β”‚ └── handler.ts # createBeddelHandler factory
19
21
  β”‚ β”œβ”€β”€ tools/
@@ -50,7 +52,7 @@ Beddel exports three distinct bundles to support different runtime environments:
50
52
 
51
53
  | Import Path | Entry File | Contents | Use Case |
52
54
  |-------------|------------|----------|----------|
53
- | `beddel` | `index.ts` | Full API: `loadYaml`, `WorkflowExecutor`, `handlerRegistry`, `toolRegistry`, `registerPrimitive`, `registerTool` | Internal Beddel usage, custom handlers |
55
+ | `beddel` | `index.ts` | Full API: `loadYaml`, `WorkflowExecutor`, `handlerRegistry`, `toolRegistry`, `providerRegistry`, `registerPrimitive`, `registerTool`, `registerProvider` | Internal Beddel usage, custom handlers |
54
56
  | `beddel/server` | `server.ts` | `createBeddelHandler`, `BeddelHandlerOptions` | Next.js API Routes (Consumer) |
55
57
  | `beddel/client` | `client.ts` | Types only: `ParsedYaml`, `ExecutionContext`, `PrimitiveHandler`, etc. | Client Components, type-checking |
56
58
 
@@ -8,6 +8,7 @@
8
8
  | **Runtime** | Node.js / Edge | 20+ | JavaScript runtime | Next.js App Router Edge compatibility |
9
9
  | **AI Core** | `ai` | 6.x | Vercel AI SDK Core | Native `streamText`/`generateText` support |
10
10
  | **AI Provider**| `@ai-sdk/google` | 3.x | Google Gemini integration | Default LLM provider (requires v3+ for AI SDK 6.x) |
11
+ | **AI Provider**| `@ai-sdk/amazon-bedrock` | 4.x | Amazon Bedrock integration | Alternative LLM provider for AWS environments |
11
12
  | **Validation** | `zod` | 3.x | Schema validation for tools | Type-safe runtime validation |
12
13
  | **YAML Parser**| `js-yaml` | 4.x | Secure YAML parsing | FAILSAFE_SCHEMA prevents code execution |
13
14
  | **Framework** | Next.js App Router | 14+ | API route hosting | Required by consumers, not bundled |
package/package.json CHANGED
@@ -1,6 +1,14 @@
1
1
  {
2
2
  "name": "beddel",
3
- "version": "1.0.0",
3
+ "version": "1.0.2",
4
+ "repository": {
5
+ "type": "git",
6
+ "url": "git+https://github.com/botanarede/beddel.git"
7
+ },
8
+ "homepage": "https://beddel.com.br",
9
+ "bugs": {
10
+ "url": "https://github.com/botanarede/beddel/issues"
11
+ },
4
12
  "type": "module",
5
13
  "main": "./dist/index.js",
6
14
  "types": "./dist/index.d.ts",
@@ -25,6 +33,7 @@
25
33
  "dependencies": {
26
34
  "ai": "^6.0.3",
27
35
  "@ai-sdk/google": "^3.0.1",
36
+ "@ai-sdk/amazon-bedrock": "^4.0.3",
28
37
  "zod": "^3.23.8",
29
38
  "js-yaml": "^4.1.0"
30
39
  },
@@ -33,4 +42,4 @@
33
42
  "@types/node": "^22.10.2",
34
43
  "@types/js-yaml": "^4.0.9"
35
44
  }
36
- }
45
+ }
package/src/index.ts CHANGED
@@ -22,6 +22,10 @@ export { handlerRegistry, registerPrimitive } from './primitives';
22
22
  export { toolRegistry, registerTool } from './tools';
23
23
  export type { ToolImplementation } from './tools';
24
24
 
25
+ // Providers registry (for custom LLM provider registration)
26
+ export { providerRegistry, registerProvider, createModel } from './providers';
27
+ export type { ProviderImplementation, ProviderConfig } from './providers';
28
+
25
29
  // Types (re-exported for convenience, also available via beddel/client)
26
30
  export type {
27
31
  ParsedYaml,
@@ -26,10 +26,10 @@ import {
26
26
  type UIMessage,
27
27
  type ToolSet,
28
28
  } from 'ai';
29
- import { createGoogleGenerativeAI } from '@ai-sdk/google';
30
29
  import type { StepConfig, ExecutionContext, PrimitiveHandler } from '../types';
31
30
  import { toolRegistry, type ToolImplementation } from '../tools';
32
31
  import { resolveVariables } from '../core/variable-resolver';
32
+ import { createModel } from '../providers';
33
33
 
34
34
  /**
35
35
  * Callback function type for lifecycle hooks (onFinish, onError).
@@ -74,6 +74,7 @@ interface YamlToolDefinition {
74
74
  * LLM step configuration from YAML.
75
75
  */
76
76
  interface LlmConfig extends StepConfig {
77
+ provider?: string;
77
78
  model?: string;
78
79
  stream?: boolean;
79
80
  system?: string;
@@ -137,11 +138,10 @@ export const llmPrimitive: PrimitiveHandler = async (
137
138
  ): Promise<Response | Record<string, unknown>> => {
138
139
  const llmConfig = config as LlmConfig;
139
140
 
140
- // Initialize Google Generative AI provider
141
- const google = createGoogleGenerativeAI({
142
- apiKey: process.env.GEMINI_API_KEY,
141
+ // Create model from provider registry (defaults to 'google')
142
+ const model = createModel(llmConfig.provider || 'google', {
143
+ model: llmConfig.model || 'gemini-1.5-flash',
143
144
  });
144
- const model = google(llmConfig.model || 'gemini-1.5-flash');
145
145
 
146
146
  // Resolve messages from context (e.g., $input.messages)
147
147
  // AI SDK v6: Frontend sends UIMessage[], we convert to ModelMessage[]
@@ -0,0 +1,124 @@
1
+ /**
2
+ * Beddel Protocol - Provider Registry
3
+ *
4
+ * This registry maps provider names to their implementations.
5
+ * Following Expansion Pack Pattern from BMAD-METHODβ„’ for extensibility.
6
+ * See: https://github.com/bmadcode/bmad-method
7
+ *
8
+ * Server-only: Providers may use Node.js APIs and external services.
9
+ */
10
+
11
+ import type { LanguageModel } from 'ai';
12
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
13
+ import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
14
+
15
+ /**
16
+ * Configuration passed to provider's createModel method.
17
+ */
18
+ export interface ProviderConfig {
19
+ model: string;
20
+ [key: string]: unknown;
21
+ }
22
+
23
+ /**
24
+ * Provider implementation interface.
25
+ * Each provider must implement createModel to return an AI SDK LanguageModel.
26
+ */
27
+ export interface ProviderImplementation {
28
+ createModel: (config: ProviderConfig) => LanguageModel;
29
+ }
30
+
31
+ /**
32
+ * Registry of provider implementations keyed by provider name.
33
+ *
34
+ * Built-in Providers:
35
+ * - 'google': Google Gemini via @ai-sdk/google
36
+ * - 'bedrock': Amazon Bedrock via @ai-sdk/amazon-bedrock
37
+ */
38
+ export const providerRegistry: Record<string, ProviderImplementation> = {};
39
+
40
+ /**
41
+ * Register a custom provider implementation in the registry.
42
+ * Allows consumers to extend Beddel with additional LLM providers.
43
+ *
44
+ * @param name - Provider identifier (e.g., 'openai', 'anthropic')
45
+ * @param implementation - ProviderImplementation with createModel method
46
+ *
47
+ * @example
48
+ * import { registerProvider } from 'beddel';
49
+ *
50
+ * registerProvider('openai', {
51
+ * createModel: (config) => {
52
+ * const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
53
+ * return openai(config.model || 'gpt-4');
54
+ * },
55
+ * });
56
+ */
57
+ export function registerProvider(
58
+ name: string,
59
+ implementation: ProviderImplementation
60
+ ): void {
61
+ if (providerRegistry[name]) {
62
+ console.warn(`[Beddel] Provider '${name}' already registered, overwriting.`);
63
+ }
64
+ providerRegistry[name] = implementation;
65
+ }
66
+
67
+ /**
68
+ * Create a LanguageModel instance from a registered provider.
69
+ *
70
+ * @param provider - Provider name (must be registered in providerRegistry)
71
+ * @param config - Configuration including model name and provider-specific options
72
+ * @returns LanguageModel instance from the AI SDK
73
+ * @throws Error if provider is not found, listing available providers
74
+ *
75
+ * @example
76
+ * const model = createModel('google', { model: 'gemini-1.5-flash' });
77
+ */
78
+ export function createModel(provider: string, config: ProviderConfig): LanguageModel {
79
+ const impl = providerRegistry[provider];
80
+ if (!impl) {
81
+ const available = Object.keys(providerRegistry).join(', ') || 'none';
82
+ throw new Error(`Unknown provider: '${provider}'. Available: ${available}`);
83
+ }
84
+ return impl.createModel(config);
85
+ }
86
+
87
+ // =============================================================================
88
+ // Built-in Providers
89
+ // =============================================================================
90
+
91
+ /**
92
+ * Google Gemini Provider (Built-in)
93
+ *
94
+ * Uses @ai-sdk/google with GEMINI_API_KEY environment variable.
95
+ * Default model: gemini-1.5-flash
96
+ *
97
+ * Requirements: 1.2, 4.1
98
+ */
99
+ registerProvider('google', {
100
+ createModel: (config: ProviderConfig): LanguageModel => {
101
+ const google = createGoogleGenerativeAI({
102
+ apiKey: process.env.GEMINI_API_KEY,
103
+ });
104
+ return google(config.model || 'gemini-1.5-flash');
105
+ },
106
+ });
107
+
108
+ /**
109
+ * Amazon Bedrock Provider (Built-in)
110
+ *
111
+ * Uses @ai-sdk/amazon-bedrock with AWS_BEARER_TOKEN_BEDROCK environment variable.
112
+ * Region is configured via AWS_REGION env var or defaults to 'us-east-1'.
113
+ * Default model: anthropic.claude-3-haiku-20240307-v1:0
114
+ *
115
+ * Requirements: 1.3, 4.2
116
+ */
117
+ registerProvider('bedrock', {
118
+ createModel: (config: ProviderConfig): LanguageModel => {
119
+ const bedrock = createAmazonBedrock({
120
+ region: process.env.AWS_REGION || 'us-east-1',
121
+ });
122
+ return bedrock(config.model || 'anthropic.claude-3-haiku-20240307-v1:0');
123
+ },
124
+ });