beddel 0.2.3 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +35 -0
- package/dist/agents/chat/chat.handler.d.ts +1 -1
- package/dist/agents/chat/chat.handler.d.ts.map +1 -1
- package/dist/agents/chat/chat.handler.js +9 -7
- package/dist/agents/chat/chat.handler.js.map +1 -1
- package/dist/agents/chat/chat.yaml +6 -8
- package/dist/agents/gemini-vectorize/gemini-vectorize.handler.d.ts +1 -1
- package/dist/agents/gemini-vectorize/gemini-vectorize.handler.d.ts.map +1 -1
- package/dist/agents/gemini-vectorize/gemini-vectorize.handler.js +16 -13
- package/dist/agents/gemini-vectorize/gemini-vectorize.handler.js.map +1 -1
- package/dist/agents/image/image.handler.d.ts +1 -1
- package/dist/agents/image/image.handler.d.ts.map +1 -1
- package/dist/agents/image/image.handler.js +9 -6
- package/dist/agents/image/image.handler.js.map +1 -1
- package/dist/agents/image/image.types.d.ts +1 -0
- package/dist/agents/image/image.types.d.ts.map +1 -1
- package/dist/agents/index.d.ts +11 -2
- package/dist/agents/index.d.ts.map +1 -1
- package/dist/agents/index.js +9 -3
- package/dist/agents/index.js.map +1 -1
- package/dist/agents/joker/joker.handler.d.ts +1 -1
- package/dist/agents/joker/joker.handler.d.ts.map +1 -1
- package/dist/agents/joker/joker.handler.js +7 -11
- package/dist/agents/joker/joker.handler.js.map +1 -1
- package/dist/agents/joker/joker.types.d.ts +1 -0
- package/dist/agents/joker/joker.types.d.ts.map +1 -1
- package/dist/agents/llm/index.d.ts +15 -0
- package/dist/agents/llm/index.d.ts.map +1 -0
- package/dist/agents/llm/index.js +20 -0
- package/dist/agents/llm/index.js.map +1 -0
- package/dist/agents/llm/llm.handler.d.ts +8 -0
- package/dist/agents/llm/llm.handler.d.ts.map +1 -0
- package/dist/agents/llm/llm.handler.js +64 -0
- package/dist/agents/llm/llm.handler.js.map +1 -0
- package/dist/agents/llm/llm.schema.d.ts +26 -0
- package/dist/agents/llm/llm.schema.d.ts.map +1 -0
- package/dist/agents/llm/llm.schema.js +23 -0
- package/dist/agents/llm/llm.schema.js.map +1 -0
- package/dist/agents/llm/llm.types.d.ts +34 -0
- package/dist/agents/llm/llm.types.d.ts.map +1 -0
- package/dist/agents/llm/llm.types.js +7 -0
- package/dist/agents/llm/llm.types.js.map +1 -0
- package/dist/agents/llm/llm.yaml +87 -0
- package/dist/agents/rag/rag.handler.d.ts +1 -0
- package/dist/agents/rag/rag.handler.d.ts.map +1 -1
- package/dist/agents/rag/rag.handler.js +15 -38
- package/dist/agents/rag/rag.handler.js.map +1 -1
- package/dist/agents/rag/rag.types.d.ts +2 -7
- package/dist/agents/rag/rag.types.d.ts.map +1 -1
- package/dist/agents/rag/rag.types.js +1 -0
- package/dist/agents/rag/rag.types.js.map +1 -1
- package/dist/agents/registry/agentRegistry.d.ts +5 -0
- package/dist/agents/registry/agentRegistry.d.ts.map +1 -1
- package/dist/agents/registry/agentRegistry.js +33 -1
- package/dist/agents/registry/agentRegistry.js.map +1 -1
- package/dist/agents/translator/translator.handler.d.ts +1 -1
- package/dist/agents/translator/translator.handler.d.ts.map +1 -1
- package/dist/agents/translator/translator.handler.js +11 -13
- package/dist/agents/translator/translator.handler.js.map +1 -1
- package/dist/agents/translator/translator.types.d.ts +1 -0
- package/dist/agents/translator/translator.types.d.ts.map +1 -1
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3 -1
- package/dist/index.js.map +1 -1
- package/dist/runtime/declarativeAgentRuntime.d.ts +4 -4
- package/dist/runtime/declarativeAgentRuntime.d.ts.map +1 -1
- package/dist/runtime/declarativeAgentRuntime.js +14 -9
- package/dist/runtime/declarativeAgentRuntime.js.map +1 -1
- package/dist/runtime/index.d.ts +3 -1
- package/dist/runtime/index.d.ts.map +1 -1
- package/dist/runtime/index.js +6 -1
- package/dist/runtime/index.js.map +1 -1
- package/dist/runtime/llmProviderFactory.d.ts +47 -0
- package/dist/runtime/llmProviderFactory.d.ts.map +1 -0
- package/dist/runtime/llmProviderFactory.js +119 -0
- package/dist/runtime/llmProviderFactory.js.map +1 -0
- package/dist/runtime/workflowExecutor.d.ts +3 -2
- package/dist/runtime/workflowExecutor.d.ts.map +1 -1
- package/dist/runtime/workflowExecutor.js +21 -11
- package/dist/runtime/workflowExecutor.js.map +1 -1
- package/dist/shared/types/agent.types.d.ts +15 -2
- package/dist/shared/types/agent.types.d.ts.map +1 -1
- package/dist/shared/types/agent.types.js +11 -0
- package/dist/shared/types/agent.types.js.map +1 -1
- package/package.json +7 -5
- package/src/agents/chat/chat.handler.ts +15 -13
- package/src/agents/chat/chat.yaml +6 -8
- package/src/agents/gemini-vectorize/gemini-vectorize.handler.ts +18 -15
- package/src/agents/image/image.handler.ts +10 -6
- package/src/agents/image/image.types.ts +1 -0
- package/src/agents/index.ts +6 -2
- package/src/agents/joker/joker.handler.ts +7 -12
- package/src/agents/joker/joker.types.ts +1 -0
- package/src/agents/llm/index.ts +20 -0
- package/src/agents/llm/llm.handler.ts +82 -0
- package/src/agents/llm/llm.schema.ts +25 -0
- package/src/agents/llm/llm.types.ts +37 -0
- package/src/agents/llm/llm.yaml +87 -0
- package/src/agents/rag/rag.handler.ts +20 -44
- package/src/agents/rag/rag.types.ts +2 -8
- package/src/agents/registry/agentRegistry.ts +34 -1
- package/src/agents/translator/translator.handler.ts +11 -13
- package/src/agents/translator/translator.types.ts +1 -0
- package/src/index.ts +8 -0
- package/src/runtime/declarativeAgentRuntime.ts +14 -9
- package/src/runtime/index.ts +5 -0
- package/src/runtime/llmProviderFactory.ts +145 -0
- package/src/runtime/workflowExecutor.ts +23 -10
- package/src/shared/types/agent.types.ts +23 -3
package/src/agents/llm/llm.schema.ts
ADDED

@@ -0,0 +1,25 @@
+/**
+ * LLM Agent Schema - Zod validation schemas
+ * Safe for both client and server
+ */
+
+import { z } from 'zod';
+
+export const LlmInputSchema = z.object({
+  query: z.string().min(1),
+  history: z.array(z.object({
+    role: z.enum(['user', 'assistant', 'system']),
+    content: z.string(),
+  })).optional(),
+  temperature: z.number().min(0).max(2).optional(),
+  systemPrompt: z.string().optional(),
+});
+
+export const LlmOutputSchema = z.object({
+  response: z.string(),
+  timestamp: z.string().optional(),
+  error: z.string().optional(),
+});
+
+export type LlmInput = z.infer<typeof LlmInputSchema>;
+export type LlmOutput = z.infer<typeof LlmOutputSchema>;
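For orientation, a minimal sketch (not part of the diff) of how `LlmInputSchema` could validate an untrusted payload before it reaches the handler; the `parseLlmRequest` helper is hypothetical:

```ts
import { LlmInputSchema, type LlmInput } from './agents/llm/llm.schema';

// Hypothetical helper: validate an untrusted request body against the new schema.
function parseLlmRequest(body: unknown): LlmInput {
  const result = LlmInputSchema.safeParse(body); // zod's non-throwing parse
  if (!result.success) {
    throw new Error(`Invalid LLM input: ${result.error.message}`);
  }
  return result.data;
}

// A payload that satisfies the schema above.
const input = parseLlmRequest({ query: 'Summarize the changelog', temperature: 0.7 });
```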
package/src/agents/llm/llm.types.ts
ADDED

@@ -0,0 +1,37 @@
+/**
+ * LLM Agent Types - Shared between client and server
+ * Direct LLM interaction without document context (non-RAG)
+ */
+
+import type { ConversationMessage } from '../rag/rag.types';
+
+/**
+ * Parameters for direct LLM chat
+ */
+export interface LlmHandlerParams {
+  query: string;
+  history?: ConversationMessage[];
+  temperature?: number;
+  systemPrompt?: string;
+}
+
+/**
+ * Result from LLM chat
+ */
+export interface LlmHandlerResult {
+  response: string;
+  timestamp: string;
+  error?: string;
+}
+
+/**
+ * LLM agent metadata
+ */
+export interface LlmMetadata {
+  id: 'llm';
+  name: string;
+  description: string;
+  category: 'intelligence';
+  route: '/agents/llm';
+  tags: string[];
+}
package/src/agents/llm/llm.yaml
ADDED

@@ -0,0 +1,87 @@
+# LLM Agent - Native Builtin Agent for Beddel Runtime
+# Route: /agents/llm
+# Method: llm.execute
+
+agent:
+  id: llm
+  version: 1.0.0
+  protocol: beddel-declarative-protocol/v2.0
+
+metadata:
+  name: "LLM Chat Agent"
+  description: "Direct LLM interaction with conversation history support (non-RAG)"
+  category: "intelligence"
+  route: "/agents/llm"
+  tags:
+    - "llm"
+    - "chat"
+    - "gemini"
+    - "conversation"
+
+schema:
+  input:
+    type: "object"
+    properties:
+      query:
+        type: "string"
+        description: "User message to respond to"
+      history:
+        type: "array"
+        items:
+          type: "object"
+          properties:
+            role:
+              type: "string"
+              enum: ["user", "assistant", "system"]
+            content:
+              type: "string"
+        description: "Conversation history for context continuity"
+      temperature:
+        type: "number"
+        minimum: 0
+        maximum: 2
+        default: 0.7
+        description: "LLM temperature for response generation"
+      systemPrompt:
+        type: "string"
+        description: "Custom system prompt for the LLM"
+    required: ["query"]
+
+  output:
+    type: "object"
+    properties:
+      response:
+        type: "string"
+        description: "Generated response"
+      timestamp:
+        type: "string"
+        description: "ISO timestamp of generation"
+      error:
+        type: "string"
+    required: ["response"]
+
+logic:
+  workflow:
+    - name: "generate-response"
+      type: "llm"
+      action:
+        query: "$input.query"
+        history: "$input.history"
+        temperature: "$input.temperature"
+        systemPrompt: "$input.systemPrompt"
+      result: "llmResult"
+
+    - name: "deliver-response"
+      type: "output-generator"
+      action:
+        type: "generate"
+        output:
+          response: "$llmResult.response"
+          timestamp: "$llmResult.timestamp"
+          error: "$llmResult.error"
+
+output:
+  schema:
+    response: "$llmResult.response"
+    timestamp: "$llmResult.timestamp"
+    error: "$llmResult.error"
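The workflow wires its steps together through `$`-prefixed references: `$input.*` reads the validated request, and `result: "llmResult"` names the first step's output so the later step and the final `output.schema` can read `$llmResult.*`. A sketch of that resolution convention (illustrative only; the runtime's actual resolver is not shown in this diff):

```ts
// Illustrative resolver for the "$scope.path" references used in the YAML above.
function resolveRef(ref: string, scopes: Map<string, unknown>): unknown {
  if (!ref.startsWith('$')) return ref; // literal value, pass through
  const [scopeName, ...path] = ref.slice(1).split('.');
  let value: any = scopes.get(scopeName);
  for (const key of path) value = value?.[key];
  return value;
}

const scopes = new Map<string, unknown>([
  ['input', { query: 'hello', temperature: 0.7 }],
  ['llmResult', { response: 'Hi!', timestamp: '2024-01-01T00:00:00Z' }],
]);
resolveRef('$llmResult.response', scopes); // => 'Hi!'
```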
package/src/agents/rag/rag.handler.ts
CHANGED

@@ -2,42 +2,26 @@ import 'server-only';
 
 /**
  * RAG Agent Handler - Server-only execution logic
- * Generates natural language answers based on provided context using
- *
+ * Generates natural language answers based on provided document context using LLM
+ *
+ * Note: RAG = Retrieval-Augmented Generation
+ * This agent ALWAYS requires document context. For direct LLM chat without
+ * documents, use the LLM agent instead.
  */
 
 import { generateText } from 'ai';
-import {
+import { LLMProviderFactory, extractProviderConfig } from '../../runtime/llmProviderFactory';
 import type { ExecutionContext } from '../../types/executionContext';
 import type { RagHandlerParams, RagHandlerResult, ConversationMessage } from './rag.types';
 
-const GEMINI_RAG_MODEL = 'models/gemini-2.0-flash-exp';
-
-/**
- * Build prompt for simple chat mode (no documents)
- */
-function buildSimpleChatPrompt(query: string, history?: ConversationMessage[]): string {
-  const conversationContext = history?.length
-    ? `CONVERSATION HISTORY:\n${history.map((m) => `${m.role.toUpperCase()}: ${m.content}`).join('\n')}\n\n`
-    : '';
-
-  return `You are a helpful, friendly assistant.
-
-${conversationContext}USER MESSAGE:
-${query}
-
-INSTRUCTIONS:
-1. Respond naturally to the user's message.
-2. Consider the conversation history for context continuity if available.
-3. Be concise but helpful.
-
-RESPONSE:`;
-}
-
 /**
  * Build prompt for RAG mode (with documents)
  */
-function buildRagPrompt(
+function buildRagPrompt(
+  query: string,
+  ragContext: string,
+  history?: ConversationMessage[]
+): string {
   const conversationContext = history?.length
     ? `CONVERSATION HISTORY:\n${history.map((m) => `${m.role.toUpperCase()}: ${m.content}`).join('\n')}\n\n`
     : '';
@@ -61,43 +45,36 @@ ANSWER:`;
 
 /**
  * Execute RAG answer generation
+ * Requires document context - for direct LLM chat, use the LLM agent
  */
 export async function executeRagHandler(
   params: RagHandlerParams,
   props: Record<string, string>,
   context: ExecutionContext
 ): Promise<RagHandlerResult> {
-  const
-
-    throw new Error('Missing required prop: gemini_api_key');
-  }
+  const providerConfig = extractProviderConfig(props, 'google');
+  const model = LLMProviderFactory.createLanguageModel(providerConfig);
 
-  const { query, history
+  const { query, history } = params;
   const ragContext = params.context || params.documents;
 
   if (!query) {
     throw new Error('Missing required RAG input: query');
   }
 
-
-
-    throw new Error('Missing required RAG input: context or documents');
+  if (!ragContext) {
+    throw new Error('Missing required RAG input: context or documents. For direct LLM chat without documents, use the LLM agent instead.');
   }
 
-  const
-  const model = google(GEMINI_RAG_MODEL);
-
-  const prompt = mode === 'simple'
-    ? buildSimpleChatPrompt(query, history)
-    : buildRagPrompt(query, ragContext!, history);
+  const prompt = buildRagPrompt(query, ragContext, history);
 
   try {
-    context.log(`[RAG
+    context.log(`[RAG] Generating answer for: "${query.substring(0, 50)}..."`);
 
     const { text } = await generateText({
      model,
      prompt,
-      temperature:
+      temperature: 0.3,
    });
 
    return {
@@ -105,7 +82,6 @@ export async function executeRagHandler(
       answer: text,
       timestamp: new Date().toISOString(),
     };
-
   } catch (error: unknown) {
     const message = error instanceof Error ? error.message : String(error);
     context.log(`[RAG] Error: ${message}`);
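With the simple-chat path removed, the handler now fails fast when no document context is supplied. A sketch of a valid call under the new contract (the import path, prop value, and minimal `ExecutionContext` stand-in are placeholders, not the package's real wiring):

```ts
import { executeRagHandler } from './runtime';

const result = await executeRagHandler(
  {
    query: 'What does the contract say about termination?',
    context: 'retrieved document text goes here', // context (or documents) is now required
  },
  { gemini_api_key: process.env.GEMINI_API_KEY ?? '' }, // resolved via extractProviderConfig
  { log: console.log } as any // placeholder for the real ExecutionContext
);
console.log(result.answer);
```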
package/src/agents/rag/rag.types.ts
CHANGED

@@ -1,5 +1,6 @@
 /**
  * RAG Agent Types - Shared between client and server
+ * RAG = Retrieval-Augmented Generation (always requires document context)
  */
 
 /**
@@ -10,22 +11,15 @@ export interface ConversationMessage {
   content: string;
 }
 
-/**
- * RAG execution mode
- * - 'rag': Uses provided documents/context for answer generation
- * - 'simple': Direct LLM chat without document context
- */
-export type RagMode = 'rag' | 'simple';
-
 /**
  * Parameters for RAG answer generation
+ * Note: context or documents is REQUIRED - RAG always needs document context
  */
 export interface RagHandlerParams {
   query: string;
   context?: string;
   documents?: string;
   history?: ConversationMessage[];
-  mode?: RagMode;
 }
 
 /**
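Callers that relied on the removed `mode: 'simple'` path should switch to the dedicated LLM agent; roughly as follows, assuming `executeLlmHandler` (exported from the runtime barrel in this release) mirrors the `executeRagHandler` signature:

```ts
import { executeLlmHandler } from './runtime';
import type { ConversationMessage } from './agents/rag/rag.types';

// 0.2.x equivalent was: executeRagHandler({ query, mode: 'simple' }, props, ctx)
async function directChat(query: string, history: ConversationMessage[]) {
  return executeLlmHandler(
    { query, history },
    { gemini_api_key: process.env.GEMINI_API_KEY ?? '' },
    { log: console.log } as any // placeholder ExecutionContext
  );
}
```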
package/src/agents/registry/agentRegistry.ts
CHANGED

@@ -220,6 +220,8 @@ export class AgentRegistry {
       this.registerGitMcpAgent();
       // Register RAG Agent
       this.registerRagAgent();
+      // Register LLM Agent
+      this.registerLlmAgent();
       // Register Chat Agent (Orchestrator)
       this.registerChatAgent();
     } catch (error) {
@@ -422,6 +424,30 @@ export class AgentRegistry {
     }
   }
 
+  /**
+   * Register LLM Agent
+   */
+  private registerLlmAgent(): void {
+    try {
+      const yamlPath = this.resolveAgentPath("llm/llm.yaml");
+      const yamlContent = readFileSync(yamlPath, "utf-8");
+      const agent = this.parseAgentYaml(yamlContent);
+
+      this.registerAgent({
+        id: agent.agent.id,
+        name: "llm.execute",
+        description: agent.metadata.description,
+        protocol: agent.agent.protocol,
+        route: agent.metadata.route || "/agents/llm",
+        requiredProps: ["gemini_api_key"],
+        yamlContent,
+      });
+    } catch (error) {
+      console.error("Failed to register LLM Agent:", error);
+      throw error;
+    }
+  }
+
   /**
    * Register Chat Agent (Orchestrator)
    */
@@ -519,11 +545,18 @@ export class AgentRegistry {
 
   /**
    * Resolve agent asset path when running in bundled runtimes
+   * Handles both source (src/agents/) and dist (dist/agents/) paths
   */
  private resolveAgentPath(filename: string): string {
    const candidatePaths = [
-
+      // From registry folder, go up one level to agents folder
+      join(__dirname, "..", filename),
+      // Direct path from src/agents
       join(process.cwd(), "packages", "beddel", "src", "agents", filename),
+      // Direct path from dist/agents (for built package)
+      join(process.cwd(), "packages", "beddel", "dist", "agents", filename),
+      // When running from node_modules
+      join(__dirname, "..", "..", "agents", filename),
     ];
 
     for (const path of candidatePaths) {
package/src/agents/translator/translator.handler.ts
CHANGED

@@ -2,29 +2,27 @@ import 'server-only';
 
 /**
  * Translator Agent Handler - Server-only execution logic
- * Translates text between languages using
+ * Translates text between languages using LLM providers
  */
 
 import { generateText } from 'ai';
-import {
+import { LLMProviderFactory, extractProviderConfig } from '../../runtime/llmProviderFactory';
 import type { ExecutionContext } from '../../types/executionContext';
 import type { TranslationHandlerParams, TranslationHandlerResult } from './translator.types';
 
-const GEMINI_MODEL = 'models/gemini-2.5-flash';
 const SUPPORTED_LANGUAGES = ['pt', 'en', 'es', 'fr'];
 
 /**
- * Execute translation using
+ * Execute translation using configured LLM provider
 */
 export async function executeTranslationHandler(
   params: TranslationHandlerParams,
   props: Record<string, string>,
   context: ExecutionContext
 ): Promise<TranslationHandlerResult> {
-  const
-
-
-  }
+  const providerConfig = extractProviderConfig(props, 'google');
+  const model = LLMProviderFactory.createLanguageModel(providerConfig);
+  const modelName = providerConfig.model || LLMProviderFactory.getDefaultModel(providerConfig.provider);
 
   const text = params.text?.trim();
   const sourceLanguage = params.source_language?.trim().toLowerCase();
@@ -39,7 +37,8 @@ export async function executeTranslationHandler(
     return {
       translated_text: text,
       metadata: {
-        model_used:
+        model_used: modelName,
+        provider: providerConfig.provider,
         processing_time: 0,
         confidence: 1,
         supported_languages: SUPPORTED_LANGUAGES,
@@ -65,8 +64,6 @@ Text:
     .replace(/{{target_language}}/g, targetLanguage)
     .trim();
 
-  const google = createGoogleGenerativeAI({ apiKey });
-  const model = google(GEMINI_MODEL);
   const startTime = Date.now();
 
   context.log(`[Translator] Translating from ${sourceLanguage} to ${targetLanguage}`);
@@ -79,13 +76,14 @@ Text:
 
   const finalText = translatedText?.trim() || '';
   if (!finalText) {
-    throw new Error('
+    throw new Error('LLM translation returned empty response');
   }
 
   return {
     translated_text: finalText,
     metadata: {
-      model_used:
+      model_used: modelName,
+      provider: providerConfig.provider,
       processing_time: Date.now() - startTime,
       confidence: 0.85,
       supported_languages: SUPPORTED_LANGUAGES,
package/src/index.ts
CHANGED

@@ -164,5 +164,13 @@ export { chatMetadata } from "./agents/chat";
 // RAG types (client-safe)
 export type { ConversationMessage } from "./agents/rag/rag.types";
 
+// LLM agent exports (client-safe)
+export type {
+  LlmHandlerParams,
+  LlmHandlerResult,
+  LlmMetadata,
+} from "./agents/llm/llm.types";
+export { llmMetadata } from "./agents/llm";
+
 // Server/runtime exports
 export * as Server from "./server";
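Consumers can now import the LLM agent's client-safe types and metadata from the package root; for example (assuming the package is installed under its published name `beddel`):

```ts
import { llmMetadata, type LlmHandlerParams } from 'beddel';

const params: LlmHandlerParams = { query: 'Hello', temperature: 0.5 };
console.log(llmMetadata.route); // "/agents/llm", per the LlmMetadata type
```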
package/src/runtime/declarativeAgentRuntime.ts
CHANGED

@@ -239,18 +239,23 @@ export class DeclarativeAgentInterpreter {
     switch (step.type) {
       case 'output-generator':
         return this.executeOutputGenerator(step, variables, options);
+      // Preferred step types
+      case 'joke':
       case 'genkit-joke':
-        return this.
+        return this.executeJoke(step, variables, options);
+      case 'translation':
       case 'genkit-translation':
-        return this.
+        return this.executeTranslation(step, variables, options);
+      case 'image':
       case 'genkit-image':
-        return this.
+        return this.executeImage(step, variables, options);
+      case 'vectorize':
+      case 'gemini-vectorize':
+        return this.executeVectorize(step, variables, options);
       case 'custom-action':
         return this.executeCustomAction(step, variables, options);
       case 'mcp-tool':
         return this.executeMcpTool(step, variables, options);
-      case 'gemini-vectorize':
-        return this.executeGeminiVectorize(step, variables, options);
       case 'chromadb':
         return this.executeChromaDB(step, variables, options);
       case 'gitmcp':
@@ -309,7 +314,7 @@ export class DeclarativeAgentInterpreter {
   // Delegated Handlers - Using extracted agent handlers
   // ============================================================================
 
-  private async
+  private async executeJoke(
     step: any,
     variables: Map<string, any>,
     options: YamlAgentInterpreterOptions
@@ -337,7 +342,7 @@ export class DeclarativeAgentInterpreter {
     return result;
   }
 
-  private async
+  private async executeTranslation(
     step: any,
     variables: Map<string, any>,
     options: YamlAgentInterpreterOptions
@@ -369,7 +374,7 @@ export class DeclarativeAgentInterpreter {
     return result;
   }
 
-  private async
+  private async executeImage(
     step: any,
     variables: Map<string, any>,
     options: YamlAgentInterpreterOptions
@@ -454,7 +459,7 @@ export class DeclarativeAgentInterpreter {
     return result;
   }
 
-  private async
+  private async executeVectorize(
     step: any,
     variables: Map<string, any>,
     options: YamlAgentInterpreterOptions
package/src/runtime/index.ts
CHANGED

@@ -23,9 +23,14 @@ export {
   executeChromaDBHandler,
   executeGitMcpHandler,
   executeRagHandler,
+  executeLlmHandler,
   executeChatHandler,
 } from './workflowExecutor';
 
+// LLM Provider Factory exports
+export { LLMProviderFactory, extractProviderConfig } from './llmProviderFactory';
+export type { LLMProviderType, LLMProviderConfig } from './llmProviderFactory';
+
 // Schema compiler exports
 export { DeclarativeSchemaCompiler, DeclarativeSchemaValidationError } from './schemaCompiler';
 export type { DeclarativeSchemaPhase } from './schemaCompiler';
package/src/runtime/llmProviderFactory.ts
ADDED

@@ -0,0 +1,145 @@
+import 'server-only';
+
+/**
+ * LLM Provider Factory - Centralized provider creation and configuration
+ *
+ * Provides a single point of configuration for LLM providers, enabling:
+ * - Centralized API key validation
+ * - Easy provider switching (Google, OpenAI, Anthropic, etc.)
+ * - Retry/fallback logic between providers
+ * - Consistent error handling
+ */
+
+import { createGoogleGenerativeAI } from '@ai-sdk/google';
+import type { LanguageModel } from 'ai';
+
+/**
+ * Supported LLM provider types
+ */
+export type LLMProviderType = 'google' | 'openai' | 'anthropic';
+
+/**
+ * Provider configuration
+ */
+export interface LLMProviderConfig {
+  provider: LLMProviderType;
+  apiKey: string;
+  model?: string;
+  baseURL?: string;
+}
+
+/**
+ * Default models for each provider
+ */
+const DEFAULT_MODELS: Record<LLMProviderType, string> = {
+  google: 'models/gemini-2.5-flash',
+  openai: 'gpt-4-turbo',
+  anthropic: 'claude-3-5-sonnet-20241022',
+};
+
+/**
+ * LLM Provider Factory
+ *
+ * Centralizes provider creation and configuration across all agents
+ */
+export class LLMProviderFactory {
+  /**
+   * Create a language model instance
+   */
+  static createLanguageModel(config: LLMProviderConfig): LanguageModel {
+    this.validateConfig(config);
+
+    switch (config.provider) {
+      case 'google':
+        return this.createGoogleProvider(config);
+      case 'openai':
+        throw new Error('OpenAI provider not yet implemented');
+      case 'anthropic':
+        throw new Error('Anthropic provider not yet implemented');
+      default:
+        throw new Error(`Unsupported provider: ${config.provider}`);
+    }
+  }
+
+  /**
+   * Create Google Generative AI provider
+   */
+  private static createGoogleProvider(config: LLMProviderConfig): LanguageModel {
+    const google = createGoogleGenerativeAI({
+      apiKey: config.apiKey,
+      ...(config.baseURL && { baseURL: config.baseURL }),
+    });
+
+    const model = config.model || DEFAULT_MODELS.google;
+    return google(model);
+  }
+
+  /**
+   * Validate provider configuration
+   */
+  private static validateConfig(config: LLMProviderConfig): void {
+    if (!config.apiKey?.trim()) {
+      throw new Error(`Missing API key for provider: ${config.provider}`);
+    }
+
+    if (!config.provider) {
+      throw new Error('Provider type is required');
+    }
+  }
+
+  /**
+   * Get default model for a provider
+   */
+  static getDefaultModel(provider: LLMProviderType): string {
+    return DEFAULT_MODELS[provider];
+  }
+
+  /**
+   * Validate API key format (basic check)
+   */
+  static validateApiKey(apiKey: string, provider: LLMProviderType): boolean {
+    if (!apiKey?.trim()) {
+      return false;
+    }
+
+    // Provider-specific validation can be added here
+    switch (provider) {
+      case 'google':
+        // Google API keys typically start with 'AIza'
+        return apiKey.startsWith('AIza') || apiKey.length > 20;
+      case 'openai':
+        return apiKey.startsWith('sk-');
+      case 'anthropic':
+        return apiKey.startsWith('sk-ant-');
+      default:
+        return true;
+    }
+  }
+}
+
+/**
+ * Helper function to extract provider config from props
+ */
+export function extractProviderConfig(
+  props: Record<string, string>,
+  defaultProvider: LLMProviderType = 'google'
+): LLMProviderConfig {
+  const apiKey = props.gemini_api_key || props.openai_api_key || props.anthropic_api_key;
+
+  if (!apiKey) {
+    throw new Error('Missing required prop: API key (gemini_api_key, openai_api_key, or anthropic_api_key)');
+  }
+
+  // Determine provider from key name
+  let provider: LLMProviderType = defaultProvider;
+  if (props.openai_api_key) provider = 'openai';
+  if (props.anthropic_api_key) provider = 'anthropic';
+  if (props.gemini_api_key) provider = 'google';
+
+  return {
+    provider,
+    apiKey: apiKey.trim(),
+    model: props.model,
+    baseURL: props.base_url,
+  };
+}
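With the factory in place, provider setup inside a handler collapses to two calls, as the updated RAG and translator handlers above show. A minimal end-to-end sketch (the key value is a placeholder):

```ts
import { generateText } from 'ai';
import { LLMProviderFactory, extractProviderConfig } from './llmProviderFactory';

const props = { gemini_api_key: process.env.GEMINI_API_KEY ?? '' }; // placeholder key

const config = extractProviderConfig(props, 'google'); // provider inferred from the key name
const model = LLMProviderFactory.createLanguageModel(config);

const { text } = await generateText({
  model,
  prompt: 'Say hello in Portuguese.',
  temperature: 0.3,
});
console.log(text);
```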