beddel 0.2.3 → 0.3.0

This diff shows the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (110)
  1. package/CHANGELOG.md +35 -0
  2. package/dist/agents/chat/chat.handler.d.ts +1 -1
  3. package/dist/agents/chat/chat.handler.d.ts.map +1 -1
  4. package/dist/agents/chat/chat.handler.js +9 -7
  5. package/dist/agents/chat/chat.handler.js.map +1 -1
  6. package/dist/agents/chat/chat.yaml +6 -8
  7. package/dist/agents/gemini-vectorize/gemini-vectorize.handler.d.ts +1 -1
  8. package/dist/agents/gemini-vectorize/gemini-vectorize.handler.d.ts.map +1 -1
  9. package/dist/agents/gemini-vectorize/gemini-vectorize.handler.js +16 -13
  10. package/dist/agents/gemini-vectorize/gemini-vectorize.handler.js.map +1 -1
  11. package/dist/agents/image/image.handler.d.ts +1 -1
  12. package/dist/agents/image/image.handler.d.ts.map +1 -1
  13. package/dist/agents/image/image.handler.js +9 -6
  14. package/dist/agents/image/image.handler.js.map +1 -1
  15. package/dist/agents/image/image.types.d.ts +1 -0
  16. package/dist/agents/image/image.types.d.ts.map +1 -1
  17. package/dist/agents/index.d.ts +11 -2
  18. package/dist/agents/index.d.ts.map +1 -1
  19. package/dist/agents/index.js +9 -3
  20. package/dist/agents/index.js.map +1 -1
  21. package/dist/agents/joker/joker.handler.d.ts +1 -1
  22. package/dist/agents/joker/joker.handler.d.ts.map +1 -1
  23. package/dist/agents/joker/joker.handler.js +7 -11
  24. package/dist/agents/joker/joker.handler.js.map +1 -1
  25. package/dist/agents/joker/joker.types.d.ts +1 -0
  26. package/dist/agents/joker/joker.types.d.ts.map +1 -1
  27. package/dist/agents/llm/index.d.ts +15 -0
  28. package/dist/agents/llm/index.d.ts.map +1 -0
  29. package/dist/agents/llm/index.js +20 -0
  30. package/dist/agents/llm/index.js.map +1 -0
  31. package/dist/agents/llm/llm.handler.d.ts +8 -0
  32. package/dist/agents/llm/llm.handler.d.ts.map +1 -0
  33. package/dist/agents/llm/llm.handler.js +64 -0
  34. package/dist/agents/llm/llm.handler.js.map +1 -0
  35. package/dist/agents/llm/llm.schema.d.ts +26 -0
  36. package/dist/agents/llm/llm.schema.d.ts.map +1 -0
  37. package/dist/agents/llm/llm.schema.js +23 -0
  38. package/dist/agents/llm/llm.schema.js.map +1 -0
  39. package/dist/agents/llm/llm.types.d.ts +34 -0
  40. package/dist/agents/llm/llm.types.d.ts.map +1 -0
  41. package/dist/agents/llm/llm.types.js +7 -0
  42. package/dist/agents/llm/llm.types.js.map +1 -0
  43. package/dist/agents/llm/llm.yaml +87 -0
  44. package/dist/agents/rag/rag.handler.d.ts +1 -0
  45. package/dist/agents/rag/rag.handler.d.ts.map +1 -1
  46. package/dist/agents/rag/rag.handler.js +15 -38
  47. package/dist/agents/rag/rag.handler.js.map +1 -1
  48. package/dist/agents/rag/rag.types.d.ts +2 -7
  49. package/dist/agents/rag/rag.types.d.ts.map +1 -1
  50. package/dist/agents/rag/rag.types.js +1 -0
  51. package/dist/agents/rag/rag.types.js.map +1 -1
  52. package/dist/agents/registry/agentRegistry.d.ts +5 -0
  53. package/dist/agents/registry/agentRegistry.d.ts.map +1 -1
  54. package/dist/agents/registry/agentRegistry.js +33 -1
  55. package/dist/agents/registry/agentRegistry.js.map +1 -1
  56. package/dist/agents/translator/translator.handler.d.ts +1 -1
  57. package/dist/agents/translator/translator.handler.d.ts.map +1 -1
  58. package/dist/agents/translator/translator.handler.js +11 -13
  59. package/dist/agents/translator/translator.handler.js.map +1 -1
  60. package/dist/agents/translator/translator.types.d.ts +1 -0
  61. package/dist/agents/translator/translator.types.d.ts.map +1 -1
  62. package/dist/index.d.ts +2 -0
  63. package/dist/index.d.ts.map +1 -1
  64. package/dist/index.js +3 -1
  65. package/dist/index.js.map +1 -1
  66. package/dist/runtime/declarativeAgentRuntime.d.ts +4 -4
  67. package/dist/runtime/declarativeAgentRuntime.d.ts.map +1 -1
  68. package/dist/runtime/declarativeAgentRuntime.js +14 -9
  69. package/dist/runtime/declarativeAgentRuntime.js.map +1 -1
  70. package/dist/runtime/index.d.ts +3 -1
  71. package/dist/runtime/index.d.ts.map +1 -1
  72. package/dist/runtime/index.js +6 -1
  73. package/dist/runtime/index.js.map +1 -1
  74. package/dist/runtime/llmProviderFactory.d.ts +47 -0
  75. package/dist/runtime/llmProviderFactory.d.ts.map +1 -0
  76. package/dist/runtime/llmProviderFactory.js +119 -0
  77. package/dist/runtime/llmProviderFactory.js.map +1 -0
  78. package/dist/runtime/workflowExecutor.d.ts +3 -2
  79. package/dist/runtime/workflowExecutor.d.ts.map +1 -1
  80. package/dist/runtime/workflowExecutor.js +21 -11
  81. package/dist/runtime/workflowExecutor.js.map +1 -1
  82. package/dist/shared/types/agent.types.d.ts +15 -2
  83. package/dist/shared/types/agent.types.d.ts.map +1 -1
  84. package/dist/shared/types/agent.types.js +11 -0
  85. package/dist/shared/types/agent.types.js.map +1 -1
  86. package/package.json +7 -5
  87. package/src/agents/chat/chat.handler.ts +15 -13
  88. package/src/agents/chat/chat.yaml +6 -8
  89. package/src/agents/gemini-vectorize/gemini-vectorize.handler.ts +18 -15
  90. package/src/agents/image/image.handler.ts +10 -6
  91. package/src/agents/image/image.types.ts +1 -0
  92. package/src/agents/index.ts +6 -2
  93. package/src/agents/joker/joker.handler.ts +7 -12
  94. package/src/agents/joker/joker.types.ts +1 -0
  95. package/src/agents/llm/index.ts +20 -0
  96. package/src/agents/llm/llm.handler.ts +82 -0
  97. package/src/agents/llm/llm.schema.ts +25 -0
  98. package/src/agents/llm/llm.types.ts +37 -0
  99. package/src/agents/llm/llm.yaml +87 -0
  100. package/src/agents/rag/rag.handler.ts +20 -44
  101. package/src/agents/rag/rag.types.ts +2 -8
  102. package/src/agents/registry/agentRegistry.ts +34 -1
  103. package/src/agents/translator/translator.handler.ts +11 -13
  104. package/src/agents/translator/translator.types.ts +1 -0
  105. package/src/index.ts +8 -0
  106. package/src/runtime/declarativeAgentRuntime.ts +14 -9
  107. package/src/runtime/index.ts +5 -0
  108. package/src/runtime/llmProviderFactory.ts +145 -0
  109. package/src/runtime/workflowExecutor.ts +23 -10
  110. package/src/shared/types/agent.types.ts +23 -3
package/dist/runtime/workflowExecutor.js CHANGED
@@ -1,10 +1,11 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.executeChatHandler = exports.executeRagHandler = exports.executeGitMcpHandler = exports.executeChromaDBHandler = exports.executeVectorizeHandler = exports.executeMcpToolHandler = exports.executeImageHandler = exports.executeTranslationHandler = exports.executeJokeHandler = void 0;
+ exports.executeChatHandler = exports.executeLlmHandler = exports.executeRagHandler = exports.executeGitMcpHandler = exports.executeChromaDBHandler = exports.executeVectorizeHandler = exports.executeMcpToolHandler = exports.executeImageHandler = exports.executeTranslationHandler = exports.executeJokeHandler = void 0;
  exports.executeWorkflowStep = executeWorkflowStep;
  exports.getAvailableStepTypes = getAvailableStepTypes;
  exports.isStepTypeSupported = isStepTypeSupported;
  require("server-only");
+ const agent_types_1 = require("../shared/types/agent.types");
  // Import handlers from each agent
  const joker_handler_1 = require("../agents/joker/joker.handler");
  Object.defineProperty(exports, "executeJokeHandler", { enumerable: true, get: function () { return joker_handler_1.executeJokeHandler; } });
@@ -22,14 +23,15 @@ const gitmcp_handler_1 = require("../agents/gitmcp/gitmcp.handler");
  Object.defineProperty(exports, "executeGitMcpHandler", { enumerable: true, get: function () { return gitmcp_handler_1.executeGitMcpHandler; } });
  const rag_handler_1 = require("../agents/rag/rag.handler");
  Object.defineProperty(exports, "executeRagHandler", { enumerable: true, get: function () { return rag_handler_1.executeRagHandler; } });
+ const llm_handler_1 = require("../agents/llm/llm.handler");
+ Object.defineProperty(exports, "executeLlmHandler", { enumerable: true, get: function () { return llm_handler_1.executeLlmHandler; } });
  const chat_handler_1 = require("../agents/chat/chat.handler");
  Object.defineProperty(exports, "executeChatHandler", { enumerable: true, get: function () { return chat_handler_1.executeChatHandler; } });
  /**
  * Map of workflow step types to their handlers
- * Maps both legacy (Portuguese) and new (English) step type names
+ * Preferred step type names only - legacy names resolved via LEGACY_STEP_TYPE_MAP
  */
  const handlerMap = {
- // English step types (preferred)
  'joke': joker_handler_1.executeJokeHandler,
  'translation': translator_handler_1.executeTranslationHandler,
  'image': image_handler_1.executeImageHandler,
@@ -38,18 +40,26 @@ const handlerMap = {
  'chromadb': chromadb_handler_1.executeChromaDBHandler,
  'gitmcp': gitmcp_handler_1.executeGitMcpHandler,
  'rag': rag_handler_1.executeRagHandler,
+ 'llm': llm_handler_1.executeLlmHandler,
  'chat': chat_handler_1.executeChatHandler,
- // Legacy step types (for backward compatibility)
- 'genkit-joke': joker_handler_1.executeJokeHandler,
- 'genkit-translation': translator_handler_1.executeTranslationHandler,
- 'genkit-image': image_handler_1.executeImageHandler,
- 'gemini-vectorize': gemini_vectorize_handler_1.executeVectorizeHandler,
  };
+ /**
+ * Resolve step type, handling legacy names with deprecation warning
+ */
+ function resolveStepType(stepType, context) {
+ if (stepType in agent_types_1.LEGACY_STEP_TYPE_MAP) {
+ const preferred = agent_types_1.LEGACY_STEP_TYPE_MAP[stepType];
+ context.log(`[DEPRECATION WARNING] Step type '${stepType}' is deprecated. Use '${preferred}' instead.`);
+ return preferred;
+ }
+ return stepType;
+ }
  /**
  * Execute a workflow step by delegating to the appropriate handler
  */
  async function executeWorkflowStep(stepType, params, props, context) {
- const handler = handlerMap[stepType];
+ const resolvedType = resolveStepType(stepType, context);
+ const handler = handlerMap[resolvedType];
  if (!handler) {
  throw new Error(`Unknown workflow step type: ${stepType}`);
  }
@@ -62,9 +72,9 @@ function getAvailableStepTypes() {
  return Object.keys(handlerMap);
  }
  /**
- * Check if a step type is supported
+ * Check if a step type is supported (including legacy names)
  */
  function isStepTypeSupported(stepType) {
- return stepType in handlerMap;
+ return stepType in handlerMap || stepType in agent_types_1.LEGACY_STEP_TYPE_MAP;
  }
  //# sourceMappingURL=workflowExecutor.js.map
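The new resolution path keeps legacy workflow definitions working while nudging callers toward the English step names. A rough consumer-side sketch (the import path, the props key, and the ExecutionContext shape are assumptions, not taken from this diff):

import { executeWorkflowStep, isStepTypeSupported } from 'beddel'; // assumed export path

const context = { log: (msg: string) => console.log(msg) } as any; // stubbed ExecutionContext

// Legacy names still pass the support check and are remapped before dispatch,
// with resolveStepType() logging a deprecation warning.
console.log(isStepTypeSupported('genkit-joke')); // true

const result = await executeWorkflowStep(
  'genkit-joke', // resolved to 'joke' at runtime
  { prompt: 'Tell a short joke about YAML parsers' },
  { gemini_api_key: process.env.GEMINI_API_KEY ?? '' }, // prop name from the 0.2.x handlers; 0.3.0 may also accept provider-selection props
  context
);
console.log(result);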
package/dist/runtime/workflowExecutor.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"workflowExecutor.js","sourceRoot":"","sources":["../../src/runtime/workflowExecutor.ts"],"names":[],"mappings":";;;AAuDA,kDAWC;AAKD,sDAEC;AAKD,kDAEC;AAhFD,uBAAqB;AAUrB,kCAAkC;AAClC,iEAAmE;AAyEjE,mGAzEO,kCAAkB,OAyEP;AAxEpB,gFAAoF;AAyElF,0GAzEO,8CAAyB,OAyEP;AAxE3B,iEAAoE;AAyElE,oGAzEO,mCAAmB,OAyEP;AAxErB,0EAA4E;AAyE1E,sGAzEO,wCAAqB,OAyEP;AAxEvB,kGAA8F;AAyE5F,wGAzEO,kDAAuB,OAyEP;AAxEzB,0EAA6E;AAyE3E,uGAzEO,yCAAsB,OAyEP;AAxExB,oEAAuE;AAyErE,qGAzEO,qCAAoB,OAyEP;AAxEtB,2DAA8D;AAyE5D,kGAzEO,+BAAiB,OAyEP;AAxEnB,8DAAiE;AAyE/D,mGAzEO,iCAAkB,OAyEP;AA9DpB;;;GAGG;AACH,MAAM,UAAU,GAAoC;IAClD,iCAAiC;IACjC,MAAM,EAAE,kCAAkB;IAC1B,aAAa,EAAE,8CAAyB;IACxC,OAAO,EAAE,mCAAmB;IAC5B,UAAU,EAAE,wCAAqB;IACjC,WAAW,EAAE,kDAAuB;IACpC,UAAU,EAAE,yCAAsB;IAClC,QAAQ,EAAE,qCAAoB;IAC9B,KAAK,EAAE,+BAAiB;IACxB,MAAM,EAAE,iCAAkB;IAC1B,iDAAiD;IACjD,aAAa,EAAE,kCAAkB;IACjC,oBAAoB,EAAE,8CAAyB;IAC/C,cAAc,EAAE,mCAAmB;IACnC,kBAAkB,EAAE,kDAAuB;CAC5C,CAAC;AAEF;;GAEG;AACI,KAAK,UAAU,mBAAmB,CACvC,QAAmC,EACnC,MAA+B,EAC/B,KAA6B,EAC7B,OAAyB;IAEzB,MAAM,OAAO,GAAG,UAAU,CAAC,QAAQ,CAAC,CAAC;IACrC,IAAI,CAAC,OAAO,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,+BAA+B,QAAQ,EAAE,CAAC,CAAC;IAC7D,CAAC;IACD,OAAO,OAAO,CAAC,MAAM,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;AACzC,CAAC;AAED;;GAEG;AACH,SAAgB,qBAAqB;IACnC,OAAO,MAAM,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AACjC,CAAC;AAED;;GAEG;AACH,SAAgB,mBAAmB,CAAC,QAAgB;IAClD,OAAO,QAAQ,IAAI,UAAU,CAAC;AAChC,CAAC"}
+ {"version":3,"file":"workflowExecutor.js","sourceRoot":"","sources":["../../src/runtime/workflowExecutor.ts"],"names":[],"mappings":";;;AAkEA,kDAYC;AAKD,sDAEC;AAKD,kDAEC;AA5FD,uBAAqB;AASrB,6DAAmE;AAEnE,kCAAkC;AAClC,iEAAmE;AAoFjE,mGApFO,kCAAkB,OAoFP;AAnFpB,gFAAoF;AAoFlF,0GApFO,8CAAyB,OAoFP;AAnF3B,iEAAoE;AAoFlE,oGApFO,mCAAmB,OAoFP;AAnFrB,0EAA4E;AAoF1E,sGApFO,wCAAqB,OAoFP;AAnFvB,kGAA8F;AAoF5F,wGApFO,kDAAuB,OAoFP;AAnFzB,0EAA6E;AAoF3E,uGApFO,yCAAsB,OAoFP;AAnFxB,oEAAuE;AAoFrE,qGApFO,qCAAoB,OAoFP;AAnFtB,2DAA8D;AAoF5D,kGApFO,+BAAiB,OAoFP;AAnFnB,2DAA8D;AAoF5D,kGApFO,+BAAiB,OAoFP;AAnFnB,8DAAiE;AAoF/D,mGApFO,iCAAkB,OAoFP;AAzEpB;;;GAGG;AACH,MAAM,UAAU,GAAoC;IAClD,MAAM,EAAE,kCAAkB;IAC1B,aAAa,EAAE,8CAAyB;IACxC,OAAO,EAAE,mCAAmB;IAC5B,UAAU,EAAE,wCAAqB;IACjC,WAAW,EAAE,kDAAuB;IACpC,UAAU,EAAE,yCAAsB;IAClC,QAAQ,EAAE,qCAAoB;IAC9B,KAAK,EAAE,+BAAiB;IACxB,KAAK,EAAE,+BAAiB;IACxB,MAAM,EAAE,iCAAkB;CAC3B,CAAC;AAEF;;GAEG;AACH,SAAS,eAAe,CAAC,QAAgB,EAAE,OAAyB;IAClE,IAAI,QAAQ,IAAI,kCAAoB,EAAE,CAAC;QACrC,MAAM,SAAS,GAAG,kCAAoB,CAAC,QAAQ,CAAC,CAAC;QACjD,OAAO,CAAC,GAAG,CACT,oCAAoC,QAAQ,yBAAyB,SAAS,YAAY,CAC3F,CAAC;QACF,OAAO,SAAS,CAAC;IACnB,CAAC;IACD,OAAO,QAAQ,CAAC;AAClB,CAAC;AAED;;GAEG;AACI,KAAK,UAAU,mBAAmB,CACvC,QAAmC,EACnC,MAA+B,EAC/B,KAA6B,EAC7B,OAAyB;IAEzB,MAAM,YAAY,GAAG,eAAe,CAAC,QAAQ,EAAE,OAAO,CAAC,CAAC;IACxD,MAAM,OAAO,GAAG,UAAU,CAAC,YAAY,CAAC,CAAC;IACzC,IAAI,CAAC,OAAO,EAAE,CAAC;QACb,MAAM,IAAI,KAAK,CAAC,+BAA+B,QAAQ,EAAE,CAAC,CAAC;IAC7D,CAAC;IACD,OAAO,OAAO,CAAC,MAAM,EAAE,KAAK,EAAE,OAAO,CAAC,CAAC;AACzC,CAAC;AAED;;GAEG;AACH,SAAgB,qBAAqB;IACnC,OAAO,MAAM,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;AACjC,CAAC;AAED;;GAEG;AACH,SAAgB,mBAAmB,CAAC,QAAgB;IAClD,OAAO,QAAQ,IAAI,UAAU,IAAI,QAAQ,IAAI,kCAAoB,CAAC;AACpE,CAAC"}
package/dist/shared/types/agent.types.d.ts CHANGED
@@ -40,9 +40,22 @@ export interface ExecutionStep {
  }
  /**
  * Workflow step types supported by the runtime
- * Includes both English (preferred) and legacy (Portuguese) names
+ *
+ * PREFERRED: Use the English step type names (joke, translation, image, vectorize)
+ *
+ * DEPRECATED: The following legacy names are supported for backward compatibility
+ * but will be removed in a future major version:
+ * - genkit-joke → use 'joke'
+ * - genkit-translation → use 'translation'
+ * - genkit-image → use 'image'
+ * - gemini-vectorize → use 'vectorize'
  */
- export type WorkflowStepType = 'joke' | 'translation' | 'image' | 'vectorize' | 'mcp-tool' | 'chromadb' | 'gitmcp' | 'rag' | 'chat' | 'output-generator' | 'builtin-agent' | 'custom-action' | 'genkit-joke' | 'genkit-translation' | 'genkit-image' | 'gemini-vectorize';
+ export type WorkflowStepType = 'joke' | 'translation' | 'image' | 'vectorize' | 'mcp-tool' | 'chromadb' | 'gitmcp' | 'rag' | 'llm' | 'chat' | 'output-generator' | 'builtin-agent' | 'custom-action' | 'genkit-joke' | 'genkit-translation' | 'genkit-image' | 'gemini-vectorize';
+ /**
+ * Maps legacy step type names to their preferred equivalents
+ * @deprecated Use preferred step type names directly
+ */
+ export declare const LEGACY_STEP_TYPE_MAP: Record<string, WorkflowStepType>;
  /**
  * Agent categories for organization
  */
package/dist/shared/types/agent.types.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"agent.types.d.ts","sourceRoot":"","sources":["../../../src/shared/types/agent.types.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,WAAW,aAAa,CAAC,CAAC,GAAG,OAAO;IACxC,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,CAAC,EAAE,CAAC,CAAC;IACT,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,MAAM,EAAE,SAAS,GAAG,SAAS,GAAG,OAAO,CAAC;IACxC,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAChC,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACjC,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,KAAK,CAAC,EAAE,eAAe,GAAG,eAAe,GAAG,SAAS,GAAG,WAAW,GAAG,WAAW,GAAG,YAAY,CAAC;CAClG;AAED;;;GAGG;AACH,MAAM,MAAM,gBAAgB,GAExB,MAAM,GACN,aAAa,GACb,OAAO,GACP,WAAW,GACX,UAAU,GACV,UAAU,GACV,QAAQ,GACR,KAAK,GACL,MAAM,GACN,kBAAkB,GAClB,eAAe,GACf,eAAe,GAEf,aAAa,GACb,oBAAoB,GACpB,cAAc,GACd,kBAAkB,CAAC;AAEvB;;GAEG;AACH,MAAM,MAAM,aAAa,GACrB,SAAS,GACT,aAAa,GACb,OAAO,GACP,KAAK,GACL,eAAe,GACf,SAAS,GACT,WAAW,GACX,eAAe,CAAC"}
+ {"version":3,"file":"agent.types.d.ts","sourceRoot":"","sources":["../../../src/shared/types/agent.types.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,QAAQ,EAAE,MAAM,CAAC;IACjB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,CAAC,EAAE,MAAM,EAAE,CAAC;CACjB;AAED;;GAEG;AACH,MAAM,WAAW,aAAa,CAAC,CAAC,GAAG,OAAO;IACxC,OAAO,EAAE,OAAO,CAAC;IACjB,IAAI,CAAC,EAAE,CAAC,CAAC;IACT,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,aAAa;IAC5B,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,MAAM,EAAE,SAAS,GAAG,SAAS,GAAG,OAAO,CAAC;IACxC,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,KAAK,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAChC,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACjC,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,KAAK,CAAC,EAAE,eAAe,GAAG,eAAe,GAAG,SAAS,GAAG,WAAW,GAAG,WAAW,GAAG,YAAY,CAAC;CAClG;AAED;;;;;;;;;;;GAWG;AACH,MAAM,MAAM,gBAAgB,GAExB,MAAM,GACN,aAAa,GACb,OAAO,GACP,WAAW,GACX,UAAU,GACV,UAAU,GACV,QAAQ,GACR,KAAK,GACL,KAAK,GACL,MAAM,GACN,kBAAkB,GAClB,eAAe,GACf,eAAe,GAEf,aAAa,GACb,oBAAoB,GACpB,cAAc,GACd,kBAAkB,CAAC;AAEvB;;;GAGG;AACH,eAAO,MAAM,oBAAoB,EAAE,MAAM,CAAC,MAAM,EAAE,gBAAgB,CAKjE,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,aAAa,GACrB,SAAS,GACT,aAAa,GACb,OAAO,GACP,KAAK,GACL,eAAe,GACf,SAAS,GACT,WAAW,GACX,eAAe,CAAC"}
package/dist/shared/types/agent.types.js CHANGED
@@ -4,4 +4,15 @@
  * These types contain no sensitive data and can be used in both environments
  */
  Object.defineProperty(exports, "__esModule", { value: true });
+ exports.LEGACY_STEP_TYPE_MAP = void 0;
+ /**
+ * Maps legacy step type names to their preferred equivalents
+ * @deprecated Use preferred step type names directly
+ */
+ exports.LEGACY_STEP_TYPE_MAP = {
+ 'genkit-joke': 'joke',
+ 'genkit-translation': 'translation',
+ 'genkit-image': 'image',
+ 'gemini-vectorize': 'vectorize',
+ };
  //# sourceMappingURL=agent.types.js.map
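For callers that keep step names in their own configuration, the newly exported map can drive a one-time migration. A minimal sketch, assuming the constant and the WorkflowStepType union are re-exported from the package root (they live in shared/types/agent.types in this release):

import { LEGACY_STEP_TYPE_MAP } from 'beddel'; // assumed re-export path
import type { WorkflowStepType } from 'beddel';

// Rewrites any legacy step names in a stored workflow definition to their preferred equivalents.
function migrateStepTypes(steps: Array<{ type: string }>): Array<{ type: WorkflowStepType }> {
  return steps.map((step) => ({
    ...step,
    type: (LEGACY_STEP_TYPE_MAP[step.type] ?? step.type) as WorkflowStepType,
  }));
}

console.log(migrateStepTypes([{ type: 'genkit-image' }, { type: 'rag' }]));
// [ { type: 'image' }, { type: 'rag' } ]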
package/dist/shared/types/agent.types.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"agent.types.js","sourceRoot":"","sources":["../../../src/shared/types/agent.types.ts"],"names":[],"mappings":";AAAA;;;GAGG"}
+ {"version":3,"file":"agent.types.js","sourceRoot":"","sources":["../../../src/shared/types/agent.types.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;AA0EH;;;GAGG;AACU,QAAA,oBAAoB,GAAqC;IACpE,aAAa,EAAE,MAAM;IACrB,oBAAoB,EAAE,aAAa;IACnC,cAAc,EAAE,OAAO;IACvB,kBAAkB,EAAE,WAAW;CAChC,CAAC"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "beddel",
- "version": "0.2.3",
+ "version": "0.3.0",
  "description": "Beddel - A secure YAML parser and OpenAPI endpoint manager for Node.js applications",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -77,12 +77,12 @@
  "license": "MIT",
  "repository": {
  "type": "git",
- "url": "https://github.com/botanarede/beddel.git"
+ "url": "https://github.com/botanarede/beddel-alpha.git"
  },
  "bugs": {
- "url": "https://github.com/botanarede/beddel/issues"
+ "url": "https://github.com/botanarede/beddel-alpha/issues"
  },
- "homepage": "https://github.com/botanarede/beddel#readme",
+ "homepage": "https://github.com/botanarede/beddel-alpha#readme",
  "dependencies": {
  "@ai-sdk/google": "^2.0.23",
  "@modelcontextprotocol/sdk": "^1.0.0",
@@ -106,6 +106,7 @@
  }
  },
  "devDependencies": {
+ "@eslint/js": "^9.39.2",
  "@types/eventsource": "^1.1.15",
  "@types/jest": "^29.5.12",
  "@types/js-yaml": "^4.0.9",
@@ -115,7 +116,8 @@
  "eslint": "^9.0.0",
  "jest": "^29.0.0",
  "ts-jest": "^29.0.0",
- "typescript": "^5.0.0"
+ "typescript": "^5.0.0",
+ "typescript-eslint": "^8.50.0"
  },
  "engines": {
  "node": ">=18.0.0"
package/src/agents/chat/chat.handler.ts CHANGED
@@ -2,7 +2,7 @@ import 'server-only';
 
  /**
  * Chat Agent Handler - Server-only execution logic
- * Orchestrates RAG pipeline or simple chat based on mode
+ * Orchestrates RAG pipeline or simple LLM chat based on mode
  */
 
  import type { ExecutionContext } from '../../types/executionContext';
@@ -11,11 +11,12 @@ import type { ChatHandlerParams, ChatHandlerResult, ChatMode } from './chat.type
  import { executeVectorizeHandler } from '../gemini-vectorize/gemini-vectorize.handler';
  import { executeChromaDBHandler } from '../chromadb/chromadb.handler';
  import { executeRagHandler } from '../rag/rag.handler';
+ import { executeLlmHandler } from '../llm/llm.handler';
 
  const KNOWLEDGE_COLLECTION = 'beddel_knowledge';
 
  /**
- * Execute simple chat mode - direct LLM with conversation history
+ * Execute simple chat mode - direct LLM with conversation history (no documents)
  */
  async function executeSimpleChat(
  query: string,
@@ -29,7 +30,7 @@ async function executeSimpleChat(
  context.log(`[Chat:Simple] Processing query: "${query.substring(0, 50)}..."`);
 
  const chatStep: ExecutionStep = {
- agent: 'rag',
+ agent: 'llm',
  action: 'chat',
  status: 'running',
  startTime: Date.now(),
@@ -37,8 +38,8 @@
  };
  executionSteps.push(chatStep);
 
- const result = await executeRagHandler(
- { query, history: messages, mode: 'simple' },
+ const result = await executeLlmHandler(
+ { query, history: messages },
  props,
  context
  );
@@ -139,7 +140,7 @@ async function executeRagChat(
  searchStep.endTime = Date.now();
  searchStep.duration = searchStep.endTime - searchStep.startTime;
 
- // Step 4: Generate answer
+ // Step 4: Generate answer using RAG
  const ragStep: ExecutionStep = {
  agent: 'rag',
  action: 'generate',
@@ -150,14 +151,15 @@
  executionSteps.push(ragStep);
 
  const hasDocuments = searchResult.documents && searchResult.documents.trim().length > 0;
+ const documents = hasDocuments
+ ? searchResult.documents
+ : 'No specific documentation available. Answer based on general knowledge.';
 
- const ragResult = hasDocuments
- ? await executeRagHandler({ query, documents: searchResult.documents, history: messages }, props, context)
- : await executeRagHandler(
- { query, documents: 'No specific documentation available. Answer based on general knowledge.', history: messages },
- props,
- context
- );
+ const ragResult = await executeRagHandler(
+ { query, documents, history: messages },
+ props,
+ context
+ );
 
  ragStep.status = ragResult.error ? 'error' : 'success';
  ragStep.endTime = Date.now();
package/src/agents/chat/chat.yaml CHANGED
@@ -9,7 +9,7 @@ agent:
 
  metadata:
  name: "Q&A Context Chat Agent"
- description: "Orchestrates RAG pipeline or simple chat based on mode selection"
+ description: "Orchestrates RAG pipeline or direct LLM chat based on mode selection"
  category: "chat"
  route: "/agents/chat"
  knowledge_sources:
@@ -18,8 +18,8 @@ metadata:
  - "chat"
  - "orchestrator"
  - "rag"
+ - "llm"
  - "qa"
- - "simple"
 
  schema:
  input:
@@ -87,19 +87,18 @@
  output:
  _extracted: true
 
- # Step 2: Mode branch - Simple chat or RAG pipeline
+ # Step 2: Mode branch - Simple LLM chat or RAG pipeline
  - name: "mode-check"
  type: "conditional"
  condition: "$input.mode == 'simple'"
  then:
- # Simple mode: Direct LLM chat with conversation history
+ # Simple mode: Direct LLM chat with conversation history (no documents)
  - name: "simple-chat"
- type: "rag"
+ type: "llm"
  action:
  query: "$input.query"
  history: "$input.messages"
- mode: "simple"
- result: "ragResult"
+ result: "llmResult"
  else:
  # RAG mode: Full pipeline with knowledge base
  - name: "vectorize-query"
@@ -132,7 +131,6 @@
  query: "$input.query"
  documents: "$searchResult.documents"
  history: "$input.messages"
- mode: "rag"
  result: "ragResult"
 
  # Step 3: Deliver final response
package/src/agents/gemini-vectorize/gemini-vectorize.handler.ts CHANGED
@@ -2,32 +2,35 @@ import 'server-only';
 
  /**
  * Gemini Vectorize Agent Handler - Server-only execution logic
- * Generates text embeddings using Google's Gemini text-embedding-004 model
+ * Generates text embeddings using Google's text-embedding-004 model
+ *
+ * Note: Embeddings currently only support Google provider via Vercel AI SDK
  */
 
  import { embed, embedMany } from 'ai';
- import { google } from '@ai-sdk/google';
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
+ import { extractProviderConfig } from '../../runtime/llmProviderFactory';
  import type { ExecutionContext } from '../../types/executionContext';
  import type { VectorizeHandlerParams, VectorizeHandlerResult } from './gemini-vectorize.types';
 
- const GEMINI_EMBEDDING_MODEL = 'text-embedding-004';
+ const EMBEDDING_MODEL = 'text-embedding-004';
 
  /**
- * Execute vectorization using Gemini embeddings
+ * Execute vectorization using embeddings
  */
  export async function executeVectorizeHandler(
  params: VectorizeHandlerParams,
  props: Record<string, string>,
  context: ExecutionContext
  ): Promise<VectorizeHandlerResult> {
- const apiKey = props?.gemini_api_key?.trim();
- if (!apiKey) {
- throw new Error('Missing required prop: gemini_api_key');
+ const providerConfig = extractProviderConfig(props, 'google');
+
+ // Currently only Google supports embeddings via Vercel AI SDK
+ if (providerConfig.provider !== 'google') {
+ throw new Error(`Embeddings are currently only supported with Google provider, got: ${providerConfig.provider}`);
  }
 
- // Set API key in environment for google provider
- process.env.GOOGLE_GENERATIVE_AI_API_KEY = apiKey;
-
+ const google = createGoogleGenerativeAI({ apiKey: providerConfig.apiKey });
  const action = params.action || 'embedSingle';
 
  try {
@@ -37,10 +40,10 @@ export async function executeVectorizeHandler(
  throw new Error('Text input is required for embedSingle');
  }
 
- context.log(`[Gemini Vectorize] Embedding single text (${text.length} chars)...`);
+ context.log(`[Vectorize] Embedding single text (${text.length} chars)...`);
 
  const { embedding } = await embed({
- model: google.textEmbeddingModel(GEMINI_EMBEDDING_MODEL),
+ model: google.textEmbeddingModel(EMBEDDING_MODEL),
  value: text,
  });
 
@@ -52,10 +55,10 @@
  throw new Error('Texts array input is required for embedBatch');
  }
 
- context.log(`[Gemini Vectorize] Embedding batch of ${texts.length} texts...`);
+ context.log(`[Vectorize] Embedding batch of ${texts.length} texts...`);
 
  const { embeddings } = await embedMany({
- model: google.textEmbeddingModel(GEMINI_EMBEDDING_MODEL),
+ model: google.textEmbeddingModel(EMBEDDING_MODEL),
  values: texts,
  });
 
@@ -66,7 +69,7 @@
  }
  } catch (error: unknown) {
  const message = error instanceof Error ? error.message : String(error);
- context.log(`[Gemini Vectorize] Error: ${message}`);
+ context.log(`[Vectorize] Error: ${message}`);
  return { success: false, error: message };
  }
  }
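A hedged sketch of calling the reworked vectorize handler directly. The props keys read by extractProviderConfig are not shown in this diff, so the key below reuses the 0.2.x name as a plausible default:

import { executeVectorizeHandler } from 'beddel'; // assumed export path

const context = { log: console.log } as any; // stubbed ExecutionContext

// extractProviderConfig(props, 'google') defaults to Google, which satisfies the
// provider !== 'google' guard as long as a Google API key is supplied.
const result = await executeVectorizeHandler(
  { action: 'embedBatch', texts: ['secure YAML parsing', 'OpenAPI endpoint manager'] },
  { gemini_api_key: process.env.GEMINI_API_KEY ?? '' }, // assumed prop name
  context
);

if (result.success) {
  console.log('Batch embedded with text-embedding-004', result);
} else {
  console.error(result.error);
}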
package/src/agents/image/image.handler.ts CHANGED
@@ -2,27 +2,30 @@ import 'server-only';
 
  /**
  * Image Agent Handler - Server-only execution logic
- * Generates images using Gemini Flash with curated styles
+ * Generates images using LLM providers (Google Gemini Imagen by default)
  */
 
  import { experimental_generateImage } from 'ai';
  import { createGoogleGenerativeAI } from '@ai-sdk/google';
+ import { extractProviderConfig } from '../../runtime/llmProviderFactory';
  import type { ExecutionContext } from '../../types/executionContext';
  import type { ImageHandlerParams, ImageHandlerResult } from './image.types';
 
  const GEMINI_IMAGE_MODEL = 'imagen-4.0-fast-generate-001';
 
  /**
- * Execute image generation using Gemini
+ * Execute image generation using configured provider
  */
  export async function executeImageHandler(
  params: ImageHandlerParams,
  props: Record<string, string>,
  context: ExecutionContext
  ): Promise<ImageHandlerResult> {
- const apiKey = props?.gemini_api_key?.trim();
- if (!apiKey) {
- throw new Error('Missing required prop: gemini_api_key');
+ const providerConfig = extractProviderConfig(props, 'google');
+
+ // Currently only Google supports image generation via Vercel AI SDK
+ if (providerConfig.provider !== 'google') {
+ throw new Error(`Image generation is currently only supported with Google provider, got: ${providerConfig.provider}`);
  }
 
  const description = params.description?.trim();
@@ -47,7 +50,7 @@
  .replace(/{{style}}/g, style)
  .trim();
 
- const google = createGoogleGenerativeAI({ apiKey });
+ const google = createGoogleGenerativeAI({ apiKey: providerConfig.apiKey });
  const model = google.image(GEMINI_IMAGE_MODEL);
  const startTime = Date.now();
 
@@ -74,6 +77,7 @@
  prompt_used: prompt,
  metadata: {
  model_used: GEMINI_IMAGE_MODEL,
+ provider: providerConfig.provider,
  processing_time: Date.now() - startTime,
  style,
  resolution,
package/src/agents/image/image.types.ts CHANGED
@@ -27,6 +27,7 @@ export interface ImageHandlerResult {
  prompt_used: string;
  metadata: {
  model_used: string;
+ provider: string;
  processing_time: number;
  style: string;
  resolution: string;
package/src/agents/index.ts CHANGED
@@ -19,6 +19,7 @@ export { geminiVectorizeMetadata } from './gemini-vectorize';
  export { chromadbMetadata } from './chromadb';
  export { gitmcpMetadata } from './gitmcp';
  export { ragMetadata } from './rag';
+ export { llmMetadata } from './llm';
  export { chatMetadata } from './chat';
 
  // Schema exports (client-safe)
@@ -30,6 +31,7 @@ export { GeminiVectorizeInputSchema, GeminiVectorizeOutputSchema } from './gemin
  export { ChromaDBInputSchema, ChromaDBOutputSchema } from './chromadb';
  export { GitMcpInputSchema, GitMcpOutputSchema } from './gitmcp';
  export { RagInputSchema, RagOutputSchema } from './rag';
+ export { LlmInputSchema, LlmOutputSchema } from './llm';
  export { ChatInputSchema, ChatOutputSchema } from './chat';
 
  // Type exports (client-safe)
@@ -41,6 +43,7 @@ export type { GeminiVectorizeInput, GeminiVectorizeOutput, VectorizeHandlerParam
  export type { ChromaDBInput, ChromaDBOutput, ChromaDBHandlerParams, ChromaDBHandlerResult, ChromaDBSearchResult } from './chromadb';
  export type { GitMcpInput, GitMcpOutput, GitMcpHandlerParams, GitMcpHandlerResult } from './gitmcp';
  export type { RagInput, RagOutput, RagHandlerParams, RagHandlerResult, ConversationMessage } from './rag';
+ export type { LlmInput, LlmOutput, LlmHandlerParams, LlmHandlerResult } from './llm';
  export type { ChatInput, ChatOutput, ChatHandlerParams, ChatHandlerResult } from './chat';
 
  /**
@@ -54,6 +57,7 @@ export const allAgentMetadata = [
  { id: 'gemini-vectorize', name: 'Gemini Vectorize Agent', description: "Generates text embeddings using Google's Gemini text-embedding-004 model", category: 'ai-service', route: '/agents/gemini-vectorize' },
  { id: 'chromadb', name: 'ChromaDB Agent', description: 'Vector storage and retrieval using ChromaDB. Supports local and cloud deployments.', category: 'database', route: '/agents/chromadb' },
  { id: 'gitmcp', name: 'GitMCP Documentation Agent', description: 'Fetches and chunks GitHub repository documentation via gitmcp.io MCP servers', category: 'integration', route: '/agents/gitmcp' },
- { id: 'rag', name: 'RAG Intelligence Agent', description: 'Generates natural language answers based on provided context using Gemini', category: 'intelligence', route: '/agents/rag' },
- { id: 'chat', name: 'Q&A Context Chat Agent', description: 'Orchestrates RAG pipeline: vectorization, storage, retrieval and answer generation', category: 'chat', route: '/agents/chat' },
+ { id: 'rag', name: 'RAG Intelligence Agent', description: 'Generates natural language answers based on provided document context using Gemini', category: 'intelligence', route: '/agents/rag' },
+ { id: 'llm', name: 'LLM Chat Agent', description: 'Direct LLM interaction with conversation history support (non-RAG)', category: 'intelligence', route: '/agents/llm' },
+ { id: 'chat', name: 'Q&A Context Chat Agent', description: 'Orchestrates RAG pipeline or direct LLM chat based on mode selection', category: 'chat', route: '/agents/chat' },
  ] as const;
package/src/agents/joker/joker.handler.ts CHANGED
@@ -2,35 +2,29 @@ import 'server-only';
 
  /**
  * Joker Agent Handler - Server-only execution logic
- * Generates jokes using Gemini Flash
+ * Generates jokes using LLM providers (Google Gemini by default)
  */
 
  import { generateText } from 'ai';
- import { createGoogleGenerativeAI } from '@ai-sdk/google';
+ import { LLMProviderFactory, extractProviderConfig } from '../../runtime/llmProviderFactory';
  import type { ExecutionContext } from '../../types/executionContext';
  import type { JokeHandlerParams, JokeHandlerResult } from './joker.types';
 
- const GEMINI_MODEL = 'models/gemini-2.5-flash';
-
  /**
- * Execute joke generation using Gemini Flash
+ * Execute joke generation using configured LLM provider
  */
  export async function executeJokeHandler(
  params: JokeHandlerParams,
  props: Record<string, string>,
  context: ExecutionContext
  ): Promise<JokeHandlerResult> {
- const apiKey = props?.gemini_api_key?.trim();
- if (!apiKey) {
- throw new Error('Missing required prop: gemini_api_key');
- }
+ const providerConfig = extractProviderConfig(props, 'google');
+ const model = LLMProviderFactory.createLanguageModel(providerConfig);
 
  const prompt = params.prompt?.trim() || 'Tell a short and original joke that works for any audience.';
  const temperature = params.temperature ?? 0.8;
  const maxTokens = params.maxTokens;
 
- const google = createGoogleGenerativeAI({ apiKey });
- const model = google(GEMINI_MODEL);
  const startTime = Date.now();
 
  context.log(`[Joker] Generating joke with temperature=${temperature}`);
@@ -50,7 +44,8 @@
  return {
  text: finalText,
  metadata: {
- model_used: GEMINI_MODEL,
+ model_used: providerConfig.model || LLMProviderFactory.getDefaultModel(providerConfig.provider),
+ provider: providerConfig.provider,
  processing_time: Date.now() - startTime,
  temperature,
  max_tokens: maxTokens ?? null,
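The handlers above now share one provider-construction path. A sketch of using the factory inside a custom server-side helper; the deep import path and any provider value other than 'google' are assumptions, since llmProviderFactory is added in this release but its source is not shown in this diff:

import { generateText } from 'ai';
import { LLMProviderFactory, extractProviderConfig } from 'beddel/dist/runtime/llmProviderFactory'; // assumed path

async function writeHaiku(props: Record<string, string>): Promise<string> {
  // Reads provider/model/API-key settings from the agent props, falling back to Google.
  const config = extractProviderConfig(props, 'google');
  const model = LLMProviderFactory.createLanguageModel(config);

  const { text } = await generateText({
    model,
    prompt: 'Write a haiku about declarative agents.',
    temperature: 0.7,
  });
  return text;
}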
package/src/agents/joker/joker.types.ts CHANGED
@@ -18,6 +18,7 @@ export interface JokeHandlerResult {
  text: string;
  metadata: {
  model_used: string;
+ provider: string;
  processing_time: number;
  temperature: number;
  max_tokens: number | null;
package/src/agents/llm/index.ts ADDED
@@ -0,0 +1,20 @@
+ /**
+ * LLM Agent - Public exports (client-safe)
+ */
+
+ // Schema exports (client-safe)
+ export { LlmInputSchema, LlmOutputSchema } from './llm.schema';
+ export type { LlmInput, LlmOutput } from './llm.schema';
+
+ // Type exports (client-safe)
+ export type { LlmHandlerParams, LlmHandlerResult, LlmMetadata } from './llm.types';
+
+ // Metadata (client-safe)
+ export const llmMetadata = {
+ id: 'llm',
+ name: 'LLM Chat Agent',
+ description: 'Direct LLM interaction with conversation history support (non-RAG)',
+ category: 'intelligence',
+ route: '/agents/llm',
+ tags: ['llm', 'chat', 'gemini', 'conversation'],
+ } as const;
package/src/agents/llm/llm.handler.ts ADDED
@@ -0,0 +1,82 @@
+ import 'server-only';
+
+ /**
+ * LLM Agent Handler - Server-only execution logic
+ * Direct LLM interaction with conversation history support (non-RAG)
+ */
+
+ import { generateText } from 'ai';
+ import { LLMProviderFactory, extractProviderConfig } from '../../runtime/llmProviderFactory';
+ import type { ExecutionContext } from '../../types/executionContext';
+ import type { LlmHandlerParams, LlmHandlerResult } from './llm.types';
+ import type { ConversationMessage } from '../rag/rag.types';
+
+ /**
+ * Build prompt for direct LLM chat (no document context)
+ */
+ function buildChatPrompt(
+ query: string,
+ history?: ConversationMessage[],
+ systemPrompt?: string
+ ): string {
+ const system = systemPrompt || 'You are a helpful, friendly assistant.';
+
+ const conversationContext = history?.length
+ ? `CONVERSATION HISTORY:\n${history.map((m) => `${m.role.toUpperCase()}: ${m.content}`).join('\n')}\n\n`
+ : '';
+
+ return `${system}
+
+ ${conversationContext}USER MESSAGE:
+ ${query}
+
+ INSTRUCTIONS:
+ 1. Respond naturally to the user's message.
+ 2. Consider the conversation history for context continuity if available.
+ 3. Be concise but helpful.
+
+ RESPONSE:`;
+ }
+
+ /**
+ * Execute direct LLM chat
+ */
+ export async function executeLlmHandler(
+ params: LlmHandlerParams,
+ props: Record<string, string>,
+ context: ExecutionContext
+ ): Promise<LlmHandlerResult> {
+ const providerConfig = extractProviderConfig(props, 'google');
+ const model = LLMProviderFactory.createLanguageModel(providerConfig);
+
+ const { query, history, temperature = 0.7, systemPrompt } = params;
+
+ if (!query) {
+ throw new Error('Missing required LLM input: query');
+ }
+
+ const prompt = buildChatPrompt(query, history, systemPrompt);
+
+ try {
+ context.log(`[LLM] Generating response for: "${query.substring(0, 50)}..."`);
+
+ const { text } = await generateText({
+ model,
+ prompt,
+ temperature,
+ });
+
+ return {
+ response: text,
+ timestamp: new Date().toISOString(),
+ };
+ } catch (error: unknown) {
+ const message = error instanceof Error ? error.message : String(error);
+ context.log(`[LLM] Error: ${message}`);
+ return {
+ response: '',
+ timestamp: new Date().toISOString(),
+ error: message,
+ };
+ }
+ }
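A hedged usage sketch for the new handler; the import path, the props key, and the ConversationMessage role values are assumptions consistent with the rest of this diff:

import { executeLlmHandler } from 'beddel'; // assumed export path
import type { ConversationMessage } from 'beddel';

const context = { log: console.log } as any; // stubbed ExecutionContext

const history: ConversationMessage[] = [
  { role: 'user', content: 'What is beddel?' },
  { role: 'assistant', content: 'A secure YAML parser and declarative agent runtime.' },
];

// The handler concatenates systemPrompt + history + query into one prompt and calls generateText.
const result = await executeLlmHandler(
  { query: 'Which LLM providers does 0.3.0 support?', history, temperature: 0.5 },
  { gemini_api_key: process.env.GEMINI_API_KEY ?? '' }, // assumed prop name
  context
);

console.log(result.error ?? result.response, result.timestamp);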