@juspay/neurolink 9.3.0 → 9.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/README.md +8 -8
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/index.js +1 -0
- package/dist/index.d.ts +35 -0
- package/dist/index.js +17 -0
- package/dist/lib/agent/directTools.d.ts +5 -5
- package/dist/lib/index.d.ts +35 -0
- package/dist/lib/index.js +17 -0
- package/dist/lib/neurolink.d.ts +12 -1
- package/dist/lib/neurolink.js +265 -4
- package/dist/lib/server/utils/validation.d.ts +8 -8
- package/dist/lib/types/generateTypes.d.ts +28 -0
- package/dist/lib/types/index.d.ts +6 -0
- package/dist/lib/types/index.js +12 -0
- package/dist/lib/types/modelTypes.d.ts +2 -2
- package/dist/lib/types/streamTypes.d.ts +35 -0
- package/dist/lib/types/workflowTypes.d.ts +558 -0
- package/dist/lib/types/workflowTypes.js +32 -0
- package/dist/lib/workflow/LAYER-EXAMPLES.d.ts +13 -0
- package/dist/lib/workflow/LAYER-EXAMPLES.js +312 -0
- package/dist/lib/workflow/PROMPT-EXAMPLES.d.ts +117 -0
- package/dist/lib/workflow/PROMPT-EXAMPLES.js +246 -0
- package/dist/lib/workflow/config.d.ts +1569 -0
- package/dist/lib/workflow/config.js +399 -0
- package/dist/lib/workflow/core/ensembleExecutor.d.ts +56 -0
- package/dist/lib/workflow/core/ensembleExecutor.js +398 -0
- package/dist/lib/workflow/core/judgeScorer.d.ts +26 -0
- package/dist/lib/workflow/core/judgeScorer.js +527 -0
- package/dist/lib/workflow/core/responseConditioner.d.ts +22 -0
- package/dist/lib/workflow/core/responseConditioner.js +226 -0
- package/dist/lib/workflow/core/types/conditionerTypes.d.ts +7 -0
- package/dist/lib/workflow/core/types/conditionerTypes.js +8 -0
- package/dist/lib/workflow/core/types/ensembleTypes.d.ts +7 -0
- package/dist/lib/workflow/core/types/ensembleTypes.js +8 -0
- package/dist/lib/workflow/core/types/index.d.ts +7 -0
- package/dist/lib/workflow/core/types/index.js +8 -0
- package/dist/lib/workflow/core/types/judgeTypes.d.ts +7 -0
- package/dist/lib/workflow/core/types/judgeTypes.js +8 -0
- package/dist/lib/workflow/core/types/layerTypes.d.ts +7 -0
- package/dist/lib/workflow/core/types/layerTypes.js +8 -0
- package/dist/lib/workflow/core/types/registryTypes.d.ts +7 -0
- package/dist/lib/workflow/core/types/registryTypes.js +8 -0
- package/dist/lib/workflow/core/workflowRegistry.d.ts +73 -0
- package/dist/lib/workflow/core/workflowRegistry.js +305 -0
- package/dist/lib/workflow/core/workflowRunner.d.ts +115 -0
- package/dist/lib/workflow/core/workflowRunner.js +554 -0
- package/dist/lib/workflow/index.d.ts +36 -0
- package/dist/lib/workflow/index.js +51 -0
- package/dist/lib/workflow/types.d.ts +19 -0
- package/dist/lib/workflow/types.js +10 -0
- package/dist/lib/workflow/utils/types/index.d.ts +7 -0
- package/dist/lib/workflow/utils/types/index.js +8 -0
- package/dist/lib/workflow/utils/types/metricsTypes.d.ts +7 -0
- package/dist/lib/workflow/utils/types/metricsTypes.js +8 -0
- package/dist/lib/workflow/utils/types/validationTypes.d.ts +7 -0
- package/dist/lib/workflow/utils/types/validationTypes.js +8 -0
- package/dist/lib/workflow/utils/workflowMetrics.d.ts +76 -0
- package/dist/lib/workflow/utils/workflowMetrics.js +312 -0
- package/dist/lib/workflow/utils/workflowValidation.d.ts +29 -0
- package/dist/lib/workflow/utils/workflowValidation.js +421 -0
- package/dist/lib/workflow/workflows/adaptiveWorkflow.d.ts +72 -0
- package/dist/lib/workflow/workflows/adaptiveWorkflow.js +367 -0
- package/dist/lib/workflow/workflows/consensusWorkflow.d.ts +69 -0
- package/dist/lib/workflow/workflows/consensusWorkflow.js +193 -0
- package/dist/lib/workflow/workflows/fallbackWorkflow.d.ts +49 -0
- package/dist/lib/workflow/workflows/fallbackWorkflow.js +226 -0
- package/dist/lib/workflow/workflows/multiJudgeWorkflow.d.ts +70 -0
- package/dist/lib/workflow/workflows/multiJudgeWorkflow.js +352 -0
- package/dist/neurolink.d.ts +12 -1
- package/dist/neurolink.js +265 -4
- package/dist/types/generateTypes.d.ts +28 -0
- package/dist/types/index.d.ts +6 -0
- package/dist/types/index.js +12 -0
- package/dist/types/streamTypes.d.ts +35 -0
- package/dist/types/workflowTypes.d.ts +558 -0
- package/dist/types/workflowTypes.js +31 -0
- package/dist/workflow/LAYER-EXAMPLES.d.ts +13 -0
- package/dist/workflow/LAYER-EXAMPLES.js +311 -0
- package/dist/workflow/PROMPT-EXAMPLES.d.ts +117 -0
- package/dist/workflow/PROMPT-EXAMPLES.js +245 -0
- package/dist/workflow/config.d.ts +1569 -0
- package/dist/workflow/config.js +398 -0
- package/dist/workflow/core/ensembleExecutor.d.ts +56 -0
- package/dist/workflow/core/ensembleExecutor.js +397 -0
- package/dist/workflow/core/judgeScorer.d.ts +26 -0
- package/dist/workflow/core/judgeScorer.js +526 -0
- package/dist/workflow/core/responseConditioner.d.ts +22 -0
- package/dist/workflow/core/responseConditioner.js +225 -0
- package/dist/workflow/core/types/conditionerTypes.d.ts +7 -0
- package/dist/workflow/core/types/conditionerTypes.js +7 -0
- package/dist/workflow/core/types/ensembleTypes.d.ts +7 -0
- package/dist/workflow/core/types/ensembleTypes.js +7 -0
- package/dist/workflow/core/types/index.d.ts +7 -0
- package/dist/workflow/core/types/index.js +7 -0
- package/dist/workflow/core/types/judgeTypes.d.ts +7 -0
- package/dist/workflow/core/types/judgeTypes.js +7 -0
- package/dist/workflow/core/types/layerTypes.d.ts +7 -0
- package/dist/workflow/core/types/layerTypes.js +7 -0
- package/dist/workflow/core/types/registryTypes.d.ts +7 -0
- package/dist/workflow/core/types/registryTypes.js +7 -0
- package/dist/workflow/core/workflowRegistry.d.ts +73 -0
- package/dist/workflow/core/workflowRegistry.js +304 -0
- package/dist/workflow/core/workflowRunner.d.ts +115 -0
- package/dist/workflow/core/workflowRunner.js +553 -0
- package/dist/workflow/index.d.ts +36 -0
- package/dist/workflow/index.js +50 -0
- package/dist/workflow/types.d.ts +19 -0
- package/dist/workflow/types.js +9 -0
- package/dist/workflow/utils/types/index.d.ts +7 -0
- package/dist/workflow/utils/types/index.js +7 -0
- package/dist/workflow/utils/types/metricsTypes.d.ts +7 -0
- package/dist/workflow/utils/types/metricsTypes.js +7 -0
- package/dist/workflow/utils/types/validationTypes.d.ts +7 -0
- package/dist/workflow/utils/types/validationTypes.js +7 -0
- package/dist/workflow/utils/workflowMetrics.d.ts +76 -0
- package/dist/workflow/utils/workflowMetrics.js +311 -0
- package/dist/workflow/utils/workflowValidation.d.ts +29 -0
- package/dist/workflow/utils/workflowValidation.js +420 -0
- package/dist/workflow/workflows/adaptiveWorkflow.d.ts +72 -0
- package/dist/workflow/workflows/adaptiveWorkflow.js +366 -0
- package/dist/workflow/workflows/consensusWorkflow.d.ts +69 -0
- package/dist/workflow/workflows/consensusWorkflow.js +192 -0
- package/dist/workflow/workflows/fallbackWorkflow.d.ts +49 -0
- package/dist/workflow/workflows/fallbackWorkflow.js +225 -0
- package/dist/workflow/workflows/multiJudgeWorkflow.d.ts +70 -0
- package/dist/workflow/workflows/multiJudgeWorkflow.js +351 -0
- package/package.json +3 -2
|
@@ -0,0 +1,366 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Adaptive Quality Workflow
|
|
3
|
+
* =========================
|
|
4
|
+
*
|
|
5
|
+
* Layer-based execution optimizing for maximum quality:
|
|
6
|
+
* - Start with fast validation tier
|
|
7
|
+
* - Escalate to premium tier if needed
|
|
8
|
+
* - Final expert tier for complex cases
|
|
9
|
+
*
|
|
10
|
+
* Ideal for: Quality-critical tasks with cost awareness
|
|
11
|
+
*
|
|
12
|
+
* @module workflow/workflows/adaptiveWorkflow
|
|
13
|
+
*/
|
|
14
|
+
import { AIProviderName } from "../../constants/enums.js";
|
|
15
|
+
import { WORKFLOW_CREATION_DATE } from "../config.js";
|
|
16
|
+
import { logger } from "../../utils/logger.js";
|
|
17
|
+
/**
|
|
18
|
+
* Quality-Max Adaptive Workflow
|
|
19
|
+
*
|
|
20
|
+
* Uses 3-tier layer-based execution:
|
|
21
|
+
* 1. Validation tier (parallel): 2 fast models check complexity
|
|
22
|
+
* 2. Premium tier (parallel): 2 high-quality models if validation uncertain
|
|
23
|
+
* 3. Expert tier (sequential): Best model for final polish
|
|
24
|
+
*
|
|
25
|
+
* Each tier evaluates if next tier is needed based on confidence
|
|
26
|
+
*
|
|
27
|
+
* @example
|
|
28
|
+
* ```typescript
|
|
29
|
+
* import { runWorkflow } from '../core/workflowRunner.js';
|
|
30
|
+
* import { QUALITY_MAX_WORKFLOW } from './adaptiveWorkflow.js';
|
|
31
|
+
*
|
|
32
|
+
* const result = await runWorkflow(QUALITY_MAX_WORKFLOW, {
|
|
33
|
+
* prompt: 'Design a scalable microservices architecture',
|
|
34
|
+
* verbose: true,
|
|
35
|
+
* });
|
|
36
|
+
*
|
|
37
|
+
* console.log('Quality score:', result.score);
|
|
38
|
+
* console.log('Tiers executed:', result.ensembleResponses.length);
|
|
39
|
+
* ```
|
|
40
|
+
*/
|
|
41
|
+
export const QUALITY_MAX_WORKFLOW = {
    id: "quality-max",
    name: "Quality-Max Adaptive",
    description: "Adaptive 3-tier execution optimizing for maximum quality",
    version: "1.0.0",
    type: "adaptive",
    // Placeholder (required, but modelGroups takes precedence)
    models: [
        {
            provider: AIProviderName.OPENAI,
            model: "gpt-4o",
        },
    ],
    // Layer-based execution with quality escalation: validation -> premium -> expert
    modelGroups: [
        {
            id: "validation-tier",
            name: "Validation Tier",
            description: "Fast models to assess complexity and confidence",
            models: [
                {
                    provider: AIProviderName.OPENAI,
                    model: "gpt-4o-mini",
                    label: "GPT-4o-mini",
                    temperature: 0.7,
                    timeout: 10000, // 10s per-model timeout
                },
                {
                    provider: AIProviderName.GOOGLE_AI,
                    model: "gemini-2.0-flash",
                    label: "Gemini Flash",
                    temperature: 0.7,
                    timeout: 10000, // 10s per-model timeout
                },
            ],
            executionStrategy: "parallel",
            continueOnFailure: true, // Always try premium tier
            minSuccessful: 1,
            parallelism: 2, // both validation models run at once
        },
        {
            id: "premium-tier",
            name: "Premium Tier",
            description: "High-quality models for thorough analysis",
            models: [
                {
                    provider: AIProviderName.OPENAI,
                    model: "gpt-4o",
                    label: "GPT-4o",
                    temperature: 0.7,
                    systemPrompt: "Provide comprehensive, high-quality responses with deep analysis.",
                    timeout: 20000, // 20s per-model timeout
                },
                {
                    provider: AIProviderName.ANTHROPIC,
                    model: "claude-3-5-sonnet-20241022",
                    label: "Claude 3.5 Sonnet",
                    temperature: 0.7,
                    systemPrompt: "Think deeply and provide nuanced, well-reasoned responses.",
                    timeout: 20000, // 20s per-model timeout
                },
            ],
            executionStrategy: "parallel",
            continueOnFailure: true,
            minSuccessful: 1,
            parallelism: 2,
        },
        {
            id: "expert-tier",
            name: "Expert Tier",
            description: "Top-tier model for final quality assurance",
            models: [
                {
                    provider: AIProviderName.ANTHROPIC,
                    model: "claude-3-5-sonnet-20241022",
                    label: "Claude 3.5 Sonnet Expert",
                    temperature: 0.6, // Lower temp for consistency
                    systemPrompt: "You are an expert. Provide the highest quality, most accurate response possible. Be thorough, precise, and authoritative.",
                    timeout: 30000, // 30s — longest budget, last tier
                },
            ],
            executionStrategy: "sequential",
            continueOnFailure: false, // expert tier is the end of the line
            minSuccessful: 1,
        },
    ],
    // Judge evaluates all responses and selects best
    judge: {
        provider: AIProviderName.OPENAI,
        model: "gpt-4o",
        criteria: ["quality", "depth", "accuracy", "completeness"],
        outputFormat: "detailed",
        includeReasoning: true,
        temperature: 0.1, // near-deterministic judging
        scoreScale: { min: 0, max: 100 },
        customPrompt: "Evaluate responses for maximum quality. Prioritize depth, accuracy, and completeness. Be rigorous in assessment.",
    },
    // Execution configuration
    execution: {
        timeout: 70000, // 70 seconds for all tiers
        minResponses: 2,
        costThreshold: 0.12, // presumably USD, matching sibling workflows' "$" comments — confirm
    },
    // Metadata
    tags: ["adaptive", "quality", "tiered", "escalation"],
    metadata: {
        useCase: "Quality-critical tasks with adaptive execution",
        recommendedFor: [
            "complex analysis",
            "expert consultation",
            "high-stakes decisions",
            "technical documentation",
        ],
        averageCost: 0.08, // Cost depends on how many tiers execute
        averageLatency: 4500, // presumably milliseconds — confirm against runner
    },
    createdAt: WORKFLOW_CREATION_DATE,
};
|
|
159
|
+
/**
|
|
160
|
+
* Speed-First Adaptive Workflow
|
|
161
|
+
*
|
|
162
|
+
* Optimizes for speed with quality fallback:
|
|
163
|
+
* 1. Fast tier: Single fast model (GPT-4o-mini)
|
|
164
|
+
* 2. Balanced tier: If fast fails, use Gemini 2.0
|
|
165
|
+
* 3. Quality tier: If both fail, use GPT-4o
|
|
166
|
+
*/
|
|
167
|
+
export const SPEED_FIRST_WORKFLOW = {
    id: "speed-first",
    name: "Speed-First Adaptive",
    description: "Fast execution with quality fallback",
    version: "1.0.0",
    type: "adaptive",
    // Placeholder models list (modelGroups below drives actual execution)
    models: [
        {
            provider: AIProviderName.OPENAI,
            model: "gpt-4o-mini",
        },
    ],
    // Three sequential tiers: each later tier is a fallback for the previous one
    modelGroups: [
        {
            id: "fast-tier",
            name: "Fast Tier",
            models: [
                {
                    provider: AIProviderName.OPENAI,
                    model: "gpt-4o-mini",
                    temperature: 0.7,
                    timeout: 5000, // 5 second timeout
                },
            ],
            executionStrategy: "sequential",
            continueOnFailure: true, // fall through to balanced tier on failure
            minSuccessful: 1,
        },
        {
            id: "balanced-tier",
            name: "Balanced Tier",
            models: [
                {
                    provider: AIProviderName.GOOGLE_AI,
                    model: "gemini-2.0-flash",
                    temperature: 0.7,
                    timeout: 10000, // 10s per-model timeout
                },
            ],
            executionStrategy: "sequential",
            continueOnFailure: true, // fall through to quality tier on failure
            minSuccessful: 1,
        },
        {
            id: "quality-tier",
            name: "Quality Tier",
            models: [
                {
                    provider: AIProviderName.OPENAI,
                    model: "gpt-4o",
                    temperature: 0.7,
                    timeout: 15000, // 15s — largest budget, last resort
                },
            ],
            executionStrategy: "sequential",
            continueOnFailure: false, // no further fallback after this tier
            minSuccessful: 1,
        },
    ],
    judge: {
        provider: AIProviderName.OPENAI,
        model: "gpt-4o-mini", // Fast judge
        criteria: ["speed", "quality"],
        outputFormat: "best",
        includeReasoning: true,
        temperature: 0.1, // near-deterministic judging
        scoreScale: { min: 0, max: 100 },
    },
    execution: {
        timeout: 35000, // 35s overall budget across all tiers
        minResponses: 1,
        costThreshold: 0.05, // presumably USD, matching sibling workflows' "$" comments — confirm
    },
    tags: ["adaptive", "speed", "fallback"],
    metadata: {
        useCase: "Speed-optimized with quality guarantee",
        recommendedFor: ["real-time applications", "quick queries"],
        averageCost: 0.01,
        averageLatency: 1500, // presumably milliseconds — confirm against runner
    },
    createdAt: WORKFLOW_CREATION_DATE,
};
|
|
249
|
+
/**
|
|
250
|
+
* Balanced Adaptive Workflow
|
|
251
|
+
*
|
|
252
|
+
* Balances speed, cost, and quality:
|
|
253
|
+
* 1. Standard tier (parallel): GPT-4o-mini + Gemini Flash
|
|
254
|
+
* 2. Premium tier (parallel): GPT-4o + Claude 3.5 if standard uncertain
|
|
255
|
+
*/
|
|
256
|
+
export const BALANCED_ADAPTIVE_WORKFLOW = {
    id: "balanced-adaptive",
    name: "Balanced Adaptive",
    description: "Balanced 2-tier execution",
    version: "1.0.0",
    type: "adaptive",
    // Placeholder models list (modelGroups below drives actual execution)
    models: [
        {
            provider: AIProviderName.OPENAI,
            model: "gpt-4o-mini",
        },
    ],
    // Two tiers: cheap standard tier first, premium tier as escalation
    modelGroups: [
        {
            id: "standard-tier",
            name: "Standard Tier",
            description: "Fast, cost-effective models",
            models: [
                {
                    provider: AIProviderName.OPENAI,
                    model: "gpt-4o-mini",
                    temperature: 0.7,
                },
                {
                    provider: AIProviderName.GOOGLE_AI,
                    model: "gemini-2.0-flash",
                    temperature: 0.7,
                },
            ],
            executionStrategy: "parallel",
            continueOnFailure: true, // allow escalation to premium tier
            minSuccessful: 1,
            parallelism: 2, // both standard models run at once
        },
        {
            id: "premium-tier",
            name: "Premium Tier",
            description: "High-quality models for complex cases",
            models: [
                {
                    provider: AIProviderName.OPENAI,
                    model: "gpt-4o",
                    temperature: 0.7,
                },
                {
                    provider: AIProviderName.ANTHROPIC,
                    model: "claude-3-5-sonnet-20241022",
                    temperature: 0.7,
                },
            ],
            executionStrategy: "parallel",
            continueOnFailure: false, // premium tier is the last stop
            minSuccessful: 1,
            parallelism: 2,
        },
    ],
    judge: {
        provider: AIProviderName.OPENAI,
        model: "gpt-4o",
        criteria: ["quality", "accuracy", "balance"],
        outputFormat: "detailed",
        includeReasoning: true,
        temperature: 0.1, // near-deterministic judging
        scoreScale: { min: 0, max: 100 },
    },
    execution: {
        timeout: 40000, // 40s overall budget across both tiers
        minResponses: 2,
        costThreshold: 0.08, // presumably USD, matching sibling workflows' "$" comments — confirm
    },
    tags: ["adaptive", "balanced", "tiered"],
    metadata: {
        useCase: "Balanced speed/quality/cost tradeoff",
        recommendedFor: ["general purpose", "production applications"],
        averageCost: 0.04,
        averageLatency: 2500, // presumably milliseconds — confirm against runner
    },
    createdAt: WORKFLOW_CREATION_DATE,
};
|
|
335
|
+
/**
|
|
336
|
+
* Create custom adaptive workflow
|
|
337
|
+
*
|
|
338
|
+
* @param tiers - Number of quality tiers (2, 3, or 4)
|
|
339
|
+
* @param strategy - 'speed' | 'balanced' | 'quality'
|
|
340
|
+
* @returns Configured adaptive workflow
|
|
341
|
+
*
|
|
342
|
+
* @example
|
|
343
|
+
* ```typescript
|
|
344
|
+
* const workflow = createAdaptiveWorkflow(3, 'quality');
|
|
345
|
+
* const result = await runWorkflow(workflow, {
|
|
346
|
+
* prompt: 'Complex technical analysis',
|
|
347
|
+
* });
|
|
348
|
+
* ```
|
|
349
|
+
*/
|
|
350
|
+
export function createAdaptiveWorkflow(tiers, strategy) {
    // Base workflow selected per strategy; the result is a shallow copy with
    // re-derived identity fields.
    const workflows = {
        speed: SPEED_FIRST_WORKFLOW,
        balanced: BALANCED_ADAPTIVE_WORKFLOW,
        quality: QUALITY_MAX_WORKFLOW,
    };
    const base = workflows[strategy];
    if (!base) {
        // Guard: previously an unknown strategy spread `undefined` (a silent
        // no-op in object spread), returning a "workflow" with only
        // id/name/description and no models or judge — it then failed far from
        // the cause at run time. Fail fast with a descriptive error instead.
        throw new Error(`[AdaptiveWorkflow] Unknown strategy "${strategy}". Expected one of: ${Object.keys(workflows).join(", ")}`);
    }
    // Tier count is fixed by the chosen strategy; the `tiers` argument is
    // advisory only, so mismatches are logged rather than honored.
    const actualTiers = strategy === "balanced" ? 2 : 3;
    if (tiers !== actualTiers) {
        logger.warn(`[AdaptiveWorkflow] Requested ${tiers} tiers but ${strategy} strategy uses ${actualTiers} tiers`);
    }
    return {
        ...base,
        id: `adaptive-${actualTiers}tier-${strategy}`,
        name: `Adaptive ${actualTiers}-Tier (${strategy})`,
        description: `${actualTiers}-tier adaptive execution optimized for ${strategy}`,
    };
}
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Consensus-3 Workflow
|
|
3
|
+
* ====================
|
|
4
|
+
*
|
|
5
|
+
* 3-model ensemble with judge selecting best response based on:
|
|
6
|
+
* - Accuracy
|
|
7
|
+
* - Clarity
|
|
8
|
+
* - Completeness
|
|
9
|
+
*
|
|
10
|
+
* Ideal for: Balanced quality across multiple providers
|
|
11
|
+
*
|
|
12
|
+
* @module workflow/workflows/consensusWorkflow
|
|
13
|
+
*/
|
|
14
|
+
import type { WorkflowConfig } from "../types.js";
|
|
15
|
+
/**
 * Consensus-3 Workflow Configuration
 *
 * Uses 3 high-quality models in parallel:
 * - GPT-4o (OpenAI) - Strong reasoning
 * - Claude 3.5 Sonnet (Anthropic) - Thoughtful analysis
 * - Gemini 2.0 Flash (Google) - Fast and capable
 *
 * Judge: GPT-4o evaluates on accuracy, clarity, and completeness
 *
 * @example
 * ```typescript
 * import { runWorkflow } from '../core/workflowRunner.js';
 * import { CONSENSUS_3_WORKFLOW } from './consensusWorkflow.js';
 *
 * const result = await runWorkflow(CONSENSUS_3_WORKFLOW, {
 *   prompt: 'Explain the theory of relativity',
 *   verbose: true,
 * });
 *
 * console.log('Best response:', result.content);
 * console.log('Score:', result.score);
 * console.log('Reasoning:', result.reasoning);
 * ```
 */
export declare const CONSENSUS_3_WORKFLOW: WorkflowConfig;
/**
 * Consensus-3 with Custom System Prompt
 *
 * Same as CONSENSUS_3_WORKFLOW but allows custom system prompt
 *
 * @param systemPrompt - Custom system prompt for all models
 * @returns Workflow configuration with custom prompt (the returned workflow
 * carries a timestamp-suffixed id, so each call yields a distinct id)
 *
 * @example
 * ```typescript
 * const workflow = createConsensus3WithPrompt(
 *   'You are a technical expert. Provide detailed, accurate responses.'
 * );
 *
 * const result = await runWorkflow(workflow, {
 *   prompt: 'Explain async/await in JavaScript',
 * });
 * ```
 */
export declare function createConsensus3WithPrompt(systemPrompt: string): WorkflowConfig;
/**
 * Consensus-3 Fast (Lower Cost, Faster)
 *
 * Uses faster/cheaper models with same consensus approach:
 * - GPT-4o-mini
 * - Claude 3 Haiku
 * - Gemini 2.0 Flash
 *
 * The judge is also a fast model (GPT-4o-mini) to keep latency and cost low.
 */
export declare const CONSENSUS_3_FAST_WORKFLOW: WorkflowConfig;
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Consensus-3 Workflow
|
|
3
|
+
* ====================
|
|
4
|
+
*
|
|
5
|
+
* 3-model ensemble with judge selecting best response based on:
|
|
6
|
+
* - Accuracy
|
|
7
|
+
* - Clarity
|
|
8
|
+
* - Completeness
|
|
9
|
+
*
|
|
10
|
+
* Ideal for: Balanced quality across multiple providers
|
|
11
|
+
*
|
|
12
|
+
* @module workflow/workflows/consensusWorkflow
|
|
13
|
+
*/
|
|
14
|
+
import { AIProviderName } from "../../constants/enums.js";
|
|
15
|
+
import { WORKFLOW_CREATION_DATE } from "../config.js";
|
|
16
|
+
/**
|
|
17
|
+
* Consensus-3 Workflow Configuration
|
|
18
|
+
*
|
|
19
|
+
* Uses 3 high-quality models in parallel:
|
|
20
|
+
* - GPT-4o (OpenAI) - Strong reasoning
|
|
21
|
+
* - Claude 3.5 Sonnet (Anthropic) - Thoughtful analysis
|
|
22
|
+
* - Gemini 2.0 Flash (Google) - Fast and capable
|
|
23
|
+
*
|
|
24
|
+
* Judge: GPT-4o evaluates on accuracy, clarity, and completeness
|
|
25
|
+
*
|
|
26
|
+
* @example
|
|
27
|
+
* ```typescript
|
|
28
|
+
* import { runWorkflow } from '../core/workflowRunner.js';
|
|
29
|
+
* import { CONSENSUS_3_WORKFLOW } from './consensusWorkflow.js';
|
|
30
|
+
*
|
|
31
|
+
* const result = await runWorkflow(CONSENSUS_3_WORKFLOW, {
|
|
32
|
+
* prompt: 'Explain the theory of relativity',
|
|
33
|
+
* verbose: true,
|
|
34
|
+
* });
|
|
35
|
+
*
|
|
36
|
+
* console.log('Best response:', result.content);
|
|
37
|
+
* console.log('Score:', result.score);
|
|
38
|
+
* console.log('Reasoning:', result.reasoning);
|
|
39
|
+
* ```
|
|
40
|
+
*/
|
|
41
|
+
export const CONSENSUS_3_WORKFLOW = {
    id: "consensus-3",
    name: "Consensus-3 Ensemble",
    description: "3-model parallel ensemble with judge-based selection",
    version: "1.0.0",
    type: "ensemble",
    // 3 high-quality models running in parallel; equal weights mean no model
    // is favored before judging
    models: [
        {
            provider: AIProviderName.OPENAI,
            model: "gpt-4o",
            label: "GPT-4o",
            weight: 1.0,
            temperature: 0.7,
        },
        {
            provider: AIProviderName.ANTHROPIC,
            model: "claude-3-5-sonnet-20241022",
            label: "Claude 3.5 Sonnet",
            weight: 1.0,
            temperature: 0.7,
        },
        {
            provider: AIProviderName.GOOGLE_AI,
            model: "gemini-2.0-flash",
            label: "Gemini 2.0 Flash",
            weight: 1.0,
            temperature: 0.7,
        },
    ],
    // Judge configuration - evaluates all 3 responses
    judge: {
        provider: AIProviderName.OPENAI,
        model: "gpt-4o",
        criteria: ["accuracy", "clarity", "completeness"],
        outputFormat: "detailed",
        includeReasoning: true,
        temperature: 0.1, // Low temperature for consistent judging
        scoreScale: {
            min: 0,
            max: 100,
        },
    },
    // Execution configuration
    execution: {
        parallelism: 3, // All 3 models run simultaneously
        timeout: 30000, // 30 second total timeout
        modelTimeout: 25000, // 25 second per-model timeout
        minResponses: 2, // Need at least 2 successful responses
        costThreshold: 0.1, // Warn if cost exceeds $0.10
    },
    // Metadata
    tags: ["ensemble", "consensus", "balanced", "multi-provider"],
    metadata: {
        useCase: "Balanced quality across providers",
        recommendedFor: ["general queries", "explanations", "analysis"],
        averageCost: 0.02,
        averageLatency: 2000, // presumably milliseconds — confirm against runner
    },
    createdAt: WORKFLOW_CREATION_DATE,
};
|
|
102
|
+
/**
|
|
103
|
+
* Consensus-3 with Custom System Prompt
|
|
104
|
+
*
|
|
105
|
+
* Same as CONSENSUS_3_WORKFLOW but allows custom system prompt
|
|
106
|
+
*
|
|
107
|
+
* @param systemPrompt - Custom system prompt for all models
|
|
108
|
+
* @returns Workflow configuration with custom prompt
|
|
109
|
+
*
|
|
110
|
+
* @example
|
|
111
|
+
* ```typescript
|
|
112
|
+
* const workflow = createConsensus3WithPrompt(
|
|
113
|
+
* 'You are a technical expert. Provide detailed, accurate responses.'
|
|
114
|
+
* );
|
|
115
|
+
*
|
|
116
|
+
* const result = await runWorkflow(workflow, {
|
|
117
|
+
* prompt: 'Explain async/await in JavaScript',
|
|
118
|
+
* });
|
|
119
|
+
* ```
|
|
120
|
+
*/
|
|
121
|
+
export function createConsensus3WithPrompt(systemPrompt) {
    // Shallow-copy the base consensus workflow, then override identity and
    // prompt. The timestamp suffix gives each derived workflow a distinct id.
    const customized = Object.assign({}, CONSENSUS_3_WORKFLOW);
    customized.id = `consensus-3-custom-${Date.now()}`;
    customized.defaultSystemPrompt = systemPrompt;
    return customized;
}
|
|
128
|
+
/**
|
|
129
|
+
* Consensus-3 Fast (Lower Cost, Faster)
|
|
130
|
+
*
|
|
131
|
+
* Uses faster/cheaper models with same consensus approach:
|
|
132
|
+
* - GPT-4o-mini
|
|
133
|
+
* - Claude 3 Haiku
|
|
134
|
+
* - Gemini 2.0 Flash
|
|
135
|
+
*/
|
|
136
|
+
export const CONSENSUS_3_FAST_WORKFLOW = {
    id: "consensus-3-fast",
    name: "Consensus-3 Fast",
    description: "3-model fast ensemble (lower cost)",
    version: "1.0.0",
    type: "ensemble",
    // Cheaper/faster counterparts of CONSENSUS_3_WORKFLOW's models,
    // all equally weighted
    models: [
        {
            provider: AIProviderName.OPENAI,
            model: "gpt-4o-mini",
            label: "GPT-4o-mini",
            weight: 1.0,
            temperature: 0.7,
        },
        {
            provider: AIProviderName.ANTHROPIC,
            model: "claude-3-haiku-20240307",
            label: "Claude 3 Haiku",
            weight: 1.0,
            temperature: 0.7,
        },
        {
            provider: AIProviderName.GOOGLE_AI,
            model: "gemini-2.0-flash",
            label: "Gemini 2.0 Flash",
            weight: 1.0,
            temperature: 0.7,
        },
    ],
    judge: {
        provider: AIProviderName.OPENAI,
        model: "gpt-4o-mini", // Also use fast judge
        criteria: ["accuracy", "clarity"],
        outputFormat: "best",
        includeReasoning: true,
        temperature: 0.1, // low temperature for consistent judging
        scoreScale: {
            min: 0,
            max: 100,
        },
    },
    execution: {
        parallelism: 3, // all 3 models run simultaneously
        timeout: 20000, // 20 seconds
        modelTimeout: 15000, // 15s per-model timeout
        minResponses: 2,
        costThreshold: 0.02, // Lower cost threshold
    },
    tags: ["ensemble", "fast", "low-cost", "consensus"],
    metadata: {
        useCase: "Fast consensus for simple queries",
        recommendedFor: ["quick questions", "simple explanations"],
        averageCost: 0.01,
        averageLatency: 1500, // presumably milliseconds — confirm against runner
    },
    createdAt: WORKFLOW_CREATION_DATE,
};
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Fallback Workflow
|
|
3
|
+
* =================
|
|
4
|
+
*
|
|
5
|
+
* Sequential fallback chain using layer-based execution:
|
|
6
|
+
* - Try fast model first
|
|
7
|
+
* - Fall back to mid-tier if needed
|
|
8
|
+
* - Final fallback to premium model
|
|
9
|
+
*
|
|
10
|
+
* Ideal for: Cost-optimization with quality guarantee
|
|
11
|
+
*
|
|
12
|
+
* @module workflow/workflows/fallbackWorkflow
|
|
13
|
+
*/
|
|
14
|
+
import type { WorkflowConfig } from "../types.js";
|
|
15
|
+
/**
 * Fast-Fallback Workflow Configuration
 *
 * Uses layer-based execution with sequential groups:
 * 1. Fast tier: GPT-4o-mini (try first)
 * 2. Mid tier: Gemini 2.0 Flash (if fast fails)
 * 3. Premium tier: GPT-4o or Claude 3.5 Sonnet (last resort)
 *
 * Each group runs sequentially - only proceeds if previous fails
 *
 * @example
 * ```typescript
 * import { runWorkflow } from '../core/workflowRunner.js';
 * import { FAST_FALLBACK_WORKFLOW } from './fallbackWorkflow.js';
 *
 * const result = await runWorkflow(FAST_FALLBACK_WORKFLOW, {
 *   prompt: 'What is 2+2?',
 *   verbose: true,
 * });
 *
 * // Usually completes with fast tier, saving cost
 * console.log('Executed models:', result.ensembleResponses.length);
 * ```
 */
export declare const FAST_FALLBACK_WORKFLOW: WorkflowConfig;
/**
 * Aggressive Fallback Workflow
 *
 * More aggressive fallback with parallel premium tier:
 * 1. Fast tier: GPT-4o-mini (sequential)
 * 2. Premium tier: GPT-4o + Claude 3.5 (parallel, both execute)
 *
 * Guarantees high quality if fast tier fails
 * (premium tier runs both models and lets the judge pick)
 */
export declare const AGGRESSIVE_FALLBACK_WORKFLOW: WorkflowConfig;
|