@juspay/neurolink 7.44.0 → 7.46.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/cli/commands/config.d.ts +2 -2
  3. package/dist/cli/loop/optionsSchema.d.ts +1 -1
  4. package/dist/core/factory.d.ts +3 -1
  5. package/dist/core/factory.js +5 -3
  6. package/dist/evaluation/contextBuilder.d.ts +48 -0
  7. package/dist/evaluation/contextBuilder.js +134 -0
  8. package/dist/evaluation/index.d.ts +36 -0
  9. package/dist/evaluation/index.js +61 -0
  10. package/dist/evaluation/prompts.d.ts +22 -0
  11. package/dist/evaluation/prompts.js +73 -0
  12. package/dist/evaluation/ragasEvaluator.d.ts +28 -0
  13. package/dist/evaluation/ragasEvaluator.js +90 -0
  14. package/dist/evaluation/retryManager.d.ts +40 -0
  15. package/dist/evaluation/retryManager.js +78 -0
  16. package/dist/evaluation/scoring.d.ts +16 -0
  17. package/dist/evaluation/scoring.js +35 -0
  18. package/dist/factories/providerFactory.d.ts +3 -3
  19. package/dist/factories/providerFactory.js +3 -3
  20. package/dist/factories/providerRegistry.js +6 -6
  21. package/dist/lib/core/factory.d.ts +3 -1
  22. package/dist/lib/core/factory.js +5 -3
  23. package/dist/lib/evaluation/contextBuilder.d.ts +48 -0
  24. package/dist/lib/evaluation/contextBuilder.js +134 -0
  25. package/dist/lib/evaluation/index.d.ts +36 -0
  26. package/dist/lib/evaluation/index.js +61 -0
  27. package/dist/lib/evaluation/prompts.d.ts +22 -0
  28. package/dist/lib/evaluation/prompts.js +73 -0
  29. package/dist/lib/evaluation/ragasEvaluator.d.ts +28 -0
  30. package/dist/lib/evaluation/ragasEvaluator.js +90 -0
  31. package/dist/lib/evaluation/retryManager.d.ts +40 -0
  32. package/dist/lib/evaluation/retryManager.js +78 -0
  33. package/dist/lib/evaluation/scoring.d.ts +16 -0
  34. package/dist/lib/evaluation/scoring.js +35 -0
  35. package/dist/lib/factories/providerFactory.d.ts +3 -3
  36. package/dist/lib/factories/providerFactory.js +3 -3
  37. package/dist/lib/factories/providerRegistry.js +6 -6
  38. package/dist/lib/middleware/builtin/autoEvaluation.d.ts +14 -0
  39. package/dist/lib/middleware/builtin/autoEvaluation.js +181 -0
  40. package/dist/lib/middleware/factory.js +6 -0
  41. package/dist/lib/neurolink.js +7 -3
  42. package/dist/lib/providers/amazonBedrock.d.ts +2 -1
  43. package/dist/lib/providers/amazonBedrock.js +6 -4
  44. package/dist/lib/providers/amazonSagemaker.d.ts +1 -1
  45. package/dist/lib/providers/amazonSagemaker.js +2 -2
  46. package/dist/lib/providers/googleVertex.d.ts +1 -1
  47. package/dist/lib/providers/googleVertex.js +9 -10
  48. package/dist/lib/providers/sagemaker/config.d.ts +7 -5
  49. package/dist/lib/providers/sagemaker/config.js +11 -6
  50. package/dist/lib/types/evaluation.d.ts +2 -0
  51. package/dist/lib/types/evaluationTypes.d.ts +142 -0
  52. package/dist/lib/types/evaluationTypes.js +1 -0
  53. package/dist/lib/types/generateTypes.d.ts +2 -0
  54. package/dist/lib/types/middlewareTypes.d.ts +28 -2
  55. package/dist/lib/types/streamTypes.d.ts +1 -0
  56. package/dist/middleware/builtin/autoEvaluation.d.ts +14 -0
  57. package/dist/middleware/builtin/autoEvaluation.js +181 -0
  58. package/dist/middleware/factory.js +6 -0
  59. package/dist/neurolink.js +7 -3
  60. package/dist/providers/amazonBedrock.d.ts +2 -1
  61. package/dist/providers/amazonBedrock.js +6 -4
  62. package/dist/providers/amazonSagemaker.d.ts +1 -1
  63. package/dist/providers/amazonSagemaker.js +2 -2
  64. package/dist/providers/googleVertex.d.ts +1 -1
  65. package/dist/providers/googleVertex.js +9 -10
  66. package/dist/providers/sagemaker/config.d.ts +7 -5
  67. package/dist/providers/sagemaker/config.js +11 -6
  68. package/dist/types/evaluation.d.ts +2 -0
  69. package/dist/types/evaluationTypes.d.ts +142 -0
  70. package/dist/types/evaluationTypes.js +1 -0
  71. package/dist/types/generateTypes.d.ts +2 -0
  72. package/dist/types/middlewareTypes.d.ts +28 -2
  73. package/dist/types/streamTypes.d.ts +1 -0
  74. package/package.json +1 -1
package/dist/evaluation/retryManager.js
@@ -0,0 +1,78 @@
+ /**
+  * @file Implements the RetryManager class for handling evaluation retries.
+  */
+ /**
+  * Manages the retry logic for the auto-evaluation middleware. It decides if a
+  * retry is warranted based on the evaluation score and prepares the options
+  * for the next generation attempt by incorporating feedback into the prompt.
+  */
+ export class RetryManager {
+     maxRetries;
+     constructor(maxRetries = 2) {
+         // Total 3 attempts: 1 initial + 2 retries
+         this.maxRetries = maxRetries;
+     }
+     /**
+      * Determines if a retry should be attempted based on the evaluation result.
+      *
+      * @param evaluation The `EvaluationResult` of the last attempt.
+      * @returns `true` if the response did not pass and the maximum number of retries has not been reached.
+      */
+     shouldRetry(evaluation) {
+         // Attempt number is 1-based. If attempt 1 fails, we can retry.
+         // If attempt 3 (maxRetries + 1) fails, we stop.
+         return !evaluation.isPassing && evaluation.attemptNumber <= this.maxRetries;
+     }
+     /**
+      * Prepares the options for the next generation attempt by creating a new,
+      * improved prompt that includes feedback from the failed evaluation.
+      *
+      * @param originalOptions The original `TextGenerationOptions` from the user request.
+      * @param evaluation The `EvaluationResult` of the failed attempt.
+      * @returns A new `TextGenerationOptions` object with an improved prompt.
+      */
+     prepareRetryOptions(originalOptions, evaluation) {
+         const originalPrompt = originalOptions.prompt || originalOptions.input?.text || "";
+         const newPrompt = this.buildRetryPrompt(originalPrompt, evaluation.suggestedImprovements, evaluation.attemptNumber + 1);
+         // Return a new options object with the updated prompt
+         return {
+             ...originalOptions,
+             prompt: newPrompt,
+             // Ensure input is not carried over if prompt is now the source of truth
+             input: undefined,
+             // Carry over the original prompt for context in subsequent retries if needed
+             originalPrompt: originalOptions.originalPrompt || originalPrompt,
+         };
+     }
+     /**
+      * Builds a new prompt for a retry attempt by incorporating feedback from the
+      * evaluation. The instructions become progressively more direct with each attempt.
+      *
+      * @param originalPrompt The user's original prompt.
+      * @param feedback The constructive feedback from the evaluation.
+      * @param attemptNumber The upcoming attempt number (e.g., 2 for the first retry).
+      * @returns A new, enhanced prompt string.
+      */
+     buildRetryPrompt(originalPrompt, feedback, attemptNumber) {
+         let instruction = "";
+         switch (attemptNumber) {
+             case 2: // First retry
+                 instruction = `The previous response was not satisfactory. Please improve it based on the following feedback: "${feedback}".`;
+                 break;
+             case 3: // Second retry
+                 instruction = `The last response still requires improvement. Pay close attention to this feedback: "${feedback}". You MUST address these points.`;
+                 break;
+             default: // Final retry or unexpected attempt number
+                 instruction = `This is the final attempt. You MUST address the following feedback to generate a satisfactory response: "${feedback}".`;
+                 break;
+         }
+         return `
+ Original Request: ${originalPrompt}
+
+ **Correction Instructions:**
+ ${instruction}
+
+ Generate a new, complete response that incorporates this feedback.
+ `;
+     }
+ }
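Taken together, the retry flow above is driven from the outside roughly as follows. This is a minimal sketch, not package code: generate and runEvaluation are hypothetical stand-ins for the middleware's generation call and judge step.

    // Hypothetical driver loop for RetryManager (generate/runEvaluation are placeholders)
    const retryManager = new RetryManager(2); // 1 initial attempt + up to 2 retries
    let options = userOptions;
    let result = await generate(options);
    let evaluation = await runEvaluation(options, result); // yields an EvaluationResult
    while (retryManager.shouldRetry(evaluation)) {
        // Fold the judge's feedback into a fresh prompt for the next attempt
        options = retryManager.prepareRetryOptions(options, evaluation);
        result = await generate(options);
        evaluation = await runEvaluation(options, result);
    }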
package/dist/evaluation/scoring.d.ts
@@ -0,0 +1,16 @@
+ /**
+  * @file Contains the logic for mapping raw evaluation results to the structured EvaluationData type.
+  */
+ import type { EnhancedEvaluationContext, EvaluationResult } from "../types/evaluationTypes.js";
+ import type { EvaluationData } from "../types/evaluation.js";
+ /**
+  * Maps a raw `EvaluationResult` to the structured `EvaluationData` format.
+  * This includes calculating derived fields like `isOffTopic` and `alertSeverity`.
+  *
+  * @param result The raw `EvaluationResult` from the evaluator.
+  * @param threshold The score threshold to determine if the evaluation is passing.
+  * @param offTopicThreshold The score below which a response is considered off-topic.
+  * @param highSeverityThreshold The score below which a failing response is high severity.
+  * @returns A structured `EvaluationData` object.
+  */
+ export declare function mapToEvaluationData(evalContext: EnhancedEvaluationContext, result: EvaluationResult, threshold: number, offTopicThreshold?: number, highSeverityThreshold?: number): EvaluationData;
package/dist/evaluation/scoring.js
@@ -0,0 +1,35 @@
+ /**
+  * @file Contains the logic for mapping raw evaluation results to the structured EvaluationData type.
+  */
+ /**
+  * Maps a raw `EvaluationResult` to the structured `EvaluationData` format.
+  * This includes calculating derived fields like `isOffTopic` and `alertSeverity`.
+  *
+  * @param result The raw `EvaluationResult` from the evaluator.
+  * @param threshold The score threshold to determine if the evaluation is passing.
+  * @param offTopicThreshold The score below which a response is considered off-topic.
+  * @param highSeverityThreshold The score below which a failing response is high severity.
+  * @returns A structured `EvaluationData` object.
+  */
+ export function mapToEvaluationData(evalContext, result, threshold, offTopicThreshold = 5, highSeverityThreshold = 4) {
+     const isPassing = result.finalScore >= threshold;
+     return {
+         relevance: result.relevanceScore,
+         accuracy: result.accuracyScore,
+         completeness: result.completenessScore,
+         overall: result.finalScore,
+         isOffTopic: result.finalScore < offTopicThreshold,
+         alertSeverity: isPassing
+             ? "none"
+             : result.finalScore < highSeverityThreshold
+                 ? "high"
+                 : "medium",
+         reasoning: result.reasoning,
+         suggestedImprovements: result.suggestedImprovements,
+         evaluationModel: result.evaluationModel,
+         evaluationTime: result.evaluationTime,
+         evaluationAttempt: result.attemptNumber,
+         responseContent: evalContext.aiResponse,
+         queryContent: evalContext.userQuery,
+     };
+ }
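Given the defaults above (offTopicThreshold = 5, highSeverityThreshold = 4) and a passing threshold of, say, 7, the derived fields fall out as follows; the scores are hypothetical, purely to illustrate the branches:

    // finalScore 8 → passing: alertSeverity "none", isOffTopic false
    // finalScore 6 → failing: alertSeverity "medium" (6 >= 4), isOffTopic false (6 >= 5)
    // finalScore 3 → failing: alertSeverity "high" (3 < 4), isOffTopic true (3 < 5)
    const data = mapToEvaluationData(evalContext, result, 7);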
package/dist/factories/providerFactory.d.ts
@@ -4,8 +4,8 @@ import type { UnknownRecord } from "../types/common.js";
   * Provider constructor interface - supports both sync constructors and async factory functions
   */
  type ProviderConstructor = {
-     new (modelName?: string, providerName?: string, sdk?: UnknownRecord): AIProvider;
- } | ((modelName?: string, providerName?: string, sdk?: UnknownRecord) => Promise<AIProvider>);
+     new (modelName?: string, providerName?: string, sdk?: UnknownRecord, region?: string): AIProvider;
+ } | ((modelName?: string, providerName?: string, sdk?: UnknownRecord, region?: string) => Promise<AIProvider>);
  /**
   * Provider registration entry
   */
@@ -30,7 +30,7 @@ export declare class ProviderFactory {
      /**
       * Create a provider instance
       */
-     static createProvider(providerName: AIProviderName | string, modelName?: string, sdk?: UnknownRecord): Promise<AIProvider>;
+     static createProvider(providerName: AIProviderName | string, modelName?: string, sdk?: UnknownRecord, region?: string): Promise<AIProvider>;
      /**
       * Check if a provider is registered
       */
package/dist/factories/providerFactory.js
@@ -28,7 +28,7 @@ export class ProviderFactory {
      /**
       * Create a provider instance
       */
-     static async createProvider(providerName, modelName, sdk) {
+     static async createProvider(providerName, modelName, sdk, region) {
          // Note: Providers are registered explicitly by ProviderRegistry to avoid circular dependencies
          const normalizedName = providerName.toLowerCase();
          const registration = this.providers.get(normalizedName);
@@ -54,7 +54,7 @@
          }
          let result;
          try {
-             const factoryResult = registration.constructor(model, providerName, sdk);
+             const factoryResult = registration.constructor(model, providerName, sdk, region);
              // Handle both sync and async results
              result =
                  factoryResult instanceof Promise
@@ -66,7 +66,7 @@
                  registration.constructor.prototype.constructor ===
                      registration.constructor) {
                  try {
-                     result = new registration.constructor(model, providerName, sdk);
+                     result = new registration.constructor(model, providerName, sdk, region);
                  }
                  catch (constructorError) {
                      throw new Error(`Both factory function and constructor failed. Factory error: ${factoryError}. Constructor error: ${constructorError}`);
package/dist/factories/providerRegistry.js
@@ -39,9 +39,9 @@ export class ProviderRegistry {
          return new AnthropicProvider(modelName, sdk);
      }, "claude-3-5-sonnet-20241022", ["claude", "anthropic"]);
      // Register Amazon Bedrock provider
-     ProviderFactory.registerProvider(AIProviderName.BEDROCK, async (modelName, _providerName, sdk) => {
+     ProviderFactory.registerProvider(AIProviderName.BEDROCK, async (modelName, _providerName, sdk, region) => {
          const { AmazonBedrockProvider } = await import("../providers/amazonBedrock.js");
-         return new AmazonBedrockProvider(modelName, sdk);
+         return new AmazonBedrockProvider(modelName, sdk, region);
      }, undefined, // Let provider read BEDROCK_MODEL from .env
      ["bedrock", "aws"]);
      // Register Azure OpenAI provider
@@ -54,9 +54,9 @@ export class ProviderRegistry {
          process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
          "gpt-4o-mini", ["azure", "azureOpenai"]);
      // Register Google Vertex AI provider
-     ProviderFactory.registerProvider(AIProviderName.VERTEX, async (modelName, providerName, sdk) => {
+     ProviderFactory.registerProvider(AIProviderName.VERTEX, async (modelName, providerName, sdk, region) => {
          const { GoogleVertexProvider } = await import("../providers/googleVertex.js");
-         return new GoogleVertexProvider(modelName, providerName, sdk);
+         return new GoogleVertexProvider(modelName, providerName, sdk, region);
      }, "claude-sonnet-4@20250514", ["vertex", "googleVertex"]);
      // Register Hugging Face provider (Unified Router implementation)
      ProviderFactory.registerProvider(AIProviderName.HUGGINGFACE, async (modelName) => {
@@ -85,9 +85,9 @@ export class ProviderRegistry {
      }, process.env.OPENAI_COMPATIBLE_MODEL || undefined, // Enable auto-discovery when no model specified
      ["openai-compatible", "openrouter", "vllm", "compatible"]);
      // Register Amazon SageMaker provider
-     ProviderFactory.registerProvider(AIProviderName.SAGEMAKER, async (modelName, _providerName, _sdk) => {
+     ProviderFactory.registerProvider(AIProviderName.SAGEMAKER, async (modelName, _providerName, _sdk, region) => {
          const { AmazonSageMakerProvider } = await import("../providers/amazonSagemaker.js");
-         return new AmazonSageMakerProvider(modelName);
+         return new AmazonSageMakerProvider(modelName, region);
      }, process.env.SAGEMAKER_MODEL || "sagemaker-model", ["sagemaker", "aws-sagemaker"]);
      logger.debug("All providers registered successfully");
      this.registered = true;
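The practical effect of these changes is that a caller can pin AWS- and Vertex-backed providers to a region per request instead of relying only on environment configuration. A hedged sketch; the region value is illustrative:

    // Region flows: ProviderFactory.createProvider → registered factory → provider constructor
    const provider = await ProviderFactory.createProvider("bedrock", undefined, undefined, "us-east-1");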
package/dist/lib/core/factory.d.ts
@@ -19,9 +19,11 @@ export declare class AIProviderFactory {
       * @param providerName - Name of the provider ('vertex', 'bedrock', 'openai')
       * @param modelName - Optional model name override
       * @param enableMCP - Optional flag to enable MCP integration (default: true)
+      * @param sdk - SDK instance
+      * @param region - Optional region override for cloud providers
       * @returns AIProvider instance
       */
-     static createProvider(providerName: string, modelName?: string | null, enableMCP?: boolean, sdk?: UnknownRecord): Promise<AIProvider>;
+     static createProvider(providerName: string, modelName?: string | null, enableMCP?: boolean, sdk?: UnknownRecord, region?: string): Promise<AIProvider>;
      /**
       * Create a provider instance with specific provider enum and model
       * @param provider - Provider enum value
package/dist/lib/core/factory.js
@@ -52,9 +52,11 @@ export class AIProviderFactory {
       * @param providerName - Name of the provider ('vertex', 'bedrock', 'openai')
       * @param modelName - Optional model name override
       * @param enableMCP - Optional flag to enable MCP integration (default: true)
+      * @param sdk - SDK instance
+      * @param region - Optional region override for cloud providers
       * @returns AIProvider instance
       */
-     static async createProvider(providerName, modelName, enableMCP = true, sdk) {
+     static async createProvider(providerName, modelName, enableMCP = true, sdk, region) {
          const functionTag = "AIProviderFactory.createProvider";
          // Providers are registered via ProviderFactory.initialize() on first use
          logger.debug(`[${functionTag}] Provider creation started`, {
@@ -198,8 +200,8 @@
              resolvedModelName: resolvedModelName || "not resolved",
              finalModelName: finalModelName || "using provider default",
          });
-         // Create provider with enhanced SDK
-         const provider = await ProviderFactory.createProvider(normalizedName, finalModelName, sdk);
+         // Create provider with enhanced SDK and region support
+         const provider = await ProviderFactory.createProvider(normalizedName, finalModelName, sdk, region);
          // Summary logging in format expected by debugging tools
          logger.debug(`[AIProviderFactory] Provider creation completed { providerName: '${normalizedName}', modelName: '${finalModelName}' }`);
          logger.debug(`[AIProviderFactory] Resolved model: ${finalModelName}`);
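Note that this higher-level factory orders its parameters differently: enableMCP and sdk precede region. A hypothetical call mirroring the sketch above (the region value is again illustrative):

    const provider = await AIProviderFactory.createProvider("vertex", null, true, undefined, "europe-west1");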
package/dist/lib/evaluation/contextBuilder.d.ts
@@ -0,0 +1,48 @@
+ /**
+  * @file Implements the ContextBuilder class for creating rich evaluation context.
+  */
+ import type { EnhancedEvaluationContext, EvaluationResult } from "../types/evaluationTypes.js";
+ import type { GenerateResult } from "../types/generateTypes.js";
+ import type { LanguageModelV1CallOptions } from "ai";
+ /**
+  * Builds the enhanced context required for a RAGAS-style evaluation.
+  * This class gathers data from the generation options and results to create a
+  * rich snapshot of the interaction, which is then used by the evaluator.
+  */
+ export declare class ContextBuilder {
+     private attemptNumber;
+     private previousEvaluations;
+     private extractTextFromContent;
+     /**
+      * Builds the full evaluation context for a single evaluation attempt.
+      *
+      * @param options The original `TextGenerationOptions` used for the request.
+      * @param result The `GenerateResult` from the provider.
+      * @returns An `EnhancedEvaluationContext` object ready for evaluation.
+      */
+     buildContext(options: LanguageModelV1CallOptions, result: GenerateResult): EnhancedEvaluationContext;
+     /**
+      * Records the result of an evaluation and increments the internal attempt counter.
+      * This is used to build up the `previousEvaluations` array for subsequent retries.
+      *
+      * @param evaluation The `EvaluationResult` from the last attempt.
+      */
+     recordEvaluation(evaluation: EvaluationResult): void;
+     /**
+      * Resets the internal state of the context builder. This should be called
+      * before starting a new, independent evaluation sequence.
+      */
+     reset(): void;
+     /**
+      * Analyzes the user's query to determine intent and complexity.
+      * @param query The user's input query.
+      * @returns A QueryIntentAnalysis object.
+      */
+     private analyzeQuery;
+     /**
+      * Maps the tool execution format from GenerateResult to the canonical ToolExecution type.
+      * @param result The result from the generate call.
+      * @returns An array of ToolExecution objects.
+      */
+     private mapToolExecutions;
+ }
package/dist/lib/evaluation/contextBuilder.js
@@ -0,0 +1,134 @@
+ /**
+  * @file Implements the ContextBuilder class for creating rich evaluation context.
+  */
+ import { logger } from "../utils/logger.js";
+ /**
+  * Builds the enhanced context required for a RAGAS-style evaluation.
+  * This class gathers data from the generation options and results to create a
+  * rich snapshot of the interaction, which is then used by the evaluator.
+  */
+ export class ContextBuilder {
+     attemptNumber = 1;
+     previousEvaluations = [];
+     extractTextFromContent(content) {
+         if (typeof content === "string") {
+             return content;
+         }
+         if (Array.isArray(content)) {
+             return content
+                 .filter((part) => part.type === "text" && "text" in part)
+                 .map((part) => part.text)
+                 .join("");
+         }
+         return "";
+     }
+     /**
+      * Builds the full evaluation context for a single evaluation attempt.
+      *
+      * @param options The original `TextGenerationOptions` used for the request.
+      * @param result The `GenerateResult` from the provider.
+      * @returns An `EnhancedEvaluationContext` object ready for evaluation.
+      */
+     buildContext(options, result) {
+         const userMessages = options.prompt.filter((p) => p.role === "user");
+         const lastUserMessage = userMessages[userMessages.length - 1];
+         const userQuery = this.extractTextFromContent(lastUserMessage?.content ?? "");
+         const systemPromptMessage = options.prompt.find((p) => p.role === "system");
+         const systemPrompt = this.extractTextFromContent(systemPromptMessage?.content ?? "");
+         const queryAnalysis = this.analyzeQuery(userQuery);
+         const toolExecutions = this.mapToolExecutions(result);
+         const context = {
+             userQuery,
+             queryAnalysis,
+             aiResponse: result.content,
+             provider: result.provider || "unknown",
+             model: result.model || "unknown",
+             generationParams: {
+                 temperature: options.temperature,
+                 maxTokens: options.maxTokens,
+                 systemPrompt: systemPrompt || undefined,
+             },
+             toolExecutions,
+             conversationHistory: (options.prompt || [])
+                 .filter((p) => p.role !== "system")
+                 .map((turn) => ({
+                     role: turn.role,
+                     content: this.extractTextFromContent(turn.content),
+                     timestamp: new Date().toISOString(),
+                 })),
+             responseTime: result.responseTime || 0,
+             tokenUsage: result.usage || { input: 0, output: 0, total: 0 },
+             previousEvaluations: this.previousEvaluations,
+             attemptNumber: this.attemptNumber,
+         };
+         logger.debug("Built Evaluation Context:", context);
+         return context;
+     }
+     /**
+      * Records the result of an evaluation and increments the internal attempt counter.
+      * This is used to build up the `previousEvaluations` array for subsequent retries.
+      *
+      * @param evaluation The `EvaluationResult` from the last attempt.
+      */
+     recordEvaluation(evaluation) {
+         this.previousEvaluations.push(evaluation);
+         this.attemptNumber++;
+     }
+     /**
+      * Resets the internal state of the context builder. This should be called
+      * before starting a new, independent evaluation sequence.
+      */
+     reset() {
+         this.attemptNumber = 1;
+         this.previousEvaluations = [];
+     }
+     /**
+      * Analyzes the user's query to determine intent and complexity.
+      * @param query The user's input query.
+      * @returns A QueryIntentAnalysis object.
+      */
+     analyzeQuery(query) {
+         const lowerCaseQuery = query.toLowerCase();
+         let type = "unknown";
+         if (lowerCaseQuery.startsWith("what") ||
+             lowerCaseQuery.startsWith("how") ||
+             lowerCaseQuery.startsWith("why")) {
+             type = "question";
+         }
+         else if (lowerCaseQuery.length < 20) {
+             type = "greeting";
+         }
+         else {
+             type = "command";
+         }
+         const complexity = query.length > 100 ? "high" : query.length > 40 ? "medium" : "low";
+         return {
+             type,
+             complexity,
+             shouldHaveUsedTools: false, // This would require deeper analysis
+         };
+     }
+     /**
+      * Maps the tool execution format from GenerateResult to the canonical ToolExecution type.
+      * @param result The result from the generate call.
+      * @returns An array of ToolExecution objects.
+      */
+     mapToolExecutions(result) {
+         if (!result.toolExecutions) {
+             return [];
+         }
+         return result.toolExecutions.map((exec) => {
+             const toolResult = {
+                 success: true,
+                 data: exec.output,
+             };
+             return {
+                 toolName: exec.name,
+                 params: exec.input,
+                 result: toolResult,
+                 executionTime: 0,
+                 timestamp: Date.now(),
+             };
+         });
+     }
+ }
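Because the builder is stateful across retries, its lifecycle matters: one instance per evaluation sequence, recordEvaluation between attempts, and reset before reuse. A minimal sketch, with options, result, and evaluation as placeholders:

    const builder = new ContextBuilder();
    const context = builder.buildContext(options, result); // context.attemptNumber === 1
    builder.recordEvaluation(evaluation); // the next buildContext reports attemptNumber 2
    builder.reset(); // clean slate for the next independent request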
package/dist/lib/evaluation/index.d.ts
@@ -0,0 +1,36 @@
+ /**
+  * @file This file exports the main Evaluator class, which serves as the central entry point for the evaluation system.
+  */
+ import type { GenerateResult } from "../types/generateTypes.js";
+ import type { EvaluationConfig } from "../types/evaluationTypes.js";
+ import type { LanguageModelV1CallOptions } from "ai";
+ import type { AutoEvaluationConfig } from "../types/middlewareTypes.js";
+ import type { EvaluationData } from "../types/evaluation.js";
+ /**
+  * A centralized class for performing response evaluations. It supports different
+  * evaluation strategies, with RAGAS-style model-based evaluation as the default.
+  * This class orchestrates the context building and evaluation process.
+  */
+ export declare class Evaluator {
+     private contextBuilder;
+     private config;
+     private ragasEvaluator;
+     constructor(config?: EvaluationConfig);
+     /**
+      * The main entry point for performing an evaluation. It selects the evaluation
+      * strategy based on the configuration and executes it.
+      *
+      * @param options The original `TextGenerationOptions` from the user request.
+      * @param result The `GenerateResult` from the provider.
+      * @returns A promise that resolves to the `EvaluationResult`.
+      */
+     evaluate(options: LanguageModelV1CallOptions, result: GenerateResult, threshold: number, config: AutoEvaluationConfig): Promise<EvaluationData>;
+     /**
+      * Performs evaluation using the RAGAS-style model-based evaluator.
+      *
+      * @param options The original `TextGenerationOptions`.
+      * @param result The `GenerateResult` to be evaluated.
+      * @returns A promise that resolves to the `EvaluationResult`.
+      */
+     private evaluateWithRAGAS;
+ }
package/dist/lib/evaluation/index.js
@@ -0,0 +1,61 @@
+ /**
+  * @file This file exports the main Evaluator class, which serves as the central entry point for the evaluation system.
+  */
+ import { ContextBuilder } from "./contextBuilder.js";
+ import { RAGASEvaluator } from "./ragasEvaluator.js";
+ import { mapToEvaluationData } from "./scoring.js";
+ /**
+  * A centralized class for performing response evaluations. It supports different
+  * evaluation strategies, with RAGAS-style model-based evaluation as the default.
+  * This class orchestrates the context building and evaluation process.
+  */
+ export class Evaluator {
+     contextBuilder;
+     config;
+     ragasEvaluator;
+     constructor(config = {}) {
+         this.config = config;
+         this.contextBuilder = new ContextBuilder();
+         this.ragasEvaluator = new RAGASEvaluator(this.config.evaluationModel, this.config.provider, this.config.threshold, this.config.promptGenerator);
+     }
+     /**
+      * The main entry point for performing an evaluation. It selects the evaluation
+      * strategy based on the configuration and executes it.
+      *
+      * @param options The original `TextGenerationOptions` from the user request.
+      * @param result The `GenerateResult` from the provider.
+      * @returns A promise that resolves to the `EvaluationResult`.
+      */
+     async evaluate(options, result, threshold, config) {
+         const evaluationStrategy = this.config.evaluationStrategy || "ragas";
+         const customEvaluator = this.config.customEvaluator;
+         switch (evaluationStrategy) {
+             case "ragas": {
+                 const { evaluationResult, evalContext } = await this.evaluateWithRAGAS(options, result);
+                 const evaluationData = mapToEvaluationData(evalContext, evaluationResult, threshold, config.offTopicThreshold, config.highSeverityThreshold);
+                 return evaluationData;
+             }
+             case "custom": {
+                 if (customEvaluator) {
+                     const { evaluationResult, evalContext } = await customEvaluator(options, result);
+                     return mapToEvaluationData(evalContext, evaluationResult, threshold, config.offTopicThreshold, config.highSeverityThreshold);
+                 }
+                 throw new Error("Custom evaluator function not provided in config.");
+             }
+             default:
+                 throw new Error(`Unsupported evaluation strategy: ${evaluationStrategy} `);
+         }
+     }
+     /**
+      * Performs evaluation using the RAGAS-style model-based evaluator.
+      *
+      * @param options The original `TextGenerationOptions`.
+      * @param result The `GenerateResult` to be evaluated.
+      * @returns A promise that resolves to the `EvaluationResult`.
+      */
+     async evaluateWithRAGAS(options, result) {
+         const evalContext = this.contextBuilder.buildContext(options, result);
+         const evaluationResult = await this.ragasEvaluator.evaluate(evalContext);
+         return { evaluationResult, evalContext };
+     }
+ }
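The "custom" strategy lets a caller plug in their own judge while still reusing mapToEvaluationData for the final shape; the evaluator must resolve to { evaluationResult, evalContext }. A hedged sketch, where myJudge and the context literal are placeholders:

    const evaluator = new Evaluator({
        evaluationStrategy: "custom",
        customEvaluator: async (options, result) => {
            const evaluationResult = await myJudge(options, result); // your own scoring logic
            const evalContext = { userQuery: "...", aiResponse: result.content /* ...remaining context fields */ };
            return { evaluationResult, evalContext };
        },
    });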
package/dist/lib/evaluation/prompts.d.ts
@@ -0,0 +1,22 @@
+ import type { EnhancedEvaluationContext, GetPromptFunction } from "../types/evaluationTypes.js";
+ /**
+  * A flexible class for building evaluation prompts. It allows for custom prompt
+  * generation logic to be injected while ensuring a consistent output format.
+  */
+ export declare class PromptBuilder {
+     /**
+      * Builds the final evaluation prompt.
+      *
+      * @param context The rich context for the evaluation.
+      * @param getPrompt An optional function to generate the main body of the prompt.
+      * If not provided, a default prompt is used.
+      * @returns The complete prompt string to be sent to the judge LLM.
+      */
+     buildEvaluationPrompt(context: EnhancedEvaluationContext, getPrompt?: GetPromptFunction): string;
+     /**
+      * The default prompt generation logic.
+      * @param context The prepared context strings.
+      * @returns The default prompt body.
+      */
+     private getDefaultPrompt;
+ }
package/dist/lib/evaluation/prompts.js
@@ -0,0 +1,73 @@
+ /**
+  * A flexible class for building evaluation prompts. It allows for custom prompt
+  * generation logic to be injected while ensuring a consistent output format.
+  */
+ export class PromptBuilder {
+     /**
+      * Builds the final evaluation prompt.
+      *
+      * @param context The rich context for the evaluation.
+      * @param getPrompt An optional function to generate the main body of the prompt.
+      * If not provided, a default prompt is used.
+      * @returns The complete prompt string to be sent to the judge LLM.
+      */
+     buildEvaluationPrompt(context, getPrompt) {
+         const { userQuery, aiResponse, conversationHistory, toolExecutions, previousEvaluations, } = context;
+         const historyStr = conversationHistory
+             .map((turn) => `${turn.role}: ${turn.content}`)
+             .join("\n");
+         const toolsStr = toolExecutions.length > 0
+             ? `Tools were used: ${toolExecutions.map((t) => t.toolName).join(", ")}`
+             : "No tools were used.";
+         const retryStr = previousEvaluations && previousEvaluations.length > 0
+             ? `This is attempt #${context.attemptNumber}. Previous reasoning: ${previousEvaluations
+                 .map((e) => e.reasoning)
+                 .join("; ")} Previous suggested Improvements: ${previousEvaluations.map((e) => e.suggestedImprovements).join("; ")}`
+             : "This is the first attempt.";
+         const promptContext = {
+             userQuery,
+             history: historyStr,
+             tools: toolsStr,
+             retryInfo: retryStr,
+             aiResponse,
+         };
+         const mainPrompt = getPrompt
+             ? getPrompt(promptContext)
+             : this.getDefaultPrompt(promptContext);
+         return `
+ ${mainPrompt}
+
+ **Output Format (JSON):**
+ {
+     "relevanceScore": <1-10>,
+     "accuracyScore": <1-10>,
+     "completenessScore": <1-10>,
+     "finalScore": <1-10>,
+     "reasoning": "<Your constructive reasoning here>",
+     "suggestedImprovements": "<How the response can be improved>"
+ }
+ `;
+     }
+     /**
+      * The default prompt generation logic.
+      * @param context The prepared context strings.
+      * @returns The default prompt body.
+      */
+     getDefaultPrompt(context) {
+         return `
+ You are an expert AI quality evaluator. Your task is to evaluate the AI assistant's response based on the provided context.
+ Provide a score from 1 to 10 for each of the following criteria: Relevance, Accuracy, and Completeness.
+ Finally, provide an overall finalScore and constructive feedback for improvement.
+
+ **Evaluation Context:**
+ - User Query: ${context.userQuery}
+ - Conversation History:
+ ${context.history}
+ - Tools Executed: ${context.tools}
+ - Retry Information: ${context.retryInfo}
+
+ **AI Assistant's Response to Evaluate:**
+ ${context.aiResponse}
+ `;
+     }
+ }
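A custom GetPromptFunction receives the prepared strings shown above (userQuery, history, tools, retryInfo, aiResponse) and returns only the prompt body; the builder always appends the fixed JSON output-format block. A brief sketch:

    const strictPrompt = (ctx) => `
    Grade the response strictly against the user's request.
    User Query: ${ctx.userQuery}
    Retry Information: ${ctx.retryInfo}
    Response: ${ctx.aiResponse}
    `;
    const prompt = new PromptBuilder().buildEvaluationPrompt(context, strictPrompt);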
package/dist/lib/evaluation/ragasEvaluator.d.ts
@@ -0,0 +1,28 @@
+ import type { EnhancedEvaluationContext, EvaluationResult, GetPromptFunction } from "../types/evaluationTypes.js";
+ /**
+  * Implements a RAGAS-style evaluator that uses a "judge" LLM to score the
+  * quality of an AI response based on rich, contextual information.
+  */
+ export declare class RAGASEvaluator {
+     private evaluationModel;
+     private providerName;
+     private threshold;
+     private promptBuilder;
+     private promptGenerator?;
+     constructor(evaluationModel?: string, providerName?: string, threshold?: number, promptGenerator?: GetPromptFunction);
+     /**
+      * Evaluates an AI-generated response using a model-based approach.
+      *
+      * @param context The rich, contextual information for the evaluation.
+      * @returns A promise that resolves to a detailed `EvaluationResult`.
+      */
+     evaluate(context: EnhancedEvaluationContext): Promise<EvaluationResult>;
+     /**
+      * Parses the raw JSON string from the judge LLM into a structured `EvaluationResult` object.
+      * It includes error handling to gracefully manage malformed JSON.
+      *
+      * @param rawResponse The raw string response from the evaluation model.
+      * @returns A structured object containing the evaluation scores and feedback.
+      */
+     private parseEvaluationResponse;
+ }
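Wired together, these pieces compose as the Evaluator does internally; constructing the judge directly might look like the following, where the model and provider names are illustrative rather than defaults confirmed by this diff:

    const ragas = new RAGASEvaluator("gemini-2.5-pro", "vertex", 7);
    const context = new ContextBuilder().buildContext(options, result);
    const evaluation = await ragas.evaluate(context); // EvaluationResult with scores and feedback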