n8n-nodes-rooyai-chat 0.3.0 → 0.4.1
This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
@@ -1,11 +1,21 @@
 import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
 import type { Serialized } from '@langchain/core/load/serializable';
-import {
+import type { LLMResult } from '@langchain/core/outputs';
+import type { ISupplyDataFunctions } from 'n8n-workflow';
 export declare class N8nLlmTracing extends BaseCallbackHandler {
+    private executionFunctions;
     name: string;
-
-
-
-
-
+    awaitHandlers: boolean;
+    connectionType: "ai_languageModel";
+    promptTokensEstimate: number;
+    completionTokensEstimate: number;
+    runsMap: Record<string, {
+        index: number;
+        messages: string[];
+        options: any;
+    }>;
+    constructor(executionFunctions: ISupplyDataFunctions);
+    handleLLMStart(llm: Serialized, prompts: string[], runId: string): Promise<void>;
+    handleLLMEnd(output: LLMResult, runId: string): Promise<void>;
+    handleLLMError(error: Error, runId: string): Promise<void>;
 }
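
In 0.4.x the tracing handler carries the node's execution context (ISupplyDataFunctions) so it can report runs back to the n8n UI. A minimal sketch of how such a handler would typically be attached to a LangChain chat model inside a supply-data node — the actual Rooyai model class is not part of this diff, so ChatOpenAI stands in as a placeholder:

import { ChatOpenAI } from '@langchain/openai';
import type { ISupplyDataFunctions, SupplyData } from 'n8n-workflow';
import { N8nLlmTracing } from './N8nLlmTracing';

// Sketch only: the Rooyai node's real supplyData is not shown in this diff.
async function supplyData(this: ISupplyDataFunctions): Promise<SupplyData> {
    const model = new ChatOpenAI({
        model: 'llama-3.3-70b',
        // The handler receives the execution context so it can write
        // input/output items and logs back to the n8n execution view.
        callbacks: [new N8nLlmTracing(this)],
    });
    return { response: model };
}
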
@@ -2,37 +2,82 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.N8nLlmTracing = void 0;
 const base_1 = require("@langchain/core/callbacks/base");
+const n8n_workflow_1 = require("n8n-workflow");
 class N8nLlmTracing extends base_1.BaseCallbackHandler {
-    constructor(
+    constructor(executionFunctions) {
         super();
-        this.
-        this.
+        this.executionFunctions = executionFunctions;
+        this.name = 'N8nLlmTracing';
+        this.awaitHandlers = true;
+        this.connectionType = n8n_workflow_1.NodeConnectionTypes.AiLanguageModel;
+        this.promptTokensEstimate = 0;
+        this.completionTokensEstimate = 0;
+        this.runsMap = {};
     }
-    async handleLLMStart(llm, prompts, runId
-
-
+    async handleLLMStart(llm, prompts, runId) {
+        const estimatedTokens = prompts.reduce((sum, p) => sum + Math.ceil(p.length / 4), 0);
+        this.promptTokensEstimate = estimatedTokens;
+        const options = llm.type === 'constructor' ? llm.kwargs : llm;
+        const { index } = this.executionFunctions.addInputData(this.connectionType, [
+            [
+                {
+                    json: {
+                        messages: prompts,
+                        estimatedTokens,
+                        options,
+                    },
+                },
+            ],
+        ]);
+        this.runsMap[runId] = {
+            index,
+            options,
+            messages: prompts,
+        };
+        if (this.executionFunctions.logger) {
+            this.executionFunctions.logger.info('🚀 Rooyai LLM started', {
                 runId,
                 promptCount: prompts.length,
-
+                estimatedTokens,
             });
         }
     }
-    async handleLLMEnd(output, runId
-
-
+    async handleLLMEnd(output, runId) {
+        const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length, messages: [], options: {} };
+        const tokenUsage = {
+            completionTokens: output?.llmOutput?.tokenUsage?.completionTokens ?? 0,
+            promptTokens: output?.llmOutput?.tokenUsage?.promptTokens ?? 0,
+            totalTokens: output?.llmOutput?.tokenUsage?.totalTokens ?? 0,
+        };
+        const response = {
+            response: { generations: output.generations },
+        };
+        if (tokenUsage.totalTokens > 0) {
+            response.tokenUsage = tokenUsage;
+        }
+        if (output?.llmOutput?.costUsd !== undefined) {
+            response.costUsd = output.llmOutput.costUsd;
+        }
+        this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [[{ json: { ...response } }]]);
+        if (this.executionFunctions.logger) {
+            this.executionFunctions.logger.info('✅ Rooyai LLM finished successfully', {
                 runId,
-                generationCount: output
+                generationCount: output.generations?.length || 0,
+                cost: output?.llmOutput?.costUsd,
             });
         }
+        delete this.runsMap[runId];
     }
-    async handleLLMError(error, runId
-
-
+    async handleLLMError(error, runId) {
+        const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length, messages: [], options: {} };
+        if (this.executionFunctions.logger) {
+            this.executionFunctions.logger.error('❌ Rooyai LLM error', {
                 runId,
                 error: error.message,
                 stack: error.stack,
             });
         }
+        delete this.runsMap[runId];
     }
 }
 exports.N8nLlmTracing = N8nLlmTracing;
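
In the compiled implementation above, handleLLMStart registers each run in runsMap under its runId, so handleLLMEnd and handleLLMError can later pair the result with the correct input index via addOutputData. The prompt size is estimated with a simple four-characters-per-token heuristic, a rough stand-in for a real tokenizer. A hand-computed illustration of that estimate (the sample prompts are made up):

// Same reduce as in handleLLMStart above, applied to two sample prompts:
const prompts = ['Translate "hello" to French.', 'Keep it short.'];
const estimatedTokens = prompts.reduce((sum, p) => sum + Math.ceil(p.length / 4), 0);
// 28 chars -> ceil(28 / 4) = 7; 14 chars -> ceil(14 / 4) = 4; estimatedTokens === 11
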
@@ -38,14 +38,14 @@ class Rooyai {
                 name: 'model',
                 type: 'options',
                 options: [
-                    {
-                        name: 'Gemini 2.0 Flash',
-                        value: 'gemini-2.0-flash',
-                    },
                     {
                         name: 'Llama 3.3 70B',
                         value: 'llama-3.3-70b',
                     },
+                    {
+                        name: 'Gemini 2.0 Flash',
+                        value: 'gemini-2.0-flash',
+                    },
                     {
                         name: 'DeepSeek R1',
                         value: 'deepseek-r1',
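
The hunk above is a pure reorder: Llama 3.3 70B now appears ahead of Gemini 2.0 Flash in the model picker. For orientation, the fragment sits inside an n8n options-type node property; a sketch of the surrounding shape, with displayName and default as assumptions (neither is visible in the diff):

import type { INodeProperties } from 'n8n-workflow';

const modelProperty: INodeProperties = {
    displayName: 'Model', // assumption: not shown in the diff
    name: 'model',
    type: 'options',
    default: 'llama-3.3-70b', // assumption: not shown in the diff
    options: [
        { name: 'Llama 3.3 70B', value: 'llama-3.3-70b' },
        { name: 'Gemini 2.0 Flash', value: 'gemini-2.0-flash' },
        { name: 'DeepSeek R1', value: 'deepseek-r1' },
    ],
};
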
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-rooyai-chat",
-  "version": "0.3.0",
+  "version": "0.4.1",
   "description": "n8n supply node for Rooyai Chat API - Provides Rooyai language models for use with Basic LLM Chain, AI Agent, and other AI nodes.",
   "keywords": [
     "n8n-community-node-package",