n8n-nodes-rooyai-chat 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED

```diff
@@ -4,7 +4,7 @@ Custom n8n node for Rooyai Chat API - A supply node that provides Rooyai languag
 
 ## Description
 
-This n8n custom node provides a **Rooyai Chat Model** supply node that can be connected to the **Basic LLM Chain** node, **AI Agent**, **Better AI Agent**, and other AI processing nodes in n8n.
+This n8n custom node provides a **Rooyai Chat Model** supply node that can be connected to the **Basic LLM Chain** node, **AI Agent**, and other AI processing nodes in n8n.
 
 ## Features
 
@@ -77,7 +77,7 @@ Then restart your n8n instance. The node will appear in the node palette under *
 Connect the **Model** output from **Rooyai Chat Model** to:
 - **Basic LLM Chain**
 - **AI Agent**
-- **Better AI Agent**
+
 - Any other n8n AI node that accepts a Language Model
 
 ## Available Models
```
```diff
@@ -1,8 +1,9 @@
-import { IAuthenticateGeneric, ICredentialType, INodeProperties } from 'n8n-workflow';
+import { IAuthenticateGeneric, ICredentialTestRequest, ICredentialType, INodeProperties } from 'n8n-workflow';
 export declare class RooyaiApi implements ICredentialType {
     name: string;
     displayName: string;
     documentationUrl: string;
     properties: INodeProperties[];
     authenticate: IAuthenticateGeneric;
+    test: ICredentialTestRequest;
 }
```
```diff
@@ -35,6 +35,21 @@ class RooyaiApi {
                 },
             },
         };
+        this.test = {
+            request: {
+                baseURL: '={{$credentials.baseUrl}}',
+                url: '/chat',
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                },
+                body: {
+                    model: 'gemini-2.0-flash',
+                    messages: [{ role: 'user', content: 'test' }],
+                    max_tokens: 5,
+                },
+            },
+        };
     }
 }
 exports.RooyaiApi = RooyaiApi;
```
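The new `test` block gives the credential a built-in verification request: when a user clicks "Test" in the n8n credential dialog, n8n sends this minimal 5-token chat completion to `{{$credentials.baseUrl}}/chat` and treats a successful response as proof the key and base URL work. A rough sketch of what the TypeScript source behind this compiled output might look like; the `name`, `displayName`, and `properties` values below are placeholders, since the diff does not show them:

```typescript
import type {
	IAuthenticateGeneric,
	ICredentialTestRequest,
	ICredentialType,
	INodeProperties,
} from 'n8n-workflow';

export class RooyaiApi implements ICredentialType {
	name = 'rooyaiApi';            // placeholder, not shown in the diff
	displayName = 'Rooyai API';    // placeholder
	documentationUrl = '';
	properties: INodeProperties[] = []; // apiKey/baseUrl fields omitted here

	authenticate: IAuthenticateGeneric = {
		type: 'generic',
		properties: {}, // header auth omitted; not part of this diff hunk
	};

	// Mirrors the compiled test block added in 0.4.0: a tiny chat call
	// capped at 5 output tokens exercises the full auth + routing path.
	test: ICredentialTestRequest = {
		request: {
			baseURL: '={{$credentials.baseUrl}}',
			url: '/chat',
			method: 'POST',
			headers: { 'Content-Type': 'application/json' },
			body: {
				model: 'gemini-2.0-flash',
				messages: [{ role: 'user', content: 'test' }],
				max_tokens: 5,
			},
		},
	};
}
```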
```diff
@@ -1,11 +1,21 @@
 import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
 import type { Serialized } from '@langchain/core/load/serializable';
-import { ISupplyDataFunctions } from 'n8n-workflow';
+import type { LLMResult } from '@langchain/core/outputs';
+import type { ISupplyDataFunctions } from 'n8n-workflow';
 export declare class N8nLlmTracing extends BaseCallbackHandler {
+    private executionFunctions;
     name: string;
-    supplyDataFunctions: ISupplyDataFunctions;
-    constructor(supplyDataFunctions: ISupplyDataFunctions);
-    handleLLMStart(llm: Serialized, prompts: string[], runId: string, parentRunId?: string, extraParams?: Record<string, unknown>, tags?: string[], metadata?: Record<string, unknown>): Promise<void>;
-    handleLLMEnd(output: any, runId: string, parentRunId?: string, tags?: string[]): Promise<void>;
-    handleLLMError(error: Error, runId: string, parentRunId?: string, tags?: string[]): Promise<void>;
+    awaitHandlers: boolean;
+    connectionType: "ai_languageModel";
+    promptTokensEstimate: number;
+    completionTokensEstimate: number;
+    runsMap: Record<string, {
+        index: number;
+        messages: string[];
+        options: any;
+    }>;
+    constructor(executionFunctions: ISupplyDataFunctions);
+    handleLLMStart(llm: Serialized, prompts: string[], runId: string): Promise<void>;
+    handleLLMEnd(output: LLMResult, runId: string): Promise<void>;
+    handleLLMError(error: Error, runId: string): Promise<void>;
 }
```
```diff
@@ -2,37 +2,82 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.N8nLlmTracing = void 0;
 const base_1 = require("@langchain/core/callbacks/base");
+const n8n_workflow_1 = require("n8n-workflow");
 class N8nLlmTracing extends base_1.BaseCallbackHandler {
-    constructor(supplyDataFunctions) {
+    constructor(executionFunctions) {
         super();
-        this.name = 'n8n_llm_tracing';
-        this.supplyDataFunctions = supplyDataFunctions;
+        this.executionFunctions = executionFunctions;
+        this.name = 'N8nLlmTracing';
+        this.awaitHandlers = true;
+        this.connectionType = n8n_workflow_1.NodeConnectionTypes.AiLanguageModel;
+        this.promptTokensEstimate = 0;
+        this.completionTokensEstimate = 0;
+        this.runsMap = {};
     }
-    async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
-        if (this.supplyDataFunctions.logger) {
-            this.supplyDataFunctions.logger.debug('Rooyai LLM started', {
+    async handleLLMStart(llm, prompts, runId) {
+        const estimatedTokens = prompts.reduce((sum, p) => sum + Math.ceil(p.length / 4), 0);
+        this.promptTokensEstimate = estimatedTokens;
+        const options = llm.type === 'constructor' ? llm.kwargs : llm;
+        const { index } = this.executionFunctions.addInputData(this.connectionType, [
+            [
+                {
+                    json: {
+                        messages: prompts,
+                        estimatedTokens,
+                        options,
+                    },
+                },
+            ],
+        ]);
+        this.runsMap[runId] = {
+            index,
+            options,
+            messages: prompts,
+        };
+        if (this.executionFunctions.logger) {
+            this.executionFunctions.logger.info('🚀 Rooyai LLM started', {
                 runId,
-                parentRunId,
-                prompts,
+                promptCount: prompts.length,
+                estimatedTokens,
             });
         }
     }
-    async handleLLMEnd(output, runId, parentRunId, tags) {
-        if (this.supplyDataFunctions.logger) {
-            this.supplyDataFunctions.logger.debug('Rooyai LLM finished', {
+    async handleLLMEnd(output, runId) {
+        const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length, messages: [], options: {} };
+        const tokenUsage = {
+            completionTokens: output?.llmOutput?.tokenUsage?.completionTokens ?? 0,
+            promptTokens: output?.llmOutput?.tokenUsage?.promptTokens ?? 0,
+            totalTokens: output?.llmOutput?.tokenUsage?.totalTokens ?? 0,
+        };
+        const response = {
+            response: { generations: output.generations },
+        };
+        if (tokenUsage.totalTokens > 0) {
+            response.tokenUsage = tokenUsage;
+        }
+        if (output?.llmOutput?.costUsd !== undefined) {
+            response.costUsd = output.llmOutput.costUsd;
+        }
+        this.executionFunctions.addOutputData(this.connectionType, runDetails.index, [[{ json: { ...response } }]]);
+        if (this.executionFunctions.logger) {
+            this.executionFunctions.logger.info('✅ Rooyai LLM finished successfully', {
                 runId,
-                parentRunId,
+                generationCount: output.generations?.length || 0,
+                cost: output?.llmOutput?.costUsd,
             });
         }
+        delete this.runsMap[runId];
     }
-    async handleLLMError(error, runId, parentRunId, tags) {
-        if (this.supplyDataFunctions.logger) {
-            this.supplyDataFunctions.logger.error('Rooyai LLM error', {
+    async handleLLMError(error, runId) {
+        const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length, messages: [], options: {} };
+        if (this.executionFunctions.logger) {
+            this.executionFunctions.logger.error('❌ Rooyai LLM error', {
                 runId,
-                parentRunId,
                 error: error.message,
+                stack: error.stack,
             });
         }
+        delete this.runsMap[runId];
     }
 }
 exports.N8nLlmTracing = N8nLlmTracing;
```
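Two details of the rewritten handler are easy to miss. `handleLLMStart` now calls `addInputData` and stores the returned `index` in `runsMap` under the LangChain `runId`; that is how `handleLLMEnd` and `handleLLMError` later know which input row of the node's execution log to attach their `addOutputData` result to. And before the API reports real usage, prompt tokens are estimated with a chars/4 heuristic, i.e. roughly one token per four characters of English text. A minimal sketch of that estimate:

```typescript
// Sketch of the chars/4 token estimate used in handleLLMStart above.
// It is only a pre-flight guess; exact counts arrive afterwards in
// output.llmOutput.tokenUsage when the Rooyai API reports usage.
function estimateTokens(prompts: string[]): number {
	return prompts.reduce((sum, p) => sum + Math.ceil(p.length / 4), 0);
}

estimateTokens(['Hello, world!']);   // ceil(13 / 4) = 4 tokens
estimateTokens(['abcdefgh', 'ij']);  // ceil(8/4) + ceil(2/4) = 2 + 1 = 3
```

The estimate is deliberately cheap; when the response carries `llmOutput.tokenUsage` with a non-zero total, those exact counts are what get attached to the output data.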
```diff
@@ -147,10 +147,11 @@ class Rooyai {
         const modelName = this.getNodeParameter('model', itemIndex);
         const options = this.getNodeParameter('options', itemIndex, {});
         if (this.logger) {
-            this.logger.info('Rooyai Chat Model initialized', {
+            this.logger.info('🎯 Initializing Rooyai Chat Model', {
                 model: modelName,
                 temperature: options.temperature ?? 0.7,
                 maxTokens: options.maxTokensToSample,
+                baseUrl,
             });
         }
         const model = new RooyaiLangChainWrapper_1.RooyaiLangChainWrapper({
@@ -161,7 +162,13 @@
             temperature: options.temperature ?? 0.7,
             supplyDataFunctions: this,
             callbacks: [new N8nLlmTracing_1.N8nLlmTracing(this)],
+            verbose: true,
         });
+        if (this.logger) {
+            this.logger.info('✅ Rooyai Chat Model ready', {
+                model: modelName,
+            });
+        }
         return {
             response: model,
         };
```
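Taken together, the node now constructs the model with `verbose: true` hard-coded on and the `N8nLlmTracing` callback attached. As a hedged sketch of how a `supplyData` implementation with this wiring fits n8n's supply-node contract; the credential name `'rooyaiApi'` and the option typing are assumptions not shown in the diff:

```typescript
import type { ISupplyDataFunctions, SupplyData } from 'n8n-workflow';
// RooyaiLangChainWrapper and N8nLlmTracing as in the compiled files above.

async function supplyData(this: ISupplyDataFunctions, itemIndex: number): Promise<SupplyData> {
	const modelName = this.getNodeParameter('model', itemIndex) as string;
	const options = this.getNodeParameter('options', itemIndex, {}) as {
		temperature?: number;
		maxTokensToSample?: number;
	};
	const credentials = await this.getCredentials('rooyaiApi'); // assumed credential name

	const model = new RooyaiLangChainWrapper({
		apiKey: credentials.apiKey as string,
		baseUrl: credentials.baseUrl as string,
		model: modelName,
		temperature: options.temperature ?? 0.7,
		maxTokens: options.maxTokensToSample,
		supplyDataFunctions: this,            // lets the wrapper reach n8n's logger
		callbacks: [new N8nLlmTracing(this)], // streams run data to the execution log
		verbose: true,                        // hard-coded on as of 0.4.0
	});

	return { response: model };
}
```

Passing `this` (the `ISupplyDataFunctions` context) into both the wrapper and the tracing callback is what ties the LangChain run lifecycle back to the n8n execution UI.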
```diff
@@ -11,6 +11,7 @@ interface RooyaiLangChainWrapperParams {
     maxTokens?: number;
     supplyDataFunctions?: ISupplyDataFunctions;
     callbacks?: any[];
+    verbose?: boolean;
 }
 export declare class RooyaiLangChainWrapper extends BaseChatModel {
     lc_namespace: string[];
@@ -20,6 +21,7 @@ export declare class RooyaiLangChainWrapper extends BaseChatModel {
     private temperature;
     private maxTokens?;
     private supplyDataFunctions?;
+    verbose: boolean;
     constructor(params: RooyaiLangChainWrapperParams);
     _llmType(): string;
     _modelType(): string;
```
```diff
@@ -13,6 +13,7 @@ class RooyaiLangChainWrapper extends chat_models_1.BaseChatModel {
         super({
             ...params,
             callbacks,
+            verbose: params.verbose ?? false,
         });
         this.lc_namespace = ['n8n', 'rooyai', 'chat'];
         this.apiKey = params.apiKey;
@@ -21,6 +22,7 @@ class RooyaiLangChainWrapper extends chat_models_1.BaseChatModel {
         this.temperature = params.temperature ?? 0.7;
         this.maxTokens = params.maxTokens;
         this.supplyDataFunctions = params.supplyDataFunctions;
+        this.verbose = params.verbose ?? false;
     }
     _llmType() {
         return 'rooyai';
@@ -34,6 +36,15 @@ class RooyaiLangChainWrapper extends chat_models_1.BaseChatModel {
             'Content-Type': 'application/json',
         };
         const url = `${this.baseUrl}/chat`;
+        if (this.verbose && this.supplyDataFunctions?.logger) {
+            this.supplyDataFunctions.logger.debug('📡 Calling Rooyai API', {
+                url,
+                model: body.model,
+                messageCount: body.messages.length,
+                temperature: body.temperature,
+                maxTokens: body.max_tokens,
+            });
+        }
         try {
             const response = await axios_1.default.post(url, body, {
                 headers,
@@ -45,9 +56,22 @@ class RooyaiLangChainWrapper extends chat_models_1.BaseChatModel {
                     : JSON.stringify(response.data);
                 throw new Error(`Rooyai API error (${response.status}): ${errorText}`);
             }
+            if (this.verbose && this.supplyDataFunctions?.logger) {
+                this.supplyDataFunctions.logger.debug('✅ Rooyai API response received', {
+                    status: response.status,
+                    hasChoices: !!response.data.choices,
+                    hasReply: !!response.data.reply,
+                    cost: response.data.usage?.cost_usd,
+                });
+            }
             return response.data;
         }
         catch (error) {
+            if (this.verbose && this.supplyDataFunctions?.logger) {
+                this.supplyDataFunctions.logger.error('❌ Rooyai API call failed', {
+                    error: error instanceof Error ? error.message : String(error),
+                });
+            }
             if (error instanceof Error) {
                 throw new Error(`Failed to call Rooyai API: ${error.message}`);
             }
@@ -104,6 +128,12 @@ class RooyaiLangChainWrapper extends chat_models_1.BaseChatModel {
         };
         if (response.usage?.cost_usd !== undefined) {
             llmOutput.costUsd = response.usage.cost_usd;
+            if (this.verbose && this.supplyDataFunctions?.logger) {
+                this.supplyDataFunctions.logger.info('💰 API Cost', {
+                    cost: response.usage.cost_usd,
+                    totalTokens: response.usage.total_tokens,
+                });
+            }
         }
         const generation = {
             message: aiMessage,
```
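Since the wrapper extends LangChain's `BaseChatModel`, it can also be exercised outside n8n; all of the new verbose logging is guarded by `this.verbose && this.supplyDataFunctions?.logger`, so standalone use stays quiet. A minimal sketch, with the base URL and environment variable as placeholders rather than values from the diff:

```typescript
import { HumanMessage } from '@langchain/core/messages';
// RooyaiLangChainWrapper as compiled above.

const model = new RooyaiLangChainWrapper({
	apiKey: process.env.ROOYAI_API_KEY ?? '',   // placeholder env var
	baseUrl: 'https://api.rooyai.example',      // placeholder base URL
	model: 'gemini-2.0-flash',
	temperature: 0.7,
	verbose: true, // no-op here: without supplyDataFunctions there is no logger
});

// Standard LangChain chat-model invocation; POSTs to `${baseUrl}/chat`.
const result = await model.invoke([new HumanMessage('Hello!')]);
console.log(result.content);
```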
package/package.json CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "n8n-nodes-rooyai-chat",
-  "version": "0.2.0",
+  "version": "0.4.0",
   "description": "n8n supply node for Rooyai Chat API - Provides Rooyai language models for use with Basic LLM Chain, AI Agent, and other AI nodes.",
   "keywords": [
     "n8n-community-node-package",
```