@n8n/n8n-nodes-langchain 1.107.0 → 1.109.0
- package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js +2 -2
- package/dist/nodes/llms/LMChatOpenAi/LmChatOpenAi.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js +4 -2
- package/dist/nodes/text_splitters/TextSplitterCharacterTextSplitter/TextSplitterCharacterTextSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js +4 -2
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TextSplitterTokenSplitter.node.js.map +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js +1 -1
- package/dist/nodes/text_splitters/TextSplitterTokenSplitter/TokenTextSplitter.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/audio/transcribe.operation.js +10 -8
- package/dist/nodes/vendors/OpenAi/actions/audio/transcribe.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/audio/translate.operation.js +10 -8
- package/dist/nodes/vendors/OpenAi/actions/audio/translate.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/file/upload.operation.js +10 -8
- package/dist/nodes/vendors/OpenAi/actions/file/upload.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/actions/text/message.operation.js +30 -0
- package/dist/nodes/vendors/OpenAi/actions/text/message.operation.js.map +1 -1
- package/dist/nodes/vendors/OpenAi/helpers/binary-data.js +38 -0
- package/dist/nodes/vendors/OpenAi/helpers/binary-data.js.map +1 -0
- package/dist/types/nodes.json +4 -4
- package/dist/utils/tokenizer/tiktoken.js +26 -22
- package/dist/utils/tokenizer/tiktoken.js.map +1 -1
- package/dist/utils/tokenizer/token-estimator.js +1 -1
- package/dist/utils/tokenizer/token-estimator.js.map +1 -1
- package/package.json +6 -6
package/dist/utils/tokenizer/tiktoken.js
CHANGED
@@ -22,36 +22,40 @@ __export(tiktoken_exports, {
   getEncoding: () => getEncoding
 });
 module.exports = __toCommonJS(tiktoken_exports);
-var import_fs = require("fs");
+var import_promises = require("fs/promises");
 var import_lite = require("js-tiktoken/lite");
 var import_n8n_workflow = require("n8n-workflow");
 var import_path = require("path");
 const cache = {};
-const loadJSONFile = (filename) => {
+const loadJSONFile = async (filename) => {
   const filePath = (0, import_path.join)(__dirname, filename);
-  const content = (0, import_fs.readFileSync)(filePath, "utf-8");
-  return (0, import_n8n_workflow.jsonParse)(content);
+  const content = await (0, import_promises.readFile)(filePath, "utf-8");
+  return await (0, import_n8n_workflow.jsonParse)(content);
 };
-function getEncoding(encoding) {
-  if (cache[encoding]) {
-    return cache[encoding];
+async function getEncoding(encoding) {
+  if (!(encoding in cache)) {
+    cache[encoding] = (async () => {
+      let jsonData;
+      switch (encoding) {
+        case "o200k_base":
+          jsonData = await loadJSONFile("./o200k_base.json");
+          break;
+        case "cl100k_base":
+          jsonData = await loadJSONFile("./cl100k_base.json");
+          break;
+        default:
+          jsonData = await loadJSONFile("./cl100k_base.json");
+      }
+      return new import_lite.Tiktoken(jsonData);
+    })().catch((error) => {
+      delete cache[encoding];
+      throw error;
+    });
   }
-  let jsonData;
-  switch (encoding) {
-    case "o200k_base":
-      jsonData = loadJSONFile("./o200k_base.json");
-      break;
-    case "cl100k_base":
-      jsonData = loadJSONFile("./cl100k_base.json");
-      break;
-    default:
-      jsonData = loadJSONFile("./cl100k_base.json");
-  }
-  cache[encoding] = new import_lite.Tiktoken(jsonData);
-  return cache[encoding];
+  return await cache[encoding];
 }
-function encodingForModel(model) {
-  return getEncoding((0, import_lite.getEncodingNameForModel)(model));
+async function encodingForModel(model) {
+  return await getEncoding((0, import_lite.getEncodingNameForModel)(model));
 }
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
package/dist/utils/tokenizer/tiktoken.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"sources":["../../../utils/tokenizer/tiktoken.ts"],"sourcesContent":["import {
+{"version":3,"sources":["../../../utils/tokenizer/tiktoken.ts"],"sourcesContent":["import { readFile } from 'fs/promises';\nimport type { TiktokenBPE, TiktokenEncoding, TiktokenModel } from 'js-tiktoken/lite';\nimport { Tiktoken, getEncodingNameForModel } from 'js-tiktoken/lite';\nimport { jsonParse } from 'n8n-workflow';\nimport { join } from 'path';\n\nconst cache: Record<string, Promise<Tiktoken>> = {};\n\nconst loadJSONFile = async (filename: string): Promise<TiktokenBPE> => {\n\tconst filePath = join(__dirname, filename);\n\tconst content = await readFile(filePath, 'utf-8');\n\treturn await jsonParse(content);\n};\n\nexport async function getEncoding(encoding: TiktokenEncoding): Promise<Tiktoken> {\n\tif (!(encoding in cache)) {\n\t\t// Create and cache the promise for loading this encoding\n\t\tcache[encoding] = (async () => {\n\t\t\tlet jsonData: TiktokenBPE;\n\n\t\t\tswitch (encoding) {\n\t\t\t\tcase 'o200k_base':\n\t\t\t\t\tjsonData = await loadJSONFile('./o200k_base.json');\n\t\t\t\t\tbreak;\n\t\t\t\tcase 'cl100k_base':\n\t\t\t\t\tjsonData = await loadJSONFile('./cl100k_base.json');\n\t\t\t\t\tbreak;\n\t\t\t\tdefault:\n\t\t\t\t\t// Fall back to cl100k_base for unsupported encodings\n\t\t\t\t\tjsonData = await loadJSONFile('./cl100k_base.json');\n\t\t\t}\n\n\t\t\treturn new Tiktoken(jsonData);\n\t\t})().catch((error) => {\n\t\t\tdelete cache[encoding];\n\t\t\tthrow error;\n\t\t});\n\t}\n\n\treturn await cache[encoding];\n}\n\nexport async function encodingForModel(model: TiktokenModel): Promise<Tiktoken> {\n\treturn await getEncoding(getEncodingNameForModel(model));\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,sBAAyB;AAEzB,kBAAkD;AAClD,0BAA0B;AAC1B,kBAAqB;AAErB,MAAM,QAA2C,CAAC;AAElD,MAAM,eAAe,OAAO,aAA2C;AACtE,QAAM,eAAW,kBAAK,WAAW,QAAQ;AACzC,QAAM,UAAU,UAAM,0BAAS,UAAU,OAAO;AAChD,SAAO,UAAM,+BAAU,OAAO;AAC/B;AAEA,eAAsB,YAAY,UAA+C;AAChF,MAAI,EAAE,YAAY,QAAQ;AAEzB,UAAM,QAAQ,KAAK,YAAY;AAC9B,UAAI;AAEJ,cAAQ,UAAU;AAAA,QACjB,KAAK;AACJ,qBAAW,MAAM,aAAa,mBAAmB;AACjD;AAAA,QACD,KAAK;AACJ,qBAAW,MAAM,aAAa,oBAAoB;AAClD;AAAA,QACD;AAEC,qBAAW,MAAM,aAAa,oBAAoB;AAAA,MACpD;AAEA,aAAO,IAAI,qBAAS,QAAQ;AAAA,IAC7B,GAAG,EAAE,MAAM,CAAC,UAAU;AACrB,aAAO,MAAM,QAAQ;AACrB,YAAM;AAAA,IACP,CAAC;AAAA,EACF;AAEA,SAAO,MAAM,MAAM,QAAQ;AAC5B;AAEA,eAAsB,iBAAiB,OAAyC;AAC/E,SAAO,MAAM,gBAAY,qCAAwB,KAAK,CAAC;AACxD;","names":[]}
package/dist/utils/tokenizer/token-estimator.js
CHANGED
@@ -82,7 +82,7 @@ async function estimateTokensFromStringList(list, model) {
     if (!Array.isArray(list)) {
       return 0;
     }
-    const encoder = (0, import_tiktoken.encodingForModel)(model);
+    const encoder = await (0, import_tiktoken.encodingForModel)(model);
     const encodedListLength = await Promise.all(
       list.map(async (text) => {
         try {
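
Because `encodingForModel` now returns a Promise, call sites add an `await`, as in the hunk above; the encoder's `encode` method itself remains synchronous. A rough sketch of the updated calling pattern, assuming the module patched above (`countTokens` is illustrative, not package code):

import type { TiktokenModel } from "js-tiktoken/lite";
import { encodingForModel } from "./tiktoken"; // the module patched above

// Await the encoder once, then encode each string synchronously.
async function countTokens(texts: string[], model: TiktokenModel): Promise<number> {
  const encoder = await encodingForModel(model);
  return texts.reduce((sum, text) => sum + encoder.encode(text).length, 0);
}
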
package/dist/utils/tokenizer/token-estimator.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"sources":["../../../utils/tokenizer/token-estimator.ts"],"sourcesContent":["/**\n * Token estimation utilities for handling text without using tiktoken.\n * This is used as a fallback when tiktoken would be too slow (e.g., with repetitive content).\n */\n\nimport type { TiktokenModel } from 'js-tiktoken';\n\nimport { encodingForModel } from './tiktoken';\nimport { hasLongSequentialRepeat } from '../helpers';\n\n/**\n * Model-specific average characters per token ratios.\n * These are approximate values based on typical English text.\n */\nconst MODEL_CHAR_PER_TOKEN_RATIOS: Record<string, number> = {\n\t'gpt-4o': 3.8,\n\t'gpt-4': 4.0,\n\t'gpt-3.5-turbo': 4.0,\n\tcl100k_base: 4.0,\n\to200k_base: 3.5,\n\tp50k_base: 4.2,\n\tr50k_base: 4.2,\n};\n\n/**\n * Estimates the number of tokens in a text based on character count.\n * This is much faster than tiktoken but less accurate.\n *\n * @param text The text to estimate tokens for\n * @param model The model or encoding name (optional)\n * @returns Estimated number of tokens\n */\nexport function estimateTokensByCharCount(text: string, model: string = 'cl100k_base'): number {\n\ttry {\n\t\t// Validate input\n\t\tif (!text || typeof text !== 'string' || text.length === 0) {\n\t\t\treturn 0;\n\t\t}\n\n\t\t// Get the ratio for the specific model, or use default\n\t\tconst charsPerToken = MODEL_CHAR_PER_TOKEN_RATIOS[model] || 4.0;\n\n\t\t// Validate ratio\n\t\tif (!Number.isFinite(charsPerToken) || charsPerToken <= 0) {\n\t\t\t// Fallback to default ratio\n\t\t\tconst estimatedTokens = Math.ceil(text.length / 4.0);\n\t\t\treturn estimatedTokens;\n\t\t}\n\n\t\t// Calculate estimated tokens\n\t\tconst estimatedTokens = Math.ceil(text.length / charsPerToken);\n\n\t\treturn estimatedTokens;\n\t} catch (error) {\n\t\t// Return conservative estimate on error\n\t\treturn Math.ceil((text?.length || 0) / 4.0);\n\t}\n}\n\n/**\n * Estimates tokens for text splitting purposes.\n * Returns chunk boundaries based on character positions rather than token positions.\n *\n * @param text The text to split\n * @param chunkSize Target chunk size in tokens\n * @param chunkOverlap Overlap between chunks in tokens\n * @param model The model or encoding name (optional)\n * @returns Array of text chunks\n */\nexport function estimateTextSplitsByTokens(\n\ttext: string,\n\tchunkSize: number,\n\tchunkOverlap: number,\n\tmodel: string = 'cl100k_base',\n): string[] {\n\ttry {\n\t\t// Validate inputs\n\t\tif (!text || typeof text !== 'string' || text.length === 0) {\n\t\t\treturn [];\n\t\t}\n\n\t\t// Validate numeric parameters\n\t\tif (!Number.isFinite(chunkSize) || chunkSize <= 0) {\n\t\t\t// Return whole text as single chunk if invalid chunk size\n\t\t\treturn [text];\n\t\t}\n\n\t\t// Ensure overlap is valid and less than chunk size\n\t\tconst validOverlap =\n\t\t\tNumber.isFinite(chunkOverlap) && chunkOverlap >= 0\n\t\t\t\t? Math.min(chunkOverlap, chunkSize - 1)\n\t\t\t\t: 0;\n\n\t\tconst charsPerToken = MODEL_CHAR_PER_TOKEN_RATIOS[model] || 4.0;\n\t\tconst chunkSizeInChars = Math.floor(chunkSize * charsPerToken);\n\t\tconst overlapInChars = Math.floor(validOverlap * charsPerToken);\n\n\t\tconst chunks: string[] = [];\n\t\tlet start = 0;\n\n\t\twhile (start < text.length) {\n\t\t\tconst end = Math.min(start + chunkSizeInChars, text.length);\n\t\t\tchunks.push(text.slice(start, end));\n\n\t\t\tif (end >= text.length) {\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\t// Move to next chunk with overlap\n\t\t\tstart = Math.max(end - overlapInChars, start + 1);\n\t\t}\n\n\t\treturn chunks;\n\t} catch (error) {\n\t\t// Return text as single chunk on error\n\t\treturn text ? [text] : [];\n\t}\n}\n\n/**\n * Estimates the total number of tokens for a list of strings.\n * Uses tiktoken for normal text but falls back to character-based estimation\n * for repetitive content or on errors.\n *\n * @param list Array of strings to estimate tokens for\n * @param model The model or encoding name to use for estimation\n * @returns Total estimated number of tokens across all strings\n */\nexport async function estimateTokensFromStringList(\n\tlist: string[],\n\tmodel: TiktokenModel,\n): Promise<number> {\n\ttry {\n\t\t// Validate input\n\t\tif (!Array.isArray(list)) {\n\t\t\treturn 0;\n\t\t}\n\n\t\tconst encoder = encodingForModel(model);\n\t\tconst encodedListLength = await Promise.all(\n\t\t\tlist.map(async (text) => {\n\t\t\t\ttry {\n\t\t\t\t\t// Handle null/undefined text\n\t\t\t\t\tif (!text || typeof text !== 'string') {\n\t\t\t\t\t\treturn 0;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check for repetitive content\n\t\t\t\t\tif (hasLongSequentialRepeat(text)) {\n\t\t\t\t\t\tconst estimatedTokens = estimateTokensByCharCount(text, model);\n\t\t\t\t\t\treturn estimatedTokens;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Use tiktoken for normal text\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst tokens = encoder.encode(text);\n\t\t\t\t\t\treturn tokens.length;\n\t\t\t\t\t} catch (encodingError) {\n\t\t\t\t\t\t// Fall back to estimation if tiktoken fails\n\t\t\t\t\t\treturn estimateTokensByCharCount(text, model);\n\t\t\t\t\t}\n\t\t\t\t} catch (itemError) {\n\t\t\t\t\t// Return 0 for individual item errors\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t}),\n\t\t);\n\n\t\tconst totalTokens = encodedListLength.reduce((acc, curr) => acc + curr, 0);\n\n\t\treturn totalTokens;\n\t} catch (error) {\n\t\t// Return 0 on complete failure\n\t\treturn 0;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOA,sBAAiC;AACjC,qBAAwC;AAMxC,MAAM,8BAAsD;AAAA,EAC3D,UAAU;AAAA,EACV,SAAS;AAAA,EACT,iBAAiB;AAAA,EACjB,aAAa;AAAA,EACb,YAAY;AAAA,EACZ,WAAW;AAAA,EACX,WAAW;AACZ;AAUO,SAAS,0BAA0B,MAAc,QAAgB,eAAuB;AAC9F,MAAI;AAEH,QAAI,CAAC,QAAQ,OAAO,SAAS,YAAY,KAAK,WAAW,GAAG;AAC3D,aAAO;AAAA,IACR;AAGA,UAAM,gBAAgB,4BAA4B,KAAK,KAAK;AAG5D,QAAI,CAAC,OAAO,SAAS,aAAa,KAAK,iBAAiB,GAAG;AAE1D,YAAMA,mBAAkB,KAAK,KAAK,KAAK,SAAS,CAAG;AACnD,aAAOA;AAAA,IACR;AAGA,UAAM,kBAAkB,KAAK,KAAK,KAAK,SAAS,aAAa;AAE7D,WAAO;AAAA,EACR,SAAS,OAAO;AAEf,WAAO,KAAK,MAAM,MAAM,UAAU,KAAK,CAAG;AAAA,EAC3C;AACD;AAYO,SAAS,2BACf,MACA,WACA,cACA,QAAgB,eACL;AACX,MAAI;AAEH,QAAI,CAAC,QAAQ,OAAO,SAAS,YAAY,KAAK,WAAW,GAAG;AAC3D,aAAO,CAAC;AAAA,IACT;AAGA,QAAI,CAAC,OAAO,SAAS,SAAS,KAAK,aAAa,GAAG;AAElD,aAAO,CAAC,IAAI;AAAA,IACb;AAGA,UAAM,eACL,OAAO,SAAS,YAAY,KAAK,gBAAgB,IAC9C,KAAK,IAAI,cAAc,YAAY,CAAC,IACpC;AAEJ,UAAM,gBAAgB,4BAA4B,KAAK,KAAK;AAC5D,UAAM,mBAAmB,KAAK,MAAM,YAAY,aAAa;AAC7D,UAAM,iBAAiB,KAAK,MAAM,eAAe,aAAa;AAE9D,UAAM,SAAmB,CAAC;AAC1B,QAAI,QAAQ;AAEZ,WAAO,QAAQ,KAAK,QAAQ;AAC3B,YAAM,MAAM,KAAK,IAAI,QAAQ,kBAAkB,KAAK,MAAM;AAC1D,aAAO,KAAK,KAAK,MAAM,OAAO,GAAG,CAAC;AAElC,UAAI,OAAO,KAAK,QAAQ;AACvB;AAAA,MACD;AAGA,cAAQ,KAAK,IAAI,MAAM,gBAAgB,QAAQ,CAAC;AAAA,IACjD;AAEA,WAAO;AAAA,EACR,SAAS,OAAO;AAEf,WAAO,OAAO,CAAC,IAAI,IAAI,CAAC;AAAA,EACzB;AACD;AAWA,eAAsB,6BACrB,MACA,OACkB;AAClB,MAAI;AAEH,QAAI,CAAC,MAAM,QAAQ,IAAI,GAAG;AACzB,aAAO;AAAA,IACR;AAEA,UAAM,
+{"version":3,"sources":["../../../utils/tokenizer/token-estimator.ts"],"sourcesContent":["/**\n * Token estimation utilities for handling text without using tiktoken.\n * This is used as a fallback when tiktoken would be too slow (e.g., with repetitive content).\n */\n\nimport type { TiktokenModel } from 'js-tiktoken';\n\nimport { encodingForModel } from './tiktoken';\nimport { hasLongSequentialRepeat } from '../helpers';\n\n/**\n * Model-specific average characters per token ratios.\n * These are approximate values based on typical English text.\n */\nconst MODEL_CHAR_PER_TOKEN_RATIOS: Record<string, number> = {\n\t'gpt-4o': 3.8,\n\t'gpt-4': 4.0,\n\t'gpt-3.5-turbo': 4.0,\n\tcl100k_base: 4.0,\n\to200k_base: 3.5,\n\tp50k_base: 4.2,\n\tr50k_base: 4.2,\n};\n\n/**\n * Estimates the number of tokens in a text based on character count.\n * This is much faster than tiktoken but less accurate.\n *\n * @param text The text to estimate tokens for\n * @param model The model or encoding name (optional)\n * @returns Estimated number of tokens\n */\nexport function estimateTokensByCharCount(text: string, model: string = 'cl100k_base'): number {\n\ttry {\n\t\t// Validate input\n\t\tif (!text || typeof text !== 'string' || text.length === 0) {\n\t\t\treturn 0;\n\t\t}\n\n\t\t// Get the ratio for the specific model, or use default\n\t\tconst charsPerToken = MODEL_CHAR_PER_TOKEN_RATIOS[model] || 4.0;\n\n\t\t// Validate ratio\n\t\tif (!Number.isFinite(charsPerToken) || charsPerToken <= 0) {\n\t\t\t// Fallback to default ratio\n\t\t\tconst estimatedTokens = Math.ceil(text.length / 4.0);\n\t\t\treturn estimatedTokens;\n\t\t}\n\n\t\t// Calculate estimated tokens\n\t\tconst estimatedTokens = Math.ceil(text.length / charsPerToken);\n\n\t\treturn estimatedTokens;\n\t} catch (error) {\n\t\t// Return conservative estimate on error\n\t\treturn Math.ceil((text?.length || 0) / 4.0);\n\t}\n}\n\n/**\n * Estimates tokens for text splitting purposes.\n * Returns chunk boundaries based on character positions rather than token positions.\n *\n * @param text The text to split\n * @param chunkSize Target chunk size in tokens\n * @param chunkOverlap Overlap between chunks in tokens\n * @param model The model or encoding name (optional)\n * @returns Array of text chunks\n */\nexport function estimateTextSplitsByTokens(\n\ttext: string,\n\tchunkSize: number,\n\tchunkOverlap: number,\n\tmodel: string = 'cl100k_base',\n): string[] {\n\ttry {\n\t\t// Validate inputs\n\t\tif (!text || typeof text !== 'string' || text.length === 0) {\n\t\t\treturn [];\n\t\t}\n\n\t\t// Validate numeric parameters\n\t\tif (!Number.isFinite(chunkSize) || chunkSize <= 0) {\n\t\t\t// Return whole text as single chunk if invalid chunk size\n\t\t\treturn [text];\n\t\t}\n\n\t\t// Ensure overlap is valid and less than chunk size\n\t\tconst validOverlap =\n\t\t\tNumber.isFinite(chunkOverlap) && chunkOverlap >= 0\n\t\t\t\t? Math.min(chunkOverlap, chunkSize - 1)\n\t\t\t\t: 0;\n\n\t\tconst charsPerToken = MODEL_CHAR_PER_TOKEN_RATIOS[model] || 4.0;\n\t\tconst chunkSizeInChars = Math.floor(chunkSize * charsPerToken);\n\t\tconst overlapInChars = Math.floor(validOverlap * charsPerToken);\n\n\t\tconst chunks: string[] = [];\n\t\tlet start = 0;\n\n\t\twhile (start < text.length) {\n\t\t\tconst end = Math.min(start + chunkSizeInChars, text.length);\n\t\t\tchunks.push(text.slice(start, end));\n\n\t\t\tif (end >= text.length) {\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\t// Move to next chunk with overlap\n\t\t\tstart = Math.max(end - overlapInChars, start + 1);\n\t\t}\n\n\t\treturn chunks;\n\t} catch (error) {\n\t\t// Return text as single chunk on error\n\t\treturn text ? [text] : [];\n\t}\n}\n\n/**\n * Estimates the total number of tokens for a list of strings.\n * Uses tiktoken for normal text but falls back to character-based estimation\n * for repetitive content or on errors.\n *\n * @param list Array of strings to estimate tokens for\n * @param model The model or encoding name to use for estimation\n * @returns Total estimated number of tokens across all strings\n */\nexport async function estimateTokensFromStringList(\n\tlist: string[],\n\tmodel: TiktokenModel,\n): Promise<number> {\n\ttry {\n\t\t// Validate input\n\t\tif (!Array.isArray(list)) {\n\t\t\treturn 0;\n\t\t}\n\n\t\tconst encoder = await encodingForModel(model);\n\t\tconst encodedListLength = await Promise.all(\n\t\t\tlist.map(async (text) => {\n\t\t\t\ttry {\n\t\t\t\t\t// Handle null/undefined text\n\t\t\t\t\tif (!text || typeof text !== 'string') {\n\t\t\t\t\t\treturn 0;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check for repetitive content\n\t\t\t\t\tif (hasLongSequentialRepeat(text)) {\n\t\t\t\t\t\tconst estimatedTokens = estimateTokensByCharCount(text, model);\n\t\t\t\t\t\treturn estimatedTokens;\n\t\t\t\t\t}\n\n\t\t\t\t\t// Use tiktoken for normal text\n\t\t\t\t\ttry {\n\t\t\t\t\t\tconst tokens = encoder.encode(text);\n\t\t\t\t\t\treturn tokens.length;\n\t\t\t\t\t} catch (encodingError) {\n\t\t\t\t\t\t// Fall back to estimation if tiktoken fails\n\t\t\t\t\t\treturn estimateTokensByCharCount(text, model);\n\t\t\t\t\t}\n\t\t\t\t} catch (itemError) {\n\t\t\t\t\t// Return 0 for individual item errors\n\t\t\t\t\treturn 0;\n\t\t\t\t}\n\t\t\t}),\n\t\t);\n\n\t\tconst totalTokens = encodedListLength.reduce((acc, curr) => acc + curr, 0);\n\n\t\treturn totalTokens;\n\t} catch (error) {\n\t\t// Return 0 on complete failure\n\t\treturn 0;\n\t}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOA,sBAAiC;AACjC,qBAAwC;AAMxC,MAAM,8BAAsD;AAAA,EAC3D,UAAU;AAAA,EACV,SAAS;AAAA,EACT,iBAAiB;AAAA,EACjB,aAAa;AAAA,EACb,YAAY;AAAA,EACZ,WAAW;AAAA,EACX,WAAW;AACZ;AAUO,SAAS,0BAA0B,MAAc,QAAgB,eAAuB;AAC9F,MAAI;AAEH,QAAI,CAAC,QAAQ,OAAO,SAAS,YAAY,KAAK,WAAW,GAAG;AAC3D,aAAO;AAAA,IACR;AAGA,UAAM,gBAAgB,4BAA4B,KAAK,KAAK;AAG5D,QAAI,CAAC,OAAO,SAAS,aAAa,KAAK,iBAAiB,GAAG;AAE1D,YAAMA,mBAAkB,KAAK,KAAK,KAAK,SAAS,CAAG;AACnD,aAAOA;AAAA,IACR;AAGA,UAAM,kBAAkB,KAAK,KAAK,KAAK,SAAS,aAAa;AAE7D,WAAO;AAAA,EACR,SAAS,OAAO;AAEf,WAAO,KAAK,MAAM,MAAM,UAAU,KAAK,CAAG;AAAA,EAC3C;AACD;AAYO,SAAS,2BACf,MACA,WACA,cACA,QAAgB,eACL;AACX,MAAI;AAEH,QAAI,CAAC,QAAQ,OAAO,SAAS,YAAY,KAAK,WAAW,GAAG;AAC3D,aAAO,CAAC;AAAA,IACT;AAGA,QAAI,CAAC,OAAO,SAAS,SAAS,KAAK,aAAa,GAAG;AAElD,aAAO,CAAC,IAAI;AAAA,IACb;AAGA,UAAM,eACL,OAAO,SAAS,YAAY,KAAK,gBAAgB,IAC9C,KAAK,IAAI,cAAc,YAAY,CAAC,IACpC;AAEJ,UAAM,gBAAgB,4BAA4B,KAAK,KAAK;AAC5D,UAAM,mBAAmB,KAAK,MAAM,YAAY,aAAa;AAC7D,UAAM,iBAAiB,KAAK,MAAM,eAAe,aAAa;AAE9D,UAAM,SAAmB,CAAC;AAC1B,QAAI,QAAQ;AAEZ,WAAO,QAAQ,KAAK,QAAQ;AAC3B,YAAM,MAAM,KAAK,IAAI,QAAQ,kBAAkB,KAAK,MAAM;AAC1D,aAAO,KAAK,KAAK,MAAM,OAAO,GAAG,CAAC;AAElC,UAAI,OAAO,KAAK,QAAQ;AACvB;AAAA,MACD;AAGA,cAAQ,KAAK,IAAI,MAAM,gBAAgB,QAAQ,CAAC;AAAA,IACjD;AAEA,WAAO;AAAA,EACR,SAAS,OAAO;AAEf,WAAO,OAAO,CAAC,IAAI,IAAI,CAAC;AAAA,EACzB;AACD;AAWA,eAAsB,6BACrB,MACA,OACkB;AAClB,MAAI;AAEH,QAAI,CAAC,MAAM,QAAQ,IAAI,GAAG;AACzB,aAAO;AAAA,IACR;AAEA,UAAM,UAAU,UAAM,kCAAiB,KAAK;AAC5C,UAAM,oBAAoB,MAAM,QAAQ;AAAA,MACvC,KAAK,IAAI,OAAO,SAAS;AACxB,YAAI;AAEH,cAAI,CAAC,QAAQ,OAAO,SAAS,UAAU;AACtC,mBAAO;AAAA,UACR;AAGA,kBAAI,wCAAwB,IAAI,GAAG;AAClC,kBAAM,kBAAkB,0BAA0B,MAAM,KAAK;AAC7D,mBAAO;AAAA,UACR;AAGA,cAAI;AACH,kBAAM,SAAS,QAAQ,OAAO,IAAI;AAClC,mBAAO,OAAO;AAAA,UACf,SAAS,eAAe;AAEvB,mBAAO,0BAA0B,MAAM,KAAK;AAAA,UAC7C;AAAA,QACD,SAAS,WAAW;AAEnB,iBAAO;AAAA,QACR;AAAA,MACD,CAAC;AAAA,IACF;AAEA,UAAM,cAAc,kBAAkB,OAAO,CAAC,KAAK,SAAS,MAAM,MAAM,CAAC;AAEzE,WAAO;AAAA,EACR,SAAS,OAAO;AAEf,WAAO;AAAA,EACR;AACD;","names":["estimatedTokens"]}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@n8n/n8n-nodes-langchain",
-  "version": "1.107.0",
+  "version": "1.109.0",
   "description": "",
   "main": "index.js",
   "files": [
@@ -148,7 +148,7 @@
     "fast-glob": "3.2.12",
     "jest-mock-extended": "^3.0.4",
     "tsup": "^8.5.0",
-    "n8n-core": "1.
+    "n8n-core": "1.109.0"
   },
   "dependencies": {
     "@aws-sdk/client-sso-oidc": "3.808.0",
@@ -215,12 +215,12 @@
     "weaviate-client": "3.6.2",
     "zod": "3.25.67",
     "zod-to-json-schema": "3.23.3",
-    "@n8n/client-oauth2": "0.
-    "@n8n/errors": "^0.4.0",
+    "@n8n/client-oauth2": "0.29.0",
     "@n8n/json-schema-to-zod": "1.5.0",
+    "@n8n/errors": "^0.5.0",
     "@n8n/typescript-config": "1.3.0",
-    "n8n-workflow": "1.
-    "n8n-nodes-base": "1.
+    "n8n-workflow": "1.107.0",
+    "n8n-nodes-base": "1.108.0"
   },
   "license": "SEE LICENSE IN LICENSE.md",
   "homepage": "https://n8n.io",