@atomoz/workflows-nodes 0.1.23 → 0.1.25

package/dist/index.cjs CHANGED
@@ -1088,7 +1088,7 @@ async function createLLMFromModel(modelConfig, authToken, streaming = false) {
  case "gemini":
  return new import_google_gauth.ChatGoogle({
  model: "gemini-flash-latest",
- apiKey: "AIzaSyD9WiFuXp2fhVAokIMPp9YPKMlh7_Bvddc",
+ apiKey: "AIzaSyBzrL8Hx6dHhXgwc2HfLlQsf5Y-9pdtc9M",
  streaming
  });
  case "openai":
@@ -1342,8 +1342,27 @@ IMPORTANT: You must base your response on the last message in the conversation h
  };
  const finalModel = model?.integrationId || typeof model?.bindTools === "function" ? model : defaultModel;
  console.log(`\u{1F916} IaAgentNode "${name}": Using model:`, finalModel === defaultModel ? "DEFAULT (Gemini)" : finalModel?.model || "instance");
- const callbacks = stream && emitter ? [
- {
+ const tokenUsage = { input: 0, output: 0, total: 0 };
+ const callbacks = [
+ {
+ handleLLMEnd: (output2) => {
+ let usage = output2.llmOutput?.tokenUsage || output2.llmOutput?.estimatedTokenUsage;
+ if (!usage && output2.generations && Array.isArray(output2.generations) && output2.generations.length > 0) {
+ const firstGen = output2.generations[0][0];
+ if (firstGen?.generationInfo?.tokenUsage) {
+ usage = firstGen.generationInfo.tokenUsage;
+ }
+ }
+ if (usage) {
+ tokenUsage.input += usage.promptTokens || usage.input_tokens || 0;
+ tokenUsage.output += usage.completionTokens || usage.output_tokens || 0;
+ tokenUsage.total += usage.totalTokens || usage.total_tokens || 0;
+ }
+ }
+ }
+ ];
+ if (stream && emitter) {
+ callbacks.push({
  handleLLMNewToken: (token) => {
  if (emitter?.emitDelta) {
  emitter.emitDelta({
@@ -1354,8 +1373,8 @@ IMPORTANT: You must base your response on the last message in the conversation h
  });
  }
  }
- }
- ] : [];
+ });
+ }
  let llmInstance;
  if (finalModel?.integrationId) {
  if (!authToken) {
@@ -1367,7 +1386,7 @@ IMPORTANT: You must base your response on the last message in the conversation h
  } else {
  throw new Error("Invalid model: must have integrationId or be a valid LLM instance with bindTools method");
  }
- if (stream && callbacks.length > 0) {
+ if (callbacks.length > 0) {
  if (llmInstance.callbacks) {
  if (Array.isArray(llmInstance.callbacks)) {
  llmInstance.callbacks.push(...callbacks);
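
Condensed, the pattern these hunks introduce looks like the sketch below. This is an illustration, not the package's API: `output` follows LangChain's `LLMResult` shape, and the chained fallbacks cover the camelCase and snake_case key variants different providers report.

```ts
// Sketch of the accumulator pattern from the diff above (assumed shapes).
interface TokenUsage { input: number; output: number; total: number; }

function makeUsageCallback(tokenUsage: TokenUsage) {
  return {
    // LangChain invokes handleLLMEnd once per completed LLM call.
    handleLLMEnd: (output: any) => {
      // Prefer llmOutput.tokenUsage, then fall back to the first
      // generation's generationInfo, as the published code does.
      let usage = output.llmOutput?.tokenUsage ?? output.llmOutput?.estimatedTokenUsage;
      if (!usage && Array.isArray(output.generations) && output.generations.length > 0) {
        usage = output.generations[0][0]?.generationInfo?.tokenUsage;
      }
      if (usage) {
        // Providers report usage under camelCase or snake_case keys.
        tokenUsage.input += usage.promptTokens || usage.input_tokens || 0;
        tokenUsage.output += usage.completionTokens || usage.output_tokens || 0;
        tokenUsage.total += usage.totalTokens || usage.total_tokens || 0;
      }
    },
  };
}
```

Because this usage handler is now registered unconditionally (only the streaming `handleLLMNewToken` handler stays behind the `stream && emitter` guard), the attachment guard in the last hunk above correctly drops its old `stream &&` condition.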
@@ -1418,6 +1437,22 @@ IMPORTANT: You must base your response on the last message in the conversation h
  if (lastMessages.length > 0) {
  const lastMessage = lastMessages[lastMessages.length - 1];
  const content = lastMessage?.content;
+ const msg = lastMessage;
+ const usageMetadata = msg?.response_metadata?.tokenUsage || msg?.response_metadata?.usage || msg?.usage_metadata;
+ if (usageMetadata) {
+ const input = usageMetadata.promptTokens || usageMetadata.input_tokens || 0;
+ const output2 = usageMetadata.completionTokens || usageMetadata.output_tokens || 0;
+ const total = usageMetadata.totalTokens || usageMetadata.total_tokens || 0;
+ if (tokenUsage.total === 0) {
+ if (input + output2 > 0) {
+ tokenUsage.input = input;
+ tokenUsage.output = output2;
+ tokenUsage.total = input + output2;
+ } else if (total > 0) {
+ tokenUsage.total = total;
+ }
+ }
+ }
  if (typeof content === "string") {
  output = content;
  } else if (Array.isArray(content)) {
@@ -1436,6 +1471,22 @@ IMPORTANT: You must base your response on the last message in the conversation h
  if (result?.messages && result.messages.length > 0) {
  const lastMessage = result.messages[result.messages.length - 1];
  const content = lastMessage?.content;
+ const msg = lastMessage;
+ const usageMetadata = msg?.response_metadata?.tokenUsage || msg?.response_metadata?.usage || msg?.usage_metadata;
+ if (usageMetadata) {
+ const input = usageMetadata.promptTokens || usageMetadata.input_tokens || 0;
+ const output2 = usageMetadata.completionTokens || usageMetadata.output_tokens || 0;
+ const total = usageMetadata.totalTokens || usageMetadata.total_tokens || 0;
+ if (tokenUsage.total === 0) {
+ if (input + output2 > 0) {
+ tokenUsage.input = input;
+ tokenUsage.output = output2;
+ tokenUsage.total = input + output2;
+ } else if (total > 0) {
+ tokenUsage.total = total;
+ }
+ }
+ }
  if (typeof content === "string") {
  output = content;
  } else if (Array.isArray(content)) {
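
Both result-extraction paths repeat the same fallback: if the callbacks captured nothing, usage is read off the last message's metadata. Factored out, the duplicated block amounts to something like this sketch (`msg` is a LangChain-style message whose optional `response_metadata`/`usage_metadata` shapes are assumptions here):

```ts
// Sketch of the duplicated fallback above; not part of the package.
function applyMessageUsage(
  tokenUsage: { input: number; output: number; total: number },
  msg: any,
): void {
  const meta = msg?.response_metadata?.tokenUsage
    || msg?.response_metadata?.usage
    || msg?.usage_metadata;
  if (!meta || tokenUsage.total !== 0) return; // callbacks already counted
  const input = meta.promptTokens || meta.input_tokens || 0;
  const output = meta.completionTokens || meta.output_tokens || 0;
  const total = meta.totalTokens || meta.total_tokens || 0;
  if (input + output > 0) {
    tokenUsage.input = input;
    tokenUsage.output = output;
    tokenUsage.total = input + output;
  } else if (total > 0) {
    tokenUsage.total = total; // provider reported only a grand total
  }
}
```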
@@ -1494,6 +1545,12 @@ ${afterResults.message}`,
  output = `Error: ${error instanceof Error ? error.message : "Unknown error"}`;
  }
  }
+ if (tokenUsage.total === 0 && (tokenUsage.input > 0 || tokenUsage.output > 0)) {
+ tokenUsage.total = tokenUsage.input + tokenUsage.output;
+ } else if (tokenUsage.total !== tokenUsage.input + tokenUsage.output) {
+ tokenUsage.total = tokenUsage.input + tokenUsage.output;
+ }
+ console.log(`[TOKEN COUNT] Input: ${tokenUsage.input}, Output: ${tokenUsage.output}, Total: ${tokenUsage.total}`);
  return {
  agent,
  response: output || "",
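
Note that both reconciliation branches above assign the same expression, so the block is equivalent to one unconditional line; a side effect is that a total captured through the total-only fallback (with no per-direction counts) is overwritten back to 0 before the `[TOKEN COUNT]` log:

```ts
// Equivalent to the published reconciliation: total is always recomputed
// from the per-direction counters.
tokenUsage.total = tokenUsage.input + tokenUsage.output;
```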
@@ -1920,7 +1977,7 @@ var import_langgraph_checkpoint_postgres = require("@langchain/langgraph-checkpo
  var PostgresMemoryNodeFunction = async (inputs) => {
  const { $field: _$field, $req: _$req, $inputs: _$inputs, $vars: _$vars } = inputs;
  const fieldValues = inputs.fieldValues || {};
- const connectionString = fieldValues.connectionString || inputs.connectionString || "postgresql://postgres:postgres@localhost:5432/workflows";
+ const connectionString = fieldValues.connectionString || inputs.connectionString || "postgresql://yugabyte:yugabyte@localhost:5433/workflows";
  try {
  const checkpointer = import_langgraph_checkpoint_postgres.PostgresSaver.fromConnString(connectionString);
  await checkpointer.setup();
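
The new default connection string targets YugabyteDB's YSQL defaults (user and password `yugabyte`, port 5433) rather than a vanilla Postgres instance; YugabyteDB speaks the Postgres wire protocol, so the same `PostgresSaver` API applies. A minimal standalone sketch of the setup path (the connection string is a placeholder mirroring the new default, not credentials to ship):

```ts
import { PostgresSaver } from "@langchain/langgraph-checkpoint-postgres";

// Any Postgres-wire-compatible endpoint works here.
const checkpointer = PostgresSaver.fromConnString(
  "postgresql://yugabyte:yugabyte@localhost:5433/workflows",
);
await checkpointer.setup(); // creates the checkpoint tables if missing
```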
@@ -2444,6 +2501,19 @@ var CustomCodeNode = {
  fieldType: "any",
  required: false
  }
+ },
+ {
+ id: "function",
+ label: "Function",
+ type: "function",
+ required: false,
+ typeable: false,
+ handle: {
+ type: "output",
+ label: "Function",
+ name: "function",
+ fieldType: "function"
+ }
  }
  ]
  };
package/dist/index.d.cts CHANGED
@@ -34,6 +34,8 @@ interface NodeField {
  min?: number;
  max?: number;
  handle?: NodeHandle;
+ /** Whether this field is active/visible in the UI. Defaults to true if not specified. */
+ active?: boolean;
  }
  interface NodeInput {
  id: string;
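
The flag is optional, so existing node definitions need no migration: an omitted `active` reads as visible. A hypothetical field using it (assuming `NodeField` is exported and that the surrounding properties exist on the interface, which the excerpt above only partially shows):

```ts
import type { NodeField } from "@atomoz/workflows-nodes"; // assumed export

// Hypothetical field: declared but hidden in the UI until enabled.
const advancedTimeout: NodeField = {
  id: "timeoutMs",
  label: "Timeout (ms)",
  type: "number",
  min: 0,
  active: false, // new in 0.1.25; omitting it keeps today's behavior
};
```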
package/dist/index.d.ts CHANGED
@@ -34,6 +34,8 @@ interface NodeField {
  min?: number;
  max?: number;
  handle?: NodeHandle;
+ /** Whether this field is active/visible in the UI. Defaults to true if not specified. */
+ active?: boolean;
  }
  interface NodeInput {
  id: string;
package/dist/index.js CHANGED
@@ -1008,7 +1008,7 @@ async function createLLMFromModel(modelConfig, authToken, streaming = false) {
  case "gemini":
  return new ChatGoogle({
  model: "gemini-flash-latest",
- apiKey: "AIzaSyD9WiFuXp2fhVAokIMPp9YPKMlh7_Bvddc",
+ apiKey: "AIzaSyBzrL8Hx6dHhXgwc2HfLlQsf5Y-9pdtc9M",
  streaming
  });
  case "openai":
@@ -1262,8 +1262,27 @@ IMPORTANT: You must base your response on the last message in the conversation h
  };
  const finalModel = model?.integrationId || typeof model?.bindTools === "function" ? model : defaultModel;
  console.log(`\u{1F916} IaAgentNode "${name}": Using model:`, finalModel === defaultModel ? "DEFAULT (Gemini)" : finalModel?.model || "instance");
- const callbacks = stream && emitter ? [
- {
+ const tokenUsage = { input: 0, output: 0, total: 0 };
+ const callbacks = [
+ {
+ handleLLMEnd: (output2) => {
+ let usage = output2.llmOutput?.tokenUsage || output2.llmOutput?.estimatedTokenUsage;
+ if (!usage && output2.generations && Array.isArray(output2.generations) && output2.generations.length > 0) {
+ const firstGen = output2.generations[0][0];
+ if (firstGen?.generationInfo?.tokenUsage) {
+ usage = firstGen.generationInfo.tokenUsage;
+ }
+ }
+ if (usage) {
+ tokenUsage.input += usage.promptTokens || usage.input_tokens || 0;
+ tokenUsage.output += usage.completionTokens || usage.output_tokens || 0;
+ tokenUsage.total += usage.totalTokens || usage.total_tokens || 0;
+ }
+ }
+ }
+ ];
+ if (stream && emitter) {
+ callbacks.push({
  handleLLMNewToken: (token) => {
  if (emitter?.emitDelta) {
  emitter.emitDelta({
@@ -1274,8 +1293,8 @@ IMPORTANT: You must base your response on the last message in the conversation h
  });
  }
  }
- }
- ] : [];
+ });
+ }
  let llmInstance;
  if (finalModel?.integrationId) {
  if (!authToken) {
@@ -1287,7 +1306,7 @@ IMPORTANT: You must base your response on the last message in the conversation h
  } else {
  throw new Error("Invalid model: must have integrationId or be a valid LLM instance with bindTools method");
  }
- if (stream && callbacks.length > 0) {
+ if (callbacks.length > 0) {
  if (llmInstance.callbacks) {
  if (Array.isArray(llmInstance.callbacks)) {
  llmInstance.callbacks.push(...callbacks);
@@ -1338,6 +1357,22 @@ IMPORTANT: You must base your response on the last message in the conversation h
  if (lastMessages.length > 0) {
  const lastMessage = lastMessages[lastMessages.length - 1];
  const content = lastMessage?.content;
+ const msg = lastMessage;
+ const usageMetadata = msg?.response_metadata?.tokenUsage || msg?.response_metadata?.usage || msg?.usage_metadata;
+ if (usageMetadata) {
+ const input = usageMetadata.promptTokens || usageMetadata.input_tokens || 0;
+ const output2 = usageMetadata.completionTokens || usageMetadata.output_tokens || 0;
+ const total = usageMetadata.totalTokens || usageMetadata.total_tokens || 0;
+ if (tokenUsage.total === 0) {
+ if (input + output2 > 0) {
+ tokenUsage.input = input;
+ tokenUsage.output = output2;
+ tokenUsage.total = input + output2;
+ } else if (total > 0) {
+ tokenUsage.total = total;
+ }
+ }
+ }
  if (typeof content === "string") {
  output = content;
  } else if (Array.isArray(content)) {
@@ -1356,6 +1391,22 @@ IMPORTANT: You must base your response on the last message in the conversation h
  if (result?.messages && result.messages.length > 0) {
  const lastMessage = result.messages[result.messages.length - 1];
  const content = lastMessage?.content;
+ const msg = lastMessage;
+ const usageMetadata = msg?.response_metadata?.tokenUsage || msg?.response_metadata?.usage || msg?.usage_metadata;
+ if (usageMetadata) {
+ const input = usageMetadata.promptTokens || usageMetadata.input_tokens || 0;
+ const output2 = usageMetadata.completionTokens || usageMetadata.output_tokens || 0;
+ const total = usageMetadata.totalTokens || usageMetadata.total_tokens || 0;
+ if (tokenUsage.total === 0) {
+ if (input + output2 > 0) {
+ tokenUsage.input = input;
+ tokenUsage.output = output2;
+ tokenUsage.total = input + output2;
+ } else if (total > 0) {
+ tokenUsage.total = total;
+ }
+ }
+ }
  if (typeof content === "string") {
  output = content;
  } else if (Array.isArray(content)) {
@@ -1414,6 +1465,12 @@ ${afterResults.message}`,
  output = `Error: ${error instanceof Error ? error.message : "Unknown error"}`;
  }
  }
+ if (tokenUsage.total === 0 && (tokenUsage.input > 0 || tokenUsage.output > 0)) {
+ tokenUsage.total = tokenUsage.input + tokenUsage.output;
+ } else if (tokenUsage.total !== tokenUsage.input + tokenUsage.output) {
+ tokenUsage.total = tokenUsage.input + tokenUsage.output;
+ }
+ console.log(`[TOKEN COUNT] Input: ${tokenUsage.input}, Output: ${tokenUsage.output}, Total: ${tokenUsage.total}`);
  return {
  agent,
  response: output || "",
@@ -1840,7 +1897,7 @@ import { PostgresSaver } from "@langchain/langgraph-checkpoint-postgres";
  var PostgresMemoryNodeFunction = async (inputs) => {
  const { $field: _$field, $req: _$req, $inputs: _$inputs, $vars: _$vars } = inputs;
  const fieldValues = inputs.fieldValues || {};
- const connectionString = fieldValues.connectionString || inputs.connectionString || "postgresql://postgres:postgres@localhost:5432/workflows";
+ const connectionString = fieldValues.connectionString || inputs.connectionString || "postgresql://yugabyte:yugabyte@localhost:5433/workflows";
  try {
  const checkpointer = PostgresSaver.fromConnString(connectionString);
  await checkpointer.setup();
@@ -2364,6 +2421,19 @@ var CustomCodeNode = {
  fieldType: "any",
  required: false
  }
+ },
+ {
+ id: "function",
+ label: "Function",
+ type: "function",
+ required: false,
+ typeable: false,
+ handle: {
+ type: "output",
+ label: "Function",
+ name: "function",
+ fieldType: "function"
+ }
  }
  ]
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@atomoz/workflows-nodes",
- "version": "0.1.23",
+ "version": "0.1.25",
  "description": "Atomoz Workflows - Node Library",
  "type": "module",
  "main": "./dist/index.js",