@flutchai/flutch-sdk 0.1.23 → 0.1.25

This diff compares the contents of publicly released package versions as published to the supported registries. It is provided for informational purposes only.
package/dist/index.cjs CHANGED
@@ -1245,28 +1245,54 @@ var init_versioned_graph_service = __esm({
      ], exports.VersionedGraphService);
    }
  });
+ function generateCallbackToken(graphType) {
+   return `cb::${graphType}::${crypto.randomBytes(8).toString("base64url")}`;
+ }
+ function createCallbackRecord(entry, token, now) {
+   return {
+     ...entry,
+     token,
+     status: "pending",
+     createdAt: now,
+     retries: 0
+   };
+ }
+ function resolveCallbackTTL(entry) {
+   return entry.metadata?.ttlSec ?? 600;
+ }
+ function parseCallbackRecord(data) {
+   try {
+     return JSON.parse(data);
+   } catch {
+     return null;
+   }
+ }
+ function markAsFailed(record, error) {
+   return {
+     ...record,
+     status: "failed",
+     retries: (record.retries || 0) + 1,
+     lastError: error
+   };
+ }
+ function markAsPending(record) {
+   return { ...record, status: "pending" };
+ }
+
+ // src/callbacks/callback-store.ts
  var CallbackStore = class {
    constructor(redis) {
      this.redis = redis;
      this.isProduction = process.env.NODE_ENV === "production";
    }
    isProduction;
-   generateToken(graphType) {
-     return `cb::${graphType}::${crypto.randomBytes(8).toString("base64url")}`;
-   }
    /**
     * Issues a new callback token and persists its payload.
     */
    async issue(entry) {
-     const token = this.generateToken(entry.graphType);
-     const record = {
-       ...entry,
-       token,
-       status: "pending",
-       createdAt: Date.now(),
-       retries: 0
-     };
-     const ttl = entry.metadata?.ttlSec ?? 600;
+     const token = generateCallbackToken(entry.graphType);
+     const record = createCallbackRecord(entry, token, Date.now());
+     const ttl = resolveCallbackTTL(entry);
      await this.redis.setex(`callback:${token}`, ttl, JSON.stringify(record));
      return token;
    }
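A hypothetical usage sketch (not package code) of what issue() persists, assuming an ioredis client; graphType and metadata.ttlSec are read by the helpers above, while handler is an illustrative extra field carried along via ...entry:

    const Redis = require("ioredis");

    async function issueExample() {
      const store = new CallbackStore(new Redis());
      const token = await store.issue({
        graphType: "weather::1.0.0",   // illustrative graph type
        handler: "onResult",           // illustrative field, carried through via ...entry
        metadata: { ttlSec: 120 }      // overrides the 600-second default TTL
      });
      // token: "cb::weather::1.0.0::" + 8 random bytes encoded as base64url
      // Redis now holds `callback:${token}` (TTL 120 s) containing the JSON record
      // { ...entry, token, status: "pending", createdAt: <ms epoch>, retries: 0 }
      return token;
    }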
@@ -1281,7 +1307,9 @@ var CallbackStore = class {
      }
    }
    /**
-    * Production version with Lua script for atomicity
+    * Production version: uses Redis Lua scripting for atomic get-and-lock.
+    * NOTE: redis.eval() here executes a Lua script on the Redis server,
+    * NOT JavaScript eval(). This is the standard ioredis API for Lua scripting.
     */
    async getAndLockAtomic(token) {
      const script = `
@@ -1305,18 +1333,17 @@ var CallbackStore = class {
      if (!data) {
        return null;
      }
-     try {
-       const record = JSON.parse(data);
-       if (record.status !== "pending") {
-         return null;
-       }
-       record.status = "processing";
-       await this.redis.set(key, JSON.stringify(record));
-       return record;
-     } catch (error) {
-       console.error("Failed to parse callback record:", error);
+     const record = parseCallbackRecord(data);
+     if (!record) {
+       console.error("Failed to parse callback record");
+       return null;
+     }
+     if (record.status !== "pending") {
        return null;
      }
+     record.status = "processing";
+     await this.redis.set(key, JSON.stringify(record));
+     return record;
    }
    /**
     * Finalizes callback processing by removing token.
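The Lua script bodies are truncated in this diff. As a rough sketch of the server-side get-and-lock the NOTE describes (an assumption, not the package's actual script), an ioredis eval call takes the script, the number of keys, then the keys and arguments:

    // Assumed shape only; the package's real script is not shown in this diff.
    const LOCK_SCRIPT = `
      local data = redis.call("GET", KEYS[1])
      if not data then return nil end
      local record = cjson.decode(data)
      if record.status ~= "pending" then return nil end
      record.status = "processing"
      redis.call("SET", KEYS[1], cjson.encode(record), "KEEPTTL")
      return cjson.encode(record)
    `;

    // ioredis signature: eval(script, numberOfKeys, ...keys, ...args)
    async function getAndLockViaLua(redis, token) {
      const result = await redis.eval(LOCK_SCRIPT, 1, `callback:${token}`);
      return result ? JSON.parse(result) : null;
    }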
@@ -1335,7 +1362,8 @@ var CallbackStore = class {
      }
    }
    /**
-    * Production version with Lua script for atomicity
+    * Production version: uses Redis Lua scripting for atomic fail.
+    * NOTE: redis.eval() here executes a Lua script on the Redis server.
     */
    async failAtomic(token, error) {
      const script = `
@@ -1360,17 +1388,14 @@ var CallbackStore = class {
      if (!data) {
        return null;
      }
-     try {
-       const record = JSON.parse(data);
-       record.status = "failed";
-       record.retries = (record.retries || 0) + 1;
-       record.lastError = error;
-       await this.redis.set(key, JSON.stringify(record));
-       return record;
-     } catch (parseError) {
-       console.error("Failed to parse callback record:", parseError);
+     const record = parseCallbackRecord(data);
+     if (!record) {
+       console.error("Failed to parse callback record");
        return null;
      }
+     const updated = markAsFailed(record, error);
+     await this.redis.set(key, JSON.stringify(updated));
+     return updated;
    }
    /**
     * Reset callback status to pending for retry.
@@ -1383,7 +1408,8 @@ var CallbackStore = class {
      }
    }
    /**
-    * Production version with Lua script for atomicity
+    * Production version: uses Redis Lua scripting for atomic retry.
+    * NOTE: redis.eval() here executes a Lua script on the Redis server.
     */
    async retryAtomic(token) {
      const script = `
@@ -1406,15 +1432,14 @@ var CallbackStore = class {
      if (!data) {
        return null;
      }
-     try {
-       const record = JSON.parse(data);
-       record.status = "pending";
-       await this.redis.set(key, JSON.stringify(record));
-       return record;
-     } catch (parseError) {
-       console.error("Failed to parse callback record:", parseError);
+     const record = parseCallbackRecord(data);
+     if (!record) {
+       console.error("Failed to parse callback record");
        return null;
      }
+     const updated = markAsPending(record);
+     await this.redis.set(key, JSON.stringify(updated));
+     return updated;
    }
  };

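Taken together, the store implements a small state machine: pending on issue, processing after an atomic lock, failed with an incremented retry count, and back to pending on retry. A hypothetical walk-through (dispatch is an assumed application handler, and the immediate retry is illustrative rather than the package's actual retry policy):

    // Hypothetical caller of the CallbackStore lifecycle.
    async function handleCallback(store, token) {
      // pending -> processing (null if missing, not pending, or unparsable)
      const record = await store.getAndLockAtomic(token);
      if (!record) return;
      try {
        await dispatch(record); // assumed application-level handler
      } catch (err) {
        // processing -> failed: retries incremented, lastError recorded
        await store.failAtomic(token, String(err));
        // failed -> pending: eligible to be locked again later
        await store.retryAtomic(token);
      }
    }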
@@ -3066,6 +3091,38 @@ exports.CallbackController = __decorateClass([

  // src/graph/abstract-graph.builder.ts
  init_agent_ui();
+
+ // src/graph/graph.logic.ts
+ function isValidSemver(version) {
+   return /^\d+\.\d+\.\d+$/.test(version);
+ }
+ function parseCallbackToken(token) {
+   const parts = token.split("_");
+   if (parts.length < 4 || parts[0] !== "cb") {
+     return null;
+   }
+   const graphName = parts[1];
+   const handler = parts[2];
+   const graphType = `${graphName}::1.0.0`;
+   return { graphType, handler };
+ }
+ function decodeCallbackParams(token) {
+   const parts = token.split("_");
+   if (parts.length < 4) {
+     return {};
+   }
+   try {
+     const encodedParams = parts.slice(3).join("_");
+     const decodedParams = Buffer.from(encodedParams, "base64url").toString(
+       "utf8"
+     );
+     return JSON.parse(decodedParams);
+   } catch {
+     return {};
+   }
+ }
+
+ // src/graph/abstract-graph.builder.ts
  var _AbstractGraphBuilder = class _AbstractGraphBuilder {
    logger = new common.Logger(_AbstractGraphBuilder.name);
    callbackRegistry;
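A worked example (illustrative values) of the underscore-delimited cb_{graphName}_{handler}_{encodedParams} format these helpers expect, which is distinct from the cb:: tokens minted by generateCallbackToken earlier in the bundle:

    // Build a token in the expected shape and parse it back.
    const params = Buffer.from(JSON.stringify({ city: "Oslo" })).toString("base64url");
    const token = ["cb", "weather", "onResult", params].join("_");

    parseCallbackToken(token);   // { graphType: "weather::1.0.0", handler: "onResult" }
    decodeCallbackParams(token); // { city: "Oslo" } (slice(3).join("_") survives "_" inside the base64url)
    decodeCallbackParams("cb_weather_onResult"); // {} (fewer than four parts)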
@@ -3274,8 +3331,7 @@ var _AbstractGraphBuilder = class _AbstractGraphBuilder {
     * Version validation
     */
    validateVersion() {
-     const versionRegex = /^\d+\.\d+\.\d+$/;
-     if (!versionRegex.test(this.version)) {
+     if (!isValidSemver(this.version)) {
        throw new Error(
          `Invalid version format: ${this.version}. Expected format: X.Y.Z`
        );
@@ -3625,36 +3681,21 @@ exports.UniversalGraphService = class UniversalGraphService {
     * Expected format: cb_{graphName}_{handler}_{encodedParams}
     */
    parseCallbackToken(token) {
-     const parts = token.split("_");
-     if (parts.length < 4 || parts[0] !== "cb") {
+     const result = parseCallbackToken(token);
+     if (!result) {
        throw new Error(`Invalid callback token format: ${token}`);
      }
-     const graphName = parts[1];
-     const handler = parts[2];
-     const graphType = `${graphName}::1.0.0`;
-     return { graphType, handler };
+     return result;
    }
    /**
     * Extract parameters from callback token
     */
    parseCallbackParams(token) {
-     const parts = token.split("_");
-     if (parts.length < 4) {
-       return {};
-     }
-     try {
-       const encodedParams = parts.slice(3).join("_");
-       const decodedParams = Buffer.from(encodedParams, "base64url").toString(
-         "utf8"
-       );
-       return JSON.parse(decodedParams);
-     } catch (error) {
-       this.logger.warn(
-         `Failed to parse callback params from token: ${token}`,
-         error
-       );
-       return {};
+     const result = decodeCallbackParams(token);
+     if (Object.keys(result).length === 0 && token.split("_").length >= 4) {
+       this.logger.warn(`Failed to parse callback params from token: ${token}`);
      }
+     return result;
    }
    /**
     * Call a graph endpoint
@@ -4450,11 +4491,21 @@ exports.EventProcessor = class EventProcessor {
        channels: /* @__PURE__ */ new Map([
          [
            "text" /* TEXT */,
-           { contentChain: [], currentBlock: null, pendingToolBlocks: [] }
+           {
+             contentChain: [],
+             currentBlock: null,
+             pendingToolBlocks: [],
+             toolBlocksByRunId: /* @__PURE__ */ new Map()
+           }
          ],
          [
            "processing" /* PROCESSING */,
-           { contentChain: [], currentBlock: null, pendingToolBlocks: [] }
+           {
+             contentChain: [],
+             currentBlock: null,
+             pendingToolBlocks: [],
+             toolBlocksByRunId: /* @__PURE__ */ new Map()
+           }
          ]
        ]),
        attachments: [],
@@ -4627,11 +4678,20 @@ exports.EventProcessor = class EventProcessor {
        return;
      }
      if (event.event === "on_tool_start") {
+       const channel = event.metadata?.stream_channel ?? "text" /* TEXT */;
+       const state = acc.channels.get(channel);
+       if (state && event.run_id) {
+         const idx = state.pendingToolBlocks.findIndex(
+           (b) => b.name === event.name
+         );
+         if (idx !== -1) {
+           const block = state.pendingToolBlocks.splice(idx, 1)[0];
+           state.toolBlocksByRunId.set(event.run_id, block);
+         }
+       }
        this.logger.log("\u{1F527} Tool execution started", {
          toolName: event.name,
-         input: event.data?.input,
-         runId: event.run_id,
-         metadata: event.metadata
+         runId: event.run_id
        });
        return;
      }
@@ -4639,7 +4699,13 @@ exports.EventProcessor = class EventProcessor {
        const channel = event.metadata?.stream_channel ?? "text" /* TEXT */;
        const state = acc.channels.get(channel);
        if (!state) return;
-       const toolBlock = state.pendingToolBlocks.shift();
+       let toolBlock;
+       if (event.run_id && state.toolBlocksByRunId.has(event.run_id)) {
+         toolBlock = state.toolBlocksByRunId.get(event.run_id);
+         state.toolBlocksByRunId.delete(event.run_id);
+       } else {
+         toolBlock = state.pendingToolBlocks.shift();
+       }
        if (toolBlock && toolBlock.type === "tool_use") {
          const output = event.data?.output;
          const outputString = typeof output === "string" ? output : JSON.stringify(output, null, 2);
@@ -4653,26 +4719,26 @@ exports.EventProcessor = class EventProcessor {
            },
            onPartial
          );
-         this.logger.log("\u2705 Tool execution completed", {
+         this.logger.log("\u2705 Tool completed", {
            toolName: event.name,
            toolBlockId: toolBlock.id,
-           outputPreview: outputString.substring(0, 200) + (outputString.length > 200 ? "..." : ""),
            runId: event.run_id
          });
        } else {
-         this.logger.warn(
-           "\u26A0\uFE0F on_tool_end received but no pending tool block found",
-           {
-             toolName: event.name,
-             runId: event.run_id,
-             pendingCount: state.pendingToolBlocks.length
-           }
-         );
+         this.logger.warn("\u26A0\uFE0F on_tool_end: no matching tool block", {
+           toolName: event.name,
+           runId: event.run_id
+         });
        }
        return;
      }
      if (event.event === "on_tool_error") {
-       this.logger.error("\u274C Tool execution failed", {
+       const channel = event.metadata?.stream_channel ?? "text" /* TEXT */;
+       const state = acc.channels.get(channel);
+       if (state && event.run_id) {
+         state.toolBlocksByRunId.delete(event.run_id);
+       }
+       this.logger.error("\u274C Tool failed", {
          toolName: event.name,
          error: event.data?.error,
          runId: event.run_id
@@ -4720,6 +4786,13 @@ exports.EventProcessor = class EventProcessor {
    getResult(acc) {
      const allChains = [];
      for (const [channel, state] of acc.channels.entries()) {
+       if (state.pendingToolBlocks.length > 0 || state.toolBlocksByRunId.size > 0) {
+         this.logger.warn("\u26A0\uFE0F Orphaned tool blocks detected at finalization", {
+           channel,
+           pendingCount: state.pendingToolBlocks.length,
+           mappedCount: state.toolBlocksByRunId.size
+         });
+       }
        if (state.currentBlock) {
          state.contentChain.push(state.currentBlock);
        }
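Why the run_id keyed map matters (an illustrative reconstruction, not package code): with the old FIFO pendingToolBlocks.shift(), two tools running concurrently could have their outputs attached to the wrong blocks when they finished out of order; keyed by run_id, each on_tool_end resolves to the block registered at its own on_tool_start:

    // Illustrative values; block and run ids are made up.
    const state = {
      pendingToolBlocks: [],
      toolBlocksByRunId: new Map([
        ["run-A", { type: "tool_use", id: "block-1", name: "search" }],
        ["run-B", { type: "tool_use", id: "block-2", name: "calculator" }]
      ])
    };
    // on_tool_end for run-B arrives first; it still resolves to its own block.
    const toolBlock = state.toolBlocksByRunId.get("run-B"); // block-2, not block-1
    state.toolBlocksByRunId.delete("run-B");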
@@ -5952,6 +6025,24 @@ var ChatFeature = /* @__PURE__ */ ((ChatFeature2) => {
    ChatFeature2["JSON_MODE"] = "json_mode";
    return ChatFeature2;
  })(ChatFeature || {});
+ function isReasoningModel(modelName) {
+   return modelName.includes("gpt-5") || modelName.includes("gpt-o1") || modelName.includes("gpt-o2") || modelName.includes("gpt-o3") || modelName.includes("gpt-o4") || /^gpt-(5|6|7|8|9)/.test(modelName) || /^gpt-o[1-4]/.test(modelName);
+ }
+ function hashToolsConfig(toolsConfig) {
+   const sorted = toolsConfig.map((t) => `${t.toolName}:${t.enabled}:${JSON.stringify(t.config || {})}`).sort().join("|");
+   return crypto.createHash("md5").update(sorted).digest("hex").slice(0, 16);
+ }
+ function generateModelCacheKey(modelId, temperature, maxTokens, toolsConfig) {
+   const parts = [
+     modelId,
+     temperature ?? "default",
+     maxTokens ?? "default"
+   ];
+   if (toolsConfig && toolsConfig.length > 0) {
+     parts.push(hashToolsConfig(toolsConfig));
+   }
+   return parts.join(":");
+ }
  var VoyageAIRerank = class extends document_compressors.BaseDocumentCompressor {
    apiKey;
    model;
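Illustrative cache keys produced by the new top-level generateModelCacheKey (the model id, tool name, and config values are made-up examples; the first two match the format documented in the generateModelCacheKey JSDoc below):

    generateModelCacheKey("model123", 0.7, 4096);
    // -> "model123:0.7:4096"
    generateModelCacheKey("model123", undefined, undefined);
    // -> "model123:default:default"
    generateModelCacheKey("model123", 0.7, 4096, [
      { toolName: "webSearch", enabled: true, config: { topK: 3 } }
    ]);
    // -> "model123:0.7:4096:" + the first 16 hex chars of the md5 of the sorted tools string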
@@ -6247,8 +6338,7 @@ var ModelInitializer = class _ModelInitializer {
     * Uses MD5 hash to create short, unique identifier
     */
    hashToolsConfig(toolsConfig) {
-     const sorted = toolsConfig.map((t) => `${t.toolName}:${t.enabled}:${JSON.stringify(t.config || {})}`).sort().join("|");
-     return crypto.createHash("md5").update(sorted).digest("hex").slice(0, 16);
+     return hashToolsConfig(toolsConfig);
    }
    /**
     * Generate cache key from ModelByIdConfig
@@ -6256,16 +6346,12 @@ var ModelInitializer = class _ModelInitializer {
     * Example: "model123:0.7:4096" or "model123:0.7:4096:a1b2c3d4e5f6g7h8"
     */
    generateModelCacheKey(config) {
-     const parts = [
+     return generateModelCacheKey(
        config.modelId,
-       config.temperature ?? "default",
-       config.maxTokens ?? "default"
-     ];
-     if (config.toolsConfig && config.toolsConfig.length > 0) {
-       const toolsHash = this.hashToolsConfig(config.toolsConfig);
-       parts.push(toolsHash);
-     }
-     return parts.join(":");
+       config.temperature,
+       config.maxTokens,
+       config.toolsConfig
+     );
    }
    /**
     * TEMPORARY SOLUTION for compatibility with new OpenAI models
@@ -6282,20 +6368,10 @@ var ModelInitializer = class _ModelInitializer {
     * @returns true if model requires maxCompletionTokens and temperature = 1
     */
    requiresMaxCompletionTokens(modelName) {
-     const requiresNew = modelName.includes("gpt-5") || modelName.includes("gpt-o1") || modelName.includes("gpt-o2") || modelName.includes("gpt-o3") || modelName.includes("gpt-o4") || // Add other patterns as new models are released
-     /^gpt-(5|6|7|8|9)/.test(modelName) || /^gpt-o[1-4]/.test(modelName);
+     const requiresNew = isReasoningModel(modelName);
      this.logger.debug(`Checking token parameter for model "${modelName}"`, {
        modelName,
-       requiresMaxCompletionTokens: requiresNew,
-       checks: {
-         includesGpt5: modelName.includes("gpt-5"),
-         includesO1: modelName.includes("gpt-o1"),
-         includesO2: modelName.includes("gpt-o2"),
-         includesO3: modelName.includes("gpt-o3"),
-         includesO4: modelName.includes("gpt-o4"),
-         regexGpt5Plus: /^gpt-(5|6|7|8|9)/.test(modelName),
-         regexO1to4: /^gpt-o[1-4]/.test(modelName)
-       }
+       requiresMaxCompletionTokens: requiresNew
      });
      return requiresNew;
    }
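Spot checks against the consolidated isReasoningModel() matcher; the model names are illustrative inputs and the results follow directly from the substring and prefix checks in the function body:

    isReasoningModel("gpt-5-mini");  // true  ("gpt-5" substring)
    isReasoningModel("gpt-o3-mini"); // true  ("gpt-o3" substring)
    isReasoningModel("gpt-4o-mini"); // false (no substring or gpt- prefix pattern matches)
    isReasoningModel("o3-mini");     // false (all patterns expect a "gpt-" prefix)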