langchain 1.0.3 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (372)
  1. package/CHANGELOG.md +7 -0
  2. package/dist/agents/ReactAgent.cjs.map +1 -1
  3. package/dist/agents/ReactAgent.js.map +1 -1
  4. package/dist/agents/annotation.cjs +6 -4
  5. package/dist/agents/annotation.cjs.map +1 -1
  6. package/dist/agents/annotation.js +7 -5
  7. package/dist/agents/annotation.js.map +1 -1
  8. package/dist/agents/middleware/contextEditing.d.cts.map +1 -1
  9. package/dist/agents/middleware/contextEditing.d.ts.map +1 -1
  10. package/dist/agents/middleware/dynamicSystemPrompt.d.ts.map +1 -1
  11. package/dist/agents/middleware/hitl.d.cts.map +1 -1
  12. package/dist/agents/middleware/llmToolSelector.d.cts +4 -4
  13. package/dist/agents/middleware/llmToolSelector.d.cts.map +1 -1
  14. package/dist/agents/middleware/modelCallLimit.d.cts.map +1 -1
  15. package/dist/agents/middleware/modelCallLimit.d.ts.map +1 -1
  16. package/dist/agents/middleware/promptCaching.d.cts.map +1 -1
  17. package/dist/agents/middleware/promptCaching.d.ts.map +1 -1
  18. package/dist/agents/middleware/summarization.cjs +272 -52
  19. package/dist/agents/middleware/summarization.cjs.map +1 -1
  20. package/dist/agents/middleware/summarization.d.cts +325 -30
  21. package/dist/agents/middleware/summarization.d.cts.map +1 -1
  22. package/dist/agents/middleware/summarization.d.ts +320 -25
  23. package/dist/agents/middleware/summarization.d.ts.map +1 -1
  24. package/dist/agents/middleware/summarization.js +274 -54
  25. package/dist/agents/middleware/summarization.js.map +1 -1
  26. package/dist/agents/middleware/todoListMiddleware.d.cts.map +1 -1
  27. package/dist/agents/middleware/todoListMiddleware.d.ts.map +1 -1
  28. package/dist/agents/middleware/toolCallLimit.cjs +36 -27
  29. package/dist/agents/middleware/toolCallLimit.cjs.map +1 -1
  30. package/dist/agents/middleware/toolCallLimit.d.cts +3 -1
  31. package/dist/agents/middleware/toolCallLimit.d.cts.map +1 -1
  32. package/dist/agents/middleware/toolCallLimit.d.ts +3 -1
  33. package/dist/agents/middleware/toolCallLimit.d.ts.map +1 -1
  34. package/dist/agents/middleware/toolCallLimit.js +36 -27
  35. package/dist/agents/middleware/toolCallLimit.js.map +1 -1
  36. package/dist/agents/middleware/types.d.cts.map +1 -1
  37. package/dist/agents/middleware/types.d.ts.map +1 -1
  38. package/dist/agents/middleware/utils.d.cts.map +1 -1
  39. package/dist/agents/middleware/utils.d.ts.map +1 -1
  40. package/dist/agents/nodes/AgentNode.cjs +0 -4
  41. package/dist/agents/nodes/AgentNode.cjs.map +1 -1
  42. package/dist/agents/nodes/AgentNode.js +1 -5
  43. package/dist/agents/nodes/AgentNode.js.map +1 -1
  44. package/dist/agents/responses.cjs +0 -1
  45. package/dist/agents/responses.js +1 -1
  46. package/dist/index.cjs.map +1 -1
  47. package/dist/index.d.cts +2 -2
  48. package/dist/index.d.ts +2 -2
  49. package/dist/index.js.map +1 -1
  50. package/package.json +6 -6
  51. package/dist/agents/middleware/callLimit.cjs +0 -130
  52. package/dist/agents/middleware/callLimit.cjs.map +0 -1
  53. package/dist/agents/middleware/callLimit.d.cts +0 -119
  54. package/dist/agents/middleware/callLimit.d.cts.map +0 -1
  55. package/dist/agents/middleware/callLimit.d.ts +0 -119
  56. package/dist/agents/middleware/callLimit.d.ts.map +0 -1
  57. package/dist/agents/middleware/callLimit.js +0 -129
  58. package/dist/agents/middleware/callLimit.js.map +0 -1
  59. package/dist/agents/middleware/index.d.cts +0 -15
  60. package/dist/agents/middleware/index.d.ts +0 -14
  61. package/dist/agents/tools.d.cts +0 -9
  62. package/dist/agents/tools.d.cts.map +0 -1
  63. package/dist/agents/tools.d.ts +0 -9
  64. package/dist/agents/tools.d.ts.map +0 -1
  65. package/dist/embeddings/cache_backed.cjs +0 -140
  66. package/dist/embeddings/cache_backed.cjs.map +0 -1
  67. package/dist/embeddings/cache_backed.d.cts +0 -107
  68. package/dist/embeddings/cache_backed.d.cts.map +0 -1
  69. package/dist/embeddings/cache_backed.d.ts +0 -107
  70. package/dist/embeddings/cache_backed.d.ts.map +0 -1
  71. package/dist/embeddings/cache_backed.js +0 -134
  72. package/dist/embeddings/cache_backed.js.map +0 -1
  73. package/dist/embeddings/fake.cjs +0 -22
  74. package/dist/embeddings/fake.cjs.map +0 -1
  75. package/dist/embeddings/fake.d.cts +0 -1
  76. package/dist/embeddings/fake.d.ts +0 -1
  77. package/dist/embeddings/fake.js +0 -12
  78. package/dist/embeddings/fake.js.map +0 -1
  79. package/dist/node_modules/.pnpm/eventemitter3@4.0.7/node_modules/eventemitter3/index.cjs +0 -248
  80. package/dist/node_modules/.pnpm/eventemitter3@4.0.7/node_modules/eventemitter3/index.cjs.map +0 -1
  81. package/dist/node_modules/.pnpm/eventemitter3@4.0.7/node_modules/eventemitter3/index.js +0 -244
  82. package/dist/node_modules/.pnpm/eventemitter3@4.0.7/node_modules/eventemitter3/index.js.map +0 -1
  83. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/client.cjs +0 -3096
  84. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/client.cjs.map +0 -1
  85. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/client.js +0 -3095
  86. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/client.js.map +0 -1
  87. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/env.cjs +0 -12
  88. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/env.cjs.map +0 -1
  89. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/env.js +0 -12
  90. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/env.js.map +0 -1
  91. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/constants.cjs +0 -71
  92. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/constants.cjs.map +0 -1
  93. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/constants.js +0 -38
  94. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/constants.js.map +0 -1
  95. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/translator.cjs +0 -221
  96. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/translator.cjs.map +0 -1
  97. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/translator.js +0 -221
  98. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/experimental/otel/translator.js.map +0 -1
  99. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/index.cjs +0 -11
  100. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/index.cjs.map +0 -1
  101. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/index.js +0 -11
  102. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/index.js.map +0 -1
  103. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/run_trees.cjs +0 -703
  104. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/run_trees.cjs.map +0 -1
  105. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/run_trees.js +0 -702
  106. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/run_trees.js.map +0 -1
  107. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/constants.cjs +0 -7
  108. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/constants.cjs.map +0 -1
  109. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/constants.js +0 -6
  110. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/constants.js.map +0 -1
  111. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/fetch.cjs +0 -29
  112. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/fetch.cjs.map +0 -1
  113. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/fetch.js +0 -28
  114. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/fetch.js.map +0 -1
  115. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/otel.cjs +0 -115
  116. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/otel.cjs.map +0 -1
  117. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/otel.js +0 -113
  118. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/singletons/otel.js.map +0 -1
  119. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/_uuid.cjs +0 -14
  120. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/_uuid.cjs.map +0 -1
  121. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/_uuid.js +0 -13
  122. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/_uuid.js.map +0 -1
  123. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/async_caller.cjs +0 -95
  124. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/async_caller.cjs.map +0 -1
  125. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/async_caller.js +0 -95
  126. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/async_caller.js.map +0 -1
  127. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/env.cjs +0 -136
  128. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/env.cjs.map +0 -1
  129. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/env.js +0 -131
  130. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/env.js.map +0 -1
  131. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/error.cjs +0 -102
  132. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/error.cjs.map +0 -1
  133. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/error.js +0 -99
  134. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/error.js.map +0 -1
  135. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/fast-safe-stringify/index.cjs +0 -140
  136. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/fast-safe-stringify/index.cjs.map +0 -1
  137. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/fast-safe-stringify/index.js +0 -140
  138. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/fast-safe-stringify/index.js.map +0 -1
  139. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/messages.cjs +0 -18
  140. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/messages.cjs.map +0 -1
  141. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/messages.js +0 -16
  142. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/messages.js.map +0 -1
  143. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/project.cjs +0 -10
  144. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/project.cjs.map +0 -1
  145. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/project.js +0 -10
  146. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/project.js.map +0 -1
  147. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/prompts.cjs +0 -30
  148. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/prompts.cjs.map +0 -1
  149. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/prompts.js +0 -30
  150. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/prompts.js.map +0 -1
  151. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/warn.cjs +0 -13
  152. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/warn.cjs.map +0 -1
  153. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/warn.js +0 -12
  154. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/dist/utils/warn.js.map +0 -1
  155. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/index.cjs +0 -6
  156. package/dist/node_modules/.pnpm/langsmith@0.3.74_@opentelemetry_api@1.9.0_openai@5.12.2_ws@8.18.3_bufferutil@4.0.9_utf-8-validate@6.0.5__zod@3.25.76_/node_modules/langsmith/index.js +0 -6
  157. package/dist/node_modules/.pnpm/p-finally@1.0.0/node_modules/p-finally/index.cjs +0 -27
  158. package/dist/node_modules/.pnpm/p-finally@1.0.0/node_modules/p-finally/index.cjs.map +0 -1
  159. package/dist/node_modules/.pnpm/p-finally@1.0.0/node_modules/p-finally/index.js +0 -23
  160. package/dist/node_modules/.pnpm/p-finally@1.0.0/node_modules/p-finally/index.js.map +0 -1
  161. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/index.cjs +0 -267
  162. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/index.cjs.map +0 -1
  163. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/index.js +0 -263
  164. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/index.js.map +0 -1
  165. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/lower-bound.cjs +0 -32
  166. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/lower-bound.cjs.map +0 -1
  167. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/lower-bound.js +0 -28
  168. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/lower-bound.js.map +0 -1
  169. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/priority-queue.cjs +0 -49
  170. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/priority-queue.cjs.map +0 -1
  171. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/priority-queue.js +0 -45
  172. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/priority-queue.js.map +0 -1
  173. package/dist/node_modules/.pnpm/p-retry@4.6.2/node_modules/p-retry/index.cjs +0 -83
  174. package/dist/node_modules/.pnpm/p-retry@4.6.2/node_modules/p-retry/index.cjs.map +0 -1
  175. package/dist/node_modules/.pnpm/p-retry@4.6.2/node_modules/p-retry/index.js +0 -79
  176. package/dist/node_modules/.pnpm/p-retry@4.6.2/node_modules/p-retry/index.js.map +0 -1
  177. package/dist/node_modules/.pnpm/p-timeout@3.2.0/node_modules/p-timeout/index.cjs +0 -52
  178. package/dist/node_modules/.pnpm/p-timeout@3.2.0/node_modules/p-timeout/index.cjs.map +0 -1
  179. package/dist/node_modules/.pnpm/p-timeout@3.2.0/node_modules/p-timeout/index.js +0 -48
  180. package/dist/node_modules/.pnpm/p-timeout@3.2.0/node_modules/p-timeout/index.js.map +0 -1
  181. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/index.cjs +0 -16
  182. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/index.cjs.map +0 -1
  183. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/index.js +0 -13
  184. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/index.js.map +0 -1
  185. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry.cjs +0 -77
  186. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry.cjs.map +0 -1
  187. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry.js +0 -74
  188. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry.js.map +0 -1
  189. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry_operation.cjs +0 -115
  190. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry_operation.cjs.map +0 -1
  191. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry_operation.js +0 -112
  192. package/dist/node_modules/.pnpm/retry@0.13.1/node_modules/retry/lib/retry_operation.js.map +0 -1
  193. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/comparator.cjs +0 -90
  194. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/comparator.cjs.map +0 -1
  195. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/comparator.js +0 -86
  196. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/comparator.js.map +0 -1
  197. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/range.cjs +0 -294
  198. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/range.cjs.map +0 -1
  199. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/range.js +0 -290
  200. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/range.js.map +0 -1
  201. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/semver.cjs +0 -191
  202. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/semver.cjs.map +0 -1
  203. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/semver.js +0 -187
  204. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/classes/semver.js.map +0 -1
  205. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/clean.cjs +0 -24
  206. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/clean.cjs.map +0 -1
  207. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/clean.js +0 -20
  208. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/clean.js.map +0 -1
  209. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/cmp.cjs +0 -51
  210. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/cmp.cjs.map +0 -1
  211. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/cmp.js +0 -47
  212. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/cmp.js.map +0 -1
  213. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/coerce.cjs +0 -48
  214. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/coerce.cjs.map +0 -1
  215. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/coerce.js +0 -44
  216. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/coerce.js.map +0 -1
  217. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-build.cjs +0 -25
  218. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-build.cjs.map +0 -1
  219. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-build.js +0 -21
  220. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-build.js.map +0 -1
  221. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-loose.cjs +0 -21
  222. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-loose.cjs.map +0 -1
  223. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-loose.js +0 -17
  224. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare-loose.js.map +0 -1
  225. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare.cjs +0 -21
  226. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare.cjs.map +0 -1
  227. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare.js +0 -17
  228. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/compare.js.map +0 -1
  229. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/diff.cjs +0 -43
  230. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/diff.cjs.map +0 -1
  231. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/diff.js +0 -39
  232. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/diff.js.map +0 -1
  233. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/eq.cjs +0 -21
  234. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/eq.cjs.map +0 -1
  235. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/eq.js +0 -17
  236. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/eq.js.map +0 -1
  237. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gt.cjs +0 -21
  238. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gt.cjs.map +0 -1
  239. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gt.js +0 -17
  240. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gt.js.map +0 -1
  241. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gte.cjs +0 -21
  242. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gte.cjs.map +0 -1
  243. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gte.js +0 -17
  244. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/gte.js.map +0 -1
  245. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/inc.cjs +0 -32
  246. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/inc.cjs.map +0 -1
  247. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/inc.js +0 -28
  248. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/inc.js.map +0 -1
  249. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lt.cjs +0 -21
  250. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lt.cjs.map +0 -1
  251. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lt.js +0 -17
  252. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lt.js.map +0 -1
  253. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lte.cjs +0 -21
  254. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lte.cjs.map +0 -1
  255. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lte.js +0 -17
  256. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/lte.js.map +0 -1
  257. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/major.cjs +0 -21
  258. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/major.cjs.map +0 -1
  259. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/major.js +0 -17
  260. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/major.js.map +0 -1
  261. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/minor.cjs +0 -21
  262. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/minor.cjs.map +0 -1
  263. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/minor.js +0 -17
  264. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/minor.js.map +0 -1
  265. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/neq.cjs +0 -21
  266. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/neq.cjs.map +0 -1
  267. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/neq.js +0 -17
  268. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/neq.js.map +0 -1
  269. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/parse.cjs +0 -29
  270. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/parse.cjs.map +0 -1
  271. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/parse.js +0 -25
  272. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/parse.js.map +0 -1
  273. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/patch.cjs +0 -21
  274. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/patch.cjs.map +0 -1
  275. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/patch.js +0 -17
  276. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/patch.js.map +0 -1
  277. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/prerelease.cjs +0 -24
  278. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/prerelease.cjs.map +0 -1
  279. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/prerelease.js +0 -20
  280. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/prerelease.js.map +0 -1
  281. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rcompare.cjs +0 -21
  282. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rcompare.cjs.map +0 -1
  283. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rcompare.js +0 -17
  284. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rcompare.js.map +0 -1
  285. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rsort.cjs +0 -21
  286. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rsort.cjs.map +0 -1
  287. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rsort.js +0 -17
  288. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/rsort.js.map +0 -1
  289. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/satisfies.cjs +0 -28
  290. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/satisfies.cjs.map +0 -1
  291. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/satisfies.js +0 -24
  292. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/satisfies.js.map +0 -1
  293. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/sort.cjs +0 -21
  294. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/sort.cjs.map +0 -1
  295. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/sort.js +0 -17
  296. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/sort.js.map +0 -1
  297. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/valid.cjs +0 -24
  298. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/valid.cjs.map +0 -1
  299. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/valid.js +0 -20
  300. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/functions/valid.js.map +0 -1
  301. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/index.cjs +0 -146
  302. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/index.cjs.map +0 -1
  303. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/index.js +0 -142
  304. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/index.js.map +0 -1
  305. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/constants.cjs +0 -41
  306. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/constants.cjs.map +0 -1
  307. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/constants.js +0 -37
  308. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/constants.js.map +0 -1
  309. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/debug.cjs +0 -19
  310. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/debug.cjs.map +0 -1
  311. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/debug.js +0 -15
  312. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/debug.js.map +0 -1
  313. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/identifiers.cjs +0 -32
  314. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/identifiers.cjs.map +0 -1
  315. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/identifiers.js +0 -28
  316. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/identifiers.js.map +0 -1
  317. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/lrucache.cjs +0 -47
  318. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/lrucache.cjs.map +0 -1
  319. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/lrucache.js +0 -43
  320. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/lrucache.js.map +0 -1
  321. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/parse-options.cjs +0 -25
  322. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/parse-options.cjs.map +0 -1
  323. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/parse-options.js +0 -21
  324. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/parse-options.js.map +0 -1
  325. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/re.cjs +0 -94
  326. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/re.cjs.map +0 -1
  327. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/re.js +0 -90
  328. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/internal/re.js.map +0 -1
  329. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/gtr.cjs +0 -21
  330. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/gtr.cjs.map +0 -1
  331. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/gtr.js +0 -17
  332. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/gtr.js.map +0 -1
  333. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/intersects.cjs +0 -25
  334. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/intersects.cjs.map +0 -1
  335. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/intersects.js +0 -21
  336. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/intersects.js.map +0 -1
  337. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/ltr.cjs +0 -21
  338. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/ltr.cjs.map +0 -1
  339. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/ltr.js +0 -17
  340. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/ltr.js.map +0 -1
  341. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/max-satisfying.cjs +0 -41
  342. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/max-satisfying.cjs.map +0 -1
  343. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/max-satisfying.js +0 -37
  344. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/max-satisfying.js.map +0 -1
  345. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-satisfying.cjs +0 -41
  346. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-satisfying.cjs.map +0 -1
  347. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-satisfying.js +0 -37
  348. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-satisfying.js.map +0 -1
  349. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-version.cjs +0 -55
  350. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-version.cjs.map +0 -1
  351. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-version.js +0 -51
  352. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/min-version.js.map +0 -1
  353. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/outside.cjs +0 -74
  354. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/outside.cjs.map +0 -1
  355. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/outside.js +0 -70
  356. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/outside.js.map +0 -1
  357. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/simplify.cjs +0 -48
  358. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/simplify.cjs.map +0 -1
  359. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/simplify.js +0 -44
  360. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/simplify.js.map +0 -1
  361. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/subset.cjs +0 -112
  362. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/subset.cjs.map +0 -1
  363. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/subset.js +0 -108
  364. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/subset.js.map +0 -1
  365. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/to-comparators.cjs +0 -21
  366. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/to-comparators.cjs.map +0 -1
  367. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/to-comparators.js +0 -17
  368. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/to-comparators.js.map +0 -1
  369. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/valid.cjs +0 -27
  370. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/valid.cjs.map +0 -1
  371. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/valid.js +0 -23
  372. package/dist/node_modules/.pnpm/semver@7.7.2/node_modules/semver/ranges/valid.js.map +0 -1
@@ -2,11 +2,13 @@ import { initChatModel } from "../../chat_models/universal.js";
  import { hasToolCalls } from "../utils.js";
  import { countTokensApproximately } from "./utils.js";
  import { createMiddleware } from "../middleware.js";
- import { RemoveMessage, SystemMessage, ToolMessage, trimMessages } from "@langchain/core/messages";
+ import { AIMessage, HumanMessage, RemoveMessage, SystemMessage, ToolMessage, trimMessages } from "@langchain/core/messages";
  import { REMOVE_ALL_MESSAGES } from "@langchain/langgraph";
  import { z } from "zod/v3";
- import { interopParse } from "@langchain/core/utils/types";
+ import { interopSafeParse } from "@langchain/core/utils/types";
+ import { z as z$1 } from "zod/v4";
  import { v4 } from "uuid";
+ import { getModelContextSize } from "@langchain/core/language_models/base";

  //#region src/agents/middleware/summarization.ts
  const DEFAULT_SUMMARY_PROMPT = `<role>
@@ -36,20 +38,68 @@ Respond ONLY with the extracted context. Do not include any additional informati
  Messages to summarize:
  {messages}
  </messages>`;
- const SUMMARY_PREFIX = "## Previous conversation summary:";
  const DEFAULT_MESSAGES_TO_KEEP = 20;
  const DEFAULT_TRIM_TOKEN_LIMIT = 4e3;
  const DEFAULT_FALLBACK_MESSAGE_COUNT = 15;
  const SEARCH_RANGE_FOR_TOOL_PAIRS = 5;
+ const tokenCounterSchema = z.function().args(z.array(z.custom())).returns(z.union([z.number(), z.promise(z.number())]));
+ const contextSizeSchema = z.object({
+ fraction: z.number().gt(0, "Fraction must be greater than 0").max(1, "Fraction must be less than or equal to 1").optional(),
+ tokens: z.number().positive("Tokens must be greater than 0").optional(),
+ messages: z.number().int("Messages must be an integer").positive("Messages must be greater than 0").optional()
+ }).refine((data) => {
+ const count = [
+ data.fraction,
+ data.tokens,
+ data.messages
+ ].filter((v) => v !== void 0).length;
+ return count >= 1;
+ }, { message: "At least one of fraction, tokens, or messages must be provided" });
+ const keepSchema = z.object({
+ fraction: z.number().gt(0, "Fraction must be greater than 0").max(1, "Fraction must be less than or equal to 1").optional(),
+ tokens: z.number().positive("Tokens must be greater than 0").optional(),
+ messages: z.number().int("Messages must be an integer").positive("Messages must be greater than 0").optional()
+ }).refine((data) => {
+ const count = [
+ data.fraction,
+ data.tokens,
+ data.messages
+ ].filter((v) => v !== void 0).length;
+ return count === 1;
+ }, { message: "Exactly one of fraction, tokens, or messages must be provided" });
  const contextSchema = z.object({
  model: z.custom(),
- maxTokensBeforeSummary: z.number().optional(),
- messagesToKeep: z.number().default(DEFAULT_MESSAGES_TO_KEEP),
- tokenCounter: z.function().args(z.array(z.any())).returns(z.union([z.number(), z.promise(z.number())])).optional(),
+ trigger: z.union([contextSizeSchema, z.array(contextSizeSchema)]).optional(),
+ keep: keepSchema.optional(),
+ tokenCounter: tokenCounterSchema.optional(),
  summaryPrompt: z.string().default(DEFAULT_SUMMARY_PROMPT),
- summaryPrefix: z.string().default(SUMMARY_PREFIX)
+ trimTokensToSummarize: z.number().optional(),
+ summaryPrefix: z.string().optional(),
+ maxTokensBeforeSummary: z.number().optional(),
+ messagesToKeep: z.number().optional()
  });
  /**
+ * Get max input tokens from model profile or fallback to model name lookup
+ */
+ function getProfileLimits(model) {
+ try {
+ /**
+ * Try to access profile property (for future compatibility with model-profiles)
+ */
+ const modelWithProfile = model;
+ if (modelWithProfile.profile && typeof modelWithProfile.profile.max_input_tokens === "number") return modelWithProfile.profile.max_input_tokens;
+ } catch {}
+ /**
+ * Fallback: try to get model name and use getModelContextSize
+ */
+ try {
+ const modelWithName = model;
+ const modelName = modelWithName.model || modelWithName.modelName;
+ if (typeof modelName === "string") return getModelContextSize(modelName);
+ } catch {}
+ return void 0;
+ }
+ /**
  * Summarization middleware that automatically summarizes conversation history when token limits are approached.
  *
  * This middleware monitors message token counts and automatically summarizes older
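
Note on the schemas above: contextSizeSchema (used for trigger) accepts any non-empty combination of fraction, tokens, and messages, while keepSchema requires exactly one of them, and fractional limits work only when getProfileLimits() can resolve a maximum input token count for the model. A minimal usage sketch implied by these schemas; the ChatOpenAI setup is illustrative, only the trigger/keep shapes come from this diff:

import { summarizationMiddleware } from "langchain";
import { ChatOpenAI } from "@langchain/openai";

// Valid: a trigger condition may combine several limits (they are ANDed together).
const middleware = summarizationMiddleware({
  model: new ChatOpenAI({ model: "gpt-4o" }),
  trigger: { tokens: 4000, messages: 10 }, // any non-empty subset of fraction/tokens/messages
  keep: { messages: 20 },                  // exactly one of fraction/tokens/messages
});

// Rejected by keepSchema ("Exactly one of fraction, tokens, or messages must be provided"):
// summarizationMiddleware({ model: "openai:gpt-4o", keep: { tokens: 2000, messages: 20 } });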
@@ -64,14 +114,31 @@ const contextSchema = z.object({
  * import { summarizationMiddleware } from "langchain";
  * import { createAgent } from "langchain";
  *
- * const agent = createAgent({
+ * // Single condition: trigger if tokens >= 4000 AND messages >= 10
+ * const agent1 = createAgent({
+ * llm: model,
+ * tools: [getWeather],
+ * middleware: [
+ * summarizationMiddleware({
+ * model: new ChatOpenAI({ model: "gpt-4o" }),
+ * trigger: { tokens: 4000, messages: 10 },
+ * keep: { messages: 20 },
+ * })
+ * ],
+ * });
+ *
+ * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)
+ * const agent2 = createAgent({
  * llm: model,
  * tools: [getWeather],
  * middleware: [
  * summarizationMiddleware({
  * model: new ChatOpenAI({ model: "gpt-4o" }),
- * maxTokensBeforeSummary: 4000,
- * messagesToKeep: 20,
+ * trigger: [
+ * { tokens: 5000, messages: 3 },
+ * { tokens: 3000, messages: 6 },
+ * ],
+ * keep: { messages: 20 },
  * })
  * ],
  * });
@@ -79,40 +146,80 @@ const contextSchema = z.object({
  * ```
  */
  function summarizationMiddleware(options) {
+ /**
+ * Parse user options to get their explicit values
+ */
+ const { data: userOptions, error } = interopSafeParse(contextSchema, options);
+ if (error) throw new Error(`Invalid summarization middleware options: ${z$1.prettifyError(error)}`);
  return createMiddleware({
  name: "SummarizationMiddleware",
  contextSchema: contextSchema.extend({ model: z.custom().optional() }),
  beforeModel: async (state, runtime) => {
+ let trigger = userOptions.trigger;
+ let keep = userOptions.keep;
+ /**
+ * Handle deprecated parameters
+ */
+ if (userOptions.maxTokensBeforeSummary !== void 0) {
+ console.warn("maxTokensBeforeSummary is deprecated. Use `trigger: { tokens: value }` instead.");
+ if (trigger === void 0) trigger = { tokens: userOptions.maxTokensBeforeSummary };
+ }
+ /**
+ * Handle deprecated parameters
+ */
+ if (userOptions.messagesToKeep !== void 0) {
+ console.warn("messagesToKeep is deprecated. Use `keep: { messages: value }` instead.");
+ if (!keep || keep && "messages" in keep && keep.messages === DEFAULT_MESSAGES_TO_KEEP) keep = { messages: userOptions.messagesToKeep };
+ }
+ /**
+ * Merge context with user options
+ */
+ const resolvedTrigger = runtime.context.trigger !== void 0 ? runtime.context.trigger : trigger;
+ const resolvedKeep = runtime.context.keep !== void 0 ? runtime.context.keep : keep ?? { messages: DEFAULT_MESSAGES_TO_KEEP };
+ const validatedKeep = keepSchema.parse(resolvedKeep);
+ /**
+ * Validate trigger conditions
+ */
+ let triggerConditions = [];
+ if (resolvedTrigger === void 0) triggerConditions = [];
+ else if (Array.isArray(resolvedTrigger))
+ /**
+ * It's an array of ContextSize objects
+ */
+ triggerConditions = resolvedTrigger.map((t) => contextSizeSchema.parse(t));
+ else
+ /**
+ * Single ContextSize object - all properties must be satisfied (AND logic)
+ */
+ triggerConditions = [contextSizeSchema.parse(resolvedTrigger)];
  /**
- * Parse user options to get their explicit values
+ * Check if profile is required
  */
- const userOptions = interopParse(contextSchema, options);
+ const requiresProfile = triggerConditions.some((c) => "fraction" in c) || "fraction" in validatedKeep;
+ const model = typeof userOptions.model === "string" ? await initChatModel(userOptions.model) : userOptions.model;
+ if (requiresProfile && !getProfileLimits(model)) throw new Error("Model profile information is required to use fractional token limits. Use absolute token counts instead.");
+ const summaryPrompt = runtime.context.summaryPrompt === DEFAULT_SUMMARY_PROMPT ? userOptions.summaryPrompt ?? DEFAULT_SUMMARY_PROMPT : runtime.context.summaryPrompt ?? userOptions.summaryPrompt ?? DEFAULT_SUMMARY_PROMPT;
+ const trimTokensToSummarize = runtime.context.trimTokensToSummarize !== void 0 ? runtime.context.trimTokensToSummarize : userOptions.trimTokensToSummarize ?? DEFAULT_TRIM_TOKEN_LIMIT;
  /**
- * Merge context with user options, preferring user options when context has default values
+ * Ensure all messages have IDs
  */
- const config = {
- model: userOptions.model,
- maxTokensBeforeSummary: runtime.context.maxTokensBeforeSummary !== void 0 ? runtime.context.maxTokensBeforeSummary : userOptions.maxTokensBeforeSummary,
- messagesToKeep: runtime.context.messagesToKeep === DEFAULT_MESSAGES_TO_KEEP ? userOptions.messagesToKeep : runtime.context.messagesToKeep ?? userOptions.messagesToKeep,
- tokenCounter: runtime.context.tokenCounter !== void 0 ? runtime.context.tokenCounter : userOptions.tokenCounter,
- summaryPrompt: runtime.context.summaryPrompt === DEFAULT_SUMMARY_PROMPT ? userOptions.summaryPrompt : runtime.context.summaryPrompt ?? userOptions.summaryPrompt,
- summaryPrefix: runtime.context.summaryPrefix === SUMMARY_PREFIX ? userOptions.summaryPrefix : runtime.context.summaryPrefix ?? userOptions.summaryPrefix
- };
- const { messages } = state;
- const model = typeof config.model === "string" ? await initChatModel(config.model) : config.model;
- ensureMessageIds(messages);
- const tokenCounter = config.tokenCounter || countTokensApproximately;
- const totalTokens = await tokenCounter(messages);
- if (config.maxTokensBeforeSummary == null || totalTokens < config.maxTokensBeforeSummary) return;
- const { systemPrompt, conversationMessages } = splitSystemMessage(messages);
- const cutoffIndex = findSafeCutoff(conversationMessages, config.messagesToKeep);
+ ensureMessageIds(state.messages);
+ const tokenCounter = runtime.context.tokenCounter !== void 0 ? runtime.context.tokenCounter : userOptions.tokenCounter ?? countTokensApproximately;
+ const totalTokens = await tokenCounter(state.messages);
+ const doSummarize = await shouldSummarize(state.messages, totalTokens, triggerConditions, model);
+ if (!doSummarize) return;
+ const { systemPrompt, conversationMessages } = splitSystemMessage(state.messages);
+ const cutoffIndex = await determineCutoffIndex(conversationMessages, validatedKeep, tokenCounter, model);
  if (cutoffIndex <= 0) return;
  const { messagesToSummarize, preservedMessages } = partitionMessages(systemPrompt, conversationMessages, cutoffIndex);
- const summary = await createSummary(messagesToSummarize, model, config.summaryPrompt, tokenCounter);
- const updatedSystemMessage = buildUpdatedSystemMessage(systemPrompt, summary, config.summaryPrefix);
+ const summary = await createSummary(messagesToSummarize, model, summaryPrompt, tokenCounter, trimTokensToSummarize);
+ const summaryMessage = new HumanMessage({
+ content: `Here is a summary of the conversation to date:\n\n${summary}`,
+ id: v4()
+ });
  return { messages: [
  new RemoveMessage({ id: REMOVE_ALL_MESSAGES }),
- updatedSystemMessage,
+ summaryMessage,
  ...preservedMessages
  ] };
  }
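
The rewritten beforeModel above keeps the deprecated maxTokensBeforeSummary and messagesToKeep options working: each one triggers a console.warn notice and is mapped onto the new trigger/keep options, and the summary is now injected as a HumanMessage ("Here is a summary of the conversation to date: ...") rather than being folded into the system message. A migration sketch based on that mapping; the model construction is illustrative:

import { summarizationMiddleware } from "langchain";
import { ChatOpenAI } from "@langchain/openai";

const model = new ChatOpenAI({ model: "gpt-4o" });

// 1.0.3-style options: still accepted in 1.0.4, but each one emits a deprecation warning.
const legacy = summarizationMiddleware({
  model,
  maxTokensBeforeSummary: 4000, // internally becomes trigger: { tokens: 4000 }
  messagesToKeep: 20,           // internally becomes keep: { messages: 20 }
});

// Equivalent 1.0.4-style options.
const current = summarizationMiddleware({
  model,
  trigger: { tokens: 4000 },
  keep: { messages: 20 },
});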
@@ -132,10 +239,7 @@ function splitSystemMessage(messages) {
  systemPrompt: messages[0],
  conversationMessages: messages.slice(1)
  };
- return {
- systemPrompt: null,
- conversationMessages: messages
- };
+ return { conversationMessages: messages };
  }
  /**
  * Partition messages into those to summarize and those to preserve
@@ -150,19 +254,112 @@ function partitionMessages(systemPrompt, conversationMessages, cutoffIndex) {
  };
  }
  /**
- * Build updated system message incorporating the summary
+ * Determine whether summarization should run for the current token usage
+ *
+ * @param messages - Current messages in the conversation
+ * @param totalTokens - Total token count for all messages
+ * @param triggerConditions - Array of trigger conditions. Returns true if ANY condition is satisfied (OR logic).
+ * Within each condition, ALL specified properties must be satisfied (AND logic).
+ * @param model - The language model being used
+ * @returns true if summarization should be triggered
  */
- function buildUpdatedSystemMessage(originalSystemMessage, summary, summaryPrefix) {
- let originalContent = "";
- if (originalSystemMessage) {
- const { content: content$1 } = originalSystemMessage;
- if (typeof content$1 === "string") originalContent = content$1.split(summaryPrefix)[0].trim();
+ async function shouldSummarize(messages, totalTokens, triggerConditions, model) {
+ if (triggerConditions.length === 0) return false;
+ /**
+ * Check each condition (OR logic between conditions)
+ */
+ for (const trigger of triggerConditions) {
+ /**
+ * Within a single condition, all specified properties must be satisfied (AND logic)
+ */
+ let conditionMet = true;
+ let hasAnyProperty = false;
+ if (trigger.messages !== void 0) {
+ hasAnyProperty = true;
+ if (messages.length < trigger.messages) conditionMet = false;
+ }
+ if (trigger.tokens !== void 0) {
+ hasAnyProperty = true;
+ if (totalTokens < trigger.tokens) conditionMet = false;
+ }
+ if (trigger.fraction !== void 0) {
+ hasAnyProperty = true;
+ const maxInputTokens = getProfileLimits(model);
+ if (typeof maxInputTokens === "number") {
+ const threshold = Math.floor(maxInputTokens * trigger.fraction);
+ if (totalTokens < threshold) conditionMet = false;
+ } else
+ /**
+ * If fraction is specified but we can't get model limits, skip this condition
+ */
+ conditionMet = false;
+ }
+ /**
+ * If condition has at least one property and all properties are satisfied, trigger summarization
+ */
+ if (hasAnyProperty && conditionMet) return true;
  }
- const content = originalContent ? `${originalContent}\n${summaryPrefix}\n${summary}` : `${summaryPrefix}\n${summary}`;
- return new SystemMessage({
- content,
- id: originalSystemMessage?.id || v4()
- });
+ return false;
+ }
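As the JSDoc above states, `shouldSummarize` applies OR logic across trigger conditions and AND logic within a single condition. The embedded JSDoc in the updated source illustrates the array form; a condensed sketch with placeholder numbers and an illustrative model id string (the string form is routed through `initChatModel` per the source):

```ts
import { summarizationMiddleware } from "langchain";

// Trigger when (tokens >= 5000 AND messages >= 3)
//           OR (tokens >= 3000 AND messages >= 6).
// A condition using `fraction` is skipped if the model's max input tokens
// cannot be resolved.
summarizationMiddleware({
  model: "openai:gpt-4o",
  trigger: [
    { tokens: 5000, messages: 3 },
    { tokens: 3000, messages: 6 },
  ],
  keep: { messages: 20 },
});
```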
+ /**
+ * Determine cutoff index respecting retention configuration
+ */
+ async function determineCutoffIndex(messages, keep, tokenCounter, model) {
+ if ("tokens" in keep || "fraction" in keep) {
+ const tokenBasedCutoff = await findTokenBasedCutoff(messages, keep, tokenCounter, model);
+ if (typeof tokenBasedCutoff === "number") return tokenBasedCutoff;
+ /**
+ * Fallback to message count if token-based fails
+ */
+ return findSafeCutoff(messages, DEFAULT_MESSAGES_TO_KEEP);
+ }
+ /**
+ * find cutoff index based on message count
+ */
+ return findSafeCutoff(messages, keep.messages ?? DEFAULT_MESSAGES_TO_KEEP);
+ }
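`determineCutoffIndex` routes token- and fraction-based retention through `findTokenBasedCutoff` (falling back to the default message count if that fails) and plain message counts through `findSafeCutoff`. Per the updated schema, `keep` accepts exactly one of three shapes; a hedged sketch with placeholder values and an illustrative model id string:

```ts
import { summarizationMiddleware } from "langchain";

// Exactly one of messages / tokens / fraction may be set on `keep`.
// Fractional values require the model's max input tokens to be discoverable
// (via a profile or a known model name); otherwise the middleware throws.
const keepByMessages = summarizationMiddleware({
  model: "openai:gpt-4o",
  trigger: { tokens: 4000 },
  keep: { messages: 20 },
});
const keepByTokens = summarizationMiddleware({
  model: "openai:gpt-4o",
  trigger: { tokens: 4000 },
  keep: { tokens: 3000 },
});
const keepByFraction = summarizationMiddleware({
  model: "openai:gpt-4o",
  trigger: { fraction: 0.8 },
  keep: { fraction: 0.25 },
});
```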
+ /**
+ * Find cutoff index based on target token retention
+ */
+ async function findTokenBasedCutoff(messages, keep, tokenCounter, model) {
+ if (messages.length === 0) return 0;
+ let targetTokenCount;
+ if ("fraction" in keep && keep.fraction !== void 0) {
+ const maxInputTokens = getProfileLimits(model);
+ if (typeof maxInputTokens !== "number") return;
+ targetTokenCount = Math.floor(maxInputTokens * keep.fraction);
+ } else if ("tokens" in keep && keep.tokens !== void 0) targetTokenCount = Math.floor(keep.tokens);
+ else return;
+ if (targetTokenCount <= 0) targetTokenCount = 1;
+ const totalTokens = await tokenCounter(messages);
+ if (totalTokens <= targetTokenCount) return 0;
+ /**
+ * Use binary search to identify the earliest message index that keeps the
+ * suffix within the token budget.
+ */
+ let left = 0;
+ let right = messages.length;
+ let cutoffCandidate = messages.length;
+ const maxIterations = Math.floor(Math.log2(messages.length)) + 1;
+ for (let i = 0; i < maxIterations; i++) {
+ if (left >= right) break;
+ const mid = Math.floor((left + right) / 2);
+ const suffixTokens = await tokenCounter(messages.slice(mid));
+ if (suffixTokens <= targetTokenCount) {
+ cutoffCandidate = mid;
+ right = mid;
+ } else left = mid + 1;
+ }
+ if (cutoffCandidate === messages.length) cutoffCandidate = left;
+ if (cutoffCandidate >= messages.length) {
+ if (messages.length === 1) return 0;
+ cutoffCandidate = messages.length - 1;
+ }
+ /**
+ * Find safe cutoff point that preserves tool pairs
+ */
+ for (let i = cutoffCandidate; i >= 0; i--) if (isSafeCutoffPoint(messages, i)) return i;
+ return 0;
  }
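`findTokenBasedCutoff` binary-searches for the earliest message index whose suffix fits the retention budget, then walks backward to a tool-pair-safe index. The search is valid because prepending messages to a suffix can only increase its token count. A standalone sketch of the same idea over plain per-message costs (not the library code, names are hypothetical):

```ts
// Given non-negative per-message token costs, find the earliest index i such
// that the suffix costs[i..] fits within `budget`.
function earliestSuffixWithinBudget(costs: number[], budget: number): number {
  const suffixCost = (from: number) =>
    costs.slice(from).reduce((sum, c) => sum + c, 0);

  let left = 0;
  let right = costs.length;
  let candidate = costs.length;
  while (left < right) {
    const mid = Math.floor((left + right) / 2);
    if (suffixCost(mid) <= budget) {
      candidate = mid; // this suffix fits; try to start even earlier
      right = mid;
    } else {
      left = mid + 1; // too expensive; move the start forward
    }
  }
  return candidate === costs.length ? left : candidate;
}

// Example: costs [500, 400, 300, 200, 100] with a 600-token budget yield
// index 2, since messages 2..4 cost exactly 600 tokens and adding message 1
// would exceed the budget.
console.log(earliestSuffixWithinBudget([500, 400, 300, 200, 100], 600)); // 2
```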
  /**
  * Find safe cutoff point that preserves AI/Tool message pairs
@@ -178,6 +375,10 @@ function findSafeCutoff(messages, messagesToKeep) {
  */
  function isSafeCutoffPoint(messages, cutoffIndex) {
  if (cutoffIndex >= messages.length) return true;
+ /**
+ * Prevent preserved messages from starting with AI message containing tool calls
+ */
+ if (cutoffIndex < messages.length && AIMessage.isInstance(messages[cutoffIndex]) && hasToolCalls(messages[cutoffIndex])) return false;
  const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);
  const searchEnd = Math.min(messages.length, cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS);
  for (let i = searchStart; i < searchEnd; i++) {
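The new guard in `isSafeCutoffPoint`, in addition to the existing tool-pair scan, rejects any cutoff whose preserved slice would begin with a tool-calling `AIMessage`. A hypothetical history built with the real message classes, to illustrate which cutoffs survive (the tool name and contents are made up for the example):

```ts
import { AIMessage, HumanMessage, ToolMessage } from "@langchain/core/messages";

// Hypothetical four-message history (indices in comments):
const history = [
  new HumanMessage("What's the weather in Paris?"), // 0
  new AIMessage({
    content: "",
    tool_calls: [{ id: "call_1", name: "getWeather", args: { city: "Paris" } }],
  }), // 1
  new ToolMessage({ content: "18°C and sunny", tool_call_id: "call_1" }), // 2
  new AIMessage("It's 18°C and sunny in Paris."), // 3
];

// Cutoff 1 is rejected by the new guard: the preserved slice would start with
// the tool-calling AIMessage. Cutoff 2 is rejected by the existing scan: it
// separates the AIMessage at index 1 from its ToolMessage at index 2.
// Cutoffs 0 and 3 remain safe.
```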
@@ -215,15 +416,30 @@ function cutoffSeparatesToolPair(messages, aiMessageIndex, cutoffIndex, toolCall
  /**
  * Generate summary for the given messages
  */
- async function createSummary(messagesToSummarize, model, summaryPrompt, tokenCounter) {
+ async function createSummary(messagesToSummarize, model, summaryPrompt, tokenCounter, trimTokensToSummarize) {
  if (!messagesToSummarize.length) return "No previous conversation history.";
- const trimmedMessages = await trimMessagesForSummary(messagesToSummarize, tokenCounter);
+ const trimmedMessages = await trimMessagesForSummary(messagesToSummarize, tokenCounter, trimTokensToSummarize);
  if (!trimmedMessages.length) return "Previous conversation was too long to summarize.";
  try {
  const formattedPrompt = summaryPrompt.replace("{messages}", JSON.stringify(trimmedMessages, null, 2));
  const response = await model.invoke(formattedPrompt);
- const { content } = response;
- return typeof content === "string" ? content.trim() : "Error generating summary: Invalid response format";
+ const content = response.content;
+ /**
+ * Handle both string content and MessageContent array
+ */
+ if (typeof content === "string") return content.trim();
+ else if (Array.isArray(content)) {
+ /**
+ * Extract text from MessageContent array
+ */
+ const textContent = content.map((item) => {
+ if (typeof item === "string") return item;
+ if (typeof item === "object" && item !== null && "text" in item) return item.text;
+ return "";
+ }).join("");
+ return textContent.trim();
+ }
+ return "Error generating summary: Invalid response format";
  } catch (e) {
  return `Error generating summary: ${e}`;
  }
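`createSummary` now normalizes both plain-string responses and `MessageContent` arrays. The same normalization, factored into a standalone helper for illustration (a sketch, not an export of this package):

```ts
import type { MessageContent } from "@langchain/core/messages";

// Concatenate text blocks from a model response; anything that is neither a
// string nor an object with a `text` field is ignored.
function contentToText(content: MessageContent): string {
  if (typeof content === "string") return content.trim();
  return content
    .map((block) => {
      if (typeof block === "string") return block;
      if (typeof block === "object" && block !== null && "text" in block) {
        return (block as { text: string }).text;
      }
      return "";
    })
    .join("")
    .trim();
}
```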
@@ -231,16 +447,20 @@ async function createSummary(messagesToSummarize, model, summaryPrompt, tokenCou
  /**
  * Trim messages to fit within summary generation limits
  */
- async function trimMessagesForSummary(messages, tokenCounter) {
+ async function trimMessagesForSummary(messages, tokenCounter, trimTokensToSummarize) {
+ if (trimTokensToSummarize === void 0) return messages;
  try {
  return await trimMessages(messages, {
- maxTokens: DEFAULT_TRIM_TOKEN_LIMIT,
- tokenCounter: async (msgs) => Promise.resolve(tokenCounter(msgs)),
+ maxTokens: trimTokensToSummarize,
+ tokenCounter: async (msgs) => tokenCounter(msgs),
  strategy: "last",
  allowPartial: true,
  includeSystem: true
  });
  } catch {
+ /**
+ * Fallback to last N messages if trimming fails
+ */
  return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);
  }
  }
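`trimMessagesForSummary` now only trims when `trimTokensToSummarize` resolves to a number; per the updated source in the map below, the `beforeModel` hook falls back to the previous 4000-token default when the option is omitted. A hedged configuration sketch with placeholder values and an illustrative model id string:

```ts
import { summarizationMiddleware } from "langchain";

// trimTokensToSummarize bounds how much history is handed to the summary call
// itself, independent of the trigger and keep settings.
const middleware = summarizationMiddleware({
  model: "openai:gpt-4o",
  trigger: { tokens: 8000 },
  keep: { messages: 20 },
  trimTokensToSummarize: 4000,
});
```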
@@ -1 +1 @@
- {"version":3,"file":"summarization.js","names":["options: SummarizationMiddlewareConfig","messages: BaseMessage[]","uuid","systemPrompt: SystemMessage | null","conversationMessages: BaseMessage[]","cutoffIndex: number","originalSystemMessage: SystemMessage | null","summary: string","summaryPrefix: string","content","messagesToKeep: number","aiMessage: AIMessage","aiMessageIndex: number","toolCallIds: Set<string>","messagesToSummarize: BaseMessage[]","model: BaseLanguageModel","summaryPrompt: string","tokenCounter: TokenCounter"],"sources":["../../../src/agents/middleware/summarization.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { v4 as uuid } from \"uuid\";\nimport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n ToolMessage,\n RemoveMessage,\n trimMessages,\n} from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport {\n interopParse,\n InferInteropZodOutput,\n InferInteropZodInput,\n} from \"@langchain/core/utils/types\";\nimport { REMOVE_ALL_MESSAGES } from \"@langchain/langgraph\";\nimport { createMiddleware } from \"../middleware.js\";\nimport { countTokensApproximately } from \"./utils.js\";\nimport { hasToolCalls } from \"../utils.js\";\nimport { initChatModel } from \"../../chat_models/universal.js\";\n\nconst DEFAULT_SUMMARY_PROMPT = `<role>\nContext Extraction Assistant\n</role>\n\n<primary_objective>\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\n</primary_objective>\n\n<objective_information>\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\n</objective_information>\n\n<instructions>\nThe conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\n</instructions>\n\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\n\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\nRespond ONLY with the extracted context. 
Do not include any additional information, or text before or after the extracted context.\n\n<messages>\nMessages to summarize:\n{messages}\n</messages>`;\n\nconst SUMMARY_PREFIX = \"## Previous conversation summary:\";\n\nconst DEFAULT_MESSAGES_TO_KEEP = 20;\nconst DEFAULT_TRIM_TOKEN_LIMIT = 4000;\nconst DEFAULT_FALLBACK_MESSAGE_COUNT = 15;\nconst SEARCH_RANGE_FOR_TOOL_PAIRS = 5;\n\ntype TokenCounter = (messages: BaseMessage[]) => number | Promise<number>;\n\nconst contextSchema = z.object({\n model: z.custom<string | BaseLanguageModel>(),\n maxTokensBeforeSummary: z.number().optional(),\n messagesToKeep: z.number().default(DEFAULT_MESSAGES_TO_KEEP),\n tokenCounter: z\n .function()\n .args(z.array(z.any()))\n .returns(z.union([z.number(), z.promise(z.number())]))\n .optional(),\n summaryPrompt: z.string().default(DEFAULT_SUMMARY_PROMPT),\n summaryPrefix: z.string().default(SUMMARY_PREFIX),\n});\n\nexport type SummarizationMiddlewareConfig = InferInteropZodInput<\n typeof contextSchema\n>;\n\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport function summarizationMiddleware(\n options: SummarizationMiddlewareConfig\n) {\n return createMiddleware({\n name: \"SummarizationMiddleware\",\n contextSchema: contextSchema.extend({\n /**\n * `model` should be required when initializing the middleware,\n * but can be omitted within context when invoking the middleware.\n */\n model: z.custom<BaseLanguageModel>().optional(),\n }),\n beforeModel: async (state, runtime) => {\n /**\n * Parse user options to get their explicit values\n */\n const userOptions = interopParse(contextSchema, options);\n\n /**\n * Merge context with user options, preferring user options when context has default values\n */\n const config = {\n model: userOptions.model,\n maxTokensBeforeSummary:\n runtime.context.maxTokensBeforeSummary !== undefined\n ? runtime.context.maxTokensBeforeSummary\n : userOptions.maxTokensBeforeSummary,\n messagesToKeep:\n runtime.context.messagesToKeep === DEFAULT_MESSAGES_TO_KEEP\n ? userOptions.messagesToKeep\n : runtime.context.messagesToKeep ?? userOptions.messagesToKeep,\n tokenCounter:\n runtime.context.tokenCounter !== undefined\n ? runtime.context.tokenCounter\n : userOptions.tokenCounter,\n summaryPrompt:\n runtime.context.summaryPrompt === DEFAULT_SUMMARY_PROMPT\n ? userOptions.summaryPrompt\n : runtime.context.summaryPrompt ?? userOptions.summaryPrompt,\n summaryPrefix:\n runtime.context.summaryPrefix === SUMMARY_PREFIX\n ? userOptions.summaryPrefix\n : runtime.context.summaryPrefix ?? userOptions.summaryPrefix,\n } as InferInteropZodOutput<typeof contextSchema>;\n const { messages } = state;\n\n const model =\n typeof config.model === \"string\"\n ? 
await initChatModel(config.model)\n : config.model;\n\n // Ensure all messages have IDs\n ensureMessageIds(messages);\n\n const tokenCounter = config.tokenCounter || countTokensApproximately;\n const totalTokens = await tokenCounter(messages);\n\n if (\n config.maxTokensBeforeSummary == null ||\n totalTokens < config.maxTokensBeforeSummary\n ) {\n return;\n }\n\n const { systemPrompt, conversationMessages } =\n splitSystemMessage(messages);\n const cutoffIndex = findSafeCutoff(\n conversationMessages,\n config.messagesToKeep\n );\n\n if (cutoffIndex <= 0) {\n return;\n }\n\n const { messagesToSummarize, preservedMessages } = partitionMessages(\n systemPrompt,\n conversationMessages,\n cutoffIndex\n );\n\n const summary = await createSummary(\n messagesToSummarize,\n model,\n config.summaryPrompt,\n tokenCounter\n );\n\n const updatedSystemMessage = buildUpdatedSystemMessage(\n systemPrompt,\n summary,\n config.summaryPrefix\n );\n\n return {\n messages: [\n new RemoveMessage({ id: REMOVE_ALL_MESSAGES }),\n updatedSystemMessage,\n ...preservedMessages,\n ],\n };\n },\n });\n}\n\n/**\n * Ensure all messages have unique IDs\n */\nfunction ensureMessageIds(messages: BaseMessage[]): void {\n for (const msg of messages) {\n if (!msg.id) {\n msg.id = uuid();\n }\n }\n}\n\n/**\n * Separate system message from conversation messages\n */\nfunction splitSystemMessage(messages: BaseMessage[]): {\n systemPrompt: SystemMessage | null;\n conversationMessages: BaseMessage[];\n} {\n if (messages.length > 0 && SystemMessage.isInstance(messages[0])) {\n return {\n systemPrompt: messages[0] as SystemMessage,\n conversationMessages: messages.slice(1),\n };\n }\n return {\n systemPrompt: null,\n conversationMessages: messages,\n };\n}\n\n/**\n * Partition messages into those to summarize and those to preserve\n */\nfunction partitionMessages(\n systemPrompt: SystemMessage | null,\n conversationMessages: BaseMessage[],\n cutoffIndex: number\n): { messagesToSummarize: BaseMessage[]; preservedMessages: BaseMessage[] } {\n const messagesToSummarize = conversationMessages.slice(0, cutoffIndex);\n const preservedMessages = conversationMessages.slice(cutoffIndex);\n\n // Include system message in messages to summarize to capture previous summaries\n if (systemPrompt) {\n messagesToSummarize.unshift(systemPrompt);\n }\n\n return { messagesToSummarize, preservedMessages };\n}\n\n/**\n * Build updated system message incorporating the summary\n */\nfunction buildUpdatedSystemMessage(\n originalSystemMessage: SystemMessage | null,\n summary: string,\n summaryPrefix: string\n): SystemMessage {\n let originalContent = \"\";\n if (originalSystemMessage) {\n const { content } = originalSystemMessage;\n if (typeof content === \"string\") {\n originalContent = content.split(summaryPrefix)[0].trim();\n }\n }\n\n const content = originalContent\n ? 
`${originalContent}\\n${summaryPrefix}\\n${summary}`\n : `${summaryPrefix}\\n${summary}`;\n\n return new SystemMessage({\n content,\n id: originalSystemMessage?.id || uuid(),\n });\n}\n\n/**\n * Find safe cutoff point that preserves AI/Tool message pairs\n */\nfunction findSafeCutoff(\n messages: BaseMessage[],\n messagesToKeep: number\n): number {\n if (messages.length <= messagesToKeep) {\n return 0;\n }\n\n const targetCutoff = messages.length - messagesToKeep;\n\n for (let i = targetCutoff; i >= 0; i--) {\n if (isSafeCutoffPoint(messages, i)) {\n return i;\n }\n }\n\n return 0;\n}\n\n/**\n * Check if cutting at index would separate AI/Tool message pairs\n */\nfunction isSafeCutoffPoint(\n messages: BaseMessage[],\n cutoffIndex: number\n): boolean {\n if (cutoffIndex >= messages.length) {\n return true;\n }\n\n const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);\n const searchEnd = Math.min(\n messages.length,\n cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS\n );\n\n for (let i = searchStart; i < searchEnd; i++) {\n if (!hasToolCalls(messages[i])) {\n continue;\n }\n\n const toolCallIds = extractToolCallIds(messages[i] as AIMessage);\n if (cutoffSeparatesToolPair(messages, i, cutoffIndex, toolCallIds)) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * Extract tool call IDs from an AI message\n */\nfunction extractToolCallIds(aiMessage: AIMessage): Set<string> {\n const toolCallIds = new Set<string>();\n if (aiMessage.tool_calls) {\n for (const toolCall of aiMessage.tool_calls) {\n const id =\n typeof toolCall === \"object\" && \"id\" in toolCall ? toolCall.id : null;\n if (id) {\n toolCallIds.add(id);\n }\n }\n }\n return toolCallIds;\n}\n\n/**\n * Check if cutoff separates an AI message from its corresponding tool messages\n */\nfunction cutoffSeparatesToolPair(\n messages: BaseMessage[],\n aiMessageIndex: number,\n cutoffIndex: number,\n toolCallIds: Set<string>\n): boolean {\n for (let j = aiMessageIndex + 1; j < messages.length; j++) {\n const message = messages[j];\n if (\n ToolMessage.isInstance(message) &&\n toolCallIds.has(message.tool_call_id)\n ) {\n const aiBeforeCutoff = aiMessageIndex < cutoffIndex;\n const toolBeforeCutoff = j < cutoffIndex;\n if (aiBeforeCutoff !== toolBeforeCutoff) {\n return true;\n }\n }\n }\n return false;\n}\n\n/**\n * Generate summary for the given messages\n */\nasync function createSummary(\n messagesToSummarize: BaseMessage[],\n model: BaseLanguageModel,\n summaryPrompt: string,\n tokenCounter: TokenCounter\n): Promise<string> {\n if (!messagesToSummarize.length) {\n return \"No previous conversation history.\";\n }\n\n const trimmedMessages = await trimMessagesForSummary(\n messagesToSummarize,\n tokenCounter\n );\n\n if (!trimmedMessages.length) {\n return \"Previous conversation was too long to summarize.\";\n }\n\n try {\n const formattedPrompt = summaryPrompt.replace(\n \"{messages}\",\n JSON.stringify(trimmedMessages, null, 2)\n );\n const response = await model.invoke(formattedPrompt);\n const { content } = response;\n return typeof content === \"string\"\n ? 
content.trim()\n : \"Error generating summary: Invalid response format\";\n } catch (e) {\n return `Error generating summary: ${e}`;\n }\n}\n\n/**\n * Trim messages to fit within summary generation limits\n */\nasync function trimMessagesForSummary(\n messages: BaseMessage[],\n tokenCounter: TokenCounter\n): Promise<BaseMessage[]> {\n try {\n return await trimMessages(messages, {\n maxTokens: DEFAULT_TRIM_TOKEN_LIMIT,\n tokenCounter: async (msgs) => Promise.resolve(tokenCounter(msgs)),\n strategy: \"last\",\n allowPartial: true,\n includeSystem: true,\n });\n } catch {\n // Fallback to last N messages if trimming fails\n return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);\n }\n}\n"],"mappings":";;;;;;;;;;;AAsBA,MAAM,yBAAyB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;WA0BrB,CAAC;AAEZ,MAAM,iBAAiB;AAEvB,MAAM,2BAA2B;AACjC,MAAM,2BAA2B;AACjC,MAAM,iCAAiC;AACvC,MAAM,8BAA8B;AAIpC,MAAM,gBAAgB,EAAE,OAAO;CAC7B,OAAO,EAAE,QAAoC;CAC7C,wBAAwB,EAAE,QAAQ,CAAC,UAAU;CAC7C,gBAAgB,EAAE,QAAQ,CAAC,QAAQ,yBAAyB;CAC5D,cAAc,EACX,UAAU,CACV,KAAK,EAAE,MAAM,EAAE,KAAK,CAAC,CAAC,CACtB,QAAQ,EAAE,MAAM,CAAC,EAAE,QAAQ,EAAE,EAAE,QAAQ,EAAE,QAAQ,CAAC,AAAC,EAAC,CAAC,CACrD,UAAU;CACb,eAAe,EAAE,QAAQ,CAAC,QAAQ,uBAAuB;CACzD,eAAe,EAAE,QAAQ,CAAC,QAAQ,eAAe;AAClD,EAAC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCF,SAAgB,wBACdA,SACA;AACA,QAAO,iBAAiB;EACtB,MAAM;EACN,eAAe,cAAc,OAAO,EAKlC,OAAO,EAAE,QAA2B,CAAC,UAAU,CAChD,EAAC;EACF,aAAa,OAAO,OAAO,YAAY;;;;GAIrC,MAAM,cAAc,aAAa,eAAe,QAAQ;;;;GAKxD,MAAM,SAAS;IACb,OAAO,YAAY;IACnB,wBACE,QAAQ,QAAQ,2BAA2B,SACvC,QAAQ,QAAQ,yBAChB,YAAY;IAClB,gBACE,QAAQ,QAAQ,mBAAmB,2BAC/B,YAAY,iBACZ,QAAQ,QAAQ,kBAAkB,YAAY;IACpD,cACE,QAAQ,QAAQ,iBAAiB,SAC7B,QAAQ,QAAQ,eAChB,YAAY;IAClB,eACE,QAAQ,QAAQ,kBAAkB,yBAC9B,YAAY,gBACZ,QAAQ,QAAQ,iBAAiB,YAAY;IACnD,eACE,QAAQ,QAAQ,kBAAkB,iBAC9B,YAAY,gBACZ,QAAQ,QAAQ,iBAAiB,YAAY;GACpD;GACD,MAAM,EAAE,UAAU,GAAG;GAErB,MAAM,QACJ,OAAO,OAAO,UAAU,WACpB,MAAM,cAAc,OAAO,MAAM,GACjC,OAAO;GAGb,iBAAiB,SAAS;GAE1B,MAAM,eAAe,OAAO,gBAAgB;GAC5C,MAAM,cAAc,MAAM,aAAa,SAAS;AAEhD,OACE,OAAO,0BAA0B,QACjC,cAAc,OAAO,uBAErB;GAGF,MAAM,EAAE,cAAc,sBAAsB,GAC1C,mBAAmB,SAAS;GAC9B,MAAM,cAAc,eAClB,sBACA,OAAO,eACR;AAED,OAAI,eAAe,EACjB;GAGF,MAAM,EAAE,qBAAqB,mBAAmB,GAAG,kBACjD,cACA,sBACA,YACD;GAED,MAAM,UAAU,MAAM,cACpB,qBACA,OACA,OAAO,eACP,aACD;GAED,MAAM,uBAAuB,0BAC3B,cACA,SACA,OAAO,cACR;AAED,UAAO,EACL,UAAU;IACR,IAAI,cAAc,EAAE,IAAI,oBAAqB;IAC7C;IACA,GAAG;GACJ,EACF;EACF;CACF,EAAC;AACH;;;;AAKD,SAAS,iBAAiBC,UAA+B;AACvD,MAAK,MAAM,OAAO,SAChB,KAAI,CAAC,IAAI,IACP,IAAI,KAAKC,IAAM;AAGpB;;;;AAKD,SAAS,mBAAmBD,UAG1B;AACA,KAAI,SAAS,SAAS,KAAK,cAAc,WAAW,SAAS,GAAG,CAC9D,QAAO;EACL,cAAc,SAAS;EACvB,sBAAsB,SAAS,MAAM,EAAE;CACxC;AAEH,QAAO;EACL,cAAc;EACd,sBAAsB;CACvB;AACF;;;;AAKD,SAAS,kBACPE,cACAC,sBACAC,aAC0E;CAC1E,MAAM,sBAAsB,qBAAqB,MAAM,GAAG,YAAY;CACtE,MAAM,oBAAoB,qBAAqB,MAAM,YAAY;AAGjE,KAAI,cACF,oBAAoB,QAAQ,aAAa;AAG3C,QAAO;EAAE;EAAqB;CAAmB;AAClD;;;;AAKD,SAAS,0BACPC,uBACAC,SACAC,eACe;CACf,IAAI,kBAAkB;AACtB,KAAI,uBAAuB;EACzB,MAAM,EAAE,oBAAS,GAAG;AACpB,MAAI,OAAOC,cAAY,UACrB,kBAAkBA,UAAQ,MAAM,cAAc,CAAC,GAAG,MAAM;CAE3D;CAED,MAAM,UAAU,kBACZ,GAAG,gBAAgB,EAAE,EAAE,cAAc,EAAE,EAAE,SAAS,GAClD,GAAG,cAAc,EAAE,EAAE,SAAS;AAElC,QAAO,IAAI,cAAc;EACvB;EACA,IAAI,uBAAuB,MAAMP,IAAM;CACxC;AACF;;;;AAKD,SAAS,eACPD,UACAS,gBACQ;AACR,KAAI,SAAS,UAAU,eACrB,QAAO;CAGT,MAAM,eAAe,SAAS,SAAS;AAEvC,MAAK,IAAI,IAAI,cAAc,KAAK,GAAG,IACjC,KAAI,kBAAkB,UAAU,EAAE,CAChC,QAAO;AAIX,QAAO;AACR;;;;AAKD,SAAS,kBACPT,UACAI,aACS;AACT,KAAI,eAAe,SAAS,OAC1B,QAAO;CAGT,MAAM,cAAc,KAAK,IAAI,GAAG,cAAc,4BAA4B;CAC1E,MAAM,YAAY,KAAK,IACrB,SAAS,QACT,cAAc,4BACf;AAED,MAAK,IAAI,IAAI,aAAa,IAAI,WAAW,KAAK;AAC5C,MAAI,CAAC,aAAa,SAAS,GAAG,CAC5B;EAGF,MA
AM,cAAc,mBAAmB,SAAS,GAAgB;AAChE,MAAI,wBAAwB,UAAU,GAAG,aAAa,YAAY,CAChE,QAAO;CAEV;AAED,QAAO;AACR;;;;AAKD,SAAS,mBAAmBM,WAAmC;CAC7D,MAAM,8BAAc,IAAI;AACxB,KAAI,UAAU,WACZ,MAAK,MAAM,YAAY,UAAU,YAAY;EAC3C,MAAM,KACJ,OAAO,aAAa,YAAY,QAAQ,WAAW,SAAS,KAAK;AACnE,MAAI,IACF,YAAY,IAAI,GAAG;CAEtB;AAEH,QAAO;AACR;;;;AAKD,SAAS,wBACPV,UACAW,gBACAP,aACAQ,aACS;AACT,MAAK,IAAI,IAAI,iBAAiB,GAAG,IAAI,SAAS,QAAQ,KAAK;EACzD,MAAM,UAAU,SAAS;AACzB,MACE,YAAY,WAAW,QAAQ,IAC/B,YAAY,IAAI,QAAQ,aAAa,EACrC;GACA,MAAM,iBAAiB,iBAAiB;GACxC,MAAM,mBAAmB,IAAI;AAC7B,OAAI,mBAAmB,iBACrB,QAAO;EAEV;CACF;AACD,QAAO;AACR;;;;AAKD,eAAe,cACbC,qBACAC,OACAC,eACAC,cACiB;AACjB,KAAI,CAAC,oBAAoB,OACvB,QAAO;CAGT,MAAM,kBAAkB,MAAM,uBAC5B,qBACA,aACD;AAED,KAAI,CAAC,gBAAgB,OACnB,QAAO;AAGT,KAAI;EACF,MAAM,kBAAkB,cAAc,QACpC,cACA,KAAK,UAAU,iBAAiB,MAAM,EAAE,CACzC;EACD,MAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;EACpD,MAAM,EAAE,SAAS,GAAG;AACpB,SAAO,OAAO,YAAY,WACtB,QAAQ,MAAM,GACd;CACL,SAAQ,GAAG;AACV,SAAO,CAAC,0BAA0B,EAAE,GAAG;CACxC;AACF;;;;AAKD,eAAe,uBACbhB,UACAgB,cACwB;AACxB,KAAI;AACF,SAAO,MAAM,aAAa,UAAU;GAClC,WAAW;GACX,cAAc,OAAO,SAAS,QAAQ,QAAQ,aAAa,KAAK,CAAC;GACjE,UAAU;GACV,cAAc;GACd,eAAe;EAChB,EAAC;CACH,QAAO;AAEN,SAAO,SAAS,MAAM,CAAC,+BAA+B;CACvD;AACF"}
+ {"version":3,"file":"summarization.js","names":["model: BaseLanguageModel","options: SummarizationMiddlewareConfig","z4","trigger: ContextSize | ContextSize[] | undefined","keep: ContextSize","triggerConditions: ContextSize[]","uuid","messages: BaseMessage[]","systemPrompt: SystemMessage | undefined","conversationMessages: BaseMessage[]","cutoffIndex: number","totalTokens: number","tokenCounter: TokenCounter","targetTokenCount: number","messagesToKeep: number","aiMessage: AIMessage","aiMessageIndex: number","toolCallIds: Set<string>","messagesToSummarize: BaseMessage[]","summaryPrompt: string","trimTokensToSummarize: number | undefined"],"sources":["../../../src/agents/middleware/summarization.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nimport { z as z4 } from \"zod/v4\";\nimport { v4 as uuid } from \"uuid\";\nimport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n ToolMessage,\n RemoveMessage,\n trimMessages,\n HumanMessage,\n} from \"@langchain/core/messages\";\nimport {\n BaseLanguageModel,\n getModelContextSize,\n} from \"@langchain/core/language_models/base\";\nimport {\n interopSafeParse,\n InferInteropZodInput,\n InferInteropZodOutput,\n} from \"@langchain/core/utils/types\";\nimport { REMOVE_ALL_MESSAGES } from \"@langchain/langgraph\";\nimport { createMiddleware } from \"../middleware.js\";\nimport { countTokensApproximately } from \"./utils.js\";\nimport { hasToolCalls } from \"../utils.js\";\nimport { initChatModel } from \"../../chat_models/universal.js\";\n\nexport const DEFAULT_SUMMARY_PROMPT = `<role>\nContext Extraction Assistant\n</role>\n\n<primary_objective>\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\n</primary_objective>\n\n<objective_information>\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\n</objective_information>\n\n<instructions>\nThe conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\n</instructions>\n\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\n\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\nRespond ONLY with the extracted context. 
Do not include any additional information, or text before or after the extracted context.\n\n<messages>\nMessages to summarize:\n{messages}\n</messages>`;\n\nconst DEFAULT_MESSAGES_TO_KEEP = 20;\nconst DEFAULT_TRIM_TOKEN_LIMIT = 4000;\nconst DEFAULT_FALLBACK_MESSAGE_COUNT = 15;\nconst SEARCH_RANGE_FOR_TOOL_PAIRS = 5;\n\nconst tokenCounterSchema = z\n .function()\n .args(z.array(z.custom<BaseMessage>()))\n .returns(z.union([z.number(), z.promise(z.number())]));\nexport type TokenCounter = (\n messages: BaseMessage[]\n) => number | Promise<number>;\n\nconst contextSizeSchema = z\n .object({\n /**\n * Fraction of the model's context size to use as the trigger\n */\n fraction: z\n .number()\n .gt(0, \"Fraction must be greater than 0\")\n .max(1, \"Fraction must be less than or equal to 1\")\n .optional(),\n /**\n * Number of tokens to use as the trigger\n */\n tokens: z.number().positive(\"Tokens must be greater than 0\").optional(),\n /**\n * Number of messages to use as the trigger\n */\n messages: z\n .number()\n .int(\"Messages must be an integer\")\n .positive(\"Messages must be greater than 0\")\n .optional(),\n })\n .refine(\n (data) => {\n const count = [data.fraction, data.tokens, data.messages].filter(\n (v) => v !== undefined\n ).length;\n return count >= 1;\n },\n {\n message: \"At least one of fraction, tokens, or messages must be provided\",\n }\n );\nexport type ContextSize = z.infer<typeof contextSizeSchema>;\n\nconst keepSchema = z\n .object({\n /**\n * Fraction of the model's context size to keep\n */\n fraction: z\n .number()\n .gt(0, \"Fraction must be greater than 0\")\n .max(1, \"Fraction must be less than or equal to 1\")\n .optional(),\n /**\n * Number of tokens to keep\n */\n tokens: z.number().positive(\"Tokens must be greater than 0\").optional(),\n messages: z\n .number()\n .int(\"Messages must be an integer\")\n .positive(\"Messages must be greater than 0\")\n .optional(),\n })\n .refine(\n (data) => {\n const count = [data.fraction, data.tokens, data.messages].filter(\n (v) => v !== undefined\n ).length;\n return count === 1;\n },\n {\n message: \"Exactly one of fraction, tokens, or messages must be provided\",\n }\n );\n\nconst contextSchema = z.object({\n /**\n * Model to use for summarization\n */\n model: z.custom<string | BaseLanguageModel>(),\n /**\n * Trigger conditions for summarization.\n * Can be a single condition object (all properties must be met) or an array of conditions (any condition must be met).\n *\n * @example\n * ```ts\n * // Single condition: trigger if tokens >= 5000 AND messages >= 3\n * trigger: { tokens: 5000, messages: 3 }\n *\n * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)\n * trigger: [\n * { tokens: 5000, messages: 3 },\n * { tokens: 3000, messages: 6 }\n * ]\n * ```\n */\n trigger: z.union([contextSizeSchema, z.array(contextSizeSchema)]).optional(),\n /**\n * Keep conditions for summarization\n */\n keep: keepSchema.optional(),\n /**\n * Token counter function to use for summarization\n */\n tokenCounter: tokenCounterSchema.optional(),\n /**\n * Summary prompt to use for summarization\n * @default {@link DEFAULT_SUMMARY_PROMPT}\n */\n summaryPrompt: z.string().default(DEFAULT_SUMMARY_PROMPT),\n /**\n * Number of tokens to trim to before summarizing\n */\n trimTokensToSummarize: z.number().optional(),\n /**\n * Prefix to add to the summary\n */\n summaryPrefix: z.string().optional(),\n /**\n * @deprecated Use `trigger: { tokens: value }` instead.\n */\n 
maxTokensBeforeSummary: z.number().optional(),\n /**\n * @deprecated Use `keep: { messages: value }` instead.\n */\n messagesToKeep: z.number().optional(),\n});\n\nexport type SummarizationMiddlewareConfig = InferInteropZodInput<\n typeof contextSchema\n>;\n\n/**\n * Get max input tokens from model profile or fallback to model name lookup\n */\nfunction getProfileLimits(model: BaseLanguageModel): number | undefined {\n try {\n /**\n * Try to access profile property (for future compatibility with model-profiles)\n */\n const modelWithProfile = model as BaseLanguageModel & {\n profile?: { max_input_tokens?: number };\n };\n if (\n modelWithProfile.profile &&\n typeof modelWithProfile.profile.max_input_tokens === \"number\"\n ) {\n return modelWithProfile.profile.max_input_tokens;\n }\n } catch {\n /**\n * Profile not available, continue to fallback\n */\n }\n\n /**\n * Fallback: try to get model name and use getModelContextSize\n */\n try {\n const modelWithName = model as BaseLanguageModel & {\n model?: string;\n modelName?: string;\n };\n const modelName = modelWithName.model || modelWithName.modelName;\n if (typeof modelName === \"string\") {\n return getModelContextSize(modelName);\n }\n } catch {\n /**\n * Model name not available\n */\n }\n\n return undefined;\n}\n\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain\";\n * import { createAgent } from \"langchain\";\n *\n * // Single condition: trigger if tokens >= 4000 AND messages >= 10\n * const agent1 = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * trigger: { tokens: 4000, messages: 10 },\n * keep: { messages: 20 },\n * })\n * ],\n * });\n *\n * // Multiple conditions: trigger if (tokens >= 5000 AND messages >= 3) OR (tokens >= 3000 AND messages >= 6)\n * const agent2 = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middleware: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * trigger: [\n * { tokens: 5000, messages: 3 },\n * { tokens: 3000, messages: 6 },\n * ],\n * keep: { messages: 20 },\n * })\n * ],\n * });\n *\n * ```\n */\nexport function summarizationMiddleware(\n options: SummarizationMiddlewareConfig\n) {\n /**\n * Parse user options to get their explicit values\n */\n const { data: userOptions, error } = interopSafeParse(contextSchema, options);\n if (error) {\n throw new Error(\n `Invalid summarization middleware options: ${z4.prettifyError(error)}`\n );\n }\n\n return createMiddleware({\n name: \"SummarizationMiddleware\",\n contextSchema: contextSchema.extend({\n /**\n * `model` should be required when initializing the middleware,\n * but can be omitted within context when invoking the middleware.\n */\n model: z.custom<BaseLanguageModel>().optional(),\n }),\n beforeModel: async (state, runtime) => {\n let trigger: ContextSize | ContextSize[] | undefined =\n userOptions.trigger;\n let keep: ContextSize = userOptions.keep as InferInteropZodOutput<\n typeof 
keepSchema\n >;\n\n /**\n * Handle deprecated parameters\n */\n if (userOptions.maxTokensBeforeSummary !== undefined) {\n console.warn(\n \"maxTokensBeforeSummary is deprecated. Use `trigger: { tokens: value }` instead.\"\n );\n if (trigger === undefined) {\n trigger = { tokens: userOptions.maxTokensBeforeSummary };\n }\n }\n\n /**\n * Handle deprecated parameters\n */\n if (userOptions.messagesToKeep !== undefined) {\n console.warn(\n \"messagesToKeep is deprecated. Use `keep: { messages: value }` instead.\"\n );\n if (\n !keep ||\n (keep &&\n \"messages\" in keep &&\n keep.messages === DEFAULT_MESSAGES_TO_KEEP)\n ) {\n keep = { messages: userOptions.messagesToKeep };\n }\n }\n\n /**\n * Merge context with user options\n */\n const resolvedTrigger =\n runtime.context.trigger !== undefined\n ? runtime.context.trigger\n : trigger;\n const resolvedKeep =\n runtime.context.keep !== undefined\n ? runtime.context.keep\n : keep ?? { messages: DEFAULT_MESSAGES_TO_KEEP };\n\n const validatedKeep = keepSchema.parse(resolvedKeep);\n\n /**\n * Validate trigger conditions\n */\n let triggerConditions: ContextSize[] = [];\n if (resolvedTrigger === undefined) {\n triggerConditions = [];\n } else if (Array.isArray(resolvedTrigger)) {\n /**\n * It's an array of ContextSize objects\n */\n triggerConditions = (resolvedTrigger as ContextSize[]).map((t) =>\n contextSizeSchema.parse(t)\n );\n } else {\n /**\n * Single ContextSize object - all properties must be satisfied (AND logic)\n */\n triggerConditions = [contextSizeSchema.parse(resolvedTrigger)];\n }\n\n /**\n * Check if profile is required\n */\n const requiresProfile =\n triggerConditions.some((c) => \"fraction\" in c) ||\n \"fraction\" in validatedKeep;\n\n const model =\n typeof userOptions.model === \"string\"\n ? await initChatModel(userOptions.model)\n : userOptions.model;\n\n if (requiresProfile && !getProfileLimits(model)) {\n throw new Error(\n \"Model profile information is required to use fractional token limits. \" +\n \"Use absolute token counts instead.\"\n );\n }\n\n const summaryPrompt =\n runtime.context.summaryPrompt === DEFAULT_SUMMARY_PROMPT\n ? userOptions.summaryPrompt ?? DEFAULT_SUMMARY_PROMPT\n : runtime.context.summaryPrompt ??\n userOptions.summaryPrompt ??\n DEFAULT_SUMMARY_PROMPT;\n const trimTokensToSummarize =\n runtime.context.trimTokensToSummarize !== undefined\n ? runtime.context.trimTokensToSummarize\n : userOptions.trimTokensToSummarize ?? DEFAULT_TRIM_TOKEN_LIMIT;\n\n /**\n * Ensure all messages have IDs\n */\n ensureMessageIds(state.messages);\n\n const tokenCounter =\n runtime.context.tokenCounter !== undefined\n ? runtime.context.tokenCounter\n : userOptions.tokenCounter ?? 
countTokensApproximately;\n const totalTokens = await tokenCounter(state.messages);\n const doSummarize = await shouldSummarize(\n state.messages,\n totalTokens,\n triggerConditions,\n model\n );\n\n if (!doSummarize) {\n return;\n }\n\n const { systemPrompt, conversationMessages } = splitSystemMessage(\n state.messages\n );\n const cutoffIndex = await determineCutoffIndex(\n conversationMessages,\n validatedKeep,\n tokenCounter,\n model\n );\n\n if (cutoffIndex <= 0) {\n return;\n }\n\n const { messagesToSummarize, preservedMessages } = partitionMessages(\n systemPrompt,\n conversationMessages,\n cutoffIndex\n );\n\n const summary = await createSummary(\n messagesToSummarize,\n model,\n summaryPrompt,\n tokenCounter,\n trimTokensToSummarize\n );\n\n const summaryMessage = new HumanMessage({\n content: `Here is a summary of the conversation to date:\\n\\n${summary}`,\n id: uuid(),\n });\n\n return {\n messages: [\n new RemoveMessage({ id: REMOVE_ALL_MESSAGES }),\n summaryMessage,\n ...preservedMessages,\n ],\n };\n },\n });\n}\n\n/**\n * Ensure all messages have unique IDs\n */\nfunction ensureMessageIds(messages: BaseMessage[]): void {\n for (const msg of messages) {\n if (!msg.id) {\n msg.id = uuid();\n }\n }\n}\n\n/**\n * Separate system message from conversation messages\n */\nfunction splitSystemMessage(messages: BaseMessage[]): {\n systemPrompt?: SystemMessage;\n conversationMessages: BaseMessage[];\n} {\n if (messages.length > 0 && SystemMessage.isInstance(messages[0])) {\n return {\n systemPrompt: messages[0] as SystemMessage,\n conversationMessages: messages.slice(1),\n };\n }\n return {\n conversationMessages: messages,\n };\n}\n\n/**\n * Partition messages into those to summarize and those to preserve\n */\nfunction partitionMessages(\n systemPrompt: SystemMessage | undefined,\n conversationMessages: BaseMessage[],\n cutoffIndex: number\n): { messagesToSummarize: BaseMessage[]; preservedMessages: BaseMessage[] } {\n const messagesToSummarize = conversationMessages.slice(0, cutoffIndex);\n const preservedMessages = conversationMessages.slice(cutoffIndex);\n\n // Include system message in messages to summarize to capture previous summaries\n if (systemPrompt) {\n messagesToSummarize.unshift(systemPrompt);\n }\n\n return { messagesToSummarize, preservedMessages };\n}\n\n/**\n * Determine whether summarization should run for the current token usage\n *\n * @param messages - Current messages in the conversation\n * @param totalTokens - Total token count for all messages\n * @param triggerConditions - Array of trigger conditions. 
Returns true if ANY condition is satisfied (OR logic).\n * Within each condition, ALL specified properties must be satisfied (AND logic).\n * @param model - The language model being used\n * @returns true if summarization should be triggered\n */\nasync function shouldSummarize(\n messages: BaseMessage[],\n totalTokens: number,\n triggerConditions: ContextSize[],\n model: BaseLanguageModel\n): Promise<boolean> {\n if (triggerConditions.length === 0) {\n return false;\n }\n\n /**\n * Check each condition (OR logic between conditions)\n */\n for (const trigger of triggerConditions) {\n /**\n * Within a single condition, all specified properties must be satisfied (AND logic)\n */\n let conditionMet = true;\n let hasAnyProperty = false;\n\n if (trigger.messages !== undefined) {\n hasAnyProperty = true;\n if (messages.length < trigger.messages) {\n conditionMet = false;\n }\n }\n\n if (trigger.tokens !== undefined) {\n hasAnyProperty = true;\n if (totalTokens < trigger.tokens) {\n conditionMet = false;\n }\n }\n\n if (trigger.fraction !== undefined) {\n hasAnyProperty = true;\n const maxInputTokens = getProfileLimits(model);\n if (typeof maxInputTokens === \"number\") {\n const threshold = Math.floor(maxInputTokens * trigger.fraction);\n if (totalTokens < threshold) {\n conditionMet = false;\n }\n } else {\n /**\n * If fraction is specified but we can't get model limits, skip this condition\n */\n conditionMet = false;\n }\n }\n\n /**\n * If condition has at least one property and all properties are satisfied, trigger summarization\n */\n if (hasAnyProperty && conditionMet) {\n return true;\n }\n }\n\n return false;\n}\n\n/**\n * Determine cutoff index respecting retention configuration\n */\nasync function determineCutoffIndex(\n messages: BaseMessage[],\n keep: ContextSize,\n tokenCounter: TokenCounter,\n model: BaseLanguageModel\n): Promise<number> {\n if (\"tokens\" in keep || \"fraction\" in keep) {\n const tokenBasedCutoff = await findTokenBasedCutoff(\n messages,\n keep,\n tokenCounter,\n model\n );\n if (typeof tokenBasedCutoff === \"number\") {\n return tokenBasedCutoff;\n }\n /**\n * Fallback to message count if token-based fails\n */\n return findSafeCutoff(messages, DEFAULT_MESSAGES_TO_KEEP);\n }\n /**\n * find cutoff index based on message count\n */\n return findSafeCutoff(messages, keep.messages ?? 
DEFAULT_MESSAGES_TO_KEEP);\n}\n\n/**\n * Find cutoff index based on target token retention\n */\nasync function findTokenBasedCutoff(\n messages: BaseMessage[],\n keep: ContextSize,\n tokenCounter: TokenCounter,\n model: BaseLanguageModel\n): Promise<number | undefined> {\n if (messages.length === 0) {\n return 0;\n }\n\n let targetTokenCount: number;\n\n if (\"fraction\" in keep && keep.fraction !== undefined) {\n const maxInputTokens = getProfileLimits(model);\n if (typeof maxInputTokens !== \"number\") {\n return;\n }\n targetTokenCount = Math.floor(maxInputTokens * keep.fraction);\n } else if (\"tokens\" in keep && keep.tokens !== undefined) {\n targetTokenCount = Math.floor(keep.tokens);\n } else {\n return;\n }\n\n if (targetTokenCount <= 0) {\n targetTokenCount = 1;\n }\n\n const totalTokens = await tokenCounter(messages);\n if (totalTokens <= targetTokenCount) {\n return 0;\n }\n\n /**\n * Use binary search to identify the earliest message index that keeps the\n * suffix within the token budget.\n */\n let left = 0;\n let right = messages.length;\n let cutoffCandidate = messages.length;\n const maxIterations = Math.floor(Math.log2(messages.length)) + 1;\n\n for (let i = 0; i < maxIterations; i++) {\n if (left >= right) {\n break;\n }\n\n const mid = Math.floor((left + right) / 2);\n const suffixTokens = await tokenCounter(messages.slice(mid));\n if (suffixTokens <= targetTokenCount) {\n cutoffCandidate = mid;\n right = mid;\n } else {\n left = mid + 1;\n }\n }\n\n if (cutoffCandidate === messages.length) {\n cutoffCandidate = left;\n }\n\n if (cutoffCandidate >= messages.length) {\n if (messages.length === 1) {\n return 0;\n }\n cutoffCandidate = messages.length - 1;\n }\n\n /**\n * Find safe cutoff point that preserves tool pairs\n */\n for (let i = cutoffCandidate; i >= 0; i--) {\n if (isSafeCutoffPoint(messages, i)) {\n return i;\n }\n }\n\n return 0;\n}\n\n/**\n * Find safe cutoff point that preserves AI/Tool message pairs\n */\nfunction findSafeCutoff(\n messages: BaseMessage[],\n messagesToKeep: number\n): number {\n if (messages.length <= messagesToKeep) {\n return 0;\n }\n\n const targetCutoff = messages.length - messagesToKeep;\n\n for (let i = targetCutoff; i >= 0; i--) {\n if (isSafeCutoffPoint(messages, i)) {\n return i;\n }\n }\n\n return 0;\n}\n\n/**\n * Check if cutting at index would separate AI/Tool message pairs\n */\nfunction isSafeCutoffPoint(\n messages: BaseMessage[],\n cutoffIndex: number\n): boolean {\n if (cutoffIndex >= messages.length) {\n return true;\n }\n\n /**\n * Prevent preserved messages from starting with AI message containing tool calls\n */\n if (\n cutoffIndex < messages.length &&\n AIMessage.isInstance(messages[cutoffIndex]) &&\n hasToolCalls(messages[cutoffIndex])\n ) {\n return false;\n }\n\n const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);\n const searchEnd = Math.min(\n messages.length,\n cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS\n );\n\n for (let i = searchStart; i < searchEnd; i++) {\n if (!hasToolCalls(messages[i])) {\n continue;\n }\n\n const toolCallIds = extractToolCallIds(messages[i] as AIMessage);\n if (cutoffSeparatesToolPair(messages, i, cutoffIndex, toolCallIds)) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * Extract tool call IDs from an AI message\n */\nfunction extractToolCallIds(aiMessage: AIMessage): Set<string> {\n const toolCallIds = new Set<string>();\n if (aiMessage.tool_calls) {\n for (const toolCall of aiMessage.tool_calls) {\n const id =\n typeof toolCall === \"object\" && \"id\" 
in toolCall ? toolCall.id : null;\n if (id) {\n toolCallIds.add(id);\n }\n }\n }\n return toolCallIds;\n}\n\n/**\n * Check if cutoff separates an AI message from its corresponding tool messages\n */\nfunction cutoffSeparatesToolPair(\n messages: BaseMessage[],\n aiMessageIndex: number,\n cutoffIndex: number,\n toolCallIds: Set<string>\n): boolean {\n for (let j = aiMessageIndex + 1; j < messages.length; j++) {\n const message = messages[j];\n if (\n ToolMessage.isInstance(message) &&\n toolCallIds.has(message.tool_call_id)\n ) {\n const aiBeforeCutoff = aiMessageIndex < cutoffIndex;\n const toolBeforeCutoff = j < cutoffIndex;\n if (aiBeforeCutoff !== toolBeforeCutoff) {\n return true;\n }\n }\n }\n return false;\n}\n\n/**\n * Generate summary for the given messages\n */\nasync function createSummary(\n messagesToSummarize: BaseMessage[],\n model: BaseLanguageModel,\n summaryPrompt: string,\n tokenCounter: TokenCounter,\n trimTokensToSummarize: number | undefined\n): Promise<string> {\n if (!messagesToSummarize.length) {\n return \"No previous conversation history.\";\n }\n\n const trimmedMessages = await trimMessagesForSummary(\n messagesToSummarize,\n tokenCounter,\n trimTokensToSummarize\n );\n\n if (!trimmedMessages.length) {\n return \"Previous conversation was too long to summarize.\";\n }\n\n try {\n const formattedPrompt = summaryPrompt.replace(\n \"{messages}\",\n JSON.stringify(trimmedMessages, null, 2)\n );\n const response = await model.invoke(formattedPrompt);\n const content = response.content;\n /**\n * Handle both string content and MessageContent array\n */\n if (typeof content === \"string\") {\n return content.trim();\n } else if (Array.isArray(content)) {\n /**\n * Extract text from MessageContent array\n */\n const textContent = content\n .map((item) => {\n if (typeof item === \"string\") return item;\n if (typeof item === \"object\" && item !== null && \"text\" in item) {\n return (item as { text: string }).text;\n }\n return \"\";\n })\n .join(\"\");\n return textContent.trim();\n }\n return \"Error generating summary: Invalid response format\";\n } catch (e) {\n return `Error generating summary: ${e}`;\n }\n}\n\n/**\n * Trim messages to fit within summary generation limits\n */\nasync function trimMessagesForSummary(\n messages: BaseMessage[],\n tokenCounter: TokenCounter,\n trimTokensToSummarize: number | undefined\n): Promise<BaseMessage[]> {\n if (trimTokensToSummarize === undefined) {\n return messages;\n }\n\n try {\n return await trimMessages(messages, {\n maxTokens: trimTokensToSummarize,\n tokenCounter: async (msgs) => tokenCounter(msgs),\n strategy: \"last\",\n allowPartial: true,\n includeSystem: true,\n });\n } catch {\n /**\n * Fallback to last N messages if trimming fails\n */\n return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;AA2BA,MAAa,yBAAyB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;WA0B5B,CAAC;AAEZ,MAAM,2BAA2B;AACjC,MAAM,2BAA2B;AACjC,MAAM,iCAAiC;AACvC,MAAM,8BAA8B;AAEpC,MAAM,qBAAqB,EACxB,UAAU,CACV,KAAK,EAAE,MAAM,EAAE,QAAqB,CAAC,CAAC,CACtC,QAAQ,EAAE,MAAM,CAAC,EAAE,QAAQ,EAAE,EAAE,QAAQ,EAAE,QAAQ,CAAC,AAAC,EAAC,CAAC;AAKxD,MAAM,oBAAoB,EACvB,OAAO;CAIN,UAAU,EACP,QAAQ,CACR,GAAG,GAAG,kCAAkC,CACxC,IAAI,GAAG,2CAA2C,CAClD,UAAU;CAIb,QAAQ,EAAE,QAAQ,CAAC,SAAS,gCAAgC,CAAC,UAAU;CAIvE,UAAU,EACP,QAAQ,CACR,IAAI,8BAA8B,CAClC,SAAS,kCAAkC,CAC3C,UAAU;AACd,EAAC,CACD,OACC,CAAC,SAAS;CACR,MAAM,QAAQ;EAAC,KAAK;EAAU,KAAK;EAAQ,KAAK;CAAS,EAAC,OACxD,CAAC,MAAM,MAAM,OACd,CAAC;AACF,QAAO,SAAS;AACjB,GACD,EACE,SAAS,iEACV,EACF;AAGH,MAAM,aAAa,EAChB,OAAO;CAIN,UAAU,EACP,QAAQ,CACR,GAAG,GAAG,kCAAkC,CACxC,IAAI,GAAG,2CAA2C,CAClD,UAAU;CAIb,QAAQ,EAAE,QAAQ,CAAC,SAAS,gCAAgC,CAAC,UAAU;CACvE,UAAU,EACP,QAAQ,CACR,IAAI,8BAA8B,CAClC,SAAS,kCAAkC,CAC3C,UAAU;AACd,EAAC,CACD,OACC,CAAC,SAAS;CACR,MAAM,QAAQ;EAAC,KAAK;EAAU,KAAK;EAAQ,KAAK;CAAS,EAAC,OACxD,CAAC,MAAM,MAAM,OACd,CAAC;AACF,QAAO,UAAU;AAClB,GACD,EACE,SAAS,gEACV,EACF;AAEH,MAAM,gBAAgB,EAAE,OAAO;CAI7B,OAAO,EAAE,QAAoC;CAiB7C,SAAS,EAAE,MAAM,CAAC,mBAAmB,EAAE,MAAM,kBAAkB,AAAC,EAAC,CAAC,UAAU;CAI5E,MAAM,WAAW,UAAU;CAI3B,cAAc,mBAAmB,UAAU;CAK3C,eAAe,EAAE,QAAQ,CAAC,QAAQ,uBAAuB;CAIzD,uBAAuB,EAAE,QAAQ,CAAC,UAAU;CAI5C,eAAe,EAAE,QAAQ,CAAC,UAAU;CAIpC,wBAAwB,EAAE,QAAQ,CAAC,UAAU;CAI7C,gBAAgB,EAAE,QAAQ,CAAC,UAAU;AACtC,EAAC;;;;AASF,SAAS,iBAAiBA,OAA8C;AACtE,KAAI;;;;EAIF,MAAM,mBAAmB;AAGzB,MACE,iBAAiB,WACjB,OAAO,iBAAiB,QAAQ,qBAAqB,SAErD,QAAO,iBAAiB,QAAQ;CAEnC,QAAO,CAIP;;;;AAKD,KAAI;EACF,MAAM,gBAAgB;EAItB,MAAM,YAAY,cAAc,SAAS,cAAc;AACvD,MAAI,OAAO,cAAc,SACvB,QAAO,oBAAoB,UAAU;CAExC,QAAO,CAIP;AAED,QAAO;AACR;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAgDD,SAAgB,wBACdC,SACA;;;;CAIA,MAAM,EAAE,MAAM,aAAa,OAAO,GAAG,iBAAiB,eAAe,QAAQ;AAC7E,KAAI,MACF,OAAM,IAAI,MACR,CAAC,0CAA0C,EAAEC,IAAG,cAAc,MAAM,EAAE;AAI1E,QAAO,iBAAiB;EACtB,MAAM;EACN,eAAe,cAAc,OAAO,EAKlC,OAAO,EAAE,QAA2B,CAAC,UAAU,CAChD,EAAC;EACF,aAAa,OAAO,OAAO,YAAY;GACrC,IAAIC,UACF,YAAY;GACd,IAAIC,OAAoB,YAAY;;;;AAOpC,OAAI,YAAY,2BAA2B,QAAW;IACpD,QAAQ,KACN,kFACD;AACD,QAAI,YAAY,QACd,UAAU,EAAE,QAAQ,YAAY,uBAAwB;GAE3D;;;;AAKD,OAAI,YAAY,mBAAmB,QAAW;IAC5C,QAAQ,KACN,yEACD;AACD,QACE,CAAC,QACA,QACC,cAAc,QACd,KAAK,aAAa,0BAEpB,OAAO,EAAE,UAAU,YAAY,eAAgB;GAElD;;;;GAKD,MAAM,kBACJ,QAAQ,QAAQ,YAAY,SACxB,QAAQ,QAAQ,UAChB;GACN,MAAM,eACJ,QAAQ,QAAQ,SAAS,SACrB,QAAQ,QAAQ,OAChB,QAAQ,EAAE,UAAU,yBAA0B;GAEpD,MAAM,gBAAgB,WAAW,MAAM,aAAa;;;;GAKpD,IAAIC,oBAAmC,CAAE;AACzC,OAAI,oBAAoB,QACtB,oBAAoB,CAAE;YACb,MAAM,QAAQ,gBAAgB;;;;GAIvC,oBAAqB,gBAAkC,IAAI,CAAC,MAC1D,kBAAkB,MAAM,EAAE,CAC3B;;;;;GAKD,oBAAoB,CAAC,kBAAkB,MAAM,gBAAgB,AAAC;;;;GAMhE,MAAM,kBACJ,kBAAkB,KAAK,CAAC,MAAM,cAAc,EAAE,IAC9C,cAAc;GAEhB,MAAM,QACJ,OAAO,YAAY,UAAU,WACzB,MAAM,cAAc,YAAY,MAAM,GACtC,YAAY;AAElB,OAAI,mBAAmB,CAAC,iBAAiB,MAAM,CAC7C,OAAM,IAAI,MACR;GAKJ,MAAM,gBACJ,QAAQ,QAAQ,kBAAkB,yBAC9B,YAAY,iBAAiB,yBAC7B,QAAQ,QAAQ,iBAChB,YAAY,iBACZ;GACN,MAAM,wBACJ,QAAQ,QAAQ,0BAA0B,SACtC,QAAQ,QAAQ,wBAChB,YAAY,yBAAyB;;;;GAK3C,iBAAiB,MAAM,SAAS;GAEhC,MAAM,eACJ,QAAQ,QAAQ,iBAAiB,SAC7B,QAAQ,QAAQ,eAChB,YAAY,gBAAgB;GAClC,MAAM,cAAc,MAAM,aAAa,MAAM,SAAS;GACtD,MAAM,cAAc,MAAM,gBACxB,MAAM,UACN,aACA,mBACA,MACD;AAED,OAAI,CAAC,YACH;GAGF,MAAM,EAAE,cAAc,sBAAsB,GAAG,mBAC7C,MAAM,SACP;GACD,MAAM,cAAc,MAAM,qBACxB,sBACA,eACA,cACA,MACD;AAED,OAAI,eAAe,EACjB;GAGF,MAAM,EAAE,qBAAqB,mBAAmB,GAAG,kBACjD,cACA,sBACA,YACD;GAED,MAAM,UAAU,MAAM,cACpB,qBACA,OACA,eACA,cACA,sBACD;GAED,MAAM,iBAAiB,IAAI,aAAa;IACtC,SAAS,CAAC,kDAAkD,EAAE,SAAS;IACvE,IAAIC,IAAM;GACX;AAED,UAAO,EACL,UAAU;IACR,IAAI,cAAc,EAAE,IAAI,oB
AAqB;IAC7C;IACA,GAAG;GACJ,EACF;EACF;CACF,EAAC;AACH;;;;AAKD,SAAS,iBAAiBC,UAA+B;AACvD,MAAK,MAAM,OAAO,SAChB,KAAI,CAAC,IAAI,IACP,IAAI,KAAKD,IAAM;AAGpB;;;;AAKD,SAAS,mBAAmBC,UAG1B;AACA,KAAI,SAAS,SAAS,KAAK,cAAc,WAAW,SAAS,GAAG,CAC9D,QAAO;EACL,cAAc,SAAS;EACvB,sBAAsB,SAAS,MAAM,EAAE;CACxC;AAEH,QAAO,EACL,sBAAsB,SACvB;AACF;;;;AAKD,SAAS,kBACPC,cACAC,sBACAC,aAC0E;CAC1E,MAAM,sBAAsB,qBAAqB,MAAM,GAAG,YAAY;CACtE,MAAM,oBAAoB,qBAAqB,MAAM,YAAY;AAGjE,KAAI,cACF,oBAAoB,QAAQ,aAAa;AAG3C,QAAO;EAAE;EAAqB;CAAmB;AAClD;;;;;;;;;;;AAYD,eAAe,gBACbH,UACAI,aACAN,mBACAL,OACkB;AAClB,KAAI,kBAAkB,WAAW,EAC/B,QAAO;;;;AAMT,MAAK,MAAM,WAAW,mBAAmB;;;;EAIvC,IAAI,eAAe;EACnB,IAAI,iBAAiB;AAErB,MAAI,QAAQ,aAAa,QAAW;GAClC,iBAAiB;AACjB,OAAI,SAAS,SAAS,QAAQ,UAC5B,eAAe;EAElB;AAED,MAAI,QAAQ,WAAW,QAAW;GAChC,iBAAiB;AACjB,OAAI,cAAc,QAAQ,QACxB,eAAe;EAElB;AAED,MAAI,QAAQ,aAAa,QAAW;GAClC,iBAAiB;GACjB,MAAM,iBAAiB,iBAAiB,MAAM;AAC9C,OAAI,OAAO,mBAAmB,UAAU;IACtC,MAAM,YAAY,KAAK,MAAM,iBAAiB,QAAQ,SAAS;AAC/D,QAAI,cAAc,WAChB,eAAe;GAElB;;;;GAIC,eAAe;EAElB;;;;AAKD,MAAI,kBAAkB,aACpB,QAAO;CAEV;AAED,QAAO;AACR;;;;AAKD,eAAe,qBACbO,UACAH,MACAQ,cACAZ,OACiB;AACjB,KAAI,YAAY,QAAQ,cAAc,MAAM;EAC1C,MAAM,mBAAmB,MAAM,qBAC7B,UACA,MACA,cACA,MACD;AACD,MAAI,OAAO,qBAAqB,SAC9B,QAAO;;;;AAKT,SAAO,eAAe,UAAU,yBAAyB;CAC1D;;;;AAID,QAAO,eAAe,UAAU,KAAK,YAAY,yBAAyB;AAC3E;;;;AAKD,eAAe,qBACbO,UACAH,MACAQ,cACAZ,OAC6B;AAC7B,KAAI,SAAS,WAAW,EACtB,QAAO;CAGT,IAAIa;AAEJ,KAAI,cAAc,QAAQ,KAAK,aAAa,QAAW;EACrD,MAAM,iBAAiB,iBAAiB,MAAM;AAC9C,MAAI,OAAO,mBAAmB,SAC5B;EAEF,mBAAmB,KAAK,MAAM,iBAAiB,KAAK,SAAS;CAC9D,WAAU,YAAY,QAAQ,KAAK,WAAW,QAC7C,mBAAmB,KAAK,MAAM,KAAK,OAAO;KAE1C;AAGF,KAAI,oBAAoB,GACtB,mBAAmB;CAGrB,MAAM,cAAc,MAAM,aAAa,SAAS;AAChD,KAAI,eAAe,iBACjB,QAAO;;;;;CAOT,IAAI,OAAO;CACX,IAAI,QAAQ,SAAS;CACrB,IAAI,kBAAkB,SAAS;CAC/B,MAAM,gBAAgB,KAAK,MAAM,KAAK,KAAK,SAAS,OAAO,CAAC,GAAG;AAE/D,MAAK,IAAI,IAAI,GAAG,IAAI,eAAe,KAAK;AACtC,MAAI,QAAQ,MACV;EAGF,MAAM,MAAM,KAAK,OAAO,OAAO,SAAS,EAAE;EAC1C,MAAM,eAAe,MAAM,aAAa,SAAS,MAAM,IAAI,CAAC;AAC5D,MAAI,gBAAgB,kBAAkB;GACpC,kBAAkB;GAClB,QAAQ;EACT,OACC,OAAO,MAAM;CAEhB;AAED,KAAI,oBAAoB,SAAS,QAC/B,kBAAkB;AAGpB,KAAI,mBAAmB,SAAS,QAAQ;AACtC,MAAI,SAAS,WAAW,EACtB,QAAO;EAET,kBAAkB,SAAS,SAAS;CACrC;;;;AAKD,MAAK,IAAI,IAAI,iBAAiB,KAAK,GAAG,IACpC,KAAI,kBAAkB,UAAU,EAAE,CAChC,QAAO;AAIX,QAAO;AACR;;;;AAKD,SAAS,eACPN,UACAO,gBACQ;AACR,KAAI,SAAS,UAAU,eACrB,QAAO;CAGT,MAAM,eAAe,SAAS,SAAS;AAEvC,MAAK,IAAI,IAAI,cAAc,KAAK,GAAG,IACjC,KAAI,kBAAkB,UAAU,EAAE,CAChC,QAAO;AAIX,QAAO;AACR;;;;AAKD,SAAS,kBACPP,UACAG,aACS;AACT,KAAI,eAAe,SAAS,OAC1B,QAAO;;;;AAMT,KACE,cAAc,SAAS,UACvB,UAAU,WAAW,SAAS,aAAa,IAC3C,aAAa,SAAS,aAAa,CAEnC,QAAO;CAGT,MAAM,cAAc,KAAK,IAAI,GAAG,cAAc,4BAA4B;CAC1E,MAAM,YAAY,KAAK,IACrB,SAAS,QACT,cAAc,4BACf;AAED,MAAK,IAAI,IAAI,aAAa,IAAI,WAAW,KAAK;AAC5C,MAAI,CAAC,aAAa,SAAS,GAAG,CAC5B;EAGF,MAAM,cAAc,mBAAmB,SAAS,GAAgB;AAChE,MAAI,wBAAwB,UAAU,GAAG,aAAa,YAAY,CAChE,QAAO;CAEV;AAED,QAAO;AACR;;;;AAKD,SAAS,mBAAmBK,WAAmC;CAC7D,MAAM,8BAAc,IAAI;AACxB,KAAI,UAAU,WACZ,MAAK,MAAM,YAAY,UAAU,YAAY;EAC3C,MAAM,KACJ,OAAO,aAAa,YAAY,QAAQ,WAAW,SAAS,KAAK;AACnE,MAAI,IACF,YAAY,IAAI,GAAG;CAEtB;AAEH,QAAO;AACR;;;;AAKD,SAAS,wBACPR,UACAS,gBACAN,aACAO,aACS;AACT,MAAK,IAAI,IAAI,iBAAiB,GAAG,IAAI,SAAS,QAAQ,KAAK;EACzD,MAAM,UAAU,SAAS;AACzB,MACE,YAAY,WAAW,QAAQ,IAC/B,YAAY,IAAI,QAAQ,aAAa,EACrC;GACA,MAAM,iBAAiB,iBAAiB;GACxC,MAAM,mBAAmB,IAAI;AAC7B,OAAI,mBAAmB,iBACrB,QAAO;EAEV;CACF;AACD,QAAO;AACR;;;;AAKD,eAAe,cACbC,qBACAlB,OACAmB,eACAP,cACAQ,uBACiB;AACjB,KAAI,CAAC,oBAAoB,OACvB,QAAO;CAGT,MAAM,kBAAkB,MAAM,uBAC5B,qBACA,cACA,sBACD;AAED,KAAI,CAAC,gBAAgB,OACnB,QAAO;AAGT,KAAI;EACF,MAAM,kBAAkB,cAAc,QACpC,cACA,KAAK,UAAU,iBAAiB,MAAM,EAAE,CACzC;EACD,MAAM,WAAW,MAAM,MAAM
,OAAO,gBAAgB;EACpD,MAAM,UAAU,SAAS;;;;AAIzB,MAAI,OAAO,YAAY,SACrB,QAAO,QAAQ,MAAM;WACZ,MAAM,QAAQ,QAAQ,EAAE;;;;GAIjC,MAAM,cAAc,QACjB,IAAI,CAAC,SAAS;AACb,QAAI,OAAO,SAAS,SAAU,QAAO;AACrC,QAAI,OAAO,SAAS,YAAY,SAAS,QAAQ,UAAU,KACzD,QAAQ,KAA0B;AAEpC,WAAO;GACR,EAAC,CACD,KAAK,GAAG;AACX,UAAO,YAAY,MAAM;EAC1B;AACD,SAAO;CACR,SAAQ,GAAG;AACV,SAAO,CAAC,0BAA0B,EAAE,GAAG;CACxC;AACF;;;;AAKD,eAAe,uBACbb,UACAK,cACAQ,uBACwB;AACxB,KAAI,0BAA0B,OAC5B,QAAO;AAGT,KAAI;AACF,SAAO,MAAM,aAAa,UAAU;GAClC,WAAW;GACX,cAAc,OAAO,SAAS,aAAa,KAAK;GAChD,UAAU;GACV,cAAc;GACd,eAAe;EAChB,EAAC;CACH,QAAO;;;;AAIN,SAAO,SAAS,MAAM,CAAC,+BAA+B;CACvD;AACF"}
@@ -1 +1 @@
- {"version":3,"file":"todoListMiddleware.d.cts","names":["z","TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT","stateSchema","ZodString","ZodEnum","ZodTypeAny","ZodObject","ZodArray","ZodDefault","TodoMiddlewareState","infer","TodoListMiddlewareOptions","todoListMiddleware","__types_js4","AgentMiddleware"],"sources":["../../../src/agents/middleware/todoListMiddleware.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nexport declare const TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT = \"## `write_todos`\\n\\nYou have access to the `write_todos` tool to help you manage and plan complex objectives. \\nUse this tool for complex objectives to ensure that you are tracking each necessary step and giving the user visibility into your progress.\\nThis tool is very helpful for planning complex objectives, and for breaking down these larger complex objectives into smaller steps.\\n\\nIt is critical that you mark todos as completed as soon as you are done with a step. Do not batch up multiple steps before marking them as completed.\\nFor simple objectives that only require a few steps, it is better to just complete the objective directly and NOT use this tool.\\nWriting todos takes time and tokens, use it when it is helpful for managing complex many-step problems! But not for simple few-step requests.\\n\\n## Important To-Do List Usage Notes to Remember\\n- The `write_todos` tool should never be called multiple times in parallel.\\n- Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant.\";\ndeclare const stateSchema: z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>;\nexport type TodoMiddlewareState = z.infer<typeof stateSchema>;\nexport interface TodoListMiddlewareOptions {\n /**\n * Custom system prompt to guide the agent on using the todo tool.\n * If not provided, uses the default {@link PLANNING_MIDDLEWARE_SYSTEM_PROMPT}.\n */\n systemPrompt?: string;\n /**\n * Custom description for the {@link writeTodos} tool.\n * If not provided, uses the default {@link WRITE_TODOS_DESCRIPTION}.\n */\n toolDescription?: string;\n}\n/**\n * Creates a middleware that provides todo list management capabilities to agents.\n *\n * This middleware adds a `write_todos` tool that allows agents to create and manage\n * structured task lists for complex multi-step operations. 
It's designed to help\n * agents track progress, organize complex tasks, and provide users with visibility\n * into task completion status.\n *\n * The middleware automatically injects system prompts that guide the agent on when\n * and how to use the todo functionality effectively.\n *\n * @example\n * ```typescript\n * import { todoListMiddleware, createAgent } from 'langchain';\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [todoListMiddleware()],\n * });\n *\n * // Agent now has access to write_todos tool and todo state tracking\n * const result = await agent.invoke({\n * messages: [new HumanMessage(\"Help me refactor my codebase\")]\n * });\n *\n * console.log(result.todos); // Array of todo items with status tracking\n * ```\n *\n * @returns A configured middleware instance that provides todo management capabilities\n *\n * @see {@link TodoMiddlewareState} for the state schema\n * @see {@link writeTodos} for the tool implementation\n */\nexport declare function todoListMiddleware(options?: TodoListMiddlewareOptions): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>, undefined, any>;\nexport {};\n"],"mappings":";;;;cACqBC,kCAAAA;UAwBJU,yBAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA6COC,kBAAAA,WAA6BD,4BAA4E,gBAAXX,CAAAA,CAAEM;SAC7GN,CAAAA,CAAEQ,WAAWR,CAAAA,CAAEO,SAASP,CAAAA,CAAEM;aACpBN,CAAAA,CAAEG;YACHH,CAAAA,CAAEI;cACFJ,CAAAA,CAAEK;;;;;;;YAONL,CAAAA,CAAEK"}
+ {"version":3,"file":"todoListMiddleware.d.cts","names":["z","TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT","stateSchema","ZodString","ZodEnum","ZodTypeAny","ZodObject","ZodArray","ZodDefault","TodoMiddlewareState","infer","TodoListMiddlewareOptions","todoListMiddleware","__types_js14","AgentMiddleware"],"sources":["../../../src/agents/middleware/todoListMiddleware.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nexport declare const TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT = \"## `write_todos`\\n\\nYou have access to the `write_todos` tool to help you manage and plan complex objectives. \\nUse this tool for complex objectives to ensure that you are tracking each necessary step and giving the user visibility into your progress.\\nThis tool is very helpful for planning complex objectives, and for breaking down these larger complex objectives into smaller steps.\\n\\nIt is critical that you mark todos as completed as soon as you are done with a step. Do not batch up multiple steps before marking them as completed.\\nFor simple objectives that only require a few steps, it is better to just complete the objective directly and NOT use this tool.\\nWriting todos takes time and tokens, use it when it is helpful for managing complex many-step problems! But not for simple few-step requests.\\n\\n## Important To-Do List Usage Notes to Remember\\n- The `write_todos` tool should never be called multiple times in parallel.\\n- Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant.\";\ndeclare const stateSchema: z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>;\nexport type TodoMiddlewareState = z.infer<typeof stateSchema>;\nexport interface TodoListMiddlewareOptions {\n /**\n * Custom system prompt to guide the agent on using the todo tool.\n * If not provided, uses the default {@link PLANNING_MIDDLEWARE_SYSTEM_PROMPT}.\n */\n systemPrompt?: string;\n /**\n * Custom description for the {@link writeTodos} tool.\n * If not provided, uses the default {@link WRITE_TODOS_DESCRIPTION}.\n */\n toolDescription?: string;\n}\n/**\n * Creates a middleware that provides todo list management capabilities to agents.\n *\n * This middleware adds a `write_todos` tool that allows agents to create and manage\n * structured task lists for complex multi-step operations. 
It's designed to help\n * agents track progress, organize complex tasks, and provide users with visibility\n * into task completion status.\n *\n * The middleware automatically injects system prompts that guide the agent on when\n * and how to use the todo functionality effectively.\n *\n * @example\n * ```typescript\n * import { todoListMiddleware, createAgent } from 'langchain';\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [todoListMiddleware()],\n * });\n *\n * // Agent now has access to write_todos tool and todo state tracking\n * const result = await agent.invoke({\n * messages: [new HumanMessage(\"Help me refactor my codebase\")]\n * });\n *\n * console.log(result.todos); // Array of todo items with status tracking\n * ```\n *\n * @returns A configured middleware instance that provides todo management capabilities\n *\n * @see {@link TodoMiddlewareState} for the state schema\n * @see {@link writeTodos} for the tool implementation\n */\nexport declare function todoListMiddleware(options?: TodoListMiddlewareOptions): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>, undefined, any>;\nexport {};\n"],"mappings":";;;;cACqBC,kCAAAA;UAwBJU,yBAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA6COC,kBAAAA,WAA6BD,4BAA4E,gBAAXX,CAAAA,CAAEM;SAC7GN,CAAAA,CAAEQ,WAAWR,CAAAA,CAAEO,SAASP,CAAAA,CAAEM;aACpBN,CAAAA,CAAEG;YACHH,CAAAA,CAAEI;cACFJ,CAAAA,CAAEK;;;;;;;YAONL,CAAAA,CAAEK"}
@@ -1 +1 @@
- {"version":3,"file":"todoListMiddleware.d.ts","names":["z","TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT","stateSchema","ZodString","ZodEnum","ZodTypeAny","ZodObject","ZodArray","ZodDefault","TodoMiddlewareState","infer","TodoListMiddlewareOptions","todoListMiddleware","__types_js3","AgentMiddleware"],"sources":["../../../src/agents/middleware/todoListMiddleware.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nexport declare const TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT = \"## `write_todos`\\n\\nYou have access to the `write_todos` tool to help you manage and plan complex objectives. \\nUse this tool for complex objectives to ensure that you are tracking each necessary step and giving the user visibility into your progress.\\nThis tool is very helpful for planning complex objectives, and for breaking down these larger complex objectives into smaller steps.\\n\\nIt is critical that you mark todos as completed as soon as you are done with a step. Do not batch up multiple steps before marking them as completed.\\nFor simple objectives that only require a few steps, it is better to just complete the objective directly and NOT use this tool.\\nWriting todos takes time and tokens, use it when it is helpful for managing complex many-step problems! But not for simple few-step requests.\\n\\n## Important To-Do List Usage Notes to Remember\\n- The `write_todos` tool should never be called multiple times in parallel.\\n- Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant.\";\ndeclare const stateSchema: z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>;\nexport type TodoMiddlewareState = z.infer<typeof stateSchema>;\nexport interface TodoListMiddlewareOptions {\n /**\n * Custom system prompt to guide the agent on using the todo tool.\n * If not provided, uses the default {@link PLANNING_MIDDLEWARE_SYSTEM_PROMPT}.\n */\n systemPrompt?: string;\n /**\n * Custom description for the {@link writeTodos} tool.\n * If not provided, uses the default {@link WRITE_TODOS_DESCRIPTION}.\n */\n toolDescription?: string;\n}\n/**\n * Creates a middleware that provides todo list management capabilities to agents.\n *\n * This middleware adds a `write_todos` tool that allows agents to create and manage\n * structured task lists for complex multi-step operations. 
It's designed to help\n * agents track progress, organize complex tasks, and provide users with visibility\n * into task completion status.\n *\n * The middleware automatically injects system prompts that guide the agent on when\n * and how to use the todo functionality effectively.\n *\n * @example\n * ```typescript\n * import { todoListMiddleware, createAgent } from 'langchain';\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [todoListMiddleware()],\n * });\n *\n * // Agent now has access to write_todos tool and todo state tracking\n * const result = await agent.invoke({\n * messages: [new HumanMessage(\"Help me refactor my codebase\")]\n * });\n *\n * console.log(result.todos); // Array of todo items with status tracking\n * ```\n *\n * @returns A configured middleware instance that provides todo management capabilities\n *\n * @see {@link TodoMiddlewareState} for the state schema\n * @see {@link writeTodos} for the tool implementation\n */\nexport declare function todoListMiddleware(options?: TodoListMiddlewareOptions): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>, undefined, any>;\nexport {};\n"],"mappings":";;;;cACqBC,kCAAAA;UAwBJU,yBAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA6COC,kBAAAA,WAA6BD,4BAA4E,gBAAXX,CAAAA,CAAEM;SAC7GN,CAAAA,CAAEQ,WAAWR,CAAAA,CAAEO,SAASP,CAAAA,CAAEM;aACpBN,CAAAA,CAAEG;YACHH,CAAAA,CAAEI;cACFJ,CAAAA,CAAEK;;;;;;;YAONL,CAAAA,CAAEK"}
+ {"version":3,"file":"todoListMiddleware.d.ts","names":["z","TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT","stateSchema","ZodString","ZodEnum","ZodTypeAny","ZodObject","ZodArray","ZodDefault","TodoMiddlewareState","infer","TodoListMiddlewareOptions","todoListMiddleware","__types_js5","AgentMiddleware"],"sources":["../../../src/agents/middleware/todoListMiddleware.d.ts"],"sourcesContent":["import { z } from \"zod/v3\";\nexport declare const TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT = \"## `write_todos`\\n\\nYou have access to the `write_todos` tool to help you manage and plan complex objectives. \\nUse this tool for complex objectives to ensure that you are tracking each necessary step and giving the user visibility into your progress.\\nThis tool is very helpful for planning complex objectives, and for breaking down these larger complex objectives into smaller steps.\\n\\nIt is critical that you mark todos as completed as soon as you are done with a step. Do not batch up multiple steps before marking them as completed.\\nFor simple objectives that only require a few steps, it is better to just complete the objective directly and NOT use this tool.\\nWriting todos takes time and tokens, use it when it is helpful for managing complex many-step problems! But not for simple few-step requests.\\n\\n## Important To-Do List Usage Notes to Remember\\n- The `write_todos` tool should never be called multiple times in parallel.\\n- Don't be afraid to revise the To-Do list as you go. New information may reveal new tasks that need to be done, or old tasks that are irrelevant.\";\ndeclare const stateSchema: z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>;\nexport type TodoMiddlewareState = z.infer<typeof stateSchema>;\nexport interface TodoListMiddlewareOptions {\n /**\n * Custom system prompt to guide the agent on using the todo tool.\n * If not provided, uses the default {@link PLANNING_MIDDLEWARE_SYSTEM_PROMPT}.\n */\n systemPrompt?: string;\n /**\n * Custom description for the {@link writeTodos} tool.\n * If not provided, uses the default {@link WRITE_TODOS_DESCRIPTION}.\n */\n toolDescription?: string;\n}\n/**\n * Creates a middleware that provides todo list management capabilities to agents.\n *\n * This middleware adds a `write_todos` tool that allows agents to create and manage\n * structured task lists for complex multi-step operations. 
It's designed to help\n * agents track progress, organize complex tasks, and provide users with visibility\n * into task completion status.\n *\n * The middleware automatically injects system prompts that guide the agent on when\n * and how to use the todo functionality effectively.\n *\n * @example\n * ```typescript\n * import { todoListMiddleware, createAgent } from 'langchain';\n *\n * const agent = createAgent({\n * model: \"openai:gpt-4o\",\n * middleware: [todoListMiddleware()],\n * });\n *\n * // Agent now has access to write_todos tool and todo state tracking\n * const result = await agent.invoke({\n * messages: [new HumanMessage(\"Help me refactor my codebase\")]\n * });\n *\n * console.log(result.todos); // Array of todo items with status tracking\n * ```\n *\n * @returns A configured middleware instance that provides todo management capabilities\n *\n * @see {@link TodoMiddlewareState} for the state schema\n * @see {@link writeTodos} for the tool implementation\n */\nexport declare function todoListMiddleware(options?: TodoListMiddlewareOptions): import(\"./types.js\").AgentMiddleware<z.ZodObject<{\n todos: z.ZodDefault<z.ZodArray<z.ZodObject<{\n content: z.ZodString;\n status: z.ZodEnum<[\"pending\", \"in_progress\", \"completed\"]>;\n }, \"strip\", z.ZodTypeAny, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }, {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }>, \"many\">>;\n}, \"strip\", z.ZodTypeAny, {\n todos: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[];\n}, {\n todos?: {\n content: string;\n status: \"completed\" | \"in_progress\" | \"pending\";\n }[] | undefined;\n}>, undefined, any>;\nexport {};\n"],"mappings":";;;;cACqBC,kCAAAA;UAwBJU,yBAAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA6COC,kBAAAA,WAA6BD,4BAA4E,gBAAXX,CAAAA,CAAEM;SAC7GN,CAAAA,CAAEQ,WAAWR,CAAAA,CAAEO,SAASP,CAAAA,CAAEM;aACpBN,CAAAA,CAAEG;YACHH,CAAAA,CAAEI;cACFJ,CAAAA,CAAEK;;;;;;;YAONL,CAAAA,CAAEK"}
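The only visible change in the `todoListMiddleware.d.cts.map` / `todoListMiddleware.d.ts.map` hunks above is the internal generated alias in the `names` array (`__types_js4` → `__types_js14` and `__types_js3` → `__types_js5`); the declared `TodoListMiddlewareOptions` surface (`systemPrompt`, `toolDescription`) is unchanged. For orientation, here is a minimal sketch of how those options are passed, following the import path used in the embedded JSDoc example (`langchain`) plus `@langchain/core/messages` for `HumanMessage`; the option string values are hypothetical placeholders, not defaults from the package.

```typescript
import { createAgent, todoListMiddleware } from "langchain";
import { HumanMessage } from "@langchain/core/messages";

// Sketch only: option names come from TodoListMiddlewareOptions in the
// declaration above; the values below are hypothetical examples.
const agent = createAgent({
  model: "openai:gpt-4o",
  middleware: [
    todoListMiddleware({
      // Replaces the default TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT guidance.
      systemPrompt: "Track every refactoring step with write_todos.",
      // Replaces the default description attached to the write_todos tool.
      toolDescription: "Create and update a structured todo list for this task.",
    }),
  ],
});

// As in the embedded JSDoc example, the middleware adds todo state tracking
// to the agent result.
const result = await agent.invoke({
  messages: [new HumanMessage("Help me refactor my codebase")],
});
console.log(result.todos); // [{ content, status: "pending" | "in_progress" | "completed" }, ...]
```

Omitting both options keeps the defaults (`TODO_LIST_MIDDLEWARE_SYSTEM_PROMPT` and the built-in `write_todos` description), which is the configuration shown in the declaration file's own example.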