@librechat/agents 2.4.63 → 2.4.65

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -25,6 +25,8 @@ export type RunTitleOptions = {
   clientOptions?: l.ClientOptions;
   chainOptions?: Partial<RunnableConfig> | undefined;
   omitOptions?: Set<string>;
+  titleMethod?: e.TitleMethod;
+  convoPromptTemplate?: string;
 };
 export interface AgentStateChannels {
   messages: BaseMessage[];
@@ -1,3 +1,4 @@
 import type { Runnable } from '@langchain/core/runnables';
-import * as t from '@/types';
+import type * as t from '@/types';
 export declare const createTitleRunnable: (model: t.ChatModelInstance, _titlePrompt?: string) => Promise<Runnable>;
+export declare const createCompletionTitleRunnable: (model: t.ChatModelInstance, titlePrompt?: string) => Promise<Runnable>;
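The updated declarations add a plain-completion counterpart to the structured title runnable. As a rough illustration only, here is a minimal sketch of invoking each helper directly, mirroring how Run.generateTitle calls them later in this diff; the root import path, the ChatOpenAI model choice, and the exact invoke payload are assumptions rather than part of the published typings:

// Sketch only: assumes both helpers are reachable from the package build and
// that a LangChain chat model satisfies t.ChatModelInstance.
import { ChatOpenAI } from '@langchain/openai';
import {
  createTitleRunnable,
  createCompletionTitleRunnable,
} from '@librechat/agents';

async function demoTitles() {
  const model = new ChatOpenAI({ model: 'gpt-4o-mini' });
  const input = {
    convo: 'User: hi\nAI: Hello! How can I help you today?',
    inputText: 'hi',
    skipLanguage: false,
  };

  // Structured path: historically typed as returning { language, title }.
  const structured = await createTitleRunnable(model);
  console.log(await structured.invoke(input));

  // New completion path: produces a title without a detected language.
  const completion = await createCompletionTitleRunnable(model);
  console.log(await completion.invoke(input));
}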
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/agents",
-  "version": "2.4.63",
+  "version": "2.4.65",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -67,6 +67,8 @@
     "start:collab5": "node --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/collab_design_v5.ts",
     "start:dev": "node --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/main.ts",
     "test": "jest",
+    "test:memory": "NODE_OPTIONS='--expose-gc' npx jest src/specs/title.memory-leak.test.ts",
+    "test:all": "npm test -- --testPathIgnorePatterns=title.memory-leak.test.ts && npm run test:memory",
     "reinstall": "npm run clean && npm ci && rm -rf ./dist && npm run build",
     "re": "bun run clean && bun install && rm -rf ./dist && turbo build",
     "lint": "eslint \"{,!(node_modules|venv)/**/}*.{js,jsx,ts,tsx}\" --fix",
@@ -164,6 +164,12 @@ export enum Constants {
   CONTENT_AND_ARTIFACT = 'content_and_artifact',
 }
 
+export enum TitleMethod {
+  STRUCTURED = 'structured',
+  FUNCTIONS = 'functions',
+  COMPLETION = 'completion',
+}
+
 export enum EnvVar {
   CODE_API_KEY = 'LIBRECHAT_CODE_API_KEY',
   CODE_BASEURL = 'LIBRECHAT_CODE_BASEURL',
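In the run.ts changes below, TitleMethod.COMPLETION routes title generation through the new createCompletionTitleRunnable and becomes the default for Run.generateTitle, while STRUCTURED and FUNCTIONS both continue through the existing createTitleRunnable.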
package/src/run.ts CHANGED
@@ -10,10 +10,13 @@ import type {
 import type { ClientCallbacks, SystemCallbacks } from '@/graphs/Graph';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
-import { GraphEvents, Providers, Callback } from '@/common';
+import { GraphEvents, Providers, Callback, TitleMethod } from '@/common';
 import { manualToolStreamProviders } from '@/llm/providers';
 import { shiftIndexTokenCountMap } from '@/messages/format';
-import { createTitleRunnable } from '@/utils/title';
+import {
+  createTitleRunnable,
+  createCompletionTitleRunnable,
+} from '@/utils/title';
 import { createTokenCounter } from '@/utils/tokens';
 import { StandardGraph } from '@/graphs/Graph';
 import { HandlerRegistry } from '@/events';
@@ -259,9 +262,11 @@ export class Run<T extends t.BaseGraphState> {
     chainOptions,
     skipLanguage,
     omitOptions = defaultOmitOptions,
-  }: t.RunTitleOptions): Promise<{ language: string; title: string }> {
+    titleMethod = TitleMethod.COMPLETION,
+    convoPromptTemplate,
+  }: t.RunTitleOptions): Promise<{ language?: string; title?: string }> {
     const convoTemplate = PromptTemplate.fromTemplate(
-      'User: {input}\nAI: {output}'
+      convoPromptTemplate ?? 'User: {input}\nAI: {output}'
     );
     const response = contentParts
       .map((part) => {
@@ -297,10 +302,10 @@ export class Run<T extends t.BaseGraphState> {
       model.n = (clientOptions as t.OpenAIClientOptions | undefined)
         ?.n as number;
     }
-    const chain = await createTitleRunnable(model, titlePrompt);
-    return (await chain.invoke(
-      { convo, inputText, skipLanguage },
-      chainOptions
-    )) as { language: string; title: string };
+    const chain =
+      titleMethod === TitleMethod.COMPLETION
+        ? await createCompletionTitleRunnable(model, titlePrompt)
+        : await createTitleRunnable(model, titlePrompt);
+    return await chain.invoke({ convo, inputText, skipLanguage }, chainOptions);
   }
 }
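Net effect of the run.ts hunks: generateTitle now defaults to the completion method, accepts an optional convoPromptTemplate to replace the built-in 'User: {input}\nAI: {output}' template, and returns both fields as optional. A caller-side sketch, assuming Run, Providers, TitleMethod, and the types namespace are re-exported from the package root; the provider value and the template override are illustrative only:

// Sketch only: `run` and `contentParts` come from a prior Run.create /
// processStream cycle, as in the spec hunks further down.
import { Run, Providers, TitleMethod } from '@librechat/agents';
import type * as t from '@librechat/agents';

async function titleForRun(
  run: Run<t.IState>,
  contentParts: t.RunTitleOptions['contentParts']
): Promise<{ title?: string; language?: string }> {
  return run.generateTitle({
    provider: Providers.OPENAI,
    inputText: 'Can you help me calculate the area of a circle with radius 5?',
    contentParts,
    titleMethod: TitleMethod.COMPLETION, // plain-completion titling, the new default
    // Optional: replace the default 'User: {input}\nAI: {output}' template.
    convoPromptTemplate: 'Human: {input}\nAssistant: {output}',
  });
}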
@@ -14,9 +14,9 @@ import {
   ModelEndHandler,
   createMetadataAggregator,
 } from '@/events';
+import { GraphEvents, Providers, TitleMethod } from '@/common';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
-import { GraphEvents, Providers } from '@/common';
 import { Run } from '@/run';
 
 const conversationHistory: BaseMessage[] = [];
@@ -16,8 +16,8 @@ import {
   ModelEndHandler,
   createMetadataAggregator,
 } from '@/events';
+import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { ContentTypes, GraphEvents, Providers } from '@/common';
 import { capitalizeFirstLetter } from './spec.utils';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
@@ -175,6 +175,7 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
     const titleResult = await run.generateTitle({
       provider,
       inputText: userMessage,
+      titleMethod: TitleMethod.STRUCTURED,
       contentParts,
       chainOptions: {
         callbacks: [
@@ -191,6 +192,61 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
     expect(collected).toBeDefined();
   });
 
+  test(`${capitalizeFirstLetter(provider)}: should generate title using completion method`, async () => {
+    const { userName, location } = await getArgs();
+    const llmConfig = getLLMConfig(provider);
+    const customHandlers = setupCustomHandlers();
+
+    run = await Run.create<t.IState>({
+      runId: 'test-run-id-completion',
+      graphConfig: {
+        type: 'standard',
+        llmConfig,
+        tools: [new Calculator()],
+        instructions:
+          'You are a friendly AI assistant. Always address the user by their name.',
+        additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+      },
+      returnContent: true,
+      customHandlers,
+    });
+
+    const userMessage =
+      'Can you help me calculate the area of a circle with radius 5?';
+    conversationHistory = [];
+    conversationHistory.push(new HumanMessage(userMessage));
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    const finalContentParts = await run.processStream(inputs, config);
+    expect(finalContentParts).toBeDefined();
+
+    const { handleLLMEnd, collected } = createMetadataAggregator();
+    const titleResult = await run.generateTitle({
+      provider,
+      inputText: userMessage,
+      titleMethod: TitleMethod.COMPLETION, // Using completion method
+      contentParts,
+      chainOptions: {
+        callbacks: [
+          {
+            handleLLMEnd,
+          },
+        ],
+      },
+    });
+
+    expect(titleResult).toBeDefined();
+    expect(titleResult.title).toBeDefined();
+    expect(titleResult.title).not.toBe('');
+    // Completion method doesn't return language
+    expect(titleResult.language).toBeUndefined();
+    expect(collected).toBeDefined();
+    console.log(`Completion method generated title: "${titleResult.title}"`);
+  });
+
   test(`${capitalizeFirstLetter(provider)}: should follow-up`, async () => {
     console.log('Previous conversation length:', runningHistory.length);
     console.log(
@@ -16,8 +16,8 @@ import {
   ModelEndHandler,
   createMetadataAggregator,
 } from '@/events';
+import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import { ContentTypes, GraphEvents, Providers } from '@/common';
 import { capitalizeFirstLetter } from './spec.utils';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
@@ -175,6 +175,7 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
     const titleResult = await run.generateTitle({
       provider,
       inputText: userMessage,
+      titleMethod: TitleMethod.STRUCTURED,
       contentParts,
       chainOptions: {
         callbacks: [
@@ -191,6 +192,60 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
     expect(collected).toBeDefined();
   });
 
+  test(`${capitalizeFirstLetter(provider)}: should generate title using completion method`, async () => {
+    const { userName, location } = await getArgs();
+    const llmConfig = getLLMConfig(provider);
+    const customHandlers = setupCustomHandlers();
+
+    run = await Run.create<t.IState>({
+      runId: 'test-run-id-completion',
+      graphConfig: {
+        type: 'standard',
+        llmConfig,
+        tools: [new Calculator()],
+        instructions:
+          'You are a friendly AI assistant. Always address the user by their name.',
+        additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
+      },
+      returnContent: true,
+      customHandlers,
+    });
+
+    const userMessage = 'What is the weather like today?';
+    conversationHistory = [];
+    conversationHistory.push(new HumanMessage(userMessage));
+
+    const inputs = {
+      messages: conversationHistory,
+    };
+
+    const finalContentParts = await run.processStream(inputs, config);
+    expect(finalContentParts).toBeDefined();
+
+    const { handleLLMEnd, collected } = createMetadataAggregator();
+    const titleResult = await run.generateTitle({
+      provider,
+      inputText: userMessage,
+      titleMethod: TitleMethod.COMPLETION, // Using completion method
+      contentParts,
+      chainOptions: {
+        callbacks: [
+          {
+            handleLLMEnd,
+          },
+        ],
+      },
+    });
+
+    expect(titleResult).toBeDefined();
+    expect(titleResult.title).toBeDefined();
+    expect(titleResult.title).not.toBe('');
+    // Completion method doesn't return language
+    expect(titleResult.language).toBeUndefined();
+    expect(collected).toBeDefined();
+    console.log(`Completion method generated title: "${titleResult.title}"`);
+  });
+
   test(`${capitalizeFirstLetter(provider)}: should follow-up`, async () => {
     console.log('Previous conversation length:', runningHistory.length);
     console.log(