langchain 0.0.199 → 0.0.201

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. package/agents/toolkits/connery.cjs +1 -0
  2. package/agents/toolkits/connery.d.ts +1 -0
  3. package/agents/toolkits/connery.js +1 -0
  4. package/dist/agents/index.cjs +3 -1
  5. package/dist/agents/index.d.ts +1 -1
  6. package/dist/agents/index.js +1 -1
  7. package/dist/agents/toolkits/connery/index.cjs +39 -0
  8. package/dist/agents/toolkits/connery/index.d.ts +23 -0
  9. package/dist/agents/toolkits/connery/index.js +35 -0
  10. package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -1
  11. package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -1
  12. package/dist/chat_models/fake.cjs +2 -114
  13. package/dist/chat_models/fake.d.ts +1 -52
  14. package/dist/chat_models/fake.js +1 -113
  15. package/dist/chat_models/llama_cpp.cjs +2 -1
  16. package/dist/chat_models/llama_cpp.d.ts +1 -1
  17. package/dist/chat_models/llama_cpp.js +2 -1
  18. package/dist/chat_models/minimax.d.ts +1 -1
  19. package/dist/document_loaders/fs/obsidian.cjs +240 -0
  20. package/dist/document_loaders/fs/obsidian.d.ts +26 -0
  21. package/dist/document_loaders/fs/obsidian.js +233 -0
  22. package/dist/embeddings/gradient_ai.cjs +103 -0
  23. package/dist/embeddings/gradient_ai.d.ts +48 -0
  24. package/dist/embeddings/gradient_ai.js +99 -0
  25. package/dist/llms/gradient_ai.cjs +22 -8
  26. package/dist/llms/gradient_ai.d.ts +7 -2
  27. package/dist/llms/gradient_ai.js +22 -8
  28. package/dist/llms/llama_cpp.cjs +2 -1
  29. package/dist/llms/llama_cpp.d.ts +1 -1
  30. package/dist/llms/llama_cpp.js +2 -1
  31. package/dist/load/import_constants.cjs +3 -0
  32. package/dist/load/import_constants.js +3 -0
  33. package/dist/load/import_map.cjs +5 -3
  34. package/dist/load/import_map.d.ts +2 -0
  35. package/dist/load/import_map.js +2 -0
  36. package/dist/memory/vector_store.cjs +1 -1
  37. package/dist/memory/vector_store.js +1 -1
  38. package/dist/tools/connery.cjs +279 -0
  39. package/dist/tools/connery.d.ts +145 -0
  40. package/dist/tools/connery.js +274 -0
  41. package/dist/tools/gmail/base.cjs +69 -0
  42. package/dist/tools/gmail/base.d.ts +19 -0
  43. package/dist/tools/gmail/base.js +65 -0
  44. package/dist/tools/gmail/create_draft.cjs +62 -0
  45. package/dist/tools/gmail/create_draft.d.ts +35 -0
  46. package/dist/tools/gmail/create_draft.js +58 -0
  47. package/dist/tools/gmail/descriptions.cjs +118 -0
  48. package/dist/tools/gmail/descriptions.d.ts +5 -0
  49. package/dist/tools/gmail/descriptions.js +115 -0
  50. package/dist/tools/gmail/get_message.cjs +83 -0
  51. package/dist/tools/gmail/get_message.d.ts +18 -0
  52. package/dist/tools/gmail/get_message.js +79 -0
  53. package/dist/tools/gmail/get_thread.cjs +89 -0
  54. package/dist/tools/gmail/get_thread.d.ts +18 -0
  55. package/dist/tools/gmail/get_thread.js +85 -0
  56. package/dist/tools/gmail/index.cjs +13 -0
  57. package/dist/tools/gmail/index.d.ts +11 -0
  58. package/dist/tools/gmail/index.js +5 -0
  59. package/dist/tools/gmail/search.cjs +118 -0
  60. package/dist/tools/gmail/search.d.ts +29 -0
  61. package/dist/tools/gmail/search.js +114 -0
  62. package/dist/tools/gmail/send_message.cjs +74 -0
  63. package/dist/tools/gmail/send_message.d.ts +35 -0
  64. package/dist/tools/gmail/send_message.js +70 -0
  65. package/dist/tools/webbrowser.cjs +1 -1
  66. package/dist/tools/webbrowser.js +1 -1
  67. package/dist/tools/wolframalpha.cjs +1 -1
  68. package/dist/tools/wolframalpha.js +1 -1
  69. package/dist/util/document.cjs +1 -1
  70. package/dist/util/document.d.ts +1 -1
  71. package/dist/util/document.js +1 -1
  72. package/dist/util/tiktoken.cjs +15 -24
  73. package/dist/util/tiktoken.d.ts +1 -9
  74. package/dist/util/tiktoken.js +1 -21
  75. package/document_loaders/fs/obsidian.cjs +1 -0
  76. package/document_loaders/fs/obsidian.d.ts +1 -0
  77. package/document_loaders/fs/obsidian.js +1 -0
  78. package/embeddings/gradient_ai.cjs +1 -0
  79. package/embeddings/gradient_ai.d.ts +1 -0
  80. package/embeddings/gradient_ai.js +1 -0
  81. package/package.json +43 -3
  82. package/tools/connery.cjs +1 -0
  83. package/tools/connery.d.ts +1 -0
  84. package/tools/connery.js +1 -0
  85. package/tools/gmail.cjs +1 -0
  86. package/tools/gmail.d.ts +1 -0
  87. package/tools/gmail.js +1 -0
package/dist/embeddings/gradient_ai.js
@@ -0,0 +1,99 @@
+ import { Gradient } from "@gradientai/nodejs-sdk";
+ import { getEnvironmentVariable } from "../util/env.js";
+ import { chunkArray } from "../util/chunk.js";
+ import { Embeddings } from "./base.js";
+ /**
+  * Class for generating embeddings using the Gradient AI's API. Extends the
+  * Embeddings class and implements GradientEmbeddingsParams and
+  */
+ export class GradientEmbeddings extends Embeddings {
+     constructor(fields) {
+         super(fields);
+         Object.defineProperty(this, "gradientAccessKey", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "workspaceId", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "batchSize", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 128
+         });
+         // eslint-disable-next-line @typescript-eslint/no-explicit-any
+         Object.defineProperty(this, "model", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.gradientAccessKey =
+             fields?.gradientAccessKey ??
+                 getEnvironmentVariable("GRADIENT_ACCESS_TOKEN");
+         this.workspaceId =
+             fields?.workspaceId ?? getEnvironmentVariable("GRADIENT_WORKSPACE_ID");
+         if (!this.gradientAccessKey) {
+             throw new Error("Missing Gradient AI Access Token");
+         }
+         if (!this.workspaceId) {
+             throw new Error("Missing Gradient AI Workspace ID");
+         }
+     }
+     /**
+      * Method to generate embeddings for an array of documents. Splits the
+      * documents into batches and makes requests to the Gradient API to generate
+      * embeddings.
+      * @param texts Array of documents to generate embeddings for.
+      * @returns Promise that resolves to a 2D array of embeddings for each document.
+      */
+     async embedDocuments(texts) {
+         await this.setModel();
+         const mappedTexts = texts.map((text) => ({ input: text }));
+         const batches = chunkArray(mappedTexts, this.batchSize);
+         const batchRequests = batches.map((batch) => this.caller.call(async () => this.model.generateEmbeddings({
+             inputs: batch,
+         })));
+         const batchResponses = await Promise.all(batchRequests);
+         const embeddings = [];
+         for (let i = 0; i < batchResponses.length; i += 1) {
+             const batch = batches[i];
+             const { embeddings: batchResponse } = batchResponses[i];
+             for (let j = 0; j < batch.length; j += 1) {
+                 embeddings.push(batchResponse[j].embedding);
+             }
+         }
+         return embeddings;
+     }
+     /**
+      * Method to generate an embedding for a single document. Calls the
+      * embedDocuments method with the document as the input.
+      * @param text Document to generate an embedding for.
+      * @returns Promise that resolves to an embedding for the document.
+      */
+     async embedQuery(text) {
+         const data = await this.embedDocuments([text]);
+         return data[0];
+     }
+     /**
+      * Method to set the model to use for generating embeddings.
+      * @sets the class' `model` value to that of the retrieved Embeddings Model.
+      */
+     async setModel() {
+         if (this.model)
+             return;
+         const gradient = new Gradient({
+             accessToken: this.gradientAccessKey,
+             workspaceId: this.workspaceId,
+         });
+         this.model = await gradient.getEmbeddingsModel({
+             slug: "bge-large",
+         });
+     }
+ }
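
The new GradientEmbeddings class is exposed through the "langchain/embeddings/gradient_ai" entrypoint added to the optional-import lists further below. A minimal usage sketch, assuming credentials are passed explicitly or provided via the GRADIENT_ACCESS_TOKEN and GRADIENT_WORKSPACE_ID environment variables:

import { GradientEmbeddings } from "langchain/embeddings/gradient_ai";

// Credentials may also be picked up from GRADIENT_ACCESS_TOKEN / GRADIENT_WORKSPACE_ID.
const embeddings = new GradientEmbeddings({
  gradientAccessKey: process.env.GRADIENT_ACCESS_TOKEN,
  workspaceId: process.env.GRADIENT_WORKSPACE_ID,
});

// Single query embedding.
const queryVector = await embeddings.embedQuery("What is Gradient AI?");

// Batched document embeddings; inputs are chunked into batches of 128 (the default batchSize above).
const docVectors = await embeddings.embedDocuments(["first document", "second document"]);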
package/dist/llms/gradient_ai.cjs
@@ -26,6 +26,12 @@ class GradientLLM extends base_js_1.LLM
              writable: true,
              value: "llama2-7b-chat"
          });
+         Object.defineProperty(this, "adapterId", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          Object.defineProperty(this, "gradientAccessKey", {
              enumerable: true,
              configurable: true,
@@ -46,13 +52,14 @@ class GradientLLM extends base_js_1.LLM
          });
          // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
-         Object.defineProperty(this, "baseModel", {
+         Object.defineProperty(this, "model", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
          this.modelSlug = fields?.modelSlug ?? this.modelSlug;
+         this.adapterId = fields?.adapterId;
          this.gradientAccessKey =
              fields?.gradientAccessKey ??
                  (0, env_js_1.getEnvironmentVariable)("GRADIENT_ACCESS_TOKEN");
@@ -76,23 +83,30 @@ class GradientLLM extends base_js_1.LLM
       */
      /** @ignore */
      async _call(prompt, _options) {
-         await this.setBaseModel();
-         const response = (await this.caller.call(async () => this.baseModel.complete({
+         await this.setModel();
+         const response = (await this.caller.call(async () => this.model.complete({
              query: prompt,
              ...this.inferenceParameters,
          })));
          return response.generatedOutput;
      }
-     async setBaseModel() {
-         if (this.baseModel)
+     async setModel() {
+         if (this.model)
              return;
          const gradient = new nodejs_sdk_1.Gradient({
              accessToken: this.gradientAccessKey,
              workspaceId: this.workspaceId,
          });
-         this.baseModel = await gradient.getBaseModel({
-             baseModelSlug: this.modelSlug,
-         });
+         if (this.adapterId) {
+             this.model = await gradient.getModelAdapter({
+                 modelAdapterId: this.adapterId,
+             });
+         }
+         else {
+             this.model = await gradient.getBaseModel({
+                 baseModelSlug: this.modelSlug,
+             });
+         }
      }
  }
  exports.GradientLLM = GradientLLM;
package/dist/llms/gradient_ai.d.ts
@@ -22,6 +22,10 @@ export interface GradientLLMParams extends BaseLLMParams {
       * Gradient AI Model Slug.
       */
      modelSlug?: string;
+     /**
+      * Gradient Adapter ID for custom fine tuned models.
+      */
+     adapterId?: string;
  }
  /**
   * The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
@@ -33,10 +37,11 @@ export declare class GradientLLM extends LLM<BaseLLMCallOptions> {
          [key: string]: string;
      } | undefined;
      modelSlug: string;
+     adapterId?: string;
      gradientAccessKey?: string;
      workspaceId?: string;
      inferenceParameters?: Record<string, unknown>;
-     baseModel: any;
+     model: any;
      constructor(fields: GradientLLMParams);
      _llmType(): string;
      /**
@@ -46,5 +51,5 @@ export declare class GradientLLM extends LLM<BaseLLMCallOptions> {
       */
      /** @ignore */
      _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string>;
-     setBaseModel(): Promise<void>;
+     setModel(): Promise<void>;
  }
package/dist/llms/gradient_ai.js
@@ -23,6 +23,12 @@ export class GradientLLM extends LLM
              writable: true,
              value: "llama2-7b-chat"
          });
+         Object.defineProperty(this, "adapterId", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          Object.defineProperty(this, "gradientAccessKey", {
              enumerable: true,
              configurable: true,
@@ -43,13 +49,14 @@ export class GradientLLM extends LLM
          });
          // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
          // eslint-disable-next-line @typescript-eslint/no-explicit-any
-         Object.defineProperty(this, "baseModel", {
+         Object.defineProperty(this, "model", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
          this.modelSlug = fields?.modelSlug ?? this.modelSlug;
+         this.adapterId = fields?.adapterId;
          this.gradientAccessKey =
              fields?.gradientAccessKey ??
                  getEnvironmentVariable("GRADIENT_ACCESS_TOKEN");
@@ -73,22 +80,29 @@ export class GradientLLM extends LLM
       */
      /** @ignore */
      async _call(prompt, _options) {
-         await this.setBaseModel();
-         const response = (await this.caller.call(async () => this.baseModel.complete({
+         await this.setModel();
+         const response = (await this.caller.call(async () => this.model.complete({
              query: prompt,
              ...this.inferenceParameters,
          })));
          return response.generatedOutput;
      }
-     async setBaseModel() {
-         if (this.baseModel)
+     async setModel() {
+         if (this.model)
              return;
          const gradient = new Gradient({
              accessToken: this.gradientAccessKey,
              workspaceId: this.workspaceId,
          });
-         this.baseModel = await gradient.getBaseModel({
-             baseModelSlug: this.modelSlug,
-         });
+         if (this.adapterId) {
+             this.model = await gradient.getModelAdapter({
+                 modelAdapterId: this.adapterId,
+             });
+         }
+         else {
+             this.model = await gradient.getBaseModel({
+                 baseModelSlug: this.modelSlug,
+             });
+         }
      }
  }
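
With the new adapterId parameter, GradientLLM can resolve a fine-tuned model adapter instead of a base model slug. A minimal sketch; the adapter id below is a placeholder, and credentials may also come from the GRADIENT_ACCESS_TOKEN / GRADIENT_WORKSPACE_ID environment variables:

import { GradientLLM } from "langchain/llms/gradient_ai";

// When adapterId is set, the model is resolved via gradient.getModelAdapter();
// otherwise it falls back to getBaseModel() with modelSlug (default "llama2-7b-chat").
const llm = new GradientLLM({
  adapterId: "your-model-adapter-id", // placeholder
  gradientAccessKey: process.env.GRADIENT_ACCESS_TOKEN,
  workspaceId: process.env.GRADIENT_WORKSPACE_ID,
});

const answer = await llm.call("Summarize what a Gradient model adapter is.");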
package/dist/llms/llama_cpp.cjs
@@ -77,9 +77,10 @@ class LlamaCpp extends base_js_1.LLM {
          return "llama2_cpp";
      }
      /** @ignore */
-     async _call(prompt, _options) {
+     async _call(prompt, options) {
          try {
              const promptOptions = {
+                 onToken: options?.onToken,
                  maxTokens: this?.maxTokens,
                  temperature: this?.temperature,
                  topK: this?.topK,
package/dist/llms/llama_cpp.d.ts
@@ -36,6 +36,6 @@ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
      constructor(inputs: LlamaCppInputs);
      _llmType(): string;
      /** @ignore */
-     _call(prompt: string, _options?: this["ParsedCallOptions"]): Promise<string>;
+     _call(prompt: string, options?: this["ParsedCallOptions"]): Promise<string>;
      _streamResponseChunks(prompt: string, _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
  }
package/dist/llms/llama_cpp.js
@@ -74,9 +74,10 @@ export class LlamaCpp extends LLM {
          return "llama2_cpp";
      }
      /** @ignore */
-     async _call(prompt, _options) {
+     async _call(prompt, options) {
          try {
              const promptOptions = {
+                 onToken: options?.onToken,
                  maxTokens: this?.maxTokens,
                  temperature: this?.temperature,
                  topK: this?.topK,
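
This change forwards an onToken callback from the per-call options into node-llama-cpp's prompt options, so tokens can be observed as they are generated. A minimal sketch, assuming a local GGUF model path and that the call options accept onToken (the compiled output above simply forwards options?.onToken):

import { LlamaCpp } from "langchain/llms/llama_cpp";

// modelPath is illustrative; point it at a local model file.
const model = new LlamaCpp({ modelPath: "/path/to/llama-model.gguf" });

const story = await model.call("Tell me a short story about a lighthouse.", {
  // Invoked by node-llama-cpp with the raw token ids as they are produced.
  onToken: (tokens) => {
    console.log("tokens:", tokens);
  },
});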
package/dist/load/import_constants.cjs
@@ -11,6 +11,7 @@ exports.optionalImportEntrypoints = [
      "langchain/tools/calculator",
      "langchain/tools/sql",
      "langchain/tools/webbrowser",
+     "langchain/tools/gmail",
      "langchain/tools/google_calendar",
      "langchain/chains/load",
      "langchain/chains/query_constructor",
@@ -26,6 +27,7 @@ exports.optionalImportEntrypoints = [
      "langchain/embeddings/googlevertexai",
      "langchain/embeddings/googlepalm",
      "langchain/embeddings/llama_cpp",
+     "langchain/embeddings/gradient_ai",
      "langchain/llms/load",
      "langchain/llms/cohere",
      "langchain/llms/hf",
@@ -111,6 +113,7 @@ exports.optionalImportEntrypoints = [
      "langchain/document_loaders/fs/epub",
      "langchain/document_loaders/fs/csv",
      "langchain/document_loaders/fs/notion",
+     "langchain/document_loaders/fs/obsidian",
      "langchain/document_loaders/fs/unstructured",
      "langchain/document_loaders/fs/openai_whisper_audio",
      "langchain/document_loaders/fs/pptx",
package/dist/load/import_constants.js
@@ -8,6 +8,7 @@ export const optionalImportEntrypoints = [
      "langchain/tools/calculator",
      "langchain/tools/sql",
      "langchain/tools/webbrowser",
+     "langchain/tools/gmail",
      "langchain/tools/google_calendar",
      "langchain/chains/load",
      "langchain/chains/query_constructor",
@@ -23,6 +24,7 @@ export const optionalImportEntrypoints = [
      "langchain/embeddings/googlevertexai",
      "langchain/embeddings/googlepalm",
      "langchain/embeddings/llama_cpp",
+     "langchain/embeddings/gradient_ai",
      "langchain/llms/load",
      "langchain/llms/cohere",
      "langchain/llms/hf",
@@ -108,6 +110,7 @@ export const optionalImportEntrypoints = [
      "langchain/document_loaders/fs/epub",
      "langchain/document_loaders/fs/csv",
      "langchain/document_loaders/fs/notion",
+     "langchain/document_loaders/fs/obsidian",
      "langchain/document_loaders/fs/unstructured",
      "langchain/document_loaders/fs/openai_whisper_audio",
      "langchain/document_loaders/fs/pptx",
package/dist/load/import_map.cjs
@@ -24,12 +24,13 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__voyage = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains__combine_documents__reduce = exports.chains = exports.tools__google_places = exports.tools__render = exports.tools = exports.base_language = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.experimental__chat_models__ollama_functions = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_files = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = exports.chat_models__anthropic = void 0;
- exports.runnables__remote = exports.runnables = exports.evaluation = exports.experimental__chains__violation_of_expectations = void 0;
+ exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__voyage = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains__combine_documents__reduce = exports.chains = exports.tools__google_places = exports.tools__render = exports.tools__connery = exports.tools = exports.base_language = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad__openai_tools = exports.agents__format_scratchpad = exports.agents__toolkits__connery = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+ exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_files = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__cloudflare_workersai = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = void 0;
+ exports.runnables__remote = exports.runnables = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__ollama_functions = exports.experimental__chat_models__bittensor = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
+ exports.agents__toolkits__connery = __importStar(require("../agents/toolkits/connery/index.cjs"));
  exports.agents__format_scratchpad = __importStar(require("../agents/format_scratchpad/openai_functions.cjs"));
  exports.agents__format_scratchpad__openai_tools = __importStar(require("../agents/format_scratchpad/openai_tools.cjs"));
  exports.agents__format_scratchpad__log = __importStar(require("../agents/format_scratchpad/log.cjs"));
@@ -40,6 +41,7 @@ exports.agents__xml__output_parser = __importStar(require("../agents/xml/output_
  exports.agents__openai__output_parser = __importStar(require("../agents/openai/output_parser.cjs"));
  exports.base_language = __importStar(require("../base_language/index.cjs"));
  exports.tools = __importStar(require("../tools/index.cjs"));
+ exports.tools__connery = __importStar(require("../tools/connery.cjs"));
  exports.tools__render = __importStar(require("../tools/render.cjs"));
  exports.tools__google_places = __importStar(require("../tools/google_places.cjs"));
  exports.chains = __importStar(require("../chains/index.cjs"));
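
The entrypoint list above now registers "langchain/tools/gmail", "langchain/embeddings/gradient_ai", and "langchain/document_loaders/fs/obsidian" as optional imports, so their peer dependencies may be absent at load time. A minimal sketch of the new Obsidian loader entrypoint, assuming the export is named ObsidianLoader and that it accepts the path of a vault directory:

import { ObsidianLoader } from "langchain/document_loaders/fs/obsidian";

// Point the loader at an Obsidian vault (a directory of Markdown notes).
const loader = new ObsidianLoader("./my-obsidian-vault");
const docs = await loader.load();
console.log(docs.length, "notes loaded");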
package/dist/load/import_map.d.ts
@@ -1,6 +1,7 @@
  export * as load__serializable from "../load/serializable.js";
  export * as agents from "../agents/index.js";
  export * as agents__toolkits from "../agents/toolkits/index.js";
+ export * as agents__toolkits__connery from "../agents/toolkits/connery/index.js";
  export * as agents__format_scratchpad from "../agents/format_scratchpad/openai_functions.js";
  export * as agents__format_scratchpad__openai_tools from "../agents/format_scratchpad/openai_tools.js";
  export * as agents__format_scratchpad__log from "../agents/format_scratchpad/log.js";
@@ -11,6 +12,7 @@ export * as agents__xml__output_parser from "../agents/xml/output_parser.js";
  export * as agents__openai__output_parser from "../agents/openai/output_parser.js";
  export * as base_language from "../base_language/index.js";
  export * as tools from "../tools/index.js";
+ export * as tools__connery from "../tools/connery.js";
  export * as tools__render from "../tools/render.js";
  export * as tools__google_places from "../tools/google_places.js";
  export * as chains from "../chains/index.js";
package/dist/load/import_map.js
@@ -2,6 +2,7 @@
  export * as load__serializable from "../load/serializable.js";
  export * as agents from "../agents/index.js";
  export * as agents__toolkits from "../agents/toolkits/index.js";
+ export * as agents__toolkits__connery from "../agents/toolkits/connery/index.js";
  export * as agents__format_scratchpad from "../agents/format_scratchpad/openai_functions.js";
  export * as agents__format_scratchpad__openai_tools from "../agents/format_scratchpad/openai_tools.js";
  export * as agents__format_scratchpad__log from "../agents/format_scratchpad/log.js";
@@ -12,6 +13,7 @@ export * as agents__xml__output_parser from "../agents/xml/output_parser.js";
  export * as agents__openai__output_parser from "../agents/openai/output_parser.js";
  export * as base_language from "../base_language/index.js";
  export * as tools from "../tools/index.js";
+ export * as tools__connery from "../tools/connery.js";
  export * as tools__render from "../tools/render.js";
  export * as tools__google_places from "../tools/google_places.js";
  export * as chains from "../chains/index.js";
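
The import map now re-exports the new Connery modules (tools/connery and agents/toolkits/connery). A rough sketch of how they might be used together, assuming the exports are named ConneryService and ConneryToolkit, that the toolkit is built via ConneryToolkit.createInstance(), and that the Connery Runner is configured through the CONNERY_RUNNER_URL and CONNERY_RUNNER_API_KEY environment variables:

import { ConneryService } from "langchain/tools/connery";
import { ConneryToolkit } from "langchain/agents/toolkits/connery";

// Assumed behavior: the service reads the runner URL and API key from the environment.
const conneryService = new ConneryService();

// Expose every action available on the Connery Runner as an agent tool.
const toolkit = await ConneryToolkit.createInstance(conneryService);
console.log(toolkit.tools.map((tool) => tool.name));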
package/dist/memory/vector_store.cjs
@@ -83,7 +83,7 @@ class VectorStoreRetrieverMemory extends base_js_1.BaseMemory {
          return {
              [this.memoryKey]: this.returnDocs
                  ? results
-                 : (0, document_js_2.formatDocumentsAsString)(results, "\n"),
+                 : (0, document_js_2.formatDocumentsAsString)(results),
          };
      }
      /**
package/dist/memory/vector_store.js
@@ -80,7 +80,7 @@ export class VectorStoreRetrieverMemory extends BaseMemory {
          return {
              [this.memoryKey]: this.returnDocs
                  ? results
-                 : formatDocumentsAsString(results, "\n"),
+                 : formatDocumentsAsString(results),
          };
      }
      /**
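
formatDocumentsAsString no longer takes a separator argument here (the matching +1/-1 change in package/dist/util/document.* appears in the file list above). A rough sketch of the shape of such a helper after this change; this is a hypothetical re-implementation for illustration, assuming it simply joins page contents with a fixed separator:

import { Document } from "langchain/document";

// Hypothetical stand-in for the helper in langchain's util/document module.
const formatDocumentsAsString = (documents: Document[]): string =>
  documents.map((doc) => doc.pageContent).join("\n\n");

const text = formatDocumentsAsString([
  new Document({ pageContent: "first memory" }),
  new Document({ pageContent: "second memory" }),
]);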