langchain 0.1.25 → 0.1.27

This diff compares the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear there.
package/README.md CHANGED
@@ -2,7 +2,7 @@
 
 ⚡ Building applications with LLMs through composability ⚡
 
-[![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs)
+[![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dm/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs)
 [<img src="https://github.com/codespaces/badge.svg" title="Open in Github Codespace" width="150" height="20">](https://codespaces.new/langchain-ai/langchainjs)
 
 Looking for the Python version? Check out [LangChain](https://github.com/langchain-ai/langchain).
@@ -23,6 +23,13 @@ class GitbookLoader extends cheerio_js_1.CheerioWebBaseLoader {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "baseUrl", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.baseUrl = webPath;
         this.webPath = path;
         this.shouldLoadAllPaths =
             params.shouldLoadAllPaths ?? this.shouldLoadAllPaths;
@@ -84,9 +91,10 @@ class GitbookLoader extends cheerio_js_1.CheerioWebBaseLoader {
             .map((element) => $(element).text());
         const documents = [];
         for (const url of urls) {
-            console.log(`Fetching text from ${url}`);
-            const html = await GitbookLoader._scrape(url, this.caller, this.timeout);
-            documents.push(...this.loadPath(html, url));
+            const buildUrl = url.includes(this.baseUrl) ? url : this.baseUrl + url;
+            console.log(`Fetching text from ${buildUrl}`);
+            const html = await GitbookLoader._scrape(buildUrl, this.caller, this.timeout);
+            documents.push(...this.loadPath(html, buildUrl));
         }
         console.log(`Fetched ${documents.length} documents.`);
         return documents;
@@ -15,6 +15,7 @@ interface GitbookLoaderParams {
 export declare class GitbookLoader extends CheerioWebBaseLoader {
     webPath: string;
     shouldLoadAllPaths: boolean;
+    private readonly baseUrl;
     constructor(webPath: string, params?: GitbookLoaderParams);
     /**
      * Method that scrapes the web document using Cheerio and loads the
@@ -20,6 +20,13 @@ export class GitbookLoader extends CheerioWebBaseLoader {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "baseUrl", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.baseUrl = webPath;
         this.webPath = path;
         this.shouldLoadAllPaths =
             params.shouldLoadAllPaths ?? this.shouldLoadAllPaths;
@@ -81,9 +88,10 @@ export class GitbookLoader extends CheerioWebBaseLoader {
             .map((element) => $(element).text());
         const documents = [];
         for (const url of urls) {
-            console.log(`Fetching text from ${url}`);
-            const html = await GitbookLoader._scrape(url, this.caller, this.timeout);
-            documents.push(...this.loadPath(html, url));
+            const buildUrl = url.includes(this.baseUrl) ? url : this.baseUrl + url;
+            console.log(`Fetching text from ${buildUrl}`);
+            const html = await GitbookLoader._scrape(buildUrl, this.caller, this.timeout);
+            documents.push(...this.loadPath(html, buildUrl));
         }
         console.log(`Fetched ${documents.length} documents.`);
         return documents;
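
The `baseUrl` addition above fixes path resolution when `shouldLoadAllPaths` is enabled: relative paths scraped from the site index are now joined onto the URL the loader was constructed with, while links that already contain the base pass through untouched. A minimal sketch of the joining rule in isolation (the function name and example URLs are illustrative, not part of the package):

```typescript
// Sketch of the URL resolution introduced in this release; `baseUrl`
// corresponds to the `webPath` the GitbookLoader was constructed with.
function resolveGitbookUrl(baseUrl: string, url: string): string {
  // Links that already contain the base are used as-is; bare paths
  // such as "/getting-started" get the base prepended.
  return url.includes(baseUrl) ? url : baseUrl + url;
}

// Hypothetical examples:
resolveGitbookUrl("https://docs.example.com", "/intro");
// => "https://docs.example.com/intro"
resolveGitbookUrl("https://docs.example.com", "https://docs.example.com/faq");
// => "https://docs.example.com/faq" (unchanged)
```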
@@ -28,6 +28,7 @@ for the weather in SF you would respond:
 
 <tool>search</tool><tool_input><query>weather in SF</query></tool_input>
 <observation>64 degrees</observation>`);
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
 class AnthropicFunctions extends chat_models_1.BaseChatModel {
     static lc_name() {
         return "AnthropicFunctions";
@@ -6,13 +6,16 @@ import { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
 import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { BasePromptTemplate } from "@langchain/core/prompts";
 import { type AnthropicInput } from "../../chat_models/anthropic.js";
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
 export interface ChatAnthropicFunctionsCallOptions extends BaseFunctionCallOptions {
     tools?: StructuredToolInterface[];
 }
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
 export type AnthropicFunctionsInput = Partial<AnthropicInput> & BaseChatModelParams & {
     llm?: BaseChatModel;
     systemPromptTemplate?: BasePromptTemplate;
 };
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
 export declare class AnthropicFunctions extends BaseChatModel<ChatAnthropicFunctionsCallOptions> {
     llm: BaseChatModel;
     stopSequences?: string[];
@@ -25,6 +25,7 @@ for the weather in SF you would respond:
 
 <tool>search</tool><tool_input><query>weather in SF</query></tool_input>
 <observation>64 degrees</observation>`);
+/** @deprecated Install and use in "@langchain/anthropic/experimental" instead */
 export class AnthropicFunctions extends BaseChatModel {
     static lc_name() {
         return "AnthropicFunctions";
@@ -11,6 +11,7 @@ class Bedrock extends web_js_1.Bedrock {
     static lc_name() {
         return "Bedrock";
     }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
     constructor(fields) {
         super({
             ...fields,
@@ -1,7 +1,6 @@
 import { BaseLLMParams } from "@langchain/core/language_models/llms";
-import { BaseBedrockInput } from "../../util/bedrock.js";
 import { Bedrock as BaseBedrock } from "./web.js";
 export declare class Bedrock extends BaseBedrock {
     static lc_name(): string;
-    constructor(fields?: Partial<BaseBedrockInput> & BaseLLMParams);
+    constructor(fields?: Partial<any> & BaseLLMParams);
 }
@@ -8,6 +8,7 @@ export class Bedrock extends BaseBedrock {
     static lc_name() {
         return "Bedrock";
     }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
     constructor(fields) {
         super({
             ...fields,
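
Dropping the `BaseBedrockInput` import (its source file is deleted wholesale at the end of this diff) loosens the re-exported constructor signature to `Partial<any> & BaseLLMParams`, so Bedrock-specific fields are no longer compile-time checked from this entrypoint. A sketch of the effect; the option names mirror the removed interface:

```typescript
import { Bedrock } from "langchain/llms/bedrock";

// `fields` is now typed Partial<any> & BaseLLMParams: a typo such as
// `regon` would still compile. Field names below follow the removed
// BaseBedrockInput interface.
const llm = new Bedrock({
  model: "amazon.titan-tg1-large", // modelId per list-foundation-models
  region: "us-west-2",
  maxTokens: 50,
});
```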
@@ -153,9 +153,10 @@ class ParentDocumentRetriever extends multi_vector_js_1.MultiVectorRetriever {
      * This can be false if and only if `ids` are provided. You may want
      * to set this to False if the documents are already in the docstore
      * and you don't want to re-add them.
+     * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers
      */
     async addDocuments(docs, config) {
-        const { ids, addToDocstore = true } = config ?? {};
+        const { ids, addToDocstore = true, childDocChunkHeaderOptions = {}, } = config ?? {};
         const parentDocs = this.parentSplitter
             ? await this.parentSplitter.splitDocuments(docs)
             : docs;
@@ -177,7 +178,7 @@ class ParentDocumentRetriever extends multi_vector_js_1.MultiVectorRetriever {
         for (let i = 0; i < parentDocs.length; i += 1) {
             const parentDoc = parentDocs[i];
             const parentDocId = parentDocIds[i];
-            const subDocs = await this.childSplitter.splitDocuments([parentDoc]);
+            const subDocs = await this.childSplitter.splitDocuments([parentDoc], childDocChunkHeaderOptions);
             const taggedSubDocs = subDocs.map((subDoc) => new documents_1.Document({
                 pageContent: subDoc.pageContent,
                 metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },
@@ -1,6 +1,6 @@
 import { type VectorStoreInterface, type VectorStoreRetrieverInterface } from "@langchain/core/vectorstores";
 import { Document } from "@langchain/core/documents";
-import { TextSplitter } from "../text_splitter.js";
+import { TextSplitter, TextSplitterChunkHeaderOptions } from "../text_splitter.js";
 import { MultiVectorRetriever, type MultiVectorRetrieverInput } from "./multi_vector.js";
 /**
  * Interface for the fields required to initialize a
@@ -69,9 +69,11 @@ export declare class ParentDocumentRetriever extends MultiVectorRetriever {
      * This can be false if and only if `ids` are provided. You may want
      * to set this to False if the documents are already in the docstore
      * and you don't want to re-add them.
+     * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers
      */
     addDocuments(docs: Document[], config?: {
         ids?: string[];
         addToDocstore?: boolean;
+        childDocChunkHeaderOptions?: TextSplitterChunkHeaderOptions;
     }): Promise<void>;
 }
@@ -127,9 +127,10 @@ export class ParentDocumentRetriever extends MultiVectorRetriever {
      * This can be false if and only if `ids` are provided. You may want
      * to set this to False if the documents are already in the docstore
      * and you don't want to re-add them.
+     * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers
      */
     async addDocuments(docs, config) {
-        const { ids, addToDocstore = true } = config ?? {};
+        const { ids, addToDocstore = true, childDocChunkHeaderOptions = {}, } = config ?? {};
         const parentDocs = this.parentSplitter
             ? await this.parentSplitter.splitDocuments(docs)
             : docs;
@@ -151,7 +152,7 @@ export class ParentDocumentRetriever extends MultiVectorRetriever {
         for (let i = 0; i < parentDocs.length; i += 1) {
             const parentDoc = parentDocs[i];
             const parentDocId = parentDocIds[i];
-            const subDocs = await this.childSplitter.splitDocuments([parentDoc]);
+            const subDocs = await this.childSplitter.splitDocuments([parentDoc], childDocChunkHeaderOptions);
             const taggedSubDocs = subDocs.map((subDoc) => new Document({
                 pageContent: subDoc.pageContent,
                 metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },
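
The new `childDocChunkHeaderOptions` config is threaded through to the child splitter's `splitDocuments` call, so every child chunk can carry a contextual header before it is embedded. A usage sketch, assuming an in-memory store and OpenAI embeddings purely for illustration:

```typescript
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { InMemoryStore } from "langchain/storage/in_memory";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { ParentDocumentRetriever } from "langchain/retrievers/parent_document";

const retriever = new ParentDocumentRetriever({
  vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),
  docstore: new InMemoryStore(),
  childSplitter: new RecursiveCharacterTextSplitter({ chunkSize: 50 }),
});

// Added in this release: prepend a header to each child chunk before
// it is split and embedded.
await retriever.addDocuments(
  [new Document({ pageContent: "..." })],
  {
    childDocChunkHeaderOptions: {
      chunkHeader: "DOC TITLE: Example Document\n---\n",
    },
  }
);
```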
@@ -83,7 +83,8 @@ const getTableAndColumnsName = async (appDataSource) => {
         const rep = await appDataSource.query(sql);
         return formatToSqlTable(rep);
     }
-    if (appDataSource.options.type === "mysql") {
+    if (appDataSource.options.type === "mysql" ||
+        appDataSource.options.type === "aurora-mysql") {
         sql =
             "SELECT " +
                 "TABLE_NAME AS table_name, " +
@@ -77,7 +77,8 @@ export const getTableAndColumnsName = async (appDataSource) => {
         const rep = await appDataSource.query(sql);
         return formatToSqlTable(rep);
     }
-    if (appDataSource.options.type === "mysql") {
+    if (appDataSource.options.type === "mysql" ||
+        appDataSource.options.type === "aurora-mysql") {
         sql =
             "SELECT " +
                 "TABLE_NAME AS table_name, " +
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.1.25",
+  "version": "0.1.27",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {
@@ -1328,7 +1328,7 @@
     "couchbase": "^4.2.10",
     "d3-dsv": "^2.0.0",
     "epub2": "^3.0.1",
-    "fast-xml-parser": "^4.2.7",
+    "fast-xml-parser": "*",
     "google-auth-library": "^8.9.0",
     "handlebars": "^4.7.8",
     "html-to-text": "^9.0.5",
@@ -1512,9 +1512,9 @@
   },
   "dependencies": {
     "@anthropic-ai/sdk": "^0.9.1",
-    "@langchain/community": "~0.0.33",
-    "@langchain/core": "~0.1.36",
-    "@langchain/openai": "~0.0.14",
+    "@langchain/community": "~0.0.36",
+    "@langchain/core": "~0.1.44",
+    "@langchain/openai": "~0.0.19",
     "binary-extensions": "^2.2.0",
     "expr-eval": "^2.0.2",
     "js-tiktoken": "^1.0.7",
@@ -1,75 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.BedrockLLMInputOutputAdapter = void 0;
-/**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
-class BedrockLLMInputOutputAdapter {
-    /** Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides a helper function to extract
-    the generated text from the model response. */
-    static prepareInput(provider, prompt, maxTokens = 50, temperature = 0, stopSequences = undefined, modelKwargs = {}, bedrockMethod = "invoke") {
-        const inputBody = {};
-        if (provider === "anthropic") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens_to_sample = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-        }
-        else if (provider === "ai21") {
-            inputBody.prompt = prompt;
-            inputBody.maxTokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stopSequences = stopSequences;
-        }
-        else if (provider === "meta") {
-            inputBody.prompt = prompt;
-            inputBody.max_gen_len = maxTokens;
-            inputBody.temperature = temperature;
-        }
-        else if (provider === "amazon") {
-            inputBody.inputText = prompt;
-            inputBody.textGenerationConfig = {
-                maxTokenCount: maxTokens,
-                temperature,
-            };
-        }
-        else if (provider === "cohere") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-            if (bedrockMethod === "invoke-with-response-stream") {
-                inputBody.stream = true;
-            }
-        }
-        return { ...inputBody, ...modelKwargs };
-    }
-    /**
-     * Extracts the generated text from the service response.
-     * @param provider The provider name.
-     * @param responseBody The response body from the service.
-     * @returns The generated text.
-     */
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    static prepareOutput(provider, responseBody) {
-        if (provider === "anthropic") {
-            return responseBody.completion;
-        }
-        else if (provider === "ai21") {
-            return responseBody?.completions?.[0]?.data?.text ?? "";
-        }
-        else if (provider === "cohere") {
-            return responseBody?.generations?.[0]?.text ?? responseBody?.text ?? "";
-        }
-        else if (provider === "meta") {
-            return responseBody.generation;
-        }
-        // I haven't been able to get a response with more than one result in it.
-        return responseBody.results?.[0]?.outputText;
-    }
-}
-exports.BedrockLLMInputOutputAdapter = BedrockLLMInputOutputAdapter;
@@ -1,64 +0,0 @@
-import type { AwsCredentialIdentity, Provider } from "@aws-sdk/types";
-export type CredentialType = AwsCredentialIdentity | Provider<AwsCredentialIdentity>;
-/** Bedrock models.
-    To authenticate, the AWS client uses the following methods to automatically load credentials:
-    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
-    If a specific credential profile should be used, you must pass the name of the profile from the ~/.aws/credentials file that is to be used.
-    Make sure the credentials / roles used have the required policies to access the Bedrock service.
-*/
-export interface BaseBedrockInput {
-    /** Model to use.
-        For example, "amazon.titan-tg1-large", this is equivalent to the modelId property in the list-foundation-models api.
-    */
-    model: string;
-    /** The AWS region e.g. `us-west-2`.
-        Fallback to AWS_DEFAULT_REGION env variable or region specified in ~/.aws/config in case it is not provided here.
-    */
-    region?: string;
-    /** AWS Credentials.
-        If no credentials are provided, the default credentials from `@aws-sdk/credential-provider-node` will be used.
-    */
-    credentials?: CredentialType;
-    /** Temperature. */
-    temperature?: number;
-    /** Max tokens. */
-    maxTokens?: number;
-    /** A custom fetch function for low-level access to AWS API. Defaults to fetch(). */
-    fetchFn?: typeof fetch;
-    /** @deprecated Use endpointHost instead Override the default endpoint url. */
-    endpointUrl?: string;
-    /** Override the default endpoint hostname. */
-    endpointHost?: string;
-    /**
-     * Optional additional stop sequences to pass to the model. Currently only supported for Anthropic and AI21.
-     * @deprecated Use .bind({ "stop": [...] }) instead
-     * */
-    stopSequences?: string[];
-    /** Additional kwargs to pass to the model. */
-    modelKwargs?: Record<string, unknown>;
-    /** Whether or not to stream responses */
-    streaming: boolean;
-}
-type Dict = {
-    [key: string]: unknown;
-};
-/**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
-export declare class BedrockLLMInputOutputAdapter {
-    /** Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides a helper function to extract
-    the generated text from the model response. */
-    static prepareInput(provider: string, prompt: string, maxTokens?: number, temperature?: number, stopSequences?: string[] | undefined, modelKwargs?: Record<string, unknown>, bedrockMethod?: "invoke" | "invoke-with-response-stream"): Dict;
-    /**
-     * Extracts the generated text from the service response.
-     * @param provider The provider name.
-     * @param responseBody The response body from the service.
-     * @returns The generated text.
-     */
-    static prepareOutput(provider: string, responseBody: any): string;
-}
-export {};
@@ -1,71 +0,0 @@
-/**
- * A helper class used within the `Bedrock` class. It is responsible for
- * preparing the input and output for the Bedrock service. It formats the
- * input prompt based on the provider (e.g., "anthropic", "ai21",
- * "amazon") and extracts the generated text from the service response.
- */
-export class BedrockLLMInputOutputAdapter {
-    /** Adapter class to prepare the inputs from Langchain to a format
-    that LLM model expects. Also, provides a helper function to extract
-    the generated text from the model response. */
-    static prepareInput(provider, prompt, maxTokens = 50, temperature = 0, stopSequences = undefined, modelKwargs = {}, bedrockMethod = "invoke") {
-        const inputBody = {};
-        if (provider === "anthropic") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens_to_sample = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-        }
-        else if (provider === "ai21") {
-            inputBody.prompt = prompt;
-            inputBody.maxTokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stopSequences = stopSequences;
-        }
-        else if (provider === "meta") {
-            inputBody.prompt = prompt;
-            inputBody.max_gen_len = maxTokens;
-            inputBody.temperature = temperature;
-        }
-        else if (provider === "amazon") {
-            inputBody.inputText = prompt;
-            inputBody.textGenerationConfig = {
-                maxTokenCount: maxTokens,
-                temperature,
-            };
-        }
-        else if (provider === "cohere") {
-            inputBody.prompt = prompt;
-            inputBody.max_tokens = maxTokens;
-            inputBody.temperature = temperature;
-            inputBody.stop_sequences = stopSequences;
-            if (bedrockMethod === "invoke-with-response-stream") {
-                inputBody.stream = true;
-            }
-        }
-        return { ...inputBody, ...modelKwargs };
-    }
-    /**
-     * Extracts the generated text from the service response.
-     * @param provider The provider name.
-     * @param responseBody The response body from the service.
-     * @returns The generated text.
-     */
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    static prepareOutput(provider, responseBody) {
-        if (provider === "anthropic") {
-            return responseBody.completion;
-        }
-        else if (provider === "ai21") {
-            return responseBody?.completions?.[0]?.data?.text ?? "";
-        }
-        else if (provider === "cohere") {
-            return responseBody?.generations?.[0]?.text ?? responseBody?.text ?? "";
-        }
-        else if (provider === "meta") {
-            return responseBody.generation;
-        }
-        // I haven't been able to get a response with more than one result in it.
-        return responseBody.results?.[0]?.outputText;
-    }
-}