langchain 0.0.176 → 0.0.178

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. package/chat_models/iflytek_xinghuo/web.cjs +1 -0
  2. package/chat_models/iflytek_xinghuo/web.d.ts +1 -0
  3. package/chat_models/iflytek_xinghuo/web.js +1 -0
  4. package/chat_models/iflytek_xinghuo.cjs +1 -0
  5. package/chat_models/iflytek_xinghuo.d.ts +1 -0
  6. package/chat_models/iflytek_xinghuo.js +1 -0
  7. package/dist/chat_models/bedrock.cjs +25 -4
  8. package/dist/chat_models/bedrock.d.ts +2 -1
  9. package/dist/chat_models/bedrock.js +25 -4
  10. package/dist/chat_models/cloudflare_workersai.cjs +70 -24
  11. package/dist/chat_models/cloudflare_workersai.d.ts +6 -2
  12. package/dist/chat_models/cloudflare_workersai.js +71 -25
  13. package/dist/chat_models/iflytek_xinghuo/common.cjs +335 -0
  14. package/dist/chat_models/iflytek_xinghuo/common.d.ts +165 -0
  15. package/dist/chat_models/iflytek_xinghuo/common.js +331 -0
  16. package/dist/chat_models/iflytek_xinghuo/index.cjs +35 -0
  17. package/dist/chat_models/iflytek_xinghuo/index.d.ts +5 -0
  18. package/dist/chat_models/iflytek_xinghuo/index.js +28 -0
  19. package/dist/chat_models/iflytek_xinghuo/web.cjs +30 -0
  20. package/dist/chat_models/iflytek_xinghuo/web.d.ts +5 -0
  21. package/dist/chat_models/iflytek_xinghuo/web.js +26 -0
  22. package/dist/chat_models/llama_cpp.cjs +31 -79
  23. package/dist/chat_models/llama_cpp.d.ts +15 -58
  24. package/dist/chat_models/llama_cpp.js +32 -80
  25. package/dist/chat_models/openai.cjs +91 -6
  26. package/dist/chat_models/openai.d.ts +10 -0
  27. package/dist/chat_models/openai.js +91 -6
  28. package/dist/embeddings/hf.cjs +10 -1
  29. package/dist/embeddings/hf.d.ts +4 -2
  30. package/dist/embeddings/hf.js +10 -1
  31. package/dist/embeddings/llama_cpp.cjs +67 -0
  32. package/dist/embeddings/llama_cpp.d.ts +26 -0
  33. package/dist/embeddings/llama_cpp.js +63 -0
  34. package/dist/embeddings/ollama.cjs +7 -1
  35. package/dist/embeddings/ollama.js +7 -1
  36. package/dist/graphs/neo4j_graph.cjs +36 -5
  37. package/dist/graphs/neo4j_graph.js +14 -3
  38. package/dist/llms/bedrock.cjs +25 -3
  39. package/dist/llms/bedrock.d.ts +2 -1
  40. package/dist/llms/bedrock.js +25 -3
  41. package/dist/llms/cloudflare_workersai.cjs +59 -13
  42. package/dist/llms/cloudflare_workersai.d.ts +9 -3
  43. package/dist/llms/cloudflare_workersai.js +59 -13
  44. package/dist/llms/hf.cjs +10 -1
  45. package/dist/llms/hf.d.ts +3 -0
  46. package/dist/llms/hf.js +10 -1
  47. package/dist/llms/llama_cpp.cjs +25 -65
  48. package/dist/llms/llama_cpp.d.ts +7 -43
  49. package/dist/llms/llama_cpp.js +25 -65
  50. package/dist/load/import_constants.cjs +3 -0
  51. package/dist/load/import_constants.js +3 -0
  52. package/dist/prompts/chat.cjs +8 -0
  53. package/dist/prompts/chat.d.ts +5 -0
  54. package/dist/prompts/chat.js +8 -0
  55. package/dist/prompts/few_shot.cjs +162 -1
  56. package/dist/prompts/few_shot.d.ts +90 -2
  57. package/dist/prompts/few_shot.js +160 -0
  58. package/dist/prompts/index.cjs +2 -1
  59. package/dist/prompts/index.d.ts +1 -1
  60. package/dist/prompts/index.js +1 -1
  61. package/dist/retrievers/zep.cjs +26 -3
  62. package/dist/retrievers/zep.d.ts +11 -2
  63. package/dist/retrievers/zep.js +26 -3
  64. package/dist/util/bedrock.d.ts +2 -0
  65. package/dist/util/event-source-parse.cjs +20 -1
  66. package/dist/util/event-source-parse.d.ts +2 -0
  67. package/dist/util/event-source-parse.js +18 -0
  68. package/dist/util/iflytek_websocket_stream.cjs +81 -0
  69. package/dist/util/iflytek_websocket_stream.d.ts +27 -0
  70. package/dist/util/iflytek_websocket_stream.js +77 -0
  71. package/dist/util/llama_cpp.cjs +34 -0
  72. package/dist/util/llama_cpp.d.ts +46 -0
  73. package/dist/util/llama_cpp.js +28 -0
  74. package/dist/util/openai-format-fndef.cjs +81 -0
  75. package/dist/util/openai-format-fndef.d.ts +44 -0
  76. package/dist/util/openai-format-fndef.js +77 -0
  77. package/dist/util/openapi.d.ts +2 -2
  78. package/dist/vectorstores/pinecone.cjs +5 -5
  79. package/dist/vectorstores/pinecone.d.ts +2 -2
  80. package/dist/vectorstores/pinecone.js +5 -5
  81. package/embeddings/llama_cpp.cjs +1 -0
  82. package/embeddings/llama_cpp.d.ts +1 -0
  83. package/embeddings/llama_cpp.js +1 -0
  84. package/package.json +34 -5
@@ -1,5 +1,7 @@
  import { LLM } from "./base.js";
  import { getEnvironmentVariable } from "../util/env.js";
+ import { GenerationChunk } from "../schema/index.js";
+ import { convertEventStreamToIterableReadableDataStream } from "../util/event-source-parse.js";
  /**
   * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
   * Language Model) class, providing a standard interface for interacting
@@ -35,6 +37,12 @@ export class CloudflareWorkersAI extends LLM {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "streaming", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: false
+         });
          Object.defineProperty(this, "lc_serializable", {
              enumerable: true,
              configurable: true,
@@ -42,6 +50,7 @@ export class CloudflareWorkersAI extends LLM {
              value: true
          });
          this.model = fields?.model ?? this.model;
+         this.streaming = fields?.streaming ?? this.streaming;
          this.cloudflareAccountId =
              fields?.cloudflareAccountId ??
                  getEnvironmentVariable("CLOUDFLARE_ACCOUNT_ID");
@@ -84,23 +93,15 @@ export class CloudflareWorkersAI extends LLM {
      _llmType() {
          return "cloudflare";
      }
-     /** Call out to CloudflareWorkersAI's complete endpoint.
-        Args:
-          prompt: The prompt to pass into the model.
-        Returns:
-          The string generated by the model.
-        Example:
-          let response = CloudflareWorkersAI.call("Tell me a joke.");
-      */
-     async _call(prompt, options) {
+     async _request(prompt, options, stream) {
          this.validateEnvironment();
          const url = `${this.baseUrl}/${this.model}`;
          const headers = {
              Authorization: `Bearer ${this.cloudflareApiToken}`,
              "Content-Type": "application/json",
          };
-         const data = { prompt };
-         const responseData = await this.caller.call(async () => {
+         const data = { prompt, stream };
+         return this.caller.call(async () => {
              const response = await fetch(url, {
                  method: "POST",
                  headers,
@@ -113,8 +114,53 @@ export class CloudflareWorkersAI extends LLM {
                  error.response = response;
                  throw error;
              }
-             return response.json();
+             return response;
          });
-         return responseData.result.response;
+     }
+     async *_streamResponseChunks(prompt, options, runManager) {
+         const response = await this._request(prompt, options, true);
+         if (!response.body) {
+             throw new Error("Empty response from Cloudflare. Please try again.");
+         }
+         const stream = convertEventStreamToIterableReadableDataStream(response.body);
+         for await (const chunk of stream) {
+             if (chunk !== "[DONE]") {
+                 const parsedChunk = JSON.parse(chunk);
+                 const generationChunk = new GenerationChunk({
+                     text: parsedChunk.response,
+                 });
+                 yield generationChunk;
+                 // eslint-disable-next-line no-void
+                 void runManager?.handleLLMNewToken(generationChunk.text ?? "");
+             }
+         }
+     }
+     /** Call out to CloudflareWorkersAI's complete endpoint.
+        Args:
+          prompt: The prompt to pass into the model.
+        Returns:
+          The string generated by the model.
+        Example:
+          let response = CloudflareWorkersAI.call("Tell me a joke.");
+      */
+     async _call(prompt, options, runManager) {
+         if (!this.streaming) {
+             const response = await this._request(prompt, options);
+             const responseData = await response.json();
+             return responseData.result.response;
+         }
+         else {
+             const stream = this._streamResponseChunks(prompt, options, runManager);
+             let finalResult;
+             for await (const chunk of stream) {
+                 if (finalResult === undefined) {
+                     finalResult = chunk;
+                 }
+                 else {
+                     finalResult = finalResult.concat(chunk);
+                 }
+             }
+             return finalResult?.text ?? "";
+         }
      }
  }
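
Taken together, this hunk adds opt-in streaming to the Cloudflare Workers AI wrapper: a new streaming flag, a shared _request helper, and an SSE-consuming _streamResponseChunks generator. A minimal usage sketch (the model id, account id, and token below are placeholders, not values from this diff):

import { CloudflareWorkersAI } from "langchain/llms/cloudflare_workersai";

// cloudflareAccountId falls back to the CLOUDFLARE_ACCOUNT_ID environment
// variable, as shown in the constructor diff above.
const model = new CloudflareWorkersAI({
  model: "@cf/meta/llama-2-7b-chat-int8", // example Workers AI model id
  streaming: true, // new in this release; defaults to false
  cloudflareAccountId: "my-account-id",
  cloudflareApiToken: "my-api-token",
});

// With streaming on, _call() consumes the event stream chunk by chunk,
// emits handleLLMNewToken callbacks, and returns the concatenated text.
const joke = await model.call("Tell me a joke about llamas.");
console.log(joke);
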
package/dist/llms/hf.cjs CHANGED
@@ -57,6 +57,12 @@ class HuggingFaceInference extends base_js_1.LLM {
              writable: true,
              value: undefined
          });
+         Object.defineProperty(this, "endpointUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: undefined
+         });
          this.model = fields?.model ?? this.model;
          this.temperature = fields?.temperature ?? this.temperature;
          this.maxTokens = fields?.maxTokens ?? this.maxTokens;
@@ -65,6 +71,7 @@ class HuggingFaceInference extends base_js_1.LLM {
          this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
          this.apiKey =
              fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("HUGGINGFACEHUB_API_KEY");
+         this.endpointUrl = fields?.endpointUrl;
          if (!this.apiKey) {
              throw new Error("Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.");
          }
@@ -75,7 +82,9 @@
      /** @ignore */
      async _call(prompt, options) {
          const { HfInference } = await HuggingFaceInference.imports();
-         const hf = new HfInference(this.apiKey);
+         const hf = this.endpointUrl
+             ? new HfInference(this.apiKey).endpoint(this.endpointUrl)
+             : new HfInference(this.apiKey);
          const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), {
              model: this.model,
              parameters: {
package/dist/llms/hf.d.ts CHANGED
@@ -6,6 +6,8 @@ import { LLM, BaseLLMParams } from "./base.js";
  export interface HFInput {
      /** Model to use */
      model: string;
+     /** Custom inference endpoint URL to use */
+     endpointUrl?: string;
      /** Sampling temperature to use */
      temperature?: number;
      /**
@@ -36,6 +38,7 @@ export declare class HuggingFaceInference extends LLM implements HFInput {
      topK: number | undefined;
      frequencyPenalty: number | undefined;
      apiKey: string | undefined;
+     endpointUrl: string | undefined;
      constructor(fields?: Partial<HFInput> & BaseLLMParams);
      _llmType(): string;
      /** @ignore */
package/dist/llms/hf.js CHANGED
@@ -54,6 +54,12 @@ export class HuggingFaceInference extends LLM {
              writable: true,
              value: undefined
          });
+         Object.defineProperty(this, "endpointUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: undefined
+         });
          this.model = fields?.model ?? this.model;
          this.temperature = fields?.temperature ?? this.temperature;
          this.maxTokens = fields?.maxTokens ?? this.maxTokens;
@@ -62,6 +68,7 @@ export class HuggingFaceInference extends LLM {
          this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty;
          this.apiKey =
              fields?.apiKey ?? getEnvironmentVariable("HUGGINGFACEHUB_API_KEY");
+         this.endpointUrl = fields?.endpointUrl;
          if (!this.apiKey) {
              throw new Error("Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.");
          }
@@ -72,7 +79,9 @@ export class HuggingFaceInference extends LLM {
      /** @ignore */
      async _call(prompt, options) {
          const { HfInference } = await HuggingFaceInference.imports();
-         const hf = new HfInference(this.apiKey);
+         const hf = this.endpointUrl
+             ? new HfInference(this.apiKey).endpoint(this.endpointUrl)
+             : new HfInference(this.apiKey);
          const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), {
              model: this.model,
              parameters: {
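
Across the three hf builds the change is the same: a new optional endpointUrl routes requests to a dedicated Hugging Face Inference Endpoint via HfInference(...).endpoint(...) instead of the shared hosted API. A minimal sketch (the endpoint URL is a placeholder):

import { HuggingFaceInference } from "langchain/llms/hf";

const model = new HuggingFaceInference({
  model: "gpt2",
  apiKey: "hf_...", // or set HUGGINGFACEHUB_API_KEY
  // Hypothetical endpoint URL; when set, the wrapper calls
  // new HfInference(apiKey).endpoint(endpointUrl) as shown above.
  endpointUrl: "https://my-endpoint.endpoints.huggingface.cloud",
});

const res = await model.call("1 + 1 =");
console.log(res);
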
package/dist/llms/llama_cpp.cjs CHANGED
@@ -1,7 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.LlamaCpp = void 0;
- const node_llama_cpp_1 = require("node-llama-cpp");
+ const llama_cpp_js_1 = require("../util/llama_cpp.cjs");
  const base_js_1 = require("./base.cjs");
  /**
   * To use this model you need to have the `node-llama-cpp` module installed.
@@ -15,73 +15,31 @@ class LlamaCpp extends base_js_1.LLM {
      }
      constructor(inputs) {
          super(inputs);
-         Object.defineProperty(this, "batchSize", {
+         Object.defineProperty(this, "maxTokens", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "contextSize", {
+         Object.defineProperty(this, "temperature", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "embedding", {
+         Object.defineProperty(this, "topK", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "f16Kv", {
+         Object.defineProperty(this, "topP", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "gpuLayers", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "logitsAll", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "lowVram", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "seed", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "useMlock", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "useMmap", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "vocabOnly", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "modelPath", {
+         Object.defineProperty(this, "trimWhitespaceSuffix", {
              enumerable: true,
              configurable: true,
              writable: true,
@@ -105,29 +63,31 @@ class LlamaCpp extends base_js_1.LLM {
              writable: true,
              value: void 0
          });
-         this.batchSize = inputs.batchSize;
-         this.contextSize = inputs.contextSize;
-         this.embedding = inputs.embedding;
-         this.f16Kv = inputs.f16Kv;
-         this.gpuLayers = inputs.gpuLayers;
-         this.logitsAll = inputs.logitsAll;
-         this.lowVram = inputs.lowVram;
-         this.modelPath = inputs.modelPath;
-         this.seed = inputs.seed;
-         this.useMlock = inputs.useMlock;
-         this.useMmap = inputs.useMmap;
-         this.vocabOnly = inputs.vocabOnly;
-         this._model = new node_llama_cpp_1.LlamaModel(inputs);
-         this._context = new node_llama_cpp_1.LlamaContext({ model: this._model });
-         this._session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
+         this.maxTokens = inputs?.maxTokens;
+         this.temperature = inputs?.temperature;
+         this.topK = inputs?.topK;
+         this.topP = inputs?.topP;
+         this.trimWhitespaceSuffix = inputs?.trimWhitespaceSuffix;
+         this._model = (0, llama_cpp_js_1.createLlamaModel)(inputs);
+         this._context = (0, llama_cpp_js_1.createLlamaContext)(this._model, inputs);
+         this._session = (0, llama_cpp_js_1.createLlamaSession)(this._context);
      }
      _llmType() {
          return "llama2_cpp";
      }
      /** @ignore */
-     async _call(prompt, options) {
+     async _call(prompt,
+     // @ts-expect-error - TS6133: 'options' is declared but its value is never read.
+     options) {
          try {
-             const completion = await this._session.prompt(prompt, options);
+             const promptOptions = {
+                 maxTokens: this?.maxTokens,
+                 temperature: this?.temperature,
+                 topK: this?.topK,
+                 topP: this?.topP,
+                 trimWhitespaceSuffix: this?.trimWhitespaceSuffix,
+             };
+             const completion = await this._session.prompt(prompt, promptOptions);
              return completion;
          }
          catch (e) {
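
The constructor above now delegates to factory helpers in the new package/dist/util/llama_cpp module. That file's body is not shown in this diff; based on the call sites (createLlamaModel(inputs), createLlamaContext(model, inputs), createLlamaSession(context)), it presumably wraps the node-llama-cpp constructors roughly like the following sketch. The forwarded option names are assumptions carried over from the old LlamaCppInputs fields, not the shipped source:

import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";

// Assumed shape of the shared inputs; the real LlamaBaseCppInputs lives in
// dist/util/llama_cpp.d.ts and may differ.
interface LlamaBaseCppInputs {
  modelPath: string;
  gpuLayers?: number;
  batchSize?: number;
  contextSize?: number;
  useMlock?: boolean;
  useMmap?: boolean;
  vocabOnly?: boolean;
}

export function createLlamaModel(inputs: LlamaBaseCppInputs): LlamaModel {
  // Loads the weights; everything except modelPath is optional.
  return new LlamaModel({
    modelPath: inputs.modelPath,
    gpuLayers: inputs.gpuLayers,
    useMlock: inputs.useMlock,
    useMmap: inputs.useMmap,
    vocabOnly: inputs.vocabOnly,
  });
}

export function createLlamaContext(model: LlamaModel, inputs: LlamaBaseCppInputs): LlamaContext {
  // Batch and context sizes stay context-level options, as before the refactor.
  return new LlamaContext({
    model,
    batchSize: inputs.batchSize,
    contextSize: inputs.contextSize,
  });
}

export function createLlamaSession(context: LlamaContext): LlamaChatSession {
  return new LlamaChatSession({ context });
}
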
package/dist/llms/llama_cpp.d.ts CHANGED
@@ -1,40 +1,11 @@
  import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
+ import { LlamaBaseCppInputs } from "../util/llama_cpp.js";
  import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
  /**
   * Note that the modelPath is the only required parameter. For testing you
   * can set this in the environment variable `LLAMA_PATH`.
   */
- export interface LlamaCppInputs extends BaseLLMParams {
-     /** Prompt processing batch size. */
-     batchSize?: number;
-     /** Text context size. */
-     contextSize?: number;
-     /** Embedding mode only. */
-     embedding?: boolean;
-     /** Use fp16 for KV cache. */
-     f16Kv?: boolean;
-     /** Number of layers to store in VRAM. */
-     gpuLayers?: number;
-     /** The llama_eval() call computes all logits, not just the last one. */
-     logitsAll?: boolean;
-     /** If true, reduce VRAM usage at the cost of performance. */
-     lowVram?: boolean;
-     /** Path to the model on the filesystem. */
-     modelPath: string;
-     /** If null, a random seed will be used. */
-     seed?: null | number;
-     /** The randomness of the responses, e.g. 0.1 deterministic, 1.5 creative, 0.8 balanced, 0 disables. */
-     temperature?: number;
-     /** Consider the n most likely tokens, where n is 1 to vocabulary size, 0 disables (uses full vocabulary). Note: only applies when `temperature` > 0. */
-     topK?: number;
-     /** Selects the smallest token set whose probability exceeds P, where P is between 0 - 1, 1 disables. Note: only applies when `temperature` > 0. */
-     topP?: number;
-     /** Force system to keep model in RAM. */
-     useMlock?: boolean;
-     /** Use mmap if possible. */
-     useMmap?: boolean;
-     /** Only load the vocabulary, no weights. */
-     vocabOnly?: boolean;
+ export interface LlamaCppInputs extends LlamaBaseCppInputs, BaseLLMParams {
  }
  export interface LlamaCppCallOptions extends BaseLLMCallOptions {
      /** The maximum number of tokens the response should contain. */
@@ -51,18 +22,11 @@ export interface LlamaCppCallOptions extends BaseLLMCallOptions {
  export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
      CallOptions: LlamaCppCallOptions;
      static inputs: LlamaCppInputs;
-     batchSize?: number;
-     contextSize?: number;
-     embedding?: boolean;
-     f16Kv?: boolean;
-     gpuLayers?: number;
-     logitsAll?: boolean;
-     lowVram?: boolean;
-     seed?: null | number;
-     useMlock?: boolean;
-     useMmap?: boolean;
-     vocabOnly?: boolean;
-     modelPath: string;
+     maxTokens?: number;
+     temperature?: number;
+     topK?: number;
+     topP?: number;
+     trimWhitespaceSuffix?: boolean;
      _model: LlamaModel;
      _context: LlamaContext;
      _session: LlamaChatSession;
package/dist/llms/llama_cpp.js CHANGED
@@ -1,4 +1,4 @@
- import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
+ import { createLlamaModel, createLlamaContext, createLlamaSession, } from "../util/llama_cpp.js";
  import { LLM } from "./base.js";
  /**
   * To use this model you need to have the `node-llama-cpp` module installed.
@@ -12,73 +12,31 @@ export class LlamaCpp extends LLM {
      }
      constructor(inputs) {
          super(inputs);
-         Object.defineProperty(this, "batchSize", {
+         Object.defineProperty(this, "maxTokens", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "contextSize", {
+         Object.defineProperty(this, "temperature", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "embedding", {
+         Object.defineProperty(this, "topK", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "f16Kv", {
+         Object.defineProperty(this, "topP", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         Object.defineProperty(this, "gpuLayers", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "logitsAll", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "lowVram", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "seed", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "useMlock", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "useMmap", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "vocabOnly", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "modelPath", {
+         Object.defineProperty(this, "trimWhitespaceSuffix", {
              enumerable: true,
              configurable: true,
              writable: true,
@@ -102,29 +60,31 @@ export class LlamaCpp extends LLM {
              writable: true,
              value: void 0
          });
-         this.batchSize = inputs.batchSize;
-         this.contextSize = inputs.contextSize;
-         this.embedding = inputs.embedding;
-         this.f16Kv = inputs.f16Kv;
-         this.gpuLayers = inputs.gpuLayers;
-         this.logitsAll = inputs.logitsAll;
-         this.lowVram = inputs.lowVram;
-         this.modelPath = inputs.modelPath;
-         this.seed = inputs.seed;
-         this.useMlock = inputs.useMlock;
-         this.useMmap = inputs.useMmap;
-         this.vocabOnly = inputs.vocabOnly;
-         this._model = new LlamaModel(inputs);
-         this._context = new LlamaContext({ model: this._model });
-         this._session = new LlamaChatSession({ context: this._context });
+         this.maxTokens = inputs?.maxTokens;
+         this.temperature = inputs?.temperature;
+         this.topK = inputs?.topK;
+         this.topP = inputs?.topP;
+         this.trimWhitespaceSuffix = inputs?.trimWhitespaceSuffix;
+         this._model = createLlamaModel(inputs);
+         this._context = createLlamaContext(this._model, inputs);
+         this._session = createLlamaSession(this._context);
      }
      _llmType() {
          return "llama2_cpp";
      }
      /** @ignore */
-     async _call(prompt, options) {
+     async _call(prompt,
+     // @ts-expect-error - TS6133: 'options' is declared but its value is never read.
+     options) {
          try {
-             const completion = await this._session.prompt(prompt, options);
+             const promptOptions = {
+                 maxTokens: this?.maxTokens,
+                 temperature: this?.temperature,
+                 topK: this?.topK,
+                 topP: this?.topP,
+                 trimWhitespaceSuffix: this?.trimWhitespaceSuffix,
+             };
+             const completion = await this._session.prompt(prompt, promptOptions);
              return completion;
          }
          catch (e) {
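
After this refactor the llama.cpp LLM takes its sampling parameters at construction time and forwards them to session.prompt() on every call. A short usage sketch (the model path is a placeholder; modelPath remains the only required field per the d.ts comment above):

import { LlamaCpp } from "langchain/llms/llama_cpp";

const model = new LlamaCpp({
  modelPath: "/path/to/your/model.gguf", // placeholder
  maxTokens: 256,
  temperature: 0.7,
  topK: 40,
  topP: 0.9,
  trimWhitespaceSuffix: true,
});

const answer = await model.call("Where do llamas come from?");
console.log(answer);
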
package/dist/load/import_constants.cjs CHANGED
@@ -25,6 +25,7 @@ exports.optionalImportEntrypoints = [
      "langchain/embeddings/hf_transformers",
      "langchain/embeddings/googlevertexai",
      "langchain/embeddings/googlepalm",
+     "langchain/embeddings/llama_cpp",
      "langchain/llms/load",
      "langchain/llms/cohere",
      "langchain/llms/hf",
@@ -113,6 +114,8 @@ exports.optionalImportEntrypoints = [
      "langchain/chat_models/googlevertexai",
      "langchain/chat_models/googlevertexai/web",
      "langchain/chat_models/googlepalm",
+     "langchain/chat_models/iflytek_xinghuo",
+     "langchain/chat_models/iflytek_xinghuo/web",
      "langchain/chat_models/llama_cpp",
      "langchain/sql_db",
      "langchain/callbacks/handlers/llmonitor",
package/dist/load/import_constants.js CHANGED
@@ -22,6 +22,7 @@ export const optionalImportEntrypoints = [
      "langchain/embeddings/hf_transformers",
      "langchain/embeddings/googlevertexai",
      "langchain/embeddings/googlepalm",
+     "langchain/embeddings/llama_cpp",
      "langchain/llms/load",
      "langchain/llms/cohere",
      "langchain/llms/hf",
@@ -110,6 +111,8 @@ export const optionalImportEntrypoints = [
      "langchain/chat_models/googlevertexai",
      "langchain/chat_models/googlevertexai/web",
      "langchain/chat_models/googlepalm",
+     "langchain/chat_models/iflytek_xinghuo",
+     "langchain/chat_models/iflytek_xinghuo/web",
      "langchain/chat_models/llama_cpp",
      "langchain/sql_db",
      "langchain/callbacks/handlers/llmonitor",
package/dist/prompts/chat.cjs CHANGED
@@ -374,6 +374,14 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
          };
          return new ChatPromptTemplate(promptDict);
      }
+     /**
+      * Load prompt template from a template f-string
+      */
+     static fromTemplate(template) {
+         const prompt = prompt_js_1.PromptTemplate.fromTemplate(template);
+         const humanTemplate = new HumanMessagePromptTemplate({ prompt });
+         return this.fromMessages([humanTemplate]);
+     }
      /**
       * Create a chat model-specific prompt from individual chat messages
       * or message-like tuples.
package/dist/prompts/chat.d.ts CHANGED
@@ -2,6 +2,7 @@ import { BaseCallbackConfig } from "../callbacks/manager.js";
  import { BaseMessage, BaseMessageLike, BasePromptValue, InputValues, PartialValues } from "../schema/index.js";
  import { Runnable } from "../schema/runnable/index.js";
  import { BasePromptTemplate, BasePromptTemplateInput, BaseStringPromptTemplate, TypedPromptInputValues } from "./base.js";
+ import { type ParamsFromFString } from "./prompt.js";
  /**
   * Abstract class that serves as a base for creating message prompt
   * templates. It defines how to format messages for different roles in a
@@ -170,6 +171,10 @@ export declare class ChatPromptTemplate<RunInput extends InputValues = any, Part
      _getPromptType(): "chat";
      formatMessages(values: TypedPromptInputValues<RunInput>): Promise<BaseMessage[]>;
      partial<NewPartialVariableName extends string>(values: PartialValues<NewPartialVariableName>): Promise<ChatPromptTemplate<InputValues<Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>>, any>>;
+     /**
+      * Load prompt template from a template f-string
+      */
+     static fromTemplate<RunInput extends InputValues = Symbol, T extends string = string>(template: T): ChatPromptTemplate<RunInput extends Symbol ? ParamsFromFString<T> : RunInput, any>;
      /**
       * Create a chat model-specific prompt from individual chat messages
       * or message-like tuples.
package/dist/prompts/chat.js CHANGED
@@ -362,6 +362,14 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
          };
          return new ChatPromptTemplate(promptDict);
      }
+     /**
+      * Load prompt template from a template f-string
+      */
+     static fromTemplate(template) {
+         const prompt = PromptTemplate.fromTemplate(template);
+         const humanTemplate = new HumanMessagePromptTemplate({ prompt });
+         return this.fromMessages([humanTemplate]);
+     }
      /**
       * Create a chat model-specific prompt from individual chat messages
       * or message-like tuples.
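
The new static fromTemplate wraps a single f-string in a HumanMessagePromptTemplate and hands it to fromMessages, so a one-message chat prompt no longer needs the longer form. A short sketch:

import { ChatPromptTemplate } from "langchain/prompts";

// Roughly equivalent to
// ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(...)]).
const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke about {topic}.");

const messages = await prompt.formatMessages({ adjective: "silly", topic: "llamas" });
// messages is a single HumanMessage: "Tell me a silly joke about llamas."
console.log(messages);
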