langchain 0.0.167 → 0.0.169

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90):
  1. package/README.md +4 -4
  2. package/chat_models/cloudflare_workersai.cjs +1 -0
  3. package/chat_models/cloudflare_workersai.d.ts +1 -0
  4. package/chat_models/cloudflare_workersai.js +1 -0
  5. package/chat_models/fake.cjs +1 -0
  6. package/chat_models/fake.d.ts +1 -0
  7. package/chat_models/fake.js +1 -0
  8. package/chat_models/yandex.cjs +1 -0
  9. package/chat_models/yandex.d.ts +1 -0
  10. package/chat_models/yandex.js +1 -0
  11. package/dist/agents/chat/index.cjs +3 -2
  12. package/dist/agents/chat/index.d.ts +3 -0
  13. package/dist/agents/chat/index.js +3 -2
  14. package/dist/callbacks/handlers/llmonitor.cjs +21 -17
  15. package/dist/callbacks/handlers/llmonitor.js +21 -17
  16. package/dist/chat_models/cloudflare_workersai.cjs +145 -0
  17. package/dist/chat_models/cloudflare_workersai.d.ts +46 -0
  18. package/dist/chat_models/cloudflare_workersai.js +141 -0
  19. package/dist/chat_models/fake.cjs +101 -0
  20. package/dist/chat_models/fake.d.ts +36 -0
  21. package/dist/chat_models/fake.js +97 -0
  22. package/dist/chat_models/yandex.cjs +117 -0
  23. package/dist/chat_models/yandex.d.ts +16 -0
  24. package/dist/chat_models/yandex.js +113 -0
  25. package/dist/evaluation/comparison/prompt.d.ts +2 -2
  26. package/dist/experimental/chains/violation_of_expectations/index.cjs +5 -0
  27. package/dist/experimental/chains/violation_of_expectations/index.d.ts +1 -0
  28. package/dist/experimental/chains/violation_of_expectations/index.js +1 -0
  29. package/dist/experimental/chains/violation_of_expectations/types.cjs +49 -0
  30. package/dist/experimental/chains/violation_of_expectations/types.d.ts +69 -0
  31. package/dist/experimental/chains/violation_of_expectations/types.js +46 -0
  32. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +328 -0
  33. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.d.ts +148 -0
  34. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +324 -0
  35. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.cjs +49 -0
  36. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.d.ts +5 -0
  37. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.js +46 -0
  38. package/dist/llms/cloudflare_workersai.cjs +124 -0
  39. package/dist/llms/cloudflare_workersai.d.ts +49 -0
  40. package/dist/llms/cloudflare_workersai.js +120 -0
  41. package/dist/llms/fake.cjs +82 -0
  42. package/dist/llms/fake.d.ts +31 -0
  43. package/dist/llms/fake.js +78 -0
  44. package/dist/llms/sagemaker_endpoint.cjs +9 -7
  45. package/dist/llms/sagemaker_endpoint.d.ts +3 -3
  46. package/dist/llms/sagemaker_endpoint.js +9 -7
  47. package/dist/load/import_constants.cjs +2 -0
  48. package/dist/load/import_constants.js +2 -0
  49. package/dist/load/import_map.cjs +8 -2
  50. package/dist/load/import_map.d.ts +6 -0
  51. package/dist/load/import_map.js +6 -0
  52. package/dist/retrievers/zep.cjs +29 -3
  53. package/dist/retrievers/zep.d.ts +14 -0
  54. package/dist/retrievers/zep.js +29 -3
  55. package/dist/util/axios-fetch-adapter.cjs +1 -1
  56. package/dist/util/axios-fetch-adapter.js +1 -1
  57. package/dist/util/env.cjs +1 -1
  58. package/dist/util/env.js +1 -1
  59. package/dist/util/event-source-parse.cjs +1 -1
  60. package/dist/util/event-source-parse.js +1 -1
  61. package/dist/vectorstores/closevector/common.cjs +128 -0
  62. package/dist/vectorstores/closevector/common.d.ts +82 -0
  63. package/dist/vectorstores/closevector/common.js +124 -0
  64. package/dist/vectorstores/closevector/node.cjs +109 -0
  65. package/dist/vectorstores/closevector/node.d.ts +83 -0
  66. package/dist/vectorstores/closevector/node.js +105 -0
  67. package/dist/vectorstores/closevector/web.cjs +109 -0
  68. package/dist/vectorstores/closevector/web.d.ts +80 -0
  69. package/dist/vectorstores/closevector/web.js +105 -0
  70. package/dist/vectorstores/faiss.cjs +38 -6
  71. package/dist/vectorstores/faiss.d.ts +14 -2
  72. package/dist/vectorstores/faiss.js +38 -6
  73. package/dist/vectorstores/weaviate.cjs +13 -2
  74. package/dist/vectorstores/weaviate.js +13 -2
  75. package/experimental/chains/violation_of_expectations.cjs +1 -0
  76. package/experimental/chains/violation_of_expectations.d.ts +1 -0
  77. package/experimental/chains/violation_of_expectations.js +1 -0
  78. package/llms/cloudflare_workersai.cjs +1 -0
  79. package/llms/cloudflare_workersai.d.ts +1 -0
  80. package/llms/cloudflare_workersai.js +1 -0
  81. package/llms/fake.cjs +1 -0
  82. package/llms/fake.d.ts +1 -0
  83. package/llms/fake.js +1 -0
  84. package/package.json +92 -13
  85. package/vectorstores/closevector/node.cjs +1 -0
  86. package/vectorstores/closevector/node.d.ts +1 -0
  87. package/vectorstores/closevector/node.js +1 -0
  88. package/vectorstores/closevector/web.cjs +1 -0
  89. package/vectorstores/closevector/web.d.ts +1 -0
  90. package/vectorstores/closevector/web.js +1 -0
@@ -0,0 +1,101 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.FakeListChatModel = void 0;
4
+ const base_js_1 = require("./base.cjs");
5
+ const index_js_1 = require("../schema/index.cjs");
6
+ /**
7
+ * A fake Chat Model that returns a predefined list of responses. It can be used
8
+ * for testing purposes.
9
+ */
10
+ class FakeListChatModel extends base_js_1.BaseChatModel {
11
+ static lc_name() {
12
+ return "FakeListChatModel";
13
+ }
14
+ constructor({ responses, sleep }) {
15
+ super({});
16
+ Object.defineProperty(this, "responses", {
17
+ enumerable: true,
18
+ configurable: true,
19
+ writable: true,
20
+ value: void 0
21
+ });
22
+ Object.defineProperty(this, "i", {
23
+ enumerable: true,
24
+ configurable: true,
25
+ writable: true,
26
+ value: 0
27
+ });
28
+ Object.defineProperty(this, "sleep", {
29
+ enumerable: true,
30
+ configurable: true,
31
+ writable: true,
32
+ value: void 0
33
+ });
34
+ this.responses = responses;
35
+ this.sleep = sleep;
36
+ }
37
+ _combineLLMOutput() {
38
+ return [];
39
+ }
40
+ _llmType() {
41
+ return "fake-list";
42
+ }
43
+ async _generate(_messages, options) {
44
+ await this._sleepIfRequested();
45
+ if (options?.stop?.length) {
46
+ return {
47
+ generations: [this._formatGeneration(options.stop[0])],
48
+ };
49
+ }
50
+ else {
51
+ const response = this._currentResponse();
52
+ this._incrementResponse();
53
+ return {
54
+ generations: [this._formatGeneration(response)],
55
+ llmOutput: {},
56
+ };
57
+ }
58
+ }
59
+ _formatGeneration(text) {
60
+ return {
61
+ message: new index_js_1.AIMessage(text),
62
+ text,
63
+ };
64
+ }
65
+ async *_streamResponseChunks(_messages, _options, _runManager) {
66
+ const response = this._currentResponse();
67
+ this._incrementResponse();
68
+ for await (const text of response) {
69
+ await this._sleepIfRequested();
70
+ yield this._createResponseChunk(text);
71
+ }
72
+ }
73
+ async _sleepIfRequested() {
74
+ if (this.sleep !== undefined) {
75
+ await this._sleep();
76
+ }
77
+ }
78
+ async _sleep() {
79
+ return new Promise((resolve) => {
80
+ setTimeout(() => resolve(), this.sleep);
81
+ });
82
+ }
83
+ _createResponseChunk(text) {
84
+ return new index_js_1.ChatGenerationChunk({
85
+ message: new index_js_1.AIMessageChunk({ content: text }),
86
+ text,
87
+ });
88
+ }
89
+ _currentResponse() {
90
+ return this.responses[this.i];
91
+ }
92
+ _incrementResponse() {
93
+ if (this.i < this.responses.length - 1) {
94
+ this.i += 1;
95
+ }
96
+ else {
97
+ this.i = 0;
98
+ }
99
+ }
100
+ }
101
+ exports.FakeListChatModel = FakeListChatModel;
@@ -0,0 +1,36 @@
1
+ import { BaseChatModel, BaseChatModelParams } from "./base.js";
2
+ import { AIMessage, BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
3
+ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
4
+ /**
5
+ * Interface for the input parameters specific to the Fake List Chat model.
6
+ */
7
+ export interface FakeChatInput extends BaseChatModelParams {
8
+ /** Responses to return */
9
+ responses: string[];
10
+ /** Time to sleep in milliseconds between responses */
11
+ sleep?: number;
12
+ }
13
+ /**
14
+ * A fake Chat Model that returns a predefined list of responses. It can be used
15
+ * for testing purposes.
16
+ */
17
+ export declare class FakeListChatModel extends BaseChatModel {
18
+ static lc_name(): string;
19
+ responses: string[];
20
+ i: number;
21
+ sleep?: number;
22
+ constructor({ responses, sleep }: FakeChatInput);
23
+ _combineLLMOutput(): never[];
24
+ _llmType(): string;
25
+ _generate(_messages: BaseMessage[], options?: this["ParsedCallOptions"]): Promise<ChatResult>;
26
+ _formatGeneration(text: string): {
27
+ message: AIMessage;
28
+ text: string;
29
+ };
30
+ _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
31
+ _sleepIfRequested(): Promise<void>;
32
+ _sleep(): Promise<void>;
33
+ _createResponseChunk(text: string): ChatGenerationChunk;
34
+ _currentResponse(): string;
35
+ _incrementResponse(): void;
36
+ }
@@ -0,0 +1,97 @@
1
import { BaseChatModel } from "./base.js";
import { AIMessage, AIMessageChunk, ChatGenerationChunk, } from "../schema/index.js";
/**
 * A fake Chat Model that returns a predefined list of responses, cycling
 * through them in order. It can be used for testing purposes without
 * calling a real provider.
 */
export class FakeListChatModel extends BaseChatModel {
    static lc_name() {
        return "FakeListChatModel";
    }
    /**
     * @param {Object} fields
     * @param {string[]} fields.responses - Canned responses, returned in order.
     * @param {number} [fields.sleep] - Optional delay (ms) before each response/chunk.
     */
    constructor({ responses, sleep }) {
        super({});
        // Canned responses, cycled through via `this.i`.
        this.responses = responses;
        // Index of the next response to return.
        this.i = 0;
        // Optional delay in milliseconds applied before responding.
        this.sleep = sleep;
    }
    _combineLLMOutput() {
        return [];
    }
    _llmType() {
        return "fake-list";
    }
    /**
     * Returns the first stop word when stop words are supplied; otherwise the
     * next canned response.
     */
    async _generate(_messages, options) {
        await this._sleepIfRequested();
        if (options?.stop?.length) {
            return {
                generations: [this._formatGeneration(options.stop[0])],
                // FIX: previously omitted in this branch only, unlike the
                // non-stop branch below; include it so callers merging
                // llmOutput always see a consistent shape.
                llmOutput: {},
            };
        }
        const response = this._currentResponse();
        this._incrementResponse();
        return {
            generations: [this._formatGeneration(response)],
            llmOutput: {},
        };
    }
    _formatGeneration(text) {
        return {
            message: new AIMessage(text),
            text,
        };
    }
    /** Streams the next canned response one character at a time. */
    async *_streamResponseChunks(_messages, _options, _runManager) {
        const response = this._currentResponse();
        this._incrementResponse();
        for await (const text of response) {
            await this._sleepIfRequested();
            yield this._createResponseChunk(text);
        }
    }
    async _sleepIfRequested() {
        if (this.sleep !== undefined) {
            await this._sleep();
        }
    }
    /** Resolves after `this.sleep` milliseconds. */
    async _sleep() {
        return new Promise((resolve) => {
            setTimeout(() => resolve(), this.sleep);
        });
    }
    _createResponseChunk(text) {
        return new ChatGenerationChunk({
            message: new AIMessageChunk({ content: text }),
            text,
        });
    }
    /** The response that will be returned next (not yet consumed). */
    _currentResponse() {
        return this.responses[this.i];
    }
    /** Advances to the next response, wrapping back to the start at the end. */
    _incrementResponse() {
        if (this.i < this.responses.length - 1) {
            this.i += 1;
        }
        else {
            this.i = 0;
        }
    }
}
@@ -0,0 +1,117 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ChatYandexGPT = void 0;
4
+ const index_js_1 = require("../schema/index.cjs");
5
+ const env_js_1 = require("../util/env.cjs");
6
+ const base_js_1 = require("./base.cjs");
7
+ const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/chat";
8
+ function _parseChatHistory(history) {
9
+ const chatHistory = [];
10
+ let instruction = "";
11
+ for (const message of history) {
12
+ if ("content" in message) {
13
+ if (message._getType() === "human") {
14
+ chatHistory.push({ role: "user", text: message.content });
15
+ }
16
+ else if (message._getType() === "ai") {
17
+ chatHistory.push({ role: "assistant", text: message.content });
18
+ }
19
+ else if (message._getType() === "system") {
20
+ instruction = message.content;
21
+ }
22
+ }
23
+ }
24
+ return [chatHistory, instruction];
25
+ }
26
+ class ChatYandexGPT extends base_js_1.BaseChatModel {
27
+ constructor(fields) {
28
+ super(fields ?? {});
29
+ Object.defineProperty(this, "apiKey", {
30
+ enumerable: true,
31
+ configurable: true,
32
+ writable: true,
33
+ value: void 0
34
+ });
35
+ Object.defineProperty(this, "iamToken", {
36
+ enumerable: true,
37
+ configurable: true,
38
+ writable: true,
39
+ value: void 0
40
+ });
41
+ Object.defineProperty(this, "temperature", {
42
+ enumerable: true,
43
+ configurable: true,
44
+ writable: true,
45
+ value: 0.6
46
+ });
47
+ Object.defineProperty(this, "maxTokens", {
48
+ enumerable: true,
49
+ configurable: true,
50
+ writable: true,
51
+ value: 1700
52
+ });
53
+ Object.defineProperty(this, "model", {
54
+ enumerable: true,
55
+ configurable: true,
56
+ writable: true,
57
+ value: "general"
58
+ });
59
+ const apiKey = fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("YC_API_KEY");
60
+ const iamToken = fields?.iamToken ?? (0, env_js_1.getEnvironmentVariable)("YC_IAM_TOKEN");
61
+ if (apiKey === undefined && iamToken === undefined) {
62
+ throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
63
+ }
64
+ this.apiKey = apiKey;
65
+ this.iamToken = iamToken;
66
+ this.maxTokens = fields?.maxTokens ?? this.maxTokens;
67
+ this.temperature = fields?.temperature ?? this.temperature;
68
+ this.model = fields?.model ?? this.model;
69
+ }
70
+ _llmType() {
71
+ return "yandexgpt";
72
+ }
73
+ _combineLLMOutput() {
74
+ return {};
75
+ }
76
+ /** @ignore */
77
+ async _generate(messages, options, _) {
78
+ const [messageHistory, instruction] = _parseChatHistory(messages);
79
+ const headers = { "Content-Type": "application/json", Authorization: "" };
80
+ if (this.apiKey !== undefined) {
81
+ headers.Authorization = `Api-Key ${this.apiKey}`;
82
+ }
83
+ else {
84
+ headers.Authorization = `Bearer ${this.iamToken}`;
85
+ }
86
+ const bodyData = {
87
+ model: this.model,
88
+ generationOptions: {
89
+ temperature: this.temperature,
90
+ maxTokens: this.maxTokens,
91
+ },
92
+ messages: messageHistory,
93
+ instructionText: instruction,
94
+ };
95
+ const response = await fetch(apiUrl, {
96
+ method: "POST",
97
+ headers,
98
+ body: JSON.stringify(bodyData),
99
+ signal: options?.signal,
100
+ });
101
+ if (!response.ok) {
102
+ throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
103
+ }
104
+ const responseData = await response.json();
105
+ const { result } = responseData;
106
+ const { text } = result.message;
107
+ const totalTokens = result.num_tokens;
108
+ const generations = [
109
+ { text, message: new index_js_1.AIMessage(text) },
110
+ ];
111
+ return {
112
+ generations,
113
+ llmOutput: { totalTokens },
114
+ };
115
+ }
116
+ }
117
+ exports.ChatYandexGPT = ChatYandexGPT;
@@ -0,0 +1,16 @@
1
+ import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
2
+ import { YandexGPTInputs } from "../llms/yandex.js";
3
+ import { BaseMessage, ChatResult } from "../schema/index.js";
4
+ import { BaseChatModel } from "./base.js";
5
+ export declare class ChatYandexGPT extends BaseChatModel {
6
+ apiKey?: string;
7
+ iamToken?: string;
8
+ temperature: number;
9
+ maxTokens: number;
10
+ model: string;
11
+ constructor(fields?: YandexGPTInputs);
12
+ _llmType(): string;
13
+ _combineLLMOutput?(): {};
14
+ /** @ignore */
15
+ _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _?: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
16
+ }
@@ -0,0 +1,113 @@
1
import { AIMessage, } from "../schema/index.js";
import { getEnvironmentVariable } from "../util/env.js";
import { BaseChatModel } from "./base.js";
// Single chat endpoint of the (v1alpha) YandexGPT API.
const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/chat";
/**
 * Converts a LangChain message history into the shape the YandexGPT API
 * expects: an array of { role, text } turns plus one instruction string
 * taken from the last system message seen (if any).
 */
function _parseChatHistory(history) {
    const turns = [];
    let instruction = "";
    for (const msg of history) {
        if (!("content" in msg)) {
            continue;
        }
        const kind = msg._getType();
        if (kind === "human") {
            turns.push({ role: "user", text: msg.content });
        }
        else if (kind === "ai") {
            turns.push({ role: "assistant", text: msg.content });
        }
        else if (kind === "system") {
            instruction = msg.content;
        }
    }
    return [turns, instruction];
}
/**
 * Chat model backed by the YandexGPT REST API. Authenticates with either an
 * API key (YC_API_KEY) or an IAM token (YC_IAM_TOKEN).
 */
export class ChatYandexGPT extends BaseChatModel {
    constructor(fields) {
        super(fields ?? {});
        // Credentials: at least one of apiKey / iamToken is required.
        this.apiKey = undefined;
        this.iamToken = undefined;
        // Sampling temperature; default mirrors the published dist build.
        this.temperature = 0.6;
        // Maximum number of tokens to generate.
        this.maxTokens = 1700;
        // Model name to request.
        this.model = "general";
        const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
        const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
        if (apiKey === undefined && iamToken === undefined) {
            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
        }
        this.apiKey = apiKey;
        this.iamToken = iamToken;
        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
        this.temperature = fields?.temperature ?? this.temperature;
        this.model = fields?.model ?? this.model;
    }
    _llmType() {
        return "yandexgpt";
    }
    _combineLLMOutput() {
        return {};
    }
    /** @ignore */
    async _generate(messages, options, _) {
        const [messageHistory, instruction] = _parseChatHistory(messages);
        // API keys use the "Api-Key" scheme, IAM tokens use "Bearer".
        const headers = {
            "Content-Type": "application/json",
            Authorization: this.apiKey !== undefined
                ? `Api-Key ${this.apiKey}`
                : `Bearer ${this.iamToken}`,
        };
        const bodyData = {
            model: this.model,
            generationOptions: {
                temperature: this.temperature,
                maxTokens: this.maxTokens,
            },
            messages: messageHistory,
            instructionText: instruction,
        };
        const response = await fetch(apiUrl, {
            method: "POST",
            headers,
            body: JSON.stringify(bodyData),
            signal: options?.signal,
        });
        if (!response.ok) {
            throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
        }
        const responseData = await response.json();
        const { text } = responseData.result.message;
        const totalTokens = responseData.result.num_tokens;
        return {
            generations: [{ text, message: new AIMessage(text) }],
            llmOutput: { totalTokens },
        };
    }
}
@@ -8,14 +8,14 @@
8
8
  import { PromptTemplate } from "../../prompts/index.js";
9
9
  export declare const PROMPT: PromptTemplate<{
10
10
  input: any;
11
- criteria: any;
12
11
  prediction: any;
12
+ criteria: any;
13
13
  predictionB: any;
14
14
  }, any>;
15
15
  export declare const PROMPT_WITH_REFERENCES: PromptTemplate<{
16
16
  input: any;
17
+ prediction: any;
17
18
  criteria: any;
18
19
  reference: any;
19
- prediction: any;
20
20
  predictionB: any;
21
21
  }, any>;
@@ -0,0 +1,5 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ViolationOfExpectationsChain = void 0;
4
+ var violation_of_expectations_chain_js_1 = require("./violation_of_expectations_chain.cjs");
5
+ Object.defineProperty(exports, "ViolationOfExpectationsChain", { enumerable: true, get: function () { return violation_of_expectations_chain_js_1.ViolationOfExpectationsChain; } });
@@ -0,0 +1 @@
1
+ export { type ViolationOfExpectationsChainInput, ViolationOfExpectationsChain, } from "./violation_of_expectations_chain.js";
@@ -0,0 +1 @@
1
/** Public entry point for the Violation of Expectations chain. */
export { ViolationOfExpectationsChain } from "./violation_of_expectations_chain.js";
@@ -0,0 +1,49 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.PREDICTION_VIOLATIONS_FUNCTION = exports.PREDICT_NEXT_USER_MESSAGE_FUNCTION = void 0;
4
+ exports.PREDICT_NEXT_USER_MESSAGE_FUNCTION = {
5
+ name: "predictNextUserMessage",
6
+ description: "Predicts the next user message, along with insights.",
7
+ parameters: {
8
+ type: "object",
9
+ properties: {
10
+ userState: {
11
+ type: "string",
12
+ description: "Concise reasoning about the users internal mental state.",
13
+ },
14
+ predictedUserMessage: {
15
+ type: "string",
16
+ description: "Your prediction on how they will respond to the AI's most recent message.",
17
+ },
18
+ insights: {
19
+ type: "array",
20
+ items: {
21
+ type: "string",
22
+ },
23
+ description: "A concise list of any additional insights that would be useful to improve prediction.",
24
+ },
25
+ },
26
+ required: ["userState", "predictedUserMessage", "insights"],
27
+ },
28
+ };
29
+ exports.PREDICTION_VIOLATIONS_FUNCTION = {
30
+ name: "predictionViolations",
31
+ description: "Generates violations, errors and differences between the predicted user response, and the actual response.",
32
+ parameters: {
33
+ type: "object",
34
+ properties: {
35
+ violationExplanation: {
36
+ type: "string",
37
+ description: "How was the predication violated?",
38
+ },
39
+ explainedPredictionErrors: {
40
+ type: "array",
41
+ items: {
42
+ type: "string",
43
+ },
44
+ description: "Explanations of how the prediction was violated and why",
45
+ },
46
+ },
47
+ required: ["violationExplanation", "explainedPredictionErrors"],
48
+ },
49
+ };
@@ -0,0 +1,69 @@
1
+ import { BaseMessage, HumanMessage } from "../../../schema/index.js";
2
+ /**
3
+ * Contains the chunk of messages, along with the
4
+ * users response, which is the next message after the chunk.
5
+ */
6
+ export type MessageChunkResult = {
7
+ chunkedMessages: BaseMessage[];
8
+ /**
9
+ * User response can be undefined if the last message in
10
+ * the chat history was from the AI.
11
+ */
12
+ userResponse?: HumanMessage;
13
+ };
14
+ export type PredictNextUserMessageResponse = {
15
+ userState: string;
16
+ predictedUserMessage: string;
17
+ insights: Array<string>;
18
+ };
19
+ export type GetPredictionViolationsResponse = {
20
+ userResponse?: HumanMessage;
21
+ revisedPrediction: string;
22
+ explainedPredictionErrors: Array<string>;
23
+ };
24
+ export declare const PREDICT_NEXT_USER_MESSAGE_FUNCTION: {
25
+ name: string;
26
+ description: string;
27
+ parameters: {
28
+ type: string;
29
+ properties: {
30
+ userState: {
31
+ type: string;
32
+ description: string;
33
+ };
34
+ predictedUserMessage: {
35
+ type: string;
36
+ description: string;
37
+ };
38
+ insights: {
39
+ type: string;
40
+ items: {
41
+ type: string;
42
+ };
43
+ description: string;
44
+ };
45
+ };
46
+ required: string[];
47
+ };
48
+ };
49
+ export declare const PREDICTION_VIOLATIONS_FUNCTION: {
50
+ name: string;
51
+ description: string;
52
+ parameters: {
53
+ type: string;
54
+ properties: {
55
+ violationExplanation: {
56
+ type: string;
57
+ description: string;
58
+ };
59
+ explainedPredictionErrors: {
60
+ type: string;
61
+ items: {
62
+ type: string;
63
+ };
64
+ description: string;
65
+ };
66
+ };
67
+ required: string[];
68
+ };
69
+ };
@@ -0,0 +1,46 @@
1
/**
 * OpenAI function schema asking the model to predict the user's next
 * message, along with reasoning about their state and reusable insights.
 */
export const PREDICT_NEXT_USER_MESSAGE_FUNCTION = {
    name: "predictNextUserMessage",
    description: "Predicts the next user message, along with insights.",
    parameters: {
        type: "object",
        properties: {
            userState: {
                type: "string",
                description: "Concise reasoning about the users internal mental state.",
            },
            predictedUserMessage: {
                type: "string",
                description: "Your prediction on how they will respond to the AI's most recent message.",
            },
            insights: {
                type: "array",
                items: {
                    type: "string",
                },
                description: "A concise list of any additional insights that would be useful to improve prediction.",
            },
        },
        required: ["userState", "predictedUserMessage", "insights"],
    },
};
/**
 * OpenAI function schema asking the model to explain how the actual user
 * response deviated from the predicted one.
 */
export const PREDICTION_VIOLATIONS_FUNCTION = {
    name: "predictionViolations",
    description: "Generates violations, errors and differences between the predicted user response, and the actual response.",
    parameters: {
        type: "object",
        properties: {
            violationExplanation: {
                type: "string",
                // FIX: "predication" -> "prediction" (typo in prompt text sent to the model).
                description: "How was the prediction violated?",
            },
            explainedPredictionErrors: {
                type: "array",
                items: {
                    type: "string",
                },
                description: "Explanations of how the prediction was violated and why",
            },
        },
        required: ["violationExplanation", "explainedPredictionErrors"],
    },
};