langchain 0.0.146 → 0.0.147

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (64)
  1. package/dist/base_language/index.cjs +2 -2
  2. package/dist/base_language/index.d.ts +2 -1
  3. package/dist/base_language/index.js +1 -1
  4. package/dist/chains/base.d.ts +1 -1
  5. package/dist/chains/openai_functions/openapi.cjs +32 -27
  6. package/dist/chains/openai_functions/openapi.d.ts +9 -0
  7. package/dist/chains/openai_functions/openapi.js +31 -27
  8. package/dist/chat_models/base.d.ts +1 -1
  9. package/dist/chat_models/openai.cjs +1 -1
  10. package/dist/chat_models/openai.js +1 -1
  11. package/dist/experimental/llms/bittensor.cjs +141 -0
  12. package/dist/experimental/llms/bittensor.d.ts +33 -0
  13. package/dist/experimental/llms/bittensor.js +137 -0
  14. package/dist/hub.d.ts +1 -1
  15. package/dist/llms/base.d.ts +1 -1
  16. package/dist/llms/openai-chat.cjs +1 -1
  17. package/dist/llms/openai-chat.js +1 -1
  18. package/dist/llms/openai.cjs +1 -1
  19. package/dist/llms/openai.js +1 -1
  20. package/dist/load/import_constants.cjs +1 -0
  21. package/dist/load/import_constants.js +1 -0
  22. package/dist/load/import_map.cjs +1 -1
  23. package/dist/load/import_map.d.ts +1 -1
  24. package/dist/load/import_map.js +1 -1
  25. package/dist/load/index.cjs +2 -1
  26. package/dist/load/index.js +2 -1
  27. package/dist/prompts/base.cjs +2 -2
  28. package/dist/prompts/base.d.ts +1 -1
  29. package/dist/prompts/base.js +1 -1
  30. package/dist/prompts/chat.cjs +2 -2
  31. package/dist/prompts/chat.d.ts +1 -1
  32. package/dist/prompts/chat.js +1 -1
  33. package/dist/schema/document.cjs +2 -2
  34. package/dist/schema/document.d.ts +1 -1
  35. package/dist/schema/document.js +1 -1
  36. package/dist/schema/output_parser.cjs +2 -2
  37. package/dist/schema/output_parser.d.ts +2 -1
  38. package/dist/schema/output_parser.js +1 -1
  39. package/dist/schema/retriever.cjs +2 -2
  40. package/dist/schema/retriever.d.ts +2 -1
  41. package/dist/schema/retriever.js +1 -1
  42. package/dist/schema/runnable/config.cjs +8 -0
  43. package/dist/schema/runnable/config.d.ts +3 -0
  44. package/dist/schema/runnable/config.js +4 -0
  45. package/dist/schema/{runnable.cjs → runnable/index.cjs} +290 -101
  46. package/dist/schema/{runnable.d.ts → runnable/index.d.ts} +127 -41
  47. package/dist/schema/{runnable.js → runnable/index.js} +284 -99
  48. package/dist/tools/base.d.ts +1 -1
  49. package/dist/util/async_caller.cjs +35 -25
  50. package/dist/util/async_caller.d.ts +8 -0
  51. package/dist/util/async_caller.js +35 -25
  52. package/dist/vectorstores/pinecone.cjs +30 -22
  53. package/dist/vectorstores/pinecone.d.ts +3 -1
  54. package/dist/vectorstores/pinecone.js +30 -22
  55. package/dist/vectorstores/vectara.cjs +20 -23
  56. package/dist/vectorstores/vectara.d.ts +9 -2
  57. package/dist/vectorstores/vectara.js +20 -23
  58. package/experimental/llms/bittensor.cjs +1 -0
  59. package/experimental/llms/bittensor.d.ts +1 -0
  60. package/experimental/llms/bittensor.js +1 -0
  61. package/package.json +9 -1
  62. package/schema/runnable.cjs +1 -1
  63. package/schema/runnable.d.ts +1 -1
  64. package/schema/runnable.js +1 -1
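
The headline refactor in this release is the `schema/runnable` split: the single `runnable.{cjs,js,d.ts}` module becomes a `runnable/` directory, with `RunnableConfig` moved out into its own `runnable/config` module. The top-level `package/schema/runnable.*` shims are updated in the same release, so the public `langchain/schema/runnable` entrypoint keeps working. A minimal sketch of the resulting import shift inside `dist/` (illustrative only, mirroring the hunks below):

```typescript
// 0.0.146: one flat module exported both symbols.
// import { Runnable, RunnableConfig } from "../schema/runnable.js";

// 0.0.147: Runnable comes from the directory index, while the
// RunnableConfig type lives in the new dedicated config module.
import { Runnable } from "../schema/runnable/index.js";
import { RunnableConfig } from "../schema/runnable/config.js";
```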
package/dist/base_language/index.cjs CHANGED
@@ -5,14 +5,14 @@ const index_js_1 = require("../schema/index.cjs");
  const async_caller_js_1 = require("../util/async_caller.cjs");
  const count_tokens_js_1 = require("./count_tokens.cjs");
  const tiktoken_js_1 = require("../util/tiktoken.cjs");
- const runnable_js_1 = require("../schema/runnable.cjs");
+ const index_js_2 = require("../schema/runnable/index.cjs");
  const base_js_1 = require("../prompts/base.cjs");
  const chat_js_1 = require("../prompts/chat.cjs");
  const getVerbosity = () => false;
  /**
   * Base class for language models, chains, tools.
   */
- class BaseLangChain extends runnable_js_1.Runnable {
+ class BaseLangChain extends index_js_2.Runnable {
      get lc_attributes() {
          return {
              callbacks: undefined,
package/dist/base_language/index.d.ts CHANGED
@@ -2,7 +2,8 @@ import type { OpenAI as OpenAIClient } from "openai";
  import { BaseMessage, BaseMessageLike, BasePromptValue, LLMResult } from "../schema/index.js";
  import { BaseCallbackConfig, CallbackManager, Callbacks } from "../callbacks/manager.js";
  import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js";
- import { Runnable, RunnableConfig } from "../schema/runnable.js";
+ import { Runnable } from "../schema/runnable/index.js";
+ import { RunnableConfig } from "../schema/runnable/config.js";
  export type SerializedLLM = {
      _model: string;
      _type: string;
package/dist/base_language/index.js CHANGED
@@ -2,7 +2,7 @@ import { coerceMessageLikeToMessage, } from "../schema/index.js";
  import { AsyncCaller } from "../util/async_caller.js";
  import { getModelNameForTiktoken } from "./count_tokens.js";
  import { encodingForModel } from "../util/tiktoken.js";
- import { Runnable } from "../schema/runnable.js";
+ import { Runnable } from "../schema/runnable/index.js";
  import { StringPromptValue } from "../prompts/base.js";
  import { ChatPromptValue } from "../prompts/chat.js";
  const getVerbosity = () => false;
package/dist/chains/base.d.ts CHANGED
@@ -3,7 +3,7 @@ import { ChainValues } from "../schema/index.js";
  import { CallbackManagerForChainRun, CallbackManager, Callbacks } from "../callbacks/manager.js";
  import { SerializedBaseChain } from "./serde.js";
  import { BaseLangChain, BaseLangChainParams } from "../base_language/index.js";
- import { RunnableConfig } from "../schema/runnable.js";
+ import { RunnableConfig } from "../schema/runnable/config.js";
  export type LoadValues = Record<string, any>;
  export interface ChainInputs extends BaseLangChainParams {
      memory?: BaseMemory;
package/dist/chains/openai_functions/openapi.cjs CHANGED
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.createOpenAPIChain = void 0;
+ exports.createOpenAPIChain = exports.convertOpenAPISchemaToJSONSchema = void 0;
  const openapi_js_1 = require("../../util/openapi.cjs");
  const base_js_1 = require("../base.cjs");
  const llm_chain_js_1 = require("../llm_chain.cjs");
@@ -122,36 +122,41 @@ function convertOpenAPIParamsToJSONSchema(params, spec) {
   * @returns The JSON schema representation of the OpenAPI schema.
   */
  function convertOpenAPISchemaToJSONSchema(schema, spec) {
-     if (schema.type !== "object" && schema.type !== "array") {
+     if (schema.type === "object") {
+         return Object.keys(schema.properties ?? {}).reduce((jsonSchema, propertyName) => {
+             if (!schema.properties) {
+                 return jsonSchema;
+             }
+             const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
+             if (openAPIProperty.type === undefined) {
+                 return jsonSchema;
+             }
+             // eslint-disable-next-line no-param-reassign
+             jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(openAPIProperty, spec);
+             if (openAPIProperty.required && jsonSchema.required !== undefined) {
+                 jsonSchema.required.push(propertyName);
+             }
+             return jsonSchema;
+         }, {
+             type: "object",
+             properties: {},
+             required: [],
+             additionalProperties: {},
+         });
+     }
+     if (schema.type === "array") {
          return {
-             type: schema.type ?? "string",
+             type: "array",
+             items: convertOpenAPISchemaToJSONSchema(schema.items ?? {}, spec),
+             minItems: schema.minItems,
+             maxItems: schema.maxItems,
          };
      }
-     return Object.keys(schema.properties ?? {}).reduce((jsonSchema, propertyName) => {
-         if (!schema.properties) {
-             return jsonSchema;
-         }
-         const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
-         if (openAPIProperty.type === undefined) {
-             return jsonSchema;
-         }
-         // eslint-disable-next-line no-param-reassign
-         jsonSchema.properties[propertyName] = {
-             // eslint-disable-next-line @typescript-eslint/no-explicit-any
-             type: openAPIProperty.type,
-             description: openAPIProperty.description,
-         };
-         if (openAPIProperty.required && jsonSchema.required !== undefined) {
-             jsonSchema.required.push(propertyName);
-         }
-         return jsonSchema;
-     }, {
-         type: "object",
-         properties: {},
-         required: [],
-         additionalProperties: {},
-     });
+     return {
+         type: schema.type ?? "string",
+     };
  }
+ exports.convertOpenAPISchemaToJSONSchema = convertOpenAPISchemaToJSONSchema;
  /**
   * Converts an OpenAPI specification to OpenAI functions.
   * @param spec The OpenAPI specification to convert.
package/dist/chains/openai_functions/openapi.d.ts CHANGED
@@ -1,10 +1,19 @@
+ import { JsonSchema7Type } from "zod-to-json-schema/src/parseDef.js";
  import type { OpenAPIV3_1 } from "openapi-types";
+ import { OpenAPISpec } from "../../util/openapi.js";
  import { BaseChain } from "../base.js";
  import { LLMChainInput } from "../llm_chain.js";
  import { BasePromptTemplate } from "../../prompts/base.js";
  import { SequentialChain } from "../sequential_chain.js";
  import { BaseChatModel } from "../../chat_models/base.js";
  import { BaseFunctionCallOptions } from "../../base_language/index.js";
+ /**
+  * Converts OpenAPI schemas to JSON schema format.
+  * @param schema The OpenAPI schema to convert.
+  * @param spec The OpenAPI specification that contains the schema.
+  * @returns The JSON schema representation of the OpenAPI schema.
+  */
+ export declare function convertOpenAPISchemaToJSONSchema(schema: OpenAPIV3_1.SchemaObject, spec: OpenAPISpec): JsonSchema7Type;
  /**
   * Type representing the options for creating an OpenAPI chain.
   */
package/dist/chains/openai_functions/openapi.js CHANGED
@@ -118,36 +118,40 @@ function convertOpenAPIParamsToJSONSchema(params, spec) {
   * @param spec The OpenAPI specification that contains the schema.
   * @returns The JSON schema representation of the OpenAPI schema.
   */
- function convertOpenAPISchemaToJSONSchema(schema, spec) {
-     if (schema.type !== "object" && schema.type !== "array") {
+ export function convertOpenAPISchemaToJSONSchema(schema, spec) {
+     if (schema.type === "object") {
+         return Object.keys(schema.properties ?? {}).reduce((jsonSchema, propertyName) => {
+             if (!schema.properties) {
+                 return jsonSchema;
+             }
+             const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
+             if (openAPIProperty.type === undefined) {
+                 return jsonSchema;
+             }
+             // eslint-disable-next-line no-param-reassign
+             jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(openAPIProperty, spec);
+             if (openAPIProperty.required && jsonSchema.required !== undefined) {
+                 jsonSchema.required.push(propertyName);
+             }
+             return jsonSchema;
+         }, {
+             type: "object",
+             properties: {},
+             required: [],
+             additionalProperties: {},
+         });
+     }
+     if (schema.type === "array") {
          return {
-             type: schema.type ?? "string",
+             type: "array",
+             items: convertOpenAPISchemaToJSONSchema(schema.items ?? {}, spec),
+             minItems: schema.minItems,
+             maxItems: schema.maxItems,
          };
      }
-     return Object.keys(schema.properties ?? {}).reduce((jsonSchema, propertyName) => {
-         if (!schema.properties) {
-             return jsonSchema;
-         }
-         const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
-         if (openAPIProperty.type === undefined) {
-             return jsonSchema;
-         }
-         // eslint-disable-next-line no-param-reassign
-         jsonSchema.properties[propertyName] = {
-             // eslint-disable-next-line @typescript-eslint/no-explicit-any
-             type: openAPIProperty.type,
-             description: openAPIProperty.description,
-         };
-         if (openAPIProperty.required && jsonSchema.required !== undefined) {
-             jsonSchema.required.push(propertyName);
-         }
-         return jsonSchema;
-     }, {
-         type: "object",
-         properties: {},
-         required: [],
-         additionalProperties: {},
-     });
+     return {
+         type: schema.type ?? "string",
+     };
  }
  /**
   * Converts an OpenAPI specification to OpenAI functions.
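
`convertOpenAPISchemaToJSONSchema` is now exported, and instead of flattening each property to a bare `{ type, description }` it recurses into nested `object` and `array` schemas (arrays also keep `minItems`/`maxItems`). A hedged sketch of the behavioral difference, using a hypothetical schema literal; `spec` stands in for an `OpenAPISpec` instance (construction elided here) whose `getSchema` resolves any `$ref` properties:

```typescript
import type { OpenAPIV3_1 } from "openapi-types";

// Hypothetical nested schema that 0.0.146 would have flattened.
const schema: OpenAPIV3_1.SchemaObject = {
  type: "object",
  properties: {
    user: { type: "object", properties: { name: { type: "string" } } },
    tags: { type: "array", items: { type: "string" }, maxItems: 10 },
  },
};

// 0.0.146 emitted each property as { type, description }, losing
// user.properties.name and tags.items entirely.
// 0.0.147 calls itself on every property and on array items, so the
// output preserves the nested object shape and converts tags to
// { type: "array", items: { type: "string" }, maxItems: 10 }.
const jsonSchema = convertOpenAPISchemaToJSONSchema(schema, spec);
```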
package/dist/chat_models/base.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { BaseMessage, BasePromptValue, ChatResult, BaseMessageChunk, LLMResult, ChatGenerationChunk, BaseMessageLike } from "../schema/index.js";
  import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
  import { CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
- import { RunnableConfig } from "../schema/runnable.js";
+ import { RunnableConfig } from "../schema/runnable/config.js";
  /**
   * Represents a serialized chat model.
   */
package/dist/chat_models/openai.cjs CHANGED
@@ -388,7 +388,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
      let defaultRole;
      const streamIterable = await this.completionWithRetry(params, options);
      for await (const data of streamIterable) {
-         const choice = data.choices[0];
+         const choice = data?.choices[0];
          if (!choice) {
              continue;
          }
package/dist/chat_models/openai.js CHANGED
@@ -385,7 +385,7 @@ export class ChatOpenAI extends BaseChatModel {
      let defaultRole;
      const streamIterable = await this.completionWithRetry(params, options);
      for await (const data of streamIterable) {
-         const choice = data.choices[0];
+         const choice = data?.choices[0];
          if (!choice) {
              continue;
          }
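
The same one-character hardening lands in every OpenAI streaming loop in this release (`ChatOpenAI` here, and `OpenAIChat` and `OpenAI` further down, in both builds): `data?.choices[0]` tolerates an undefined event from the stream iterator, and the existing `!choice` check then skips it. Note the optional chaining guards only `data` itself, not a present-but-`choices`-less event. A sketch of the pattern in isolation:

```typescript
// Sketch: if the async iterator yields undefined or null, data?.choices[0]
// evaluates to undefined instead of throwing a TypeError, and the event is
// skipped by the existing guard below.
for await (const data of streamIterable) {
  const choice = data?.choices[0];
  if (!choice) {
    continue; // skip empty or missing stream events
  }
  // ...process choice.delta / choice.text as before
}
```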
package/dist/experimental/llms/bittensor.cjs ADDED
@@ -0,0 +1,141 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.NIBittensorLLM = void 0;
+ const base_js_1 = require("../../llms/base.cjs");
+ /**
+  * Class representing the Neural Internet language model powered by Bittensor,
+  * a decentralized network full of different AI models.
+  * To analyze API keys and logs of your usage, visit
+  * https://api.neuralinternet.ai/api-keys
+  * https://api.neuralinternet.ai/logs
+  */
+ class NIBittensorLLM extends base_js_1.LLM {
+     static lc_name() {
+         return "NIBittensorLLM";
+     }
+     constructor(fields) {
+         super(fields ?? {});
+         Object.defineProperty(this, "systemPrompt", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "topResponses", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.systemPrompt =
+             fields?.systemPrompt ??
+                 "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+         this.topResponses = fields?.topResponses;
+     }
+     _llmType() {
+         return "NIBittensorLLM";
+     }
+     /** Call out to NIBittensorLLM's complete endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+
+      Returns: The string generated by the model.
+
+      Example:
+          let response = niBittensorLLM.call("Tell me a joke.");
+      */
+     async _call(prompt) {
+         try {
+             // Retrieve an API key
+             const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+             if (!apiKeyResponse.ok) {
+                 throw new Error("Network response was not ok");
+             }
+             const apiKeysData = await apiKeyResponse.json();
+             const apiKey = apiKeysData[0].api_key;
+             const headers = {
+                 "Content-Type": "application/json",
+                 Authorization: `Bearer ${apiKey}`,
+                 "Endpoint-Version": "2023-05-19",
+             };
+             if (this.topResponses !== undefined) {
+                 this.topResponses = this.topResponses > 100 ? 100 : this.topResponses;
+             }
+             else {
+                 this.topResponses = 0;
+             }
+             const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+             if (!minerResponse.ok) {
+                 throw new Error("Network response was not ok");
+             }
+             const uids = await minerResponse.json();
+             if (Array.isArray(uids) && uids.length && this.topResponses === 0) {
+                 for (const uid of uids) {
+                     try {
+                         const payload = {
+                             uids: [uid],
+                             messages: [
+                                 { role: "system", content: this.systemPrompt },
+                                 { role: "user", content: prompt },
+                             ],
+                         };
+                         const response = await fetch("https://test.neuralinternet.ai/chat", {
+                             method: "POST",
+                             headers,
+                             body: JSON.stringify(payload),
+                         });
+                         if (!response.ok) {
+                             throw new Error("Network response was not ok");
+                         }
+                         const chatData = await response.json();
+                         if (chatData.choices) {
+                             return chatData.choices[0].message.content;
+                         }
+                     }
+                     catch (error) {
+                         continue;
+                     }
+                 }
+             }
+             // For the top miners based on the bittensor response
+             if (this.topResponses === 0) {
+                 this.topResponses = 10;
+             }
+             const payload = {
+                 top_n: this.topResponses,
+                 messages: [
+                     { role: "system", content: this.systemPrompt },
+                     { role: "user", content: prompt },
+                 ],
+             };
+             const response = await fetch("https://test.neuralinternet.ai/chat", {
+                 method: "POST",
+                 headers,
+                 body: JSON.stringify(payload),
+             });
+             if (!response.ok) {
+                 throw new Error("Network response was not ok");
+             }
+             const responseData = await response.json();
+             if (this.topResponses) {
+                 return responseData;
+             }
+             else if (responseData.choices) {
+                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                 const temp = responseData.choices;
+                 return temp[0].message.content;
+             }
+         }
+         catch (error) {
+             return "Sorry I am unable to provide response now, Please try again later.";
+         }
+         return "default";
+     }
+     identifyingParams() {
+         return {
+             systemPrompt: this.systemPrompt,
+             topResponses: this.topResponses,
+         };
+     }
+ }
+ exports.NIBittensorLLM = NIBittensorLLM;
package/dist/experimental/llms/bittensor.d.ts ADDED
@@ -0,0 +1,33 @@
+ import { BaseLLMParams, LLM } from "../../llms/base.js";
+ export interface BittensorInput extends BaseLLMParams {
+     systemPrompt?: string | null | undefined;
+     topResponses?: number | undefined;
+ }
+ /**
+  * Class representing the Neural Internet language model powered by Bittensor,
+  * a decentralized network full of different AI models.
+  * To analyze API keys and logs of your usage, visit
+  * https://api.neuralinternet.ai/api-keys
+  * https://api.neuralinternet.ai/logs
+  */
+ export declare class NIBittensorLLM extends LLM implements BittensorInput {
+     static lc_name(): string;
+     systemPrompt: string;
+     topResponses: number | undefined;
+     constructor(fields?: BittensorInput);
+     _llmType(): string;
+     /** Call out to NIBittensorLLM's complete endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+
+      Returns: The string generated by the model.
+
+      Example:
+          let response = niBittensorLLM.call("Tell me a joke.");
+      */
+     _call(prompt: string): Promise<string>;
+     identifyingParams(): {
+         systemPrompt: string | null | undefined;
+         topResponses: number | undefined;
+     };
+ }
package/dist/experimental/llms/bittensor.js ADDED
@@ -0,0 +1,137 @@
+ import { LLM } from "../../llms/base.js";
+ /**
+  * Class representing the Neural Internet language model powered by Bittensor,
+  * a decentralized network full of different AI models.
+  * To analyze API keys and logs of your usage, visit
+  * https://api.neuralinternet.ai/api-keys
+  * https://api.neuralinternet.ai/logs
+  */
+ export class NIBittensorLLM extends LLM {
+     static lc_name() {
+         return "NIBittensorLLM";
+     }
+     constructor(fields) {
+         super(fields ?? {});
+         Object.defineProperty(this, "systemPrompt", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "topResponses", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.systemPrompt =
+             fields?.systemPrompt ??
+                 "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+         this.topResponses = fields?.topResponses;
+     }
+     _llmType() {
+         return "NIBittensorLLM";
+     }
+     /** Call out to NIBittensorLLM's complete endpoint.
+      Args:
+          prompt: The prompt to pass into the model.
+
+      Returns: The string generated by the model.
+
+      Example:
+          let response = niBittensorLLM.call("Tell me a joke.");
+      */
+     async _call(prompt) {
+         try {
+             // Retrieve an API key
+             const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+             if (!apiKeyResponse.ok) {
+                 throw new Error("Network response was not ok");
+             }
+             const apiKeysData = await apiKeyResponse.json();
+             const apiKey = apiKeysData[0].api_key;
+             const headers = {
+                 "Content-Type": "application/json",
+                 Authorization: `Bearer ${apiKey}`,
+                 "Endpoint-Version": "2023-05-19",
+             };
+             if (this.topResponses !== undefined) {
+                 this.topResponses = this.topResponses > 100 ? 100 : this.topResponses;
+             }
+             else {
+                 this.topResponses = 0;
+             }
+             const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+             if (!minerResponse.ok) {
+                 throw new Error("Network response was not ok");
+             }
+             const uids = await minerResponse.json();
+             if (Array.isArray(uids) && uids.length && this.topResponses === 0) {
+                 for (const uid of uids) {
+                     try {
+                         const payload = {
+                             uids: [uid],
+                             messages: [
+                                 { role: "system", content: this.systemPrompt },
+                                 { role: "user", content: prompt },
+                             ],
+                         };
+                         const response = await fetch("https://test.neuralinternet.ai/chat", {
+                             method: "POST",
+                             headers,
+                             body: JSON.stringify(payload),
+                         });
+                         if (!response.ok) {
+                             throw new Error("Network response was not ok");
+                         }
+                         const chatData = await response.json();
+                         if (chatData.choices) {
+                             return chatData.choices[0].message.content;
+                         }
+                     }
+                     catch (error) {
+                         continue;
+                     }
+                 }
+             }
+             // For the top miners based on the bittensor response
+             if (this.topResponses === 0) {
+                 this.topResponses = 10;
+             }
+             const payload = {
+                 top_n: this.topResponses,
+                 messages: [
+                     { role: "system", content: this.systemPrompt },
+                     { role: "user", content: prompt },
+                 ],
+             };
+             const response = await fetch("https://test.neuralinternet.ai/chat", {
+                 method: "POST",
+                 headers,
+                 body: JSON.stringify(payload),
+             });
+             if (!response.ok) {
+                 throw new Error("Network response was not ok");
+             }
+             const responseData = await response.json();
+             if (this.topResponses) {
+                 return responseData;
+             }
+             else if (responseData.choices) {
+                 // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                 const temp = responseData.choices;
+                 return temp[0].message.content;
+             }
+         }
+         catch (error) {
+             return "Sorry I am unable to provide response now, Please try again later.";
+         }
+         return "default";
+     }
+     identifyingParams() {
+         return {
+             systemPrompt: this.systemPrompt,
+             topResponses: this.topResponses,
+         };
+     }
+ }
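
A minimal usage sketch for the new experimental model. The deep-import path matches the new shim files and the entrypoint registered in `import_constants` below; note that `topResponses` is clamped to 100 and, when set, makes `_call` return the raw multi-miner response rather than a single string:

```typescript
import { NIBittensorLLM } from "langchain/experimental/llms/bittensor";

// Both fields are optional; systemPrompt falls back to the built-in default.
const model = new NIBittensorLLM({
  systemPrompt: "You are a terse assistant.",
});

// Per the class's own doc comment, call() resolves to the first successful
// miner response, or to a fallback apology string if every request fails.
const response = await model.call("Tell me a joke.");
console.log(response);
```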
package/dist/hub.d.ts CHANGED
@@ -1,5 +1,5 @@
  import { ClientConfiguration, HubPushOptions } from "langchainhub";
- import { Runnable } from "./schema/runnable.js";
+ import { Runnable } from "./schema/runnable/index.js";
  /**
   * Push a prompt to the hub.
   * If the specified repo doesn't already exist, it will be created.
package/dist/llms/base.d.ts CHANGED
@@ -1,7 +1,7 @@
  import { BaseCache, BaseMessage, BasePromptValue, GenerationChunk, LLMResult } from "../schema/index.js";
  import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
  import { BaseCallbackConfig, CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
- import { RunnableConfig } from "../schema/runnable.js";
+ import { RunnableConfig } from "../schema/runnable/config.js";
  export type SerializedLLM = {
      _model: string;
      _type: string;
package/dist/llms/openai-chat.cjs CHANGED
@@ -330,7 +330,7 @@ class OpenAIChat extends base_js_1.LLM {
      };
      const stream = await this.completionWithRetry(params, options);
      for await (const data of stream) {
-         const choice = data.choices[0];
+         const choice = data?.choices[0];
          if (!choice) {
              continue;
          }
package/dist/llms/openai-chat.js CHANGED
@@ -327,7 +327,7 @@ export class OpenAIChat extends LLM {
      };
      const stream = await this.completionWithRetry(params, options);
      for await (const data of stream) {
-         const choice = data.choices[0];
+         const choice = data?.choices[0];
          if (!choice) {
              continue;
          }
package/dist/llms/openai.cjs CHANGED
@@ -436,7 +436,7 @@ class OpenAI extends base_js_1.BaseLLM {
      };
      const stream = await this.completionWithRetry(params, options);
      for await (const data of stream) {
-         const choice = data.choices[0];
+         const choice = data?.choices[0];
          if (!choice) {
              continue;
          }
package/dist/llms/openai.js CHANGED
@@ -433,7 +433,7 @@ export class OpenAI extends BaseLLM {
      };
      const stream = await this.completionWithRetry(params, options);
      for await (const data of stream) {
-         const choice = data.choices[0];
+         const choice = data?.choices[0];
          if (!choice) {
              continue;
          }
package/dist/load/import_constants.cjs CHANGED
@@ -126,4 +126,5 @@ exports.optionalImportEntrypoints = [
      "langchain/hub",
      "langchain/experimental/multimodal_embeddings/googlevertexai",
      "langchain/experimental/chat_models/anthropic_functions",
+     "langchain/experimental/llms/bittensor",
  ];
package/dist/load/import_constants.js CHANGED
@@ -123,4 +123,5 @@ export const optionalImportEntrypoints = [
      "langchain/hub",
      "langchain/experimental/multimodal_embeddings/googlevertexai",
      "langchain/experimental/chat_models/anthropic_functions",
+     "langchain/experimental/llms/bittensor",
  ];
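
Registering the path in `optionalImportEntrypoints` (in both the `.cjs` and `.js` builds) marks it as an optional, lazily resolved entrypoint for the serialization loader rather than a hard dependency. On the consumer side that corresponds to a dynamic import of the exact registered specifier; a sketch (how `load()` consumes the registered list is not shown in this diff):

```typescript
// The registered string must match the dynamic import specifier exactly.
const { NIBittensorLLM } = await import("langchain/experimental/llms/bittensor");
```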
package/dist/load/import_map.cjs CHANGED
@@ -68,7 +68,7 @@ exports.schema__document = __importStar(require("../schema/document.cjs"));
  exports.schema__output_parser = __importStar(require("../schema/output_parser.cjs"));
  exports.schema__query_constructor = __importStar(require("../schema/query_constructor.cjs"));
  exports.schema__retriever = __importStar(require("../schema/retriever.cjs"));
- exports.schema__runnable = __importStar(require("../schema/runnable.cjs"));
+ exports.schema__runnable = __importStar(require("../schema/runnable/index.cjs"));
  exports.schema__storage = __importStar(require("../schema/storage.cjs"));
  exports.callbacks = __importStar(require("../callbacks/index.cjs"));
  exports.output_parsers = __importStar(require("../output_parsers/index.cjs"));
package/dist/load/import_map.d.ts CHANGED
@@ -40,7 +40,7 @@ export * as schema__document from "../schema/document.js";
  export * as schema__output_parser from "../schema/output_parser.js";
  export * as schema__query_constructor from "../schema/query_constructor.js";
  export * as schema__retriever from "../schema/retriever.js";
- export * as schema__runnable from "../schema/runnable.js";
+ export * as schema__runnable from "../schema/runnable/index.js";
  export * as schema__storage from "../schema/storage.js";
  export * as callbacks from "../callbacks/index.js";
  export * as output_parsers from "../output_parsers/index.js";
package/dist/load/import_map.js CHANGED
@@ -41,7 +41,7 @@ export * as schema__document from "../schema/document.js";
  export * as schema__output_parser from "../schema/output_parser.js";
  export * as schema__query_constructor from "../schema/query_constructor.js";
  export * as schema__retriever from "../schema/retriever.js";
- export * as schema__runnable from "../schema/runnable.js";
+ export * as schema__runnable from "../schema/runnable/index.js";
  export * as schema__storage from "../schema/storage.js";
  export * as callbacks from "../callbacks/index.js";
  export * as output_parsers from "../output_parsers/index.js";