langchain 0.0.145 → 0.0.147
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/base_language/index.cjs +2 -2
- package/dist/base_language/index.d.ts +2 -1
- package/dist/base_language/index.js +1 -1
- package/dist/chains/api/api_chain.d.ts +1 -1
- package/dist/chains/base.d.ts +1 -1
- package/dist/chains/openai_functions/extraction.d.ts +1 -1
- package/dist/chains/openai_functions/openapi.cjs +32 -27
- package/dist/chains/openai_functions/openapi.d.ts +10 -1
- package/dist/chains/openai_functions/openapi.js +31 -27
- package/dist/chains/openai_functions/structured_output.d.ts +1 -1
- package/dist/chains/openai_functions/tagging.d.ts +1 -1
- package/dist/chat_models/anthropic.d.ts +2 -2
- package/dist/chat_models/base.d.ts +1 -1
- package/dist/chat_models/openai.cjs +1 -1
- package/dist/chat_models/openai.js +1 -1
- package/dist/experimental/llms/bittensor.cjs +141 -0
- package/dist/experimental/llms/bittensor.d.ts +33 -0
- package/dist/experimental/llms/bittensor.js +137 -0
- package/dist/hub.cjs +14 -0
- package/dist/hub.d.ts +15 -1
- package/dist/hub.js +14 -0
- package/dist/llms/base.d.ts +1 -1
- package/dist/llms/openai-chat.cjs +1 -1
- package/dist/llms/openai-chat.js +1 -1
- package/dist/llms/openai.cjs +1 -1
- package/dist/llms/openai.js +1 -1
- package/dist/load/import_constants.cjs +2 -0
- package/dist/load/import_constants.js +2 -0
- package/dist/load/import_map.cjs +1 -1
- package/dist/load/import_map.d.ts +1 -1
- package/dist/load/import_map.js +1 -1
- package/dist/load/index.cjs +2 -1
- package/dist/load/index.js +2 -1
- package/dist/prompts/base.cjs +2 -2
- package/dist/prompts/base.d.ts +1 -1
- package/dist/prompts/base.js +1 -1
- package/dist/prompts/chat.cjs +2 -2
- package/dist/prompts/chat.d.ts +1 -1
- package/dist/prompts/chat.js +1 -1
- package/dist/schema/document.cjs +2 -2
- package/dist/schema/document.d.ts +1 -1
- package/dist/schema/document.js +1 -1
- package/dist/schema/index.cjs +5 -22
- package/dist/schema/index.d.ts +1 -5
- package/dist/schema/index.js +5 -22
- package/dist/schema/output_parser.cjs +2 -2
- package/dist/schema/output_parser.d.ts +2 -1
- package/dist/schema/output_parser.js +1 -1
- package/dist/schema/retriever.cjs +2 -2
- package/dist/schema/retriever.d.ts +2 -1
- package/dist/schema/retriever.js +1 -1
- package/dist/schema/runnable/config.cjs +8 -0
- package/dist/schema/runnable/config.d.ts +3 -0
- package/dist/schema/runnable/config.js +4 -0
- package/dist/schema/{runnable.cjs → runnable/index.cjs} +290 -101
- package/dist/schema/{runnable.d.ts → runnable/index.d.ts} +127 -41
- package/dist/schema/{runnable.js → runnable/index.js} +284 -99
- package/dist/tools/base.d.ts +1 -1
- package/dist/types/googlevertexai-types.d.ts +1 -1
- package/dist/util/async_caller.cjs +35 -25
- package/dist/util/async_caller.d.ts +8 -0
- package/dist/util/async_caller.js +35 -25
- package/dist/vectorstores/pinecone.cjs +30 -22
- package/dist/vectorstores/pinecone.d.ts +3 -1
- package/dist/vectorstores/pinecone.js +30 -22
- package/dist/vectorstores/vectara.cjs +20 -23
- package/dist/vectorstores/vectara.d.ts +9 -2
- package/dist/vectorstores/vectara.js +20 -23
- package/dist/vectorstores/voy.cjs +158 -0
- package/dist/vectorstores/voy.d.ts +73 -0
- package/dist/vectorstores/voy.js +154 -0
- package/experimental/llms/bittensor.cjs +1 -0
- package/experimental/llms/bittensor.d.ts +1 -0
- package/experimental/llms/bittensor.js +1 -0
- package/package.json +24 -68
- package/schema/runnable.cjs +1 -1
- package/schema/runnable.d.ts +1 -1
- package/schema/runnable.js +1 -1
- package/vectorstores/voy.cjs +1 -0
- package/vectorstores/voy.d.ts +1 -0
- package/vectorstores/voy.js +1 -0
- package/chat_models.cjs +0 -1
- package/chat_models.d.ts +0 -1
- package/chat_models.js +0 -1
- package/dist/chat_models/index.cjs +0 -11
- package/dist/chat_models/index.d.ts +0 -3
- package/dist/chat_models/index.js +0 -4
- package/dist/document_loaders/index.cjs +0 -40
- package/dist/document_loaders/index.d.ts +0 -18
- package/dist/document_loaders/index.js +0 -18
- package/dist/embeddings/index.cjs +0 -12
- package/dist/embeddings/index.d.ts +0 -4
- package/dist/embeddings/index.js +0 -5
- package/dist/index.cjs +0 -12
- package/dist/index.d.ts +0 -3
- package/dist/index.js +0 -4
- package/dist/llms/index.cjs +0 -18
- package/dist/llms/index.d.ts +0 -6
- package/dist/llms/index.js +0 -7
- package/dist/retrievers/index.cjs +0 -14
- package/dist/retrievers/index.d.ts +0 -5
- package/dist/retrievers/index.js +0 -6
- package/dist/vectorstores/index.cjs +0 -17
- package/dist/vectorstores/index.d.ts +0 -6
- package/dist/vectorstores/index.js +0 -7
- package/document_loaders.cjs +0 -1
- package/document_loaders.d.ts +0 -1
- package/document_loaders.js +0 -1
- package/embeddings.cjs +0 -1
- package/embeddings.d.ts +0 -1
- package/embeddings.js +0 -1
- package/llms.cjs +0 -1
- package/llms.d.ts +0 -1
- package/llms.js +0 -1
- package/retrievers.cjs +0 -1
- package/retrievers.d.ts +0 -1
- package/retrievers.js +0 -1
- package/vectorstores.cjs +0 -1
- package/vectorstores.d.ts +0 -1
- package/vectorstores.js +0 -1
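
Taken together, the list shows four themes: package/dist/schema/runnable.* splits into a runnable/ directory (index.* plus a new config.* module), the OpenAPI chain's schema converter becomes a public export, new experimental NIBittensorLLM and Voy integrations land with their own subpath shims, and the old barrel entrypoints (chat_models, document_loaders, embeddings, llms, retrievers, vectorstores and their dist counterparts) are removed. The one-line changes to package/schema/runnable.* suggest the public subpath is re-pointed at the new directory rather than removed; a minimal TypeScript sketch of the consumer-facing effect, under that assumption:

// Hypothetical consumer code: the documented subpath should be unaffected by
// the dist-level move, since package/schema/runnable.* still re-exports it.
import { Runnable } from "langchain/schema/runnable";

// Only imports that reached into compiled output would need a path update, e.g.
// "langchain/dist/schema/runnable.js" -> "langchain/dist/schema/runnable/index.js".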
package/dist/base_language/index.cjs
CHANGED
@@ -5,14 +5,14 @@ const index_js_1 = require("../schema/index.cjs");
 const async_caller_js_1 = require("../util/async_caller.cjs");
 const count_tokens_js_1 = require("./count_tokens.cjs");
 const tiktoken_js_1 = require("../util/tiktoken.cjs");
-const
+const index_js_2 = require("../schema/runnable/index.cjs");
 const base_js_1 = require("../prompts/base.cjs");
 const chat_js_1 = require("../prompts/chat.cjs");
 const getVerbosity = () => false;
 /**
  * Base class for language models, chains, tools.
  */
-class BaseLangChain extends
+class BaseLangChain extends index_js_2.Runnable {
     get lc_attributes() {
         return {
             callbacks: undefined,
package/dist/base_language/index.d.ts
CHANGED
@@ -2,7 +2,8 @@ import type { OpenAI as OpenAIClient } from "openai";
 import { BaseMessage, BaseMessageLike, BasePromptValue, LLMResult } from "../schema/index.js";
 import { BaseCallbackConfig, CallbackManager, Callbacks } from "../callbacks/manager.js";
 import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js";
-import { Runnable
+import { Runnable } from "../schema/runnable/index.js";
+import { RunnableConfig } from "../schema/runnable/config.js";
 export type SerializedLLM = {
     _model: string;
     _type: string;
package/dist/base_language/index.js
CHANGED
@@ -2,7 +2,7 @@ import { coerceMessageLikeToMessage, } from "../schema/index.js";
 import { AsyncCaller } from "../util/async_caller.js";
 import { getModelNameForTiktoken } from "./count_tokens.js";
 import { encodingForModel } from "../util/tiktoken.js";
-import { Runnable } from "../schema/runnable.js";
+import { Runnable } from "../schema/runnable/index.js";
 import { StringPromptValue } from "../prompts/base.js";
 import { ChatPromptValue } from "../prompts/chat.js";
 const getVerbosity = () => false;
package/dist/chains/api/api_chain.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { LLMChain } from "../llm_chain.js";
 import { BaseLanguageModel } from "../../base_language/index.js";
 import { CallbackManagerForChainRun } from "../../callbacks/manager.js";
 import { ChainValues } from "../../schema/index.js";
-import { BasePromptTemplate } from "../../
+import { BasePromptTemplate } from "../../prompts/base.js";
 /**
  * Interface that extends ChainInputs and defines additional input
  * parameters specific to an APIChain.
package/dist/chains/base.d.ts
CHANGED
@@ -3,7 +3,7 @@ import { ChainValues } from "../schema/index.js";
 import { CallbackManagerForChainRun, CallbackManager, Callbacks } from "../callbacks/manager.js";
 import { SerializedBaseChain } from "./serde.js";
 import { BaseLangChain, BaseLangChainParams } from "../base_language/index.js";
-import { RunnableConfig } from "../schema/runnable.js";
+import { RunnableConfig } from "../schema/runnable/config.js";
 export type LoadValues = Record<string, any>;
 export interface ChainInputs extends BaseLangChainParams {
     memory?: BaseMemory;
package/dist/chains/openai_functions/extraction.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { z } from "zod";
 import { FunctionParameters } from "../../output_parsers/openai_functions.js";
 import { LLMChain } from "../llm_chain.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Function that creates an extraction chain using the provided JSON schema.
package/dist/chains/openai_functions/openapi.cjs
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.createOpenAPIChain = void 0;
+exports.createOpenAPIChain = exports.convertOpenAPISchemaToJSONSchema = void 0;
 const openapi_js_1 = require("../../util/openapi.cjs");
 const base_js_1 = require("../base.cjs");
 const llm_chain_js_1 = require("../llm_chain.cjs");
@@ -122,36 +122,41 @@ function convertOpenAPIParamsToJSONSchema(params, spec) {
  * @returns The JSON schema representation of the OpenAPI schema.
  */
 function convertOpenAPISchemaToJSONSchema(schema, spec) {
-    if (schema.type
+    if (schema.type === "object") {
+        return Object.keys(schema.properties ?? {}).reduce((jsonSchema, propertyName) => {
+            if (!schema.properties) {
+                return jsonSchema;
+            }
+            const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
+            if (openAPIProperty.type === undefined) {
+                return jsonSchema;
+            }
+            // eslint-disable-next-line no-param-reassign
+            jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(openAPIProperty, spec);
+            if (openAPIProperty.required && jsonSchema.required !== undefined) {
+                jsonSchema.required.push(propertyName);
+            }
+            return jsonSchema;
+        }, {
+            type: "object",
+            properties: {},
+            required: [],
+            additionalProperties: {},
+        });
+    }
+    if (schema.type === "array") {
         return {
-            type:
+            type: "array",
+            items: convertOpenAPISchemaToJSONSchema(schema.items ?? {}, spec),
+            minItems: schema.minItems,
+            maxItems: schema.maxItems,
         };
     }
-    return
-
-
-    }
-        const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
-        if (openAPIProperty.type === undefined) {
-            return jsonSchema;
-        }
-        // eslint-disable-next-line no-param-reassign
-        jsonSchema.properties[propertyName] = {
-            // eslint-disable-next-line @typescript-eslint/no-explicit-any
-            type: openAPIProperty.type,
-            description: openAPIProperty.description,
-        };
-        if (openAPIProperty.required && jsonSchema.required !== undefined) {
-            jsonSchema.required.push(propertyName);
-        }
-        return jsonSchema;
-    }, {
-        type: "object",
-        properties: {},
-        required: [],
-        additionalProperties: {},
-    });
+    return {
+        type: schema.type ?? "string",
+    };
 }
+exports.convertOpenAPISchemaToJSONSchema = convertOpenAPISchemaToJSONSchema;
 /**
  * Converts an OpenAPI specification to OpenAI functions.
  * @param spec The OpenAPI specification to convert.
package/dist/chains/openai_functions/openapi.d.ts
CHANGED
@@ -1,10 +1,19 @@
+import { JsonSchema7Type } from "zod-to-json-schema/src/parseDef.js";
 import type { OpenAPIV3_1 } from "openapi-types";
+import { OpenAPISpec } from "../../util/openapi.js";
 import { BaseChain } from "../base.js";
 import { LLMChainInput } from "../llm_chain.js";
 import { BasePromptTemplate } from "../../prompts/base.js";
 import { SequentialChain } from "../sequential_chain.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
+/**
+ * Converts OpenAPI schemas to JSON schema format.
+ * @param schema The OpenAPI schema to convert.
+ * @param spec The OpenAPI specification that contains the schema.
+ * @returns The JSON schema representation of the OpenAPI schema.
+ */
+export declare function convertOpenAPISchemaToJSONSchema(schema: OpenAPIV3_1.SchemaObject, spec: OpenAPISpec): JsonSchema7Type;
 /**
  * Type representing the options for creating an OpenAPI chain.
  */
package/dist/chains/openai_functions/openapi.js
CHANGED
@@ -118,36 +118,40 @@ function convertOpenAPIParamsToJSONSchema(params, spec) {
  * @param spec The OpenAPI specification that contains the schema.
  * @returns The JSON schema representation of the OpenAPI schema.
  */
-function convertOpenAPISchemaToJSONSchema(schema, spec) {
-    if (schema.type
+export function convertOpenAPISchemaToJSONSchema(schema, spec) {
+    if (schema.type === "object") {
+        return Object.keys(schema.properties ?? {}).reduce((jsonSchema, propertyName) => {
+            if (!schema.properties) {
+                return jsonSchema;
+            }
+            const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
+            if (openAPIProperty.type === undefined) {
+                return jsonSchema;
+            }
+            // eslint-disable-next-line no-param-reassign
+            jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(openAPIProperty, spec);
+            if (openAPIProperty.required && jsonSchema.required !== undefined) {
+                jsonSchema.required.push(propertyName);
+            }
+            return jsonSchema;
+        }, {
+            type: "object",
+            properties: {},
+            required: [],
+            additionalProperties: {},
+        });
+    }
+    if (schema.type === "array") {
         return {
-            type:
+            type: "array",
+            items: convertOpenAPISchemaToJSONSchema(schema.items ?? {}, spec),
+            minItems: schema.minItems,
+            maxItems: schema.maxItems,
         };
     }
-    return
-
-
-    }
-        const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
-        if (openAPIProperty.type === undefined) {
-            return jsonSchema;
-        }
-        // eslint-disable-next-line no-param-reassign
-        jsonSchema.properties[propertyName] = {
-            // eslint-disable-next-line @typescript-eslint/no-explicit-any
-            type: openAPIProperty.type,
-            description: openAPIProperty.description,
-        };
-        if (openAPIProperty.required && jsonSchema.required !== undefined) {
-            jsonSchema.required.push(propertyName);
-        }
-        return jsonSchema;
-    }, {
-        type: "object",
-        properties: {},
-        required: [],
-        additionalProperties: {},
-    });
+    return {
+        type: schema.type ?? "string",
+    };
 }
 /**
  * Converts an OpenAPI specification to OpenAI functions.
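
Because convertOpenAPISchemaToJSONSchema is now exported (and declared in the d.ts above), it can be called directly. A minimal sketch, hypothetical in its import paths (the diff only shows the module itself, not which public entrypoint re-exports it):

// Assumed paths, mirroring the compiled layout above rather than a documented entrypoint.
import { convertOpenAPISchemaToJSONSchema } from "langchain/dist/chains/openai_functions/openapi.js";
import { OpenAPISpec } from "langchain/dist/util/openapi.js";
import type { OpenAPIV3_1 } from "openapi-types";

declare const spec: OpenAPISpec; // assumed: an already-parsed OpenAPI 3.1 document
declare const schema: OpenAPIV3_1.SchemaObject;

// Per the new implementation: object schemas recurse into their properties,
// array schemas recurse into items (carrying minItems/maxItems), and anything
// else falls back to { type: schema.type ?? "string" }.
const jsonSchema = convertOpenAPISchemaToJSONSchema(schema, spec);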
package/dist/chains/openai_functions/structured_output.d.ts
CHANGED
@@ -7,7 +7,7 @@ import { BasePromptTemplate } from "../../prompts/index.js";
 import { BaseLLMOutputParser } from "../../schema/output_parser.js";
 import { OutputFunctionsParser } from "../../output_parsers/openai_functions.js";
 import { ChatGeneration } from "../../schema/index.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the input for creating a structured output chain. It
package/dist/chains/openai_functions/tagging.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { z } from "zod";
 import { PromptTemplate } from "../../prompts/prompt.js";
 import { FunctionParameters } from "../../output_parsers/openai_functions.js";
 import { LLMChain, LLMChainInput } from "../llm_chain.js";
-import { BaseChatModel } from "../../chat_models/
+import { BaseChatModel } from "../../chat_models/base.js";
 import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the options for creating a tagging chain.
package/dist/chat_models/anthropic.d.ts
CHANGED
@@ -103,9 +103,9 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
     model: (string & {}) | "claude-2" | "claude-instant-1";
     temperature?: number | undefined;
     top_p?: number | undefined;
+    top_k?: number | undefined;
     max_tokens_to_sample: number;
     stop_sequences?: string[] | undefined;
-    top_k?: number | undefined;
     model_name: string;
 };
 /**
@@ -117,9 +117,9 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
     model: (string & {}) | "claude-2" | "claude-instant-1";
     temperature?: number | undefined;
     top_p?: number | undefined;
+    top_k?: number | undefined;
     max_tokens_to_sample: number;
     stop_sequences?: string[] | undefined;
-    top_k?: number | undefined;
     model_name: string;
 };
 _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
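
The ChatAnthropic typing change is purely positional: top_k moves above max_tokens_to_sample in both inferred shapes, with no fields added or removed.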
package/dist/chat_models/base.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { BaseMessage, BasePromptValue, ChatResult, BaseMessageChunk, LLMResult, ChatGenerationChunk, BaseMessageLike } from "../schema/index.js";
 import { BaseLanguageModel, BaseLanguageModelCallOptions, BaseLanguageModelInput, BaseLanguageModelParams } from "../base_language/index.js";
 import { CallbackManagerForLLMRun, Callbacks } from "../callbacks/manager.js";
-import { RunnableConfig } from "../schema/runnable.js";
+import { RunnableConfig } from "../schema/runnable/config.js";
 /**
  * Represents a serialized chat model.
  */
package/dist/chat_models/openai.cjs
CHANGED
@@ -388,7 +388,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
         let defaultRole;
         const streamIterable = await this.completionWithRetry(params, options);
         for await (const data of streamIterable) {
-            const choice = data
+            const choice = data?.choices[0];
             if (!choice) {
                 continue;
             }
package/dist/chat_models/openai.js
CHANGED
@@ -385,7 +385,7 @@ export class ChatOpenAI extends BaseChatModel {
         let defaultRole;
         const streamIterable = await this.completionWithRetry(params, options);
         for await (const data of streamIterable) {
-            const choice = data
+            const choice = data?.choices[0];
             if (!choice) {
                 continue;
             }
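
In both compiled variants of ChatOpenAI, the streaming loop now reads the first choice through optional chaining, so a null or malformed chunk from completionWithRetry falls through to the existing if (!choice) continue; guard instead of throwing.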
package/dist/experimental/llms/bittensor.cjs
ADDED
@@ -0,0 +1,141 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.NIBittensorLLM = void 0;
+const base_js_1 = require("../../llms/base.cjs");
+/**
+ * Class representing the Neural Internet language model powerd by Bittensor, a decentralized network
+ * full of different AI models.
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+class NIBittensorLLM extends base_js_1.LLM {
+    static lc_name() {
+        return "NIBittensorLLM";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "systemPrompt", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "topResponses", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.systemPrompt =
+            fields?.systemPrompt ??
+                "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+        this.topResponses = fields?.topResponses;
+    }
+    _llmType() {
+        return "NIBittensorLLM";
+    }
+    /** Call out to NIBittensorLLM's complete endpoint.
+     Args:
+         prompt: The prompt to pass into the model.
+
+     Returns: The string generated by the model.
+
+     Example:
+         let response = niBittensorLLM.call("Tell me a joke.");
+     */
+    async _call(prompt) {
+        try {
+            // Retrieve API KEY
+            const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+            if (!apiKeyResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const apiKeysData = await apiKeyResponse.json();
+            const apiKey = apiKeysData[0].api_key;
+            const headers = {
+                "Content-Type": "application/json",
+                Authorization: `Bearer ${apiKey}`,
+                "Endpoint-Version": "2023-05-19",
+            };
+            if (this.topResponses !== undefined) {
+                this.topResponses = this.topResponses > 100 ? 100 : this.topResponses;
+            }
+            else {
+                this.topResponses = 0;
+            }
+            const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+            if (!minerResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const uids = await minerResponse.json();
+            if (Array.isArray(uids) && uids.length && this.topResponses === 0) {
+                for (const uid of uids) {
+                    try {
+                        const payload = {
+                            uids: [uid],
+                            messages: [
+                                { role: "system", content: this.systemPrompt },
+                                { role: "user", content: prompt },
+                            ],
+                        };
+                        const response = await fetch("https://test.neuralinternet.ai/chat", {
+                            method: "POST",
+                            headers,
+                            body: JSON.stringify(payload),
+                        });
+                        if (!response.ok) {
+                            throw new Error("Network response was not ok");
+                        }
+                        const chatData = await response.json();
+                        if (chatData.choices) {
+                            return chatData.choices[0].message.content;
+                        }
+                    }
+                    catch (error) {
+                        continue;
+                    }
+                }
+            }
+            // For top miner based on bittensor response
+            if (this.topResponses === 0) {
+                this.topResponses = 10;
+            }
+            const payload = {
+                top_n: this.topResponses,
+                messages: [
+                    { role: "system", content: this.systemPrompt },
+                    { role: "user", content: prompt },
+                ],
+            };
+            const response = await fetch("https://test.neuralinternet.ai/chat", {
+                method: "POST",
+                headers,
+                body: JSON.stringify(payload),
+            });
+            if (!response.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const responseData = await response.json();
+            if (this.topResponses) {
+                return responseData;
+            }
+            else if (responseData.choices) {
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                const temp = responseData.choices;
+                return temp[0].message.content;
+            }
+        }
+        catch (error) {
+            return "Sorry I am unable to provide response now, Please try again later.";
+        }
+        return "default";
+    }
+    identifyingParams() {
+        return {
+            systemPrompt: this.systemPrompt,
+            topResponses: this.topResponses,
+        };
+    }
+}
+exports.NIBittensorLLM = NIBittensorLLM;
package/dist/experimental/llms/bittensor.d.ts
ADDED
@@ -0,0 +1,33 @@
+import { BaseLLMParams, LLM } from "../../llms/base.js";
+export interface BittensorInput extends BaseLLMParams {
+    systemPrompt?: string | null | undefined;
+    topResponses?: number | undefined;
+}
+/**
+ * Class representing the Neural Internet language model powerd by Bittensor, a decentralized network
+ * full of different AI models.
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+export declare class NIBittensorLLM extends LLM implements BittensorInput {
+    static lc_name(): string;
+    systemPrompt: string;
+    topResponses: number | undefined;
+    constructor(fields?: BittensorInput);
+    _llmType(): string;
+    /** Call out to NIBittensorLLM's complete endpoint.
+     Args:
+         prompt: The prompt to pass into the model.
+
+     Returns: The string generated by the model.
+
+     Example:
+         let response = niBittensorLLM.call("Tell me a joke.");
+     */
+    _call(prompt: string): Promise<string>;
+    identifyingParams(): {
+        systemPrompt: string | null | undefined;
+        topResponses: number | undefined;
+    };
+}
package/dist/experimental/llms/bittensor.js
ADDED
@@ -0,0 +1,137 @@
+import { LLM } from "../../llms/base.js";
+/**
+ * Class representing the Neural Internet language model powerd by Bittensor, a decentralized network
+ * full of different AI models.
+ * To analyze API_KEYS and logs of you usage visit
+ * https://api.neuralinternet.ai/api-keys
+ * https://api.neuralinternet.ai/logs
+ */
+export class NIBittensorLLM extends LLM {
+    static lc_name() {
+        return "NIBittensorLLM";
+    }
+    constructor(fields) {
+        super(fields ?? {});
+        Object.defineProperty(this, "systemPrompt", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "topResponses", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.systemPrompt =
+            fields?.systemPrompt ??
+                "You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor. Your task is to provide accurate response based on user prompt";
+        this.topResponses = fields?.topResponses;
+    }
+    _llmType() {
+        return "NIBittensorLLM";
+    }
+    /** Call out to NIBittensorLLM's complete endpoint.
+     Args:
+         prompt: The prompt to pass into the model.
+
+     Returns: The string generated by the model.
+
+     Example:
+         let response = niBittensorLLM.call("Tell me a joke.");
+     */
+    async _call(prompt) {
+        try {
+            // Retrieve API KEY
+            const apiKeyResponse = await fetch("https://test.neuralinternet.ai/admin/api-keys/");
+            if (!apiKeyResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const apiKeysData = await apiKeyResponse.json();
+            const apiKey = apiKeysData[0].api_key;
+            const headers = {
+                "Content-Type": "application/json",
+                Authorization: `Bearer ${apiKey}`,
+                "Endpoint-Version": "2023-05-19",
+            };
+            if (this.topResponses !== undefined) {
+                this.topResponses = this.topResponses > 100 ? 100 : this.topResponses;
+            }
+            else {
+                this.topResponses = 0;
+            }
+            const minerResponse = await fetch("https://test.neuralinternet.ai/top_miner_uids", { headers });
+            if (!minerResponse.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const uids = await minerResponse.json();
+            if (Array.isArray(uids) && uids.length && this.topResponses === 0) {
+                for (const uid of uids) {
+                    try {
+                        const payload = {
+                            uids: [uid],
+                            messages: [
+                                { role: "system", content: this.systemPrompt },
+                                { role: "user", content: prompt },
+                            ],
+                        };
+                        const response = await fetch("https://test.neuralinternet.ai/chat", {
+                            method: "POST",
+                            headers,
+                            body: JSON.stringify(payload),
+                        });
+                        if (!response.ok) {
+                            throw new Error("Network response was not ok");
+                        }
+                        const chatData = await response.json();
+                        if (chatData.choices) {
+                            return chatData.choices[0].message.content;
+                        }
+                    }
+                    catch (error) {
+                        continue;
+                    }
+                }
+            }
+            // For top miner based on bittensor response
+            if (this.topResponses === 0) {
+                this.topResponses = 10;
+            }
+            const payload = {
+                top_n: this.topResponses,
+                messages: [
+                    { role: "system", content: this.systemPrompt },
+                    { role: "user", content: prompt },
+                ],
+            };
+            const response = await fetch("https://test.neuralinternet.ai/chat", {
+                method: "POST",
+                headers,
+                body: JSON.stringify(payload),
+            });
+            if (!response.ok) {
+                throw new Error("Network response was not ok");
+            }
+            const responseData = await response.json();
+            if (this.topResponses) {
+                return responseData;
+            }
+            else if (responseData.choices) {
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                const temp = responseData.choices;
+                return temp[0].message.content;
+            }
+        }
+        catch (error) {
+            return "Sorry I am unable to provide response now, Please try again later.";
+        }
+        return "default";
+    }
+    identifyingParams() {
+        return {
+            systemPrompt: this.systemPrompt,
+            topResponses: this.topResponses,
+        };
+    }
+}
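
The shim files at package/experimental/llms/bittensor.* in the list above indicate a dedicated subpath for the new model. A minimal usage sketch, assuming that subpath resolves as langchain/experimental/llms/bittensor:

import { NIBittensorLLM } from "langchain/experimental/llms/bittensor";

// Both constructor fields are optional: systemPrompt falls back to the Neural
// Internet default baked into the constructor, and topResponses is clamped to 100.
const model = new NIBittensorLLM();

const response = await model.call("What is Bittensor?");
console.log(response);

Note one quirk visible in the implementation: when topResponses is set, _call returns the raw response body rather than a single message string, so the string-typed call() result is only accurate in the default (unset) path.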
package/dist/hub.cjs
CHANGED
@@ -3,11 +3,25 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.pull = exports.push = void 0;
 const langchainhub_1 = require("langchainhub");
 const index_js_1 = require("./load/index.cjs");
+/**
+ * Push a prompt to the hub.
+ * If the specified repo doesn't already exist, it will be created.
+ * @param repoFullName The full name of the repo.
+ * @param runnable The prompt to push.
+ * @param options
+ * @returns The URL of the newly pushed prompt in the hub.
+ */
 async function push(repoFullName, runnable, options) {
     const client = new langchainhub_1.Client(options);
     return client.push(repoFullName, JSON.stringify(runnable), options);
 }
 exports.push = push;
+/**
+ * Pull a prompt from the hub.
+ * @param ownerRepoCommit The name of the repo containing the prompt, as well as an optional commit hash separated by a slash.
+ * @param options
+ * @returns
+ */
 async function pull(ownerRepoCommit, options) {
     const client = new langchainhub_1.Client(options);
     const result = await client.pull(ownerRepoCommit);