notdiamond 1.1.1 → 1.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -2
- package/dist/index.cjs +0 -571
- package/dist/index.d.cts +0 -212
- package/dist/index.d.mts +0 -212
- package/dist/index.d.ts +0 -212
- package/dist/index.mjs +0 -550
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "notdiamond",
   "type": "module",
-  "version": "1.1.1",
+  "version": "1.1.3",
   "author": "not-diamond",
   "license": "MIT",
   "description": "TS/JS client for the NotDiamond API",
@@ -70,6 +70,7 @@
     "decamelize": "^6.0.0",
     "dotenv": "^16.4.5",
     "eventemitter3": "^5.0.1",
+    "form-data": "^4.0.4",
     "langchain": "^0.3.2",
     "langsmith": "^0.1.60",
     "p-finally": "^2.0.1",
@@ -120,7 +121,8 @@
     "@octokit/request": "^9.2.1",
     "@octokit/plugin-paginate-rest": "^11.4.1",
     "@babel/helpers": "^7.26.10",
-    "esbuild": "^0.25.0"
+    "esbuild": "^0.25.0",
+    "form-data": "^4.0.4"
   },
   "engines": {
     "node": ">=20",
package/dist/index.cjs
DELETED
@@ -1,571 +0,0 @@
-'use strict';
-
-const dotenv = require('dotenv');
-const openai = require('@langchain/openai');
-const messages = require('@langchain/core/messages');
-const anthropic = require('@langchain/anthropic');
-const googleGenai = require('@langchain/google-genai');
-const mistralai = require('@langchain/mistralai');
-const chat_models = require('@langchain/core/language_models/chat_models');
-const axios = require('axios');
-const cohere = require('@langchain/cohere');
-const togetherai = require('@langchain/community/chat_models/togetherai');
-
-function _interopDefaultCompat (e) { return e && typeof e === 'object' && 'default' in e ? e.default : e; }
-
-function _interopNamespaceCompat(e) {
-  if (e && typeof e === 'object' && 'default' in e) return e;
-  const n = Object.create(null);
-  if (e) {
-    for (const k in e) {
-      n[k] = e[k];
-    }
-  }
-  n.default = e;
-  return n;
-}
-
-const dotenv__namespace = /*#__PURE__*/_interopNamespaceCompat(dotenv);
-const axios__default = /*#__PURE__*/_interopDefaultCompat(axios);
-
-const version = "1.1.0";
-const packageJson = {
-  version: version};
-
-class ChatPerplexity extends chat_models.BaseChatModel {
-  _generate(messages, options, runManager) {
-    throw new Error(
-      "Method not implemented." + JSON.stringify(messages) + JSON.stringify(options) + JSON.stringify(runManager)
-    );
-  }
-  apiKey;
-  model;
-  constructor({ apiKey, model }) {
-    super({});
-    this.apiKey = apiKey;
-    this.model = model;
-  }
-  _llmType() {
-    return "perplexity";
-  }
-  /**
-   * Invokes the Perplexity model.
-   * @param messages The messages to send to the model.
-   * @returns The results of the model.
-   */
-  async invoke(messages$1) {
-    try {
-      const { data } = await axios__default.post(
-        "https://api.perplexity.ai/chat/completions",
-        {
-          model: this.model,
-          messages: messages$1.map((m) => ({
-            role: m._getType() === "human" ? "user" : m._getType(),
-            content: m.content
-          }))
-        },
-        {
-          headers: {
-            Authorization: `Bearer ${this.apiKey}`
-          }
-        }
-      );
-      return new messages.AIMessage(data.choices[0].message.content);
-    } catch (error) {
-      if (axios__default.isAxiosError(error) && error.response) {
-        throw new Error(`Perplexity API error: ${error.response.statusText}`);
-      }
-      throw error;
-    }
-  }
-}
-
-const SupportedProvider = {
-  OPENAI: "openai",
-  ANTHROPIC: "anthropic",
-  GOOGLE: "google",
-  MISTRAL: "mistral",
-  PERPLEXITY: "perplexity",
-  COHERE: "cohere",
-  TOGETHERAI: "togetherai"
-};
-const SupportedModel = {
-  GPT_3_5_TURBO: "gpt-3.5-turbo",
-  GPT_3_5_TURBO_0125: "gpt-3.5-turbo-0125",
-  GPT_4: "gpt-4",
-  GPT_4_0613: "gpt-4-0613",
-  GPT_4_1106_PREVIEW: "gpt-4-1106-preview",
-  GPT_4_TURBO: "gpt-4-turbo",
-  GPT_4_TURBO_PREVIEW: "gpt-4-turbo-preview",
-  GPT_4_TURBO_2024_04_09: "gpt-4-turbo-2024-04-09",
-  GPT_4O_2024_05_13: "gpt-4o-2024-05-13",
-  GPT_4O_2024_08_06: "gpt-4o-2024-08-06",
-  GPT_4O: "gpt-4o",
-  GPT_4O_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
-  GPT_4O_MINI: "gpt-4o-mini",
-  GPT_4_0125_PREVIEW: "gpt-4-0125-preview",
-  CHATGPT_4O_LATEST: "chatgpt-4o-latest",
-  O1_PREVIEW: "o1-preview",
-  O1_PREVIEW_2024_09_12: "o1-preview-2024-09-12",
-  O1_MINI: "o1-mini",
-  O1_MINI_2024_09_12: "o1-mini-2024-09-12",
-  CLAUDE_2_1: "claude-2.1",
-  CLAUDE_3_OPUS_20240229: "claude-3-opus-20240229",
-  CLAUDE_3_SONNET_20240229: "claude-3-sonnet-20240229",
-  CLAUDE_3_5_SONNET_20240620: "claude-3-5-sonnet-20240620",
-  CLAUDE_3_5_SONNET_20241022: "claude-3-5-sonnet-20241022",
-  CLAUDE_3_5_SONNET_LATEST: "claude-3-5-sonnet-latest",
-  CLAUDE_3_HAIKU_20240307: "claude-3-haiku-20240307",
-  CLAUDE_3_5_HAIKU_20241022: "claude-3-5-haiku-20241022",
-  GEMINI_PRO: "gemini-pro",
-  GEMINI_1_PRO_LATEST: "gemini-1.0-pro-latest",
-  GEMINI_15_PRO_LATEST: "gemini-1.5-pro-latest",
-  GEMINI_15_PRO_EXP_0801: "gemini-1.5-pro-exp-0801",
-  GEMINI_15_FLASH_LATEST: "gemini-1.5-flash-latest",
-  COMMAND_R: "command-r",
-  COMMAND_R_PLUS: "command-r-plus",
-  MISTRAL_LARGE_LATEST: "mistral-large-latest",
-  MISTRAL_LARGE_2407: "mistral-large-2407",
-  MISTRAL_LARGE_2402: "mistral-large-2402",
-  MISTRAL_MEDIUM_LATEST: "mistral-medium-latest",
-  MISTRAL_SMALL_LATEST: "mistral-small-latest",
-  CODESTRAL_LATEST: "codestral-latest",
-  OPEN_MISTRAL_7B: "open-mistral-7b",
-  OPEN_MIXTRAL_8X7B: "open-mixtral-8x7b",
-  OPEN_MIXTRAL_8X22B: "open-mixtral-8x22b",
-  MISTRAL_7B_INSTRUCT_V0_2: "Mistral-7B-Instruct-v0.2",
-  MIXTRAL_8X7B_INSTRUCT_V0_1: "Mixtral-8x7B-Instruct-v0.1",
-  MIXTRAL_8X22B_INSTRUCT_V0_1: "Mixtral-8x22B-Instruct-v0.1",
-  LLAMA_3_70B_CHAT_HF: "Llama-3-70b-chat-hf",
-  LLAMA_3_8B_CHAT_HF: "Llama-3-8b-chat-hf",
-  QWEN2_72B_INSTRUCT: "Qwen2-72B-Instruct",
-  LLAMA_3_1_8B_INSTRUCT_TURBO: "Meta-Llama-3.1-8B-Instruct-Turbo",
-  LLAMA_3_1_70B_INSTRUCT_TURBO: "Meta-Llama-3.1-70B-Instruct-Turbo",
-  LLAMA_3_1_405B_INSTRUCT_TURBO: "Meta-Llama-3.1-405B-Instruct-Turbo",
-  PERPLEXITY_SONAR: "sonar",
-  OPEN_MISTRAL_NEMO: "open-mistral-nemo",
-  DEEPSEEK_R1: "DeepSeek-R1"
-};
-({
-  [SupportedProvider.OPENAI]: [
-    SupportedModel.GPT_3_5_TURBO,
-    SupportedModel.GPT_3_5_TURBO_0125,
-    SupportedModel.GPT_4,
-    SupportedModel.GPT_4_0613,
-    SupportedModel.GPT_4_1106_PREVIEW,
-    SupportedModel.GPT_4_TURBO,
-    SupportedModel.GPT_4_TURBO_PREVIEW,
-    SupportedModel.GPT_4_TURBO_2024_04_09,
-    SupportedModel.GPT_4O_2024_05_13,
-    SupportedModel.GPT_4O_2024_08_06,
-    SupportedModel.GPT_4O,
-    SupportedModel.GPT_4O_MINI_2024_07_18,
-    SupportedModel.GPT_4O_MINI,
-    SupportedModel.GPT_4_0125_PREVIEW,
-    SupportedModel.O1_PREVIEW,
-    SupportedModel.O1_PREVIEW_2024_09_12,
-    SupportedModel.O1_MINI,
-    SupportedModel.O1_MINI_2024_09_12,
-    SupportedModel.CHATGPT_4O_LATEST
-  ],
-  [SupportedProvider.ANTHROPIC]: [
-    SupportedModel.CLAUDE_2_1,
-    SupportedModel.CLAUDE_3_OPUS_20240229,
-    SupportedModel.CLAUDE_3_SONNET_20240229,
-    SupportedModel.CLAUDE_3_5_SONNET_20240620,
-    SupportedModel.CLAUDE_3_5_SONNET_20241022,
-    SupportedModel.CLAUDE_3_5_SONNET_LATEST,
-    SupportedModel.CLAUDE_3_HAIKU_20240307,
-    SupportedModel.CLAUDE_3_5_HAIKU_20241022
-  ],
-  [SupportedProvider.GOOGLE]: [
-    SupportedModel.GEMINI_PRO,
-    SupportedModel.GEMINI_1_PRO_LATEST,
-    SupportedModel.GEMINI_15_PRO_LATEST,
-    SupportedModel.GEMINI_15_PRO_EXP_0801,
-    SupportedModel.GEMINI_15_FLASH_LATEST
-  ],
-  [SupportedProvider.MISTRAL]: [
-    SupportedModel.MISTRAL_LARGE_LATEST,
-    SupportedModel.MISTRAL_LARGE_2407,
-    SupportedModel.MISTRAL_LARGE_2402,
-    SupportedModel.MISTRAL_MEDIUM_LATEST,
-    SupportedModel.MISTRAL_SMALL_LATEST,
-    SupportedModel.CODESTRAL_LATEST,
-    SupportedModel.OPEN_MISTRAL_7B,
-    SupportedModel.OPEN_MIXTRAL_8X7B,
-    SupportedModel.OPEN_MIXTRAL_8X22B,
-    SupportedModel.OPEN_MISTRAL_NEMO
-  ],
-  [SupportedProvider.PERPLEXITY]: [
-    SupportedModel.PERPLEXITY_SONAR
-  ],
-  [SupportedProvider.COHERE]: [
-    SupportedModel.COMMAND_R,
-    SupportedModel.COMMAND_R_PLUS
-  ],
-  [SupportedProvider.TOGETHERAI]: [
-    SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
-    SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
-    SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
-    SupportedModel.LLAMA_3_70B_CHAT_HF,
-    SupportedModel.LLAMA_3_8B_CHAT_HF,
-    SupportedModel.QWEN2_72B_INSTRUCT,
-    SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO,
-    SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO,
-    SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO,
-    SupportedModel.DEEPSEEK_R1
-  ]
-});
-
-function getLangChainModel(provider, llmKeys, responseModel) {
-  const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHERAI } = SupportedProvider;
-  switch (provider.provider) {
-    case OPENAI:
-      if (responseModel) {
-        return new openai.ChatOpenAI({
-          modelName: provider.model,
-          apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-        }).withStructuredOutput(responseModel);
-      }
-      return new openai.ChatOpenAI({
-        modelName: provider.model,
-        apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-      });
-    case ANTHROPIC:
-      if (responseModel) {
-        return new anthropic.ChatAnthropic({
-          modelName: provider.model,
-          anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-        }).withStructuredOutput(responseModel);
-      }
-      return new anthropic.ChatAnthropic({
-        modelName: provider.model,
-        anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-      });
-    case GOOGLE:
-      if (responseModel) {
-        return new googleGenai.ChatGoogleGenerativeAI({
-          modelName: provider.model,
-          apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-        }).withStructuredOutput(responseModel);
-      }
-      return new googleGenai.ChatGoogleGenerativeAI({
-        modelName: provider.model,
-        apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-      });
-    case MISTRAL:
-      if (responseModel) {
-        return new mistralai.ChatMistralAI({
-          modelName: provider.model,
-          apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-        }).withStructuredOutput(responseModel);
-      }
-      return new mistralai.ChatMistralAI({
-        modelName: provider.model,
-        apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-      });
-    case PERPLEXITY:
-      if (responseModel) {
-        return new ChatPerplexity({
-          apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-          model: provider.model
-        }).withStructuredOutput(responseModel);
-      }
-      return new ChatPerplexity({
-        apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-        model: provider.model
-      });
-    case COHERE:
-      if (responseModel) {
-        return new cohere.ChatCohere({
-          apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-          model: provider.model
-        }).withStructuredOutput(responseModel);
-      }
-      return new cohere.ChatCohere({
-        apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-        model: provider.model
-      });
-    case TOGETHERAI:
-      if (responseModel) {
-        return new togetherai.ChatTogetherAI({
-          apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
-          model: getTogetheraiModel(provider.model)
-        }).withStructuredOutput(responseModel);
-      }
-      return new togetherai.ChatTogetherAI({
-        apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
-        model: getTogetheraiModel(provider.model)
-      });
-    default:
-      throw new Error(`Unsupported provider: ${provider.provider}`);
-  }
-}
-const getTogetheraiModel = (model) => {
-  if (model === SupportedModel.MISTRAL_7B_INSTRUCT_V0_2 || model === SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1 || model === SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1) {
-    return `mistralai/${model}`;
-  }
-  if (model === SupportedModel.LLAMA_3_70B_CHAT_HF || model === SupportedModel.LLAMA_3_8B_CHAT_HF || model === SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO || model === SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO || model === SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO) {
-    return `meta-llama/${model}`;
-  }
-  if (model === SupportedModel.QWEN2_72B_INSTRUCT) {
-    return `Qwen/${model}`;
-  }
-  return model;
-};
-async function callLLM(provider, options, llmKeys, runtimeArgs) {
-  const model = getLangChainModel(provider, llmKeys, options.responseModel);
-  const langChainMessages = extendProviderSystemPrompt(
-    options.messages.map(convertToLangChainMessage),
-    options,
-    provider
-  );
-  const response = await model.invoke(langChainMessages, runtimeArgs);
-  return extractContent(response);
-}
-function extendProviderSystemPrompt(messages$1, options, provider) {
-  const matchingProvider = options.llmProviders.find(
-    (p) => p.provider === provider.provider && p.model === provider.model
-  );
-  if (matchingProvider && matchingProvider.systemPrompt) {
-    messages$1.unshift(new messages.SystemMessage(matchingProvider.systemPrompt));
-  }
-  return messages$1;
-}
-function convertToLangChainMessage(msg) {
-  switch (msg.role) {
-    case "user":
-      return new messages.HumanMessage(msg.content);
-    case "assistant":
-      return new messages.AIMessage(msg.content);
-    case "system":
-      return new messages.SystemMessage(msg.content);
-    default:
-      return new messages.HumanMessage(msg.content);
-  }
-}
-async function* callLLMStream(provider, options, llmKeys, runtimeArgs) {
-  const model = getLangChainModel(provider, llmKeys, options.responseModel);
-  const langChainMessages = extendProviderSystemPrompt(
-    options.messages.map(convertToLangChainMessage),
-    options,
-    provider
-  );
-  const stream = await model.stream(langChainMessages, runtimeArgs);
-  for await (const chunk of stream) {
-    yield extractContent(chunk);
-  }
-}
-function extractContent(response) {
-  if ("content" in response) {
-    return typeof response.content === "string" ? response.content : JSON.stringify(response.content);
-  }
-  return typeof response === "string" ? response : JSON.stringify(response);
-}
-
-const SDK_VERSION = packageJson.version;
-dotenv__namespace.config();
-const DEFAULT_TIMEOUT = 5;
-const BASE_URL = "https://api.notdiamond.ai";
-class NotDiamond {
-  apiKey;
-  apiUrl;
-  modelSelectUrl;
-  feedbackUrl;
-  createUrl;
-  llmKeys;
-  constructor(options = {}) {
-    this.apiKey = options.apiKey || process.env.NOTDIAMOND_API_KEY || "";
-    this.apiUrl = options.apiUrl || process.env.NOTDIAMOND_API_URL || BASE_URL;
-    this.llmKeys = options.llmKeys || {};
-    this.modelSelectUrl = `${this.apiUrl}/v2/modelRouter/modelSelect`;
-    this.feedbackUrl = `${this.apiUrl}/v2/report/metrics/feedback`;
-    this.createUrl = `${this.apiUrl}/v2/preferences/userPreferenceCreate`;
-  }
-  getAuthHeader() {
-    return `Bearer ${this.apiKey}`;
-  }
-  async postRequest(url, body) {
-    try {
-      const response = await axios__default.post(url, body, {
-        headers: {
-          Authorization: this.getAuthHeader(),
-          Accept: "application/json",
-          "Content-Type": "application/json",
-          "User-Agent": `TS-SDK/${SDK_VERSION}`
-        }
-      });
-      return response.data;
-    } catch (error) {
-      if (axios__default.isAxiosError(error) && error.response) {
-        return { detail: "An error occurred." };
-      }
-      console.error("error", error);
-      return { detail: "An unexpected error occurred." };
-    }
-  }
-  /**
-   * Selects the best model for the given messages.
-   * @param options The options for the model.
-   * @returns The results of the model.
-   */
-  async modelSelect(options) {
-    const requestBody = {
-      messages: options.messages,
-      llm_providers: options.llmProviders.map((provider) => ({
-        provider: provider.provider,
-        model: provider.model,
-        ...provider.contextLength !== void 0 && {
-          context_length: provider.contextLength
-        },
-        ...provider.customInputPrice !== void 0 && {
-          input_price: provider.customInputPrice
-        },
-        ...provider.inputPrice !== void 0 && {
-          input_price: provider.inputPrice
-        },
-        ...provider.customOutputPrice !== void 0 && {
-          output_price: provider.customOutputPrice
-        },
-        ...provider.outputPrice !== void 0 && {
-          output_price: provider.outputPrice
-        },
-        ...provider.customLatency !== void 0 && {
-          latency: provider.customLatency
-        },
-        ...provider.latency !== void 0 && { latency: provider.latency },
-        ...provider.isCustom !== void 0 && {
-          is_custom: provider.isCustom
-        }
-      })),
-      ...options.tradeoff && {
-        tradeoff: options.tradeoff
-      },
-      ...options.maxModelDepth && {
-        max_model_depth: options.maxModelDepth
-      },
-      ...options.tools && { tools: options.tools },
-      ...options.hashContent !== void 0 && {
-        hash_content: options.hashContent
-      },
-      ...options.preferenceId && { preference_id: options.preferenceId },
-      ...options.timeout ? { timeout: options.timeout } : {
-        timeout: DEFAULT_TIMEOUT
-      },
-      ...options.default && { default: options.default },
-      ...options.previousSession && {
-        previous_session: options.previousSession
-      },
-      ...options.responseModel && {
-        response_model: options.responseModel
-      }
-    };
-    return this.postRequest(
-      this.modelSelectUrl,
-      requestBody
-    );
-  }
-  /**
-   * Sends feedback to the NotDiamond API.
-   * @param options The options for the feedback.
-   * @returns The results of the feedback.
-   */
-  async feedback(options) {
-    return this.postRequest(this.feedbackUrl, {
-      session_id: options.sessionId,
-      feedback: options.feedback,
-      provider: options.provider
-    });
-  }
-  /**
-   * Creates a preference id.
-   * @returns The preference id.
-   */
-  async createPreferenceId() {
-    const response = await this.postRequest(
-      this.createUrl,
-      {}
-    );
-    if ("preference_id" in response) {
-      return response.preference_id;
-    }
-    throw new Error("Invalid response: preference_id not found");
-  }
-  /**
-   *
-   * @param options The options for the model.
-   * @returns A promise that resolves to the results of the model.
-   */
-  async acreate(options, runtimeArgs = {}) {
-    const selectedModel = await this.modelSelect(options);
-    const { providers } = selectedModel;
-    const content = await callLLM(
-      providers[0],
-      options,
-      this.llmKeys,
-      runtimeArgs
-    );
-    return { content, providers };
-  }
-  /**
-   *
-   * @param options The options for the model.
-   * @param callback Optional callback function to handle the result.
-   * @returns A promise that resolves to the results of the model or a callback function
-   */
-  create(options, runtimeArgs = {}, callback) {
-    const promise = this.acreate(options, runtimeArgs);
-    if (callback) {
-      promise.then((result) => callback(null, result)).catch((error) => callback(error));
-    } else {
-      return promise;
-    }
-  }
-  /**
-   * Streams the results of the model asynchronously.
-   * @param options The options for the model.
-   * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
-   */
-  async astream(options, runtimeArgs = {}) {
-    const selectedModel = await this.modelSelect(options);
-    const { providers } = selectedModel;
-    const stream = await Promise.resolve(
-      callLLMStream(
-        providers?.[0] || options.default,
-        options,
-        this.llmKeys,
-        runtimeArgs
-      )
-    );
-    return {
-      provider: providers?.[0] || options.default,
-      stream
-    };
-  }
-  /**
-   * Streams the results of the model.
-   * @param options The options for the model.
-   * @param callback Optional callback function to handle each chunk of the stream.
-   * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
-   */
-  stream(options, runtimeArgs = {}, callback) {
-    if (!options.llmProviders || options.llmProviders.length === 0) {
-      throw new Error("No LLM providers specified");
-    }
-    const promise = this.astream(options, runtimeArgs);
-    if (callback) {
-      promise.then(async ({ provider, stream }) => {
-        for await (const chunk of stream) {
-          callback(null, { provider, chunk });
-        }
-      }).catch((error) => callback(error));
-    } else {
-      return promise;
-    }
-  }
-}
-
-exports.NotDiamond = NotDiamond;
-exports.SupportedModel = SupportedModel;
-exports.SupportedProvider = SupportedProvider;
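
For reference, the deleted dist/index.cjs above was the SDK's CommonJS bundle and exported NotDiamond, SupportedModel, and SupportedProvider. A minimal sketch of driving that exported surface, based only on the removed code shown in this diff (the message text and provider list are illustrative placeholders, not values taken from the package):

const { NotDiamond, SupportedProvider, SupportedModel } = require('notdiamond');

// The constructor falls back to NOTDIAMOND_API_KEY from the environment
// when no apiKey option is passed.
const client = new NotDiamond();

async function main() {
  // acreate() first calls modelSelect() (POST /v2/modelRouter/modelSelect),
  // then invokes the top-ranked provider through LangChain and returns
  // { content, providers }.
  const { content, providers } = await client.acreate({
    messages: [{ role: 'user', content: 'Hello!' }], // placeholder message
    llmProviders: [
      { provider: SupportedProvider.OPENAI, model: SupportedModel.GPT_4O },
      { provider: SupportedProvider.ANTHROPIC, model: SupportedModel.CLAUDE_3_5_SONNET_LATEST }
    ]
  });
  console.log(providers[0], content);
}

main().catch(console.error);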