notdiamond 1.1.2 → 1.1.3

This diff shows the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "notdiamond",
   "type": "module",
-  "version": "1.1.2",
+  "version": "1.1.3",
   "author": "not-diamond",
   "license": "MIT",
   "description": "TS/JS client for the NotDiamond API",
@@ -70,6 +70,7 @@
     "decamelize": "^6.0.0",
     "dotenv": "^16.4.5",
     "eventemitter3": "^5.0.1",
+    "form-data": "^4.0.4",
     "langchain": "^0.3.2",
     "langsmith": "^0.1.60",
     "p-finally": "^2.0.1",
@@ -120,7 +121,8 @@
     "@octokit/request": "^9.2.1",
     "@octokit/plugin-paginate-rest": "^11.4.1",
     "@babel/helpers": "^7.26.10",
-    "esbuild": "^0.25.0"
+    "esbuild": "^0.25.0",
+    "form-data": "^4.0.4"
   },
   "engines": {
     "node": ">=20",
package/dist/index.cjs DELETED
@@ -1,583 +0,0 @@
- 'use strict';
-
- const dotenv = require('dotenv');
- const openai = require('@langchain/openai');
- const messages = require('@langchain/core/messages');
- const anthropic = require('@langchain/anthropic');
- const googleGenai = require('@langchain/google-genai');
- const mistralai = require('@langchain/mistralai');
- const chat_models = require('@langchain/core/language_models/chat_models');
- const axios = require('axios');
- const cohere = require('@langchain/cohere');
- const togetherai = require('@langchain/community/chat_models/togetherai');
-
- function _interopDefaultCompat (e) { return e && typeof e === 'object' && 'default' in e ? e.default : e; }
-
- function _interopNamespaceCompat(e) {
-   if (e && typeof e === 'object' && 'default' in e) return e;
-   const n = Object.create(null);
-   if (e) {
-     for (const k in e) {
-       n[k] = e[k];
-     }
-   }
-   n.default = e;
-   return n;
- }
-
- const dotenv__namespace = /*#__PURE__*/_interopNamespaceCompat(dotenv);
- const axios__default = /*#__PURE__*/_interopDefaultCompat(axios);
-
- const version = "1.1.1";
- const packageJson = {
-   version: version};
-
- class ChatPerplexity extends chat_models.BaseChatModel {
-   _generate(messages, options, runManager) {
-     throw new Error(
-       "Method not implemented." + JSON.stringify(messages) + JSON.stringify(options) + JSON.stringify(runManager)
-     );
-   }
-   apiKey;
-   model;
-   constructor({ apiKey, model }) {
-     super({});
-     this.apiKey = apiKey;
-     this.model = model;
-   }
-   _llmType() {
-     return "perplexity";
-   }
-   /**
-    * Invokes the Perplexity model.
-    * @param messages The messages to send to the model.
-    * @returns The results of the model.
-    */
-   async invoke(messages$1) {
-     try {
-       const { data } = await axios__default.post(
-         "https://api.perplexity.ai/chat/completions",
-         {
-           model: this.model,
-           messages: messages$1.map((m) => ({
-             role: m._getType() === "human" ? "user" : m._getType(),
-             content: m.content
-           }))
-         },
-         {
-           headers: {
-             Authorization: `Bearer ${this.apiKey}`
-           }
-         }
-       );
-       return new messages.AIMessage(data.choices[0].message.content);
-     } catch (error) {
-       if (axios__default.isAxiosError(error) && error.response) {
-         throw new Error(`Perplexity API error: ${error.response.statusText}`);
-       }
-       throw error;
-     }
-   }
- }
-
- const SupportedProvider = {
-   OPENAI: "openai",
-   ANTHROPIC: "anthropic",
-   GOOGLE: "google",
-   MISTRAL: "mistral",
-   PERPLEXITY: "perplexity",
-   COHERE: "cohere",
-   TOGETHERAI: "togetherai"
- };
- const SupportedModel = {
-   GPT_3_5_TURBO: "gpt-3.5-turbo",
-   GPT_3_5_TURBO_0125: "gpt-3.5-turbo-0125",
-   GPT_4: "gpt-4",
-   GPT_4_0613: "gpt-4-0613",
-   GPT_4_1106_PREVIEW: "gpt-4-1106-preview",
-   GPT_4_TURBO: "gpt-4-turbo",
-   GPT_4_TURBO_PREVIEW: "gpt-4-turbo-preview",
-   GPT_4_TURBO_2024_04_09: "gpt-4-turbo-2024-04-09",
-   GPT_4O_2024_05_13: "gpt-4o-2024-05-13",
-   GPT_4O_2024_08_06: "gpt-4o-2024-08-06",
-   GPT_4O: "gpt-4o",
-   GPT_4O_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
-   GPT_4O_MINI: "gpt-4o-mini",
-   GPT_4_0125_PREVIEW: "gpt-4-0125-preview",
-   GPT_4_5_PREVIEW: "gpt-4.5-preview",
-   GPT_4_5_PREVIEW_2025_02_27: "gpt-4.5-preview-2025-02-27",
-   CHATGPT_4O_LATEST: "chatgpt-4o-latest",
-   O1_PREVIEW: "o1-preview",
-   O1_PREVIEW_2024_09_12: "o1-preview-2024-09-12",
-   O1_MINI: "o1-mini",
-   O1_MINI_2024_09_12: "o1-mini-2024-09-12",
-   CLAUDE_2_1: "claude-2.1",
-   CLAUDE_3_OPUS_20240229: "claude-3-opus-20240229",
-   CLAUDE_3_SONNET_20240229: "claude-3-sonnet-20240229",
-   CLAUDE_3_5_SONNET_20240620: "claude-3-5-sonnet-20240620",
-   CLAUDE_3_5_SONNET_20241022: "claude-3-5-sonnet-20241022",
-   CLAUDE_3_5_SONNET_LATEST: "claude-3-5-sonnet-latest",
-   CLAUDE_3_HAIKU_20240307: "claude-3-haiku-20240307",
-   CLAUDE_3_5_HAIKU_20241022: "claude-3-5-haiku-20241022",
-   CLAUDE_3_7_SONNET_LATEST: "claude-3-7-sonnet-latest",
-   CLAUDE_3_7_SONNET_20250219: "claude-3-7-sonnet-20250219",
-   GEMINI_PRO: "gemini-pro",
-   GEMINI_1_PRO_LATEST: "gemini-1.0-pro-latest",
-   GEMINI_15_PRO_LATEST: "gemini-1.5-pro-latest",
-   GEMINI_15_PRO_EXP_0801: "gemini-1.5-pro-exp-0801",
-   GEMINI_15_FLASH_LATEST: "gemini-1.5-flash-latest",
-   GEMINI_2_0_FLASH: "gemini-2.0-flash",
-   GEMINI_2_0_FLASH_001: "gemini-2.0-flash-001",
-   COMMAND_R: "command-r",
-   COMMAND_R_PLUS: "command-r-plus",
-   MISTRAL_LARGE_LATEST: "mistral-large-latest",
-   MISTRAL_LARGE_2407: "mistral-large-2407",
-   MISTRAL_LARGE_2402: "mistral-large-2402",
-   MISTRAL_MEDIUM_LATEST: "mistral-medium-latest",
-   MISTRAL_SMALL_LATEST: "mistral-small-latest",
-   CODESTRAL_LATEST: "codestral-latest",
-   OPEN_MISTRAL_7B: "open-mistral-7b",
-   OPEN_MIXTRAL_8X7B: "open-mixtral-8x7b",
-   OPEN_MIXTRAL_8X22B: "open-mixtral-8x22b",
-   MISTRAL_7B_INSTRUCT_V0_2: "Mistral-7B-Instruct-v0.2",
-   MIXTRAL_8X7B_INSTRUCT_V0_1: "Mixtral-8x7B-Instruct-v0.1",
-   MIXTRAL_8X22B_INSTRUCT_V0_1: "Mixtral-8x22B-Instruct-v0.1",
-   LLAMA_3_70B_CHAT_HF: "Llama-3-70b-chat-hf",
-   LLAMA_3_8B_CHAT_HF: "Llama-3-8b-chat-hf",
-   QWEN2_72B_INSTRUCT: "Qwen2-72B-Instruct",
-   LLAMA_3_1_8B_INSTRUCT_TURBO: "Meta-Llama-3.1-8B-Instruct-Turbo",
-   LLAMA_3_1_70B_INSTRUCT_TURBO: "Meta-Llama-3.1-70B-Instruct-Turbo",
-   LLAMA_3_1_405B_INSTRUCT_TURBO: "Meta-Llama-3.1-405B-Instruct-Turbo",
-   PERPLEXITY_SONAR: "sonar",
-   OPEN_MISTRAL_NEMO: "open-mistral-nemo",
-   DEEPSEEK_R1: "DeepSeek-R1"
- };
- ({
-   [SupportedProvider.OPENAI]: [
-     SupportedModel.GPT_3_5_TURBO,
-     SupportedModel.GPT_3_5_TURBO_0125,
-     SupportedModel.GPT_4,
-     SupportedModel.GPT_4_0613,
-     SupportedModel.GPT_4_1106_PREVIEW,
-     SupportedModel.GPT_4_TURBO,
-     SupportedModel.GPT_4_TURBO_PREVIEW,
-     SupportedModel.GPT_4_TURBO_2024_04_09,
-     SupportedModel.GPT_4O_2024_05_13,
-     SupportedModel.GPT_4O_2024_08_06,
-     SupportedModel.GPT_4O,
-     SupportedModel.GPT_4O_MINI_2024_07_18,
-     SupportedModel.GPT_4O_MINI,
-     SupportedModel.GPT_4_0125_PREVIEW,
-     SupportedModel.O1_PREVIEW,
-     SupportedModel.O1_PREVIEW_2024_09_12,
-     SupportedModel.O1_MINI,
-     SupportedModel.O1_MINI_2024_09_12,
-     SupportedModel.CHATGPT_4O_LATEST,
-     SupportedModel.GPT_4_5_PREVIEW,
-     SupportedModel.GPT_4_5_PREVIEW_2025_02_27
-   ],
-   [SupportedProvider.ANTHROPIC]: [
-     SupportedModel.CLAUDE_2_1,
-     SupportedModel.CLAUDE_3_OPUS_20240229,
-     SupportedModel.CLAUDE_3_SONNET_20240229,
-     SupportedModel.CLAUDE_3_5_SONNET_20240620,
-     SupportedModel.CLAUDE_3_5_SONNET_20241022,
-     SupportedModel.CLAUDE_3_5_SONNET_LATEST,
-     SupportedModel.CLAUDE_3_HAIKU_20240307,
-     SupportedModel.CLAUDE_3_5_HAIKU_20241022,
-     SupportedModel.CLAUDE_3_7_SONNET_LATEST,
-     SupportedModel.CLAUDE_3_7_SONNET_20250219
-   ],
-   [SupportedProvider.GOOGLE]: [
-     SupportedModel.GEMINI_PRO,
-     SupportedModel.GEMINI_1_PRO_LATEST,
-     SupportedModel.GEMINI_15_PRO_LATEST,
-     SupportedModel.GEMINI_15_PRO_EXP_0801,
-     SupportedModel.GEMINI_15_FLASH_LATEST,
-     SupportedModel.GEMINI_2_0_FLASH,
-     SupportedModel.GEMINI_2_0_FLASH_001
-   ],
-   [SupportedProvider.MISTRAL]: [
-     SupportedModel.MISTRAL_LARGE_LATEST,
-     SupportedModel.MISTRAL_LARGE_2407,
-     SupportedModel.MISTRAL_LARGE_2402,
-     SupportedModel.MISTRAL_MEDIUM_LATEST,
-     SupportedModel.MISTRAL_SMALL_LATEST,
-     SupportedModel.CODESTRAL_LATEST,
-     SupportedModel.OPEN_MISTRAL_7B,
-     SupportedModel.OPEN_MIXTRAL_8X7B,
-     SupportedModel.OPEN_MIXTRAL_8X22B,
-     SupportedModel.OPEN_MISTRAL_NEMO
-   ],
-   [SupportedProvider.PERPLEXITY]: [
-     SupportedModel.PERPLEXITY_SONAR
-   ],
-   [SupportedProvider.COHERE]: [
-     SupportedModel.COMMAND_R,
-     SupportedModel.COMMAND_R_PLUS
-   ],
-   [SupportedProvider.TOGETHERAI]: [
-     SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
-     SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
-     SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
-     SupportedModel.LLAMA_3_70B_CHAT_HF,
-     SupportedModel.LLAMA_3_8B_CHAT_HF,
-     SupportedModel.QWEN2_72B_INSTRUCT,
-     SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO,
-     SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO,
-     SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO,
-     SupportedModel.DEEPSEEK_R1
-   ]
- });
-
- function getLangChainModel(provider, llmKeys, responseModel) {
-   const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHERAI } = SupportedProvider;
-   switch (provider.provider) {
-     case OPENAI:
-       if (responseModel) {
-         return new openai.ChatOpenAI({
-           modelName: provider.model,
-           apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new openai.ChatOpenAI({
-         modelName: provider.model,
-         apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-       });
-     case ANTHROPIC:
-       if (responseModel) {
-         return new anthropic.ChatAnthropic({
-           modelName: provider.model,
-           anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new anthropic.ChatAnthropic({
-         modelName: provider.model,
-         anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-       });
-     case GOOGLE:
-       if (responseModel) {
-         return new googleGenai.ChatGoogleGenerativeAI({
-           modelName: provider.model,
-           apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new googleGenai.ChatGoogleGenerativeAI({
-         modelName: provider.model,
-         apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-       });
-     case MISTRAL:
-       if (responseModel) {
-         return new mistralai.ChatMistralAI({
-           modelName: provider.model,
-           apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new mistralai.ChatMistralAI({
-         modelName: provider.model,
-         apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-       });
-     case PERPLEXITY:
-       if (responseModel) {
-         return new ChatPerplexity({
-           apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatPerplexity({
-         apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-         model: provider.model
-       });
-     case COHERE:
-       if (responseModel) {
-         return new cohere.ChatCohere({
-           apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new cohere.ChatCohere({
-         apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-         model: provider.model
-       });
-     case TOGETHERAI:
-       if (responseModel) {
-         return new togetherai.ChatTogetherAI({
-           apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
-           model: getTogetheraiModel(provider.model)
-         }).withStructuredOutput(responseModel);
-       }
-       return new togetherai.ChatTogetherAI({
-         apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
-         model: getTogetheraiModel(provider.model)
-       });
-     default:
-       throw new Error(`Unsupported provider: ${provider.provider}`);
-   }
- }
- const getTogetheraiModel = (model) => {
-   if (model === SupportedModel.MISTRAL_7B_INSTRUCT_V0_2 || model === SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1 || model === SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1) {
-     return `mistralai/${model}`;
-   }
-   if (model === SupportedModel.LLAMA_3_70B_CHAT_HF || model === SupportedModel.LLAMA_3_8B_CHAT_HF || model === SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO || model === SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO || model === SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO) {
-     return `meta-llama/${model}`;
-   }
-   if (model === SupportedModel.QWEN2_72B_INSTRUCT) {
-     return `Qwen/${model}`;
-   }
-   return model;
- };
- async function callLLM(provider, options, llmKeys, runtimeArgs) {
-   const model = getLangChainModel(provider, llmKeys, options.responseModel);
-   const langChainMessages = extendProviderSystemPrompt(
-     options.messages.map(convertToLangChainMessage),
-     options,
-     provider
-   );
-   const response = await model.invoke(langChainMessages, runtimeArgs);
-   return extractContent(response);
- }
- function extendProviderSystemPrompt(messages$1, options, provider) {
-   const matchingProvider = options.llmProviders.find(
-     (p) => p.provider === provider.provider && p.model === provider.model
-   );
-   if (matchingProvider && matchingProvider.systemPrompt) {
-     messages$1.unshift(new messages.SystemMessage(matchingProvider.systemPrompt));
-   }
-   return messages$1;
- }
- function convertToLangChainMessage(msg) {
-   switch (msg.role) {
-     case "user":
-       return new messages.HumanMessage(msg.content);
-     case "assistant":
-       return new messages.AIMessage(msg.content);
-     case "system":
-       return new messages.SystemMessage(msg.content);
-     default:
-       return new messages.HumanMessage(msg.content);
-   }
- }
- async function* callLLMStream(provider, options, llmKeys, runtimeArgs) {
-   const model = getLangChainModel(provider, llmKeys, options.responseModel);
-   const langChainMessages = extendProviderSystemPrompt(
-     options.messages.map(convertToLangChainMessage),
-     options,
-     provider
-   );
-   const stream = await model.stream(langChainMessages, runtimeArgs);
-   for await (const chunk of stream) {
-     yield extractContent(chunk);
-   }
- }
- function extractContent(response) {
-   if ("content" in response) {
-     return typeof response.content === "string" ? response.content : JSON.stringify(response.content);
-   }
-   return typeof response === "string" ? response : JSON.stringify(response);
- }
-
- const SDK_VERSION = packageJson.version;
- dotenv__namespace.config();
- const DEFAULT_TIMEOUT = 5;
- const BASE_URL = "https://api.notdiamond.ai";
- class NotDiamond {
-   apiKey;
-   apiUrl;
-   modelSelectUrl;
-   feedbackUrl;
-   createUrl;
-   llmKeys;
-   constructor(options = {}) {
-     this.apiKey = options.apiKey || process.env.NOTDIAMOND_API_KEY || "";
-     this.apiUrl = options.apiUrl || process.env.NOTDIAMOND_API_URL || BASE_URL;
-     this.llmKeys = options.llmKeys || {};
-     this.modelSelectUrl = `${this.apiUrl}/v2/modelRouter/modelSelect`;
-     this.feedbackUrl = `${this.apiUrl}/v2/report/metrics/feedback`;
-     this.createUrl = `${this.apiUrl}/v2/preferences/userPreferenceCreate`;
-   }
-   getAuthHeader() {
-     return `Bearer ${this.apiKey}`;
-   }
-   async postRequest(url, body) {
-     try {
-       const response = await axios__default.post(url, body, {
-         headers: {
-           Authorization: this.getAuthHeader(),
-           Accept: "application/json",
-           "Content-Type": "application/json",
-           "User-Agent": `TS-SDK/${SDK_VERSION}`
-         }
-       });
-       return response.data;
-     } catch (error) {
-       if (axios__default.isAxiosError(error) && error.response) {
-         return { detail: "An error occurred." };
-       }
-       console.error("error", error);
-       return { detail: "An unexpected error occurred." };
-     }
-   }
-   /**
-    * Selects the best model for the given messages.
-    * @param options The options for the model.
-    * @returns The results of the model.
-    */
-   async modelSelect(options) {
-     const requestBody = {
-       messages: options.messages,
-       llm_providers: options.llmProviders.map((provider) => ({
-         provider: provider.provider,
-         model: provider.model,
-         ...provider.contextLength !== void 0 && {
-           context_length: provider.contextLength
-         },
-         ...provider.customInputPrice !== void 0 && {
-           input_price: provider.customInputPrice
-         },
-         ...provider.inputPrice !== void 0 && {
-           input_price: provider.inputPrice
-         },
-         ...provider.customOutputPrice !== void 0 && {
-           output_price: provider.customOutputPrice
-         },
-         ...provider.outputPrice !== void 0 && {
-           output_price: provider.outputPrice
-         },
-         ...provider.customLatency !== void 0 && {
-           latency: provider.customLatency
-         },
-         ...provider.latency !== void 0 && { latency: provider.latency },
-         ...provider.isCustom !== void 0 && {
-           is_custom: provider.isCustom
-         }
-       })),
-       ...options.tradeoff && {
-         tradeoff: options.tradeoff
-       },
-       ...options.maxModelDepth && {
-         max_model_depth: options.maxModelDepth
-       },
-       ...options.tools && { tools: options.tools },
-       ...options.hashContent !== void 0 && {
-         hash_content: options.hashContent
-       },
-       ...options.preferenceId && { preference_id: options.preferenceId },
-       ...options.timeout ? { timeout: options.timeout } : {
-         timeout: DEFAULT_TIMEOUT
-       },
-       ...options.default && { default: options.default },
-       ...options.previousSession && {
-         previous_session: options.previousSession
-       },
-       ...options.responseModel && {
-         response_model: options.responseModel
-       }
-     };
-     return this.postRequest(
-       this.modelSelectUrl,
-       requestBody
-     );
-   }
-   /**
-    * Sends feedback to the NotDiamond API.
-    * @param options The options for the feedback.
-    * @returns The results of the feedback.
-    */
-   async feedback(options) {
-     return this.postRequest(this.feedbackUrl, {
-       session_id: options.sessionId,
-       feedback: options.feedback,
-       provider: options.provider
-     });
-   }
-   /**
-    * Creates a preference id.
-    * @returns The preference id.
-    */
-   async createPreferenceId() {
-     const response = await this.postRequest(
-       this.createUrl,
-       {}
-     );
-     if ("preference_id" in response) {
-       return response.preference_id;
-     }
-     throw new Error("Invalid response: preference_id not found");
-   }
-   /**
-    *
-    * @param options The options for the model.
-    * @returns A promise that resolves to the results of the model.
-    */
-   async acreate(options, runtimeArgs = {}) {
-     const selectedModel = await this.modelSelect(options);
-     const { providers } = selectedModel;
-     const content = await callLLM(
-       providers[0],
-       options,
-       this.llmKeys,
-       runtimeArgs
-     );
-     return { content, providers };
-   }
-   /**
-    *
-    * @param options The options for the model.
-    * @param callback Optional callback function to handle the result.
-    * @returns A promise that resolves to the results of the model or a callback function
-    */
-   create(options, runtimeArgs = {}, callback) {
-     const promise = this.acreate(options, runtimeArgs);
-     if (callback) {
-       promise.then((result) => callback(null, result)).catch((error) => callback(error));
-     } else {
-       return promise;
-     }
-   }
-   /**
-    * Streams the results of the model asynchronously.
-    * @param options The options for the model.
-    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
-    */
-   async astream(options, runtimeArgs = {}) {
-     const selectedModel = await this.modelSelect(options);
-     const { providers } = selectedModel;
-     const stream = await Promise.resolve(
-       callLLMStream(
-         providers?.[0] || options.default,
-         options,
-         this.llmKeys,
-         runtimeArgs
-       )
-     );
-     return {
-       provider: providers?.[0] || options.default,
-       stream
-     };
-   }
-   /**
-    * Streams the results of the model.
-    * @param options The options for the model.
-    * @param callback Optional callback function to handle each chunk of the stream.
-    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
-    */
-   stream(options, runtimeArgs = {}, callback) {
-     if (!options.llmProviders || options.llmProviders.length === 0) {
-       throw new Error("No LLM providers specified");
-     }
-     const promise = this.astream(options, runtimeArgs);
-     if (callback) {
-       promise.then(async ({ provider, stream }) => {
-         for await (const chunk of stream) {
-           callback(null, { provider, chunk });
-         }
-       }).catch((error) => callback(error));
-     } else {
-       return promise;
-     }
-   }
- }
-
- exports.NotDiamond = NotDiamond;
- exports.SupportedModel = SupportedModel;
- exports.SupportedProvider = SupportedProvider;