notdiamond 0.3.10 → 0.3.11

This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
package/dist/index.mjs DELETED
@@ -1,645 +0,0 @@
- import * as dotenv from 'dotenv';
- import { ChatOpenAI } from '@langchain/openai';
- import { AIMessage, HumanMessage, SystemMessage } from '@langchain/core/messages';
- import { ChatAnthropic } from '@langchain/anthropic';
- import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
- import { ChatMistralAI } from '@langchain/mistralai';
- import { BaseChatModel } from '@langchain/core/language_models/chat_models';
- import axios from 'axios';
- import { ChatCohere } from '@langchain/cohere';
- import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
-
- const name = "notdiamond";
- const type = "module";
- const version = "0.3.9";
- const author = "not-diamond";
- const license = "MIT";
- const description = "TS/JS client for the NotDiamond API";
- const main = "./dist/index.cjs";
- const exports = {
-   ".": {
-     "import": "./dist/index.mjs",
-     require: "./dist/index.cjs"
-   }
- };
- const types = "./dist/index.d.ts";
- const repository = {
-   type: "git",
-   url: "https://github.com/Not-Diamond/notdiamond-node.git"
- };
- const bugs = {
-   url: "https://github.com/Not-Diamond/notdiamond-node/issues"
- };
- const homepage = "https://github.com/Not-Diamond/notdiamond-node#readme";
- const files = [
-   "dist"
- ];
- const keywords = [
-   "ai",
-   "not-diamond",
-   "typescript",
-   "openai",
-   "chatgpt",
-   "anthropic",
-   "claude",
-   "gemini",
-   "model router"
- ];
- const scripts = {
-   prepare: "husky install",
-   "start:cjs": "node ./dist/index.cjs",
-   "start:esm": "node ./dist/index.mjs",
-   dev: "nodemon",
-   develop: "node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts",
-   test: "jest --passWithNoTests",
-   "test:watch": "jest --watch --passWithNoTests",
-   "test:coverage": "jest --coverage --passWithNoTests",
-   clean: "rimraf build && rimraf dist",
-   build: "swc ./src -d build",
-   "build:watch": "swc ./src -d build -w",
-   lint: "eslint ./src --ext .ts",
-   "lint:fix": "eslint ./src --ext .ts --fix",
-   prettier: "prettier '**/*.{ts,json,md}'",
-   "prettier:write": "prettier --write '**/*.{ts,json,md}'",
-   "type-check": "tsc --noEmit",
-   "lint-staged": "lint-staged",
-   release: "semantic-release",
-   bundle: "unbuild"
- };
- const dependencies = {
-   "@langchain/anthropic": "^0.3.1",
-   "@langchain/cohere": "^0.3.0",
-   "@langchain/community": "^0.3.3",
-   "@langchain/core": "^0.3.3",
-   "@langchain/google-genai": "^0.1.0",
-   "@langchain/mistralai": "^0.1.1",
-   "@langchain/openai": "^0.3.0",
-   "ansi-styles": "^6.2.1",
-   axios: "^1.7.7",
-   camelcase: "^8.0.0",
-   decamelize: "^6.0.0",
-   dotenv: "^16.4.5",
-   eventemitter3: "^5.0.1",
-   langchain: "^0.3.2",
-   langsmith: "^0.1.60",
-   "p-finally": "^2.0.1",
-   "p-queue": "^8.0.1",
-   "p-retry": "^6.2.0",
-   "p-timeout": "^6.1.2",
-   retry: "^0.13.1",
-   semver: "^7.6.3",
-   uuid: "^10.0.0",
-   zod: "^3.23.8",
-   "zod-to-json-schema": "^3.23.3"
- };
- const devDependencies = {
-   "@semantic-release/changelog": "^6.0.3",
-   "@semantic-release/commit-analyzer": "^12.0.0",
-   "@semantic-release/git": "^10.0.1",
-   "@semantic-release/github": "^10.0.0",
-   "@semantic-release/npm": "^12.0.0",
-   "@semantic-release/release-notes-generator": "^13.0.0",
-   "@swc/cli": "0.3.12",
-   "@swc/core": "1.5.7",
-   "@swc/jest": "0.2.36",
-   "@types/jest": "29.5.12",
-   "@types/node": "20.12.12",
-   "@typescript-eslint/eslint-plugin": "7.9.0",
-   "@typescript-eslint/parser": "7.9.0",
-   eslint: "8.57.0",
-   "eslint-config-prettier": "9.1.0",
-   "eslint-plugin-jest": "27.9.0",
-   "eslint-plugin-prettier": "5.1.3",
-   husky: "^9.0.0",
-   jest: "29.7.0",
-   "lint-staged": "^15.0.0",
-   nodemon: "3.1.0",
-   prettier: "3.2.5",
-   "regenerator-runtime": "^0.14.0",
-   rimraf: "5.0.7",
-   "semantic-release": "^23.0.0",
-   "ts-node": "^10.9.1",
-   typescript: "5.4.5",
-   unbuild: "^2.0.0"
- };
- const resolutions = {
-   "wrap-ansi": "7.0.0"
- };
- const engines = {
-   node: ">=20",
-   npm: ">=8"
- };
- const packageJson = {
-   name: name,
-   type: type,
-   version: version,
-   author: author,
-   license: license,
-   description: description,
-   main: main,
-   exports: exports,
-   types: types,
-   repository: repository,
-   bugs: bugs,
-   homepage: homepage,
-   files: files,
-   keywords: keywords,
-   scripts: scripts,
-   dependencies: dependencies,
-   devDependencies: devDependencies,
-   resolutions: resolutions,
-   engines: engines
- };
-
- var __defProp$1 = Object.defineProperty;
- var __defNormalProp$1 = (obj, key, value) => key in obj ? __defProp$1(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
- var __publicField$1 = (obj, key, value) => {
-   __defNormalProp$1(obj, typeof key !== "symbol" ? key + "" : key, value);
-   return value;
- };
- class ChatPerplexity extends BaseChatModel {
-   constructor({ apiKey, model }) {
-     super({});
-     __publicField$1(this, "apiKey");
-     __publicField$1(this, "model");
-     this.apiKey = apiKey;
-     this.model = model;
-   }
-   _generate(messages, options, runManager) {
-     throw new Error(
-       "Method not implemented." + JSON.stringify(messages) + JSON.stringify(options) + JSON.stringify(runManager)
-     );
-   }
-   _llmType() {
-     return "perplexity";
-   }
-   /**
-    * Invokes the Perplexity model.
-    * @param messages The messages to send to the model.
-    * @returns The results of the model.
-    */
-   async invoke(messages) {
-     try {
-       const { data } = await axios.post(
-         "https://api.perplexity.ai/chat/completions",
-         {
-           model: this.model,
-           messages: messages.map((m) => ({
-             role: m._getType() === "human" ? "user" : m._getType(),
-             content: m.content
-           }))
-         },
-         {
-           headers: {
-             Authorization: `Bearer ${this.apiKey}`
-           }
-         }
-       );
-       return new AIMessage(data.choices[0].message.content);
-     } catch (error) {
-       if (axios.isAxiosError(error) && error.response) {
-         throw new Error(`Perplexity API error: ${error.response.statusText}`);
-       }
-       throw error;
-     }
-   }
- }
-
- const SupportedProvider = {
-   OPENAI: "openai",
-   ANTHROPIC: "anthropic",
-   GOOGLE: "google",
-   MISTRAL: "mistral",
-   PERPLEXITY: "perplexity",
-   COHERE: "cohere",
-   TOGETHER: "together"
- };
- const SupportedModel = {
-   GPT_3_5_TURBO: "gpt-3.5-turbo",
-   GPT_3_5_TURBO_0125: "gpt-3.5-turbo-0125",
-   GPT_4: "gpt-4",
-   GPT_4_0613: "gpt-4-0613",
-   GPT_4_1106_PREVIEW: "gpt-4-1106-preview",
-   GPT_4_TURBO: "gpt-4-turbo",
-   GPT_4_TURBO_PREVIEW: "gpt-4-turbo-preview",
-   GPT_4_TURBO_2024_04_09: "gpt-4-turbo-2024-04-09",
-   GPT_4O_2024_05_13: "gpt-4o-2024-05-13",
-   GPT_4O_2024_08_06: "gpt-4o-2024-08-06",
-   GPT_4O: "gpt-4o",
-   GPT_4O_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
-   GPT_4O_MINI: "gpt-4o-mini",
-   GPT_4_0125_PREVIEW: "gpt-4-0125-preview",
-   O1_PREVIEW: "o1-preview",
-   O1_PREVIEW_2024_09_12: "o1-preview-2024-09-12",
-   O1_MINI: "o1-mini",
-   O1_MINI_2024_09_12: "o1-mini-2024-09-12",
-   CLAUDE_2_1: "claude-2.1",
-   CLAUDE_3_OPUS_20240229: "claude-3-opus-20240229",
-   CLAUDE_3_SONNET_20240229: "claude-3-sonnet-20240229",
-   CLAUDE_3_5_SONNET_20240620: "claude-3-5-sonnet-20240620",
-   CLAUDE_3_HAIKU_20240307: "claude-3-haiku-20240307",
-   GEMINI_PRO: "gemini-pro",
-   GEMINI_1_PRO_LATEST: "gemini-1.0-pro-latest",
-   GEMINI_15_PRO_LATEST: "gemini-1.5-pro-latest",
-   GEMINI_15_PRO_EXP_0801: "gemini-1.5-pro-exp-0801",
-   GEMINI_15_FLASH_LATEST: "gemini-1.5-flash-latest",
-   COMMAND_R: "command-r",
-   COMMAND_R_PLUS: "command-r-plus",
-   MISTRAL_LARGE_LATEST: "mistral-large-latest",
-   MISTRAL_LARGE_2407: "mistral-large-2407",
-   MISTRAL_LARGE_2402: "mistral-large-2402",
-   MISTRAL_MEDIUM_LATEST: "mistral-medium-latest",
-   MISTRAL_SMALL_LATEST: "mistral-small-latest",
-   CODESTRAL_LATEST: "codestral-latest",
-   OPEN_MISTRAL_7B: "open-mistral-7b",
-   OPEN_MIXTRAL_8X7B: "open-mixtral-8x7b",
-   OPEN_MIXTRAL_8X22B: "open-mixtral-8x22b",
-   MISTRAL_7B_INSTRUCT_V0_2: "Mistral-7B-Instruct-v0.2",
-   MIXTRAL_8X7B_INSTRUCT_V0_1: "Mixtral-8x7B-Instruct-v0.1",
-   MIXTRAL_8X22B_INSTRUCT_V0_1: "Mixtral-8x22B-Instruct-v0.1",
-   LLAMA_3_70B_CHAT_HF: "Llama-3-70b-chat-hf",
-   LLAMA_3_8B_CHAT_HF: "Llama-3-8b-chat-hf",
-   QWEN2_72B_INSTRUCT: "Qwen2-72B-Instruct",
-   LLAMA_3_1_8B_INSTRUCT_TURBO: "Meta-Llama-3.1-8B-Instruct-Turbo",
-   LLAMA_3_1_70B_INSTRUCT_TURBO: "Meta-Llama-3.1-70B-Instruct-Turbo",
-   LLAMA_3_1_405B_INSTRUCT_TURBO: "Meta-Llama-3.1-405B-Instruct-Turbo",
-   LLAMA_3_1_SONAR_LARGE_128K_ONLINE: "llama-3.1-sonar-large-128k-online"
- };
- ({
-   [SupportedProvider.OPENAI]: [
-     SupportedModel.GPT_3_5_TURBO,
-     SupportedModel.GPT_3_5_TURBO_0125,
-     SupportedModel.GPT_4,
-     SupportedModel.GPT_4_0613,
-     SupportedModel.GPT_4_1106_PREVIEW,
-     SupportedModel.GPT_4_TURBO,
-     SupportedModel.GPT_4_TURBO_PREVIEW,
-     SupportedModel.GPT_4_TURBO_2024_04_09,
-     SupportedModel.GPT_4O_2024_05_13,
-     SupportedModel.GPT_4O_2024_08_06,
-     SupportedModel.GPT_4O,
-     SupportedModel.GPT_4O_MINI_2024_07_18,
-     SupportedModel.GPT_4O_MINI,
-     SupportedModel.GPT_4_0125_PREVIEW,
-     SupportedModel.O1_PREVIEW,
-     SupportedModel.O1_PREVIEW_2024_09_12,
-     SupportedModel.O1_MINI,
-     SupportedModel.O1_MINI_2024_09_12
-   ],
-   [SupportedProvider.ANTHROPIC]: [
-     SupportedModel.CLAUDE_2_1,
-     SupportedModel.CLAUDE_3_OPUS_20240229,
-     SupportedModel.CLAUDE_3_SONNET_20240229,
-     SupportedModel.CLAUDE_3_5_SONNET_20240620,
-     SupportedModel.CLAUDE_3_HAIKU_20240307
-   ],
-   [SupportedProvider.GOOGLE]: [
-     SupportedModel.GEMINI_PRO,
-     SupportedModel.GEMINI_1_PRO_LATEST,
-     SupportedModel.GEMINI_15_PRO_LATEST,
-     SupportedModel.GEMINI_15_PRO_EXP_0801,
-     SupportedModel.GEMINI_15_FLASH_LATEST
-   ],
-   [SupportedProvider.MISTRAL]: [
-     SupportedModel.MISTRAL_LARGE_LATEST,
-     SupportedModel.MISTRAL_LARGE_2407,
-     SupportedModel.MISTRAL_LARGE_2402,
-     SupportedModel.MISTRAL_MEDIUM_LATEST,
-     SupportedModel.MISTRAL_SMALL_LATEST,
-     SupportedModel.CODESTRAL_LATEST,
-     SupportedModel.OPEN_MISTRAL_7B,
-     SupportedModel.OPEN_MIXTRAL_8X7B,
-     SupportedModel.OPEN_MIXTRAL_8X22B
-   ],
-   [SupportedProvider.PERPLEXITY]: [
-     SupportedModel.LLAMA_3_1_SONAR_LARGE_128K_ONLINE
-   ],
-   [SupportedProvider.COHERE]: [
-     SupportedModel.COMMAND_R,
-     SupportedModel.COMMAND_R_PLUS
-   ],
-   [SupportedProvider.TOGETHER]: [
-     SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
-     SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
-     SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
-     SupportedModel.LLAMA_3_70B_CHAT_HF,
-     SupportedModel.LLAMA_3_8B_CHAT_HF,
-     SupportedModel.QWEN2_72B_INSTRUCT,
-     SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO,
-     SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO,
-     SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO
-   ]
- });
-
- function getLangChainModel(provider, llmKeys, responseModel) {
-   const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHER } = SupportedProvider;
-   switch (provider.provider) {
-     case OPENAI:
-       if (responseModel) {
-         return new ChatOpenAI({
-           modelName: provider.model,
-           apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatOpenAI({
-         modelName: provider.model,
-         apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-       });
-     case ANTHROPIC:
-       if (responseModel) {
-         return new ChatAnthropic({
-           modelName: provider.model,
-           anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatAnthropic({
-         modelName: provider.model,
-         anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-       });
-     case GOOGLE:
-       if (responseModel) {
-         return new ChatGoogleGenerativeAI({
-           modelName: provider.model,
-           apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatGoogleGenerativeAI({
-         modelName: provider.model,
-         apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-       });
-     case MISTRAL:
-       if (responseModel) {
-         return new ChatMistralAI({
-           modelName: provider.model,
-           apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatMistralAI({
-         modelName: provider.model,
-         apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-       });
-     case PERPLEXITY:
-       if (responseModel) {
-         return new ChatPerplexity({
-           apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatPerplexity({
-         apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-         model: provider.model
-       });
-     case COHERE:
-       if (responseModel) {
-         return new ChatCohere({
-           apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatCohere({
-         apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-         model: provider.model
-       });
-     case TOGETHER:
-       if (responseModel) {
-         return new ChatTogetherAI({
-           apiKey: process.env.TOGETHER_API_KEY || llmKeys.together,
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatTogetherAI({
-         apiKey: process.env.TOGETHER_API_KEY || llmKeys.together,
-         model: provider.model
-       });
-     default:
-       throw new Error(`Unsupported provider: ${provider.provider}`);
-   }
- }
- async function callLLM(provider, options, llmKeys) {
-   const model = getLangChainModel(provider, llmKeys, options.responseModel);
-   const langChainMessages = options.messages.map(convertToLangChainMessage);
-   const response = await model.invoke(langChainMessages);
-   return extractContent(response);
- }
- function convertToLangChainMessage(msg) {
-   switch (msg.role) {
-     case "user":
-       return new HumanMessage(msg.content);
-     case "assistant":
-       return new AIMessage(msg.content);
-     case "system":
-       return new SystemMessage(msg.content);
-     default:
-       return new HumanMessage(msg.content);
-   }
- }
- async function* callLLMStream(provider, options, llmKeys) {
-   const model = getLangChainModel(provider, llmKeys, options.responseModel);
-   const langChainMessages = options.messages.map(convertToLangChainMessage);
-   const stream = await model.stream(langChainMessages);
-   for await (const chunk of stream) {
-     yield extractContent(chunk);
-   }
- }
- function extractContent(response) {
-   if ("content" in response) {
-     return typeof response.content === "string" ? response.content : JSON.stringify(response.content);
-   }
-   return typeof response === "string" ? response : JSON.stringify(response);
- }
-
- var __defProp = Object.defineProperty;
- var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
- var __publicField = (obj, key, value) => {
-   __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
-   return value;
- };
- const SDK_VERSION = packageJson.version;
- dotenv.config();
- const DEFAULT_TIMEOUT = 5;
- const BASE_URL = "https://not-diamond-server.onrender.com";
- class NotDiamond {
-   constructor(options = {}) {
-     __publicField(this, "apiKey");
-     __publicField(this, "apiUrl");
-     __publicField(this, "modelSelectUrl");
-     __publicField(this, "feedbackUrl");
-     __publicField(this, "createUrl");
-     __publicField(this, "llmKeys");
-     this.apiKey = options.apiKey || process.env.NOTDIAMOND_API_KEY || "";
-     this.apiUrl = options.apiUrl || process.env.NOTDIAMOND_API_URL || BASE_URL;
-     this.llmKeys = options.llmKeys || {};
-     this.modelSelectUrl = `${this.apiUrl}/v2/modelRouter/modelSelect`;
-     this.feedbackUrl = `${this.apiUrl}/v2/report/metrics/feedback`;
-     this.createUrl = `${this.apiUrl}/v2/preferences/userPreferenceCreate`;
-   }
-   getAuthHeader() {
-     return `Bearer ${this.apiKey}`;
-   }
-   async postRequest(url, body) {
-     try {
-       const response = await axios.post(url, body, {
-         headers: {
-           Authorization: this.getAuthHeader(),
-           Accept: "application/json",
-           "Content-Type": "application/json",
-           "User-Agent": `TS-SDK/${SDK_VERSION}`
-         }
-       });
-       return response.data;
-     } catch (error) {
-       if (axios.isAxiosError(error) && error.response) {
-         return { detail: "An error occurred." };
-       }
-       console.error("error", error);
-       return { detail: "An unexpected error occurred." };
-     }
-   }
-   /**
-    * Selects the best model for the given messages.
-    * @param options The options for the model.
-    * @returns The results of the model.
-    */
-   async modelSelect(options) {
-     const requestBody = {
-       messages: options.messages,
-       llm_providers: options.llmProviders.map((provider) => ({
-         provider: provider.provider,
-         model: provider.model,
-         ...provider.contextLength !== void 0 && {
-           context_length: provider.contextLength
-         },
-         ...provider.inputPrice !== void 0 && {
-           input_price: provider.inputPrice
-         },
-         ...provider.outputPrice !== void 0 && {
-           output_price: provider.outputPrice
-         },
-         ...provider.latency !== void 0 && { latency: provider.latency },
-         ...provider.isCustom !== void 0 && {
-           is_custom: provider.isCustom
-         }
-       })),
-       ...options.tradeoff && {
-         tradeoff: options.tradeoff
-       },
-       ...options.maxModelDepth && {
-         max_model_depth: options.maxModelDepth
-       },
-       ...options.tools && { tools: options.tools },
-       ...options.hashContent !== void 0 && {
-         hash_content: options.hashContent
-       },
-       ...options.preferenceId && { preference_id: options.preferenceId },
-       ...options.timeout ? { timeout: options.timeout } : {
-         timeout: DEFAULT_TIMEOUT
-       },
-       ...options.default && { default: options.default },
-       ...options.previousSession && {
-         previous_session: options.previousSession
-       },
-       ...options.responseModel && {
-         response_model: options.responseModel
-       }
-     };
-     return this.postRequest(
-       this.modelSelectUrl,
-       requestBody
-     );
-   }
-   /**
-    * Sends feedback to the NotDiamond API.
-    * @param options The options for the feedback.
-    * @returns The results of the feedback.
-    */
-   async feedback(options) {
-     return this.postRequest(this.feedbackUrl, {
-       session_id: options.sessionId,
-       feedback: options.feedback,
-       provider: options.provider
-     });
-   }
-   /**
-    * Creates a preference id.
-    * @returns The preference id.
-    */
-   async createPreferenceId() {
-     const response = await this.postRequest(
-       this.createUrl,
-       {}
-     );
-     if ("preference_id" in response) {
-       return response.preference_id;
-     }
-     throw new Error("Invalid response: preference_id not found");
-   }
-   /**
-    *
-    * @param options The options for the model.
-    * @returns A promise that resolves to the results of the model.
-    */
-   async acreate(options) {
-     const selectedModel = await this.modelSelect(options);
-     const { providers } = selectedModel;
-     const content = await callLLM(providers[0], options, this.llmKeys);
-     return { content, providers };
-   }
-   /**
-    *
-    * @param options The options for the model.
-    * @param callback Optional callback function to handle the result.
-    * @returns A promise that resolves to the results of the model or a callback function
-    */
-   create(options, callback) {
-     const promise = this.acreate(options);
-     if (callback) {
-       promise.then((result) => callback(null, result)).catch((error) => callback(error));
-     } else {
-       return promise;
-     }
-   }
-   /**
-    * Streams the results of the model asynchronously.
-    * @param options The options for the model.
-    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
-    */
-   async astream(options) {
-     const selectedModel = await this.modelSelect(options);
-     const { providers } = selectedModel;
-     console.log("providers received from modelSelect", providers);
-     const stream = await Promise.resolve(
-       callLLMStream(providers?.[0] || {
-         provider: "openai",
-         model: "gpt-3.5-turbo"
-       }, options, this.llmKeys)
-     );
-     return { provider: providers?.[0] || {
-       provider: "openai",
-       model: "gpt-3.5-turbo"
-     }, stream };
-   }
-   /**
-    * Streams the results of the model.
-    * @param options The options for the model.
-    * @param callback Optional callback function to handle each chunk of the stream.
-    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
-    */
-   stream(options, callback) {
-     if (!options.llmProviders || options.llmProviders.length === 0) {
-       throw new Error("No LLM providers specified");
-     }
-     console.log("options received from stream", options);
-     const promise = this.astream(options);
-     if (callback) {
-       promise.then(async ({ provider, stream }) => {
-         for await (const chunk of stream) {
-           callback(null, { provider, chunk });
-         }
-       }).catch((error) => callback(error));
-     } else {
-       return promise;
-     }
-   }
- }
-
- export { NotDiamond, SupportedModel, SupportedProvider };
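
For context, the bundle deleted above exported NotDiamond, SupportedModel, and SupportedProvider. The following TypeScript snippet is a minimal usage sketch reconstructed from that deleted source, not an official example: it assumes a 0.3.x install with NOTDIAMOND_API_KEY and OPENAI_API_KEY set in the environment, and the model choices are illustrative only.

import { NotDiamond, SupportedModel, SupportedProvider } from 'notdiamond';

// The constructor falls back to NOTDIAMOND_API_KEY when no apiKey option is
// passed; llmKeys supplies per-provider keys used when calling the chosen LLM.
const notDiamond = new NotDiamond({
  llmKeys: { openai: process.env.OPENAI_API_KEY ?? '' },
});

async function main(): Promise<void> {
  // create() first calls modelSelect() to pick a provider from llmProviders,
  // then invokes that model and resolves with { content, providers }.
  const result = await notDiamond.create({
    messages: [{ role: 'user', content: 'Summarize this SDK in one sentence.' }],
    llmProviders: [
      { provider: SupportedProvider.OPENAI, model: SupportedModel.GPT_4O },
      { provider: SupportedProvider.OPENAI, model: SupportedModel.GPT_4O_MINI },
    ],
  });
  // create() only returns a promise when no callback is passed, so guard the result.
  if (result) {
    console.log(result.providers[0], result.content);
  }
}

main().catch(console.error);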