notdiamond 0.3.11 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs ADDED
@@ -0,0 +1,684 @@
1
+ import * as dotenv from 'dotenv';
2
+ import { ChatOpenAI } from '@langchain/openai';
3
+ import { AIMessage, SystemMessage, HumanMessage } from '@langchain/core/messages';
4
+ import { ChatAnthropic } from '@langchain/anthropic';
5
+ import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
6
+ import { ChatMistralAI } from '@langchain/mistralai';
7
+ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
8
+ import axios from 'axios';
9
+ import { ChatCohere } from '@langchain/cohere';
10
+ import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
11
+
12
/**
 * package.json metadata inlined into the bundle at build time.
 * Only `packageJson` is consumed elsewhere in this chunk (the SDK reads
 * `packageJson.version` for its User-Agent header); every value is
 * reproduced verbatim from the package manifest.
 */
const packageJson = {
  name: "notdiamond",
  type: "module",
  version: "1.0.1",
  author: "not-diamond",
  license: "MIT",
  description: "TS/JS client for the NotDiamond API",
  main: "./dist/index.cjs",
  exports: {
    ".": {
      import: "./dist/index.mjs",
      require: "./dist/index.cjs",
    },
  },
  types: "./dist/index.d.ts",
  repository: {
    type: "git",
    url: "https://github.com/Not-Diamond/notdiamond-node.git",
  },
  bugs: {
    url: "https://github.com/Not-Diamond/notdiamond-node/issues",
  },
  homepage: "https://github.com/Not-Diamond/notdiamond-node#readme",
  files: ["dist"],
  keywords: [
    "ai",
    "not-diamond",
    "typescript",
    "openai",
    "chatgpt",
    "anthropic",
    "claude",
    "gemini",
    "model router",
  ],
  scripts: {
    prepare: "husky install",
    "start:cjs": "node ./dist/index.cjs",
    "start:esm": "node ./dist/index.mjs",
    dev: "nodemon",
    develop: "node --no-warnings=ExperimentalWarning --loader ts-node/esm ./src/index.ts",
    test: "jest --passWithNoTests",
    "test:watch": "jest --watch --passWithNoTests",
    "test:coverage": "jest --coverage --passWithNoTests",
    clean: "rimraf build && rimraf dist",
    build: "swc ./src -d build",
    "build:watch": "swc ./src -d build -w",
    lint: "eslint ./src --ext .ts",
    "lint:fix": "eslint ./src --ext .ts --fix",
    prettier: "prettier '**/*.{ts,json,md}'",
    "prettier:write": "prettier --write '**/*.{ts,json,md}'",
    "type-check": "tsc --noEmit",
    "lint-staged": "lint-staged",
    release: "semantic-release",
    bundle: "unbuild",
  },
  dependencies: {
    "@langchain/anthropic": "^0.3.1",
    "@langchain/cohere": "^0.3.0",
    "@langchain/community": "^0.3.3",
    "@langchain/core": "^0.3.3",
    "@langchain/google-genai": "^0.1.0",
    "@langchain/mistralai": "^0.1.1",
    "@langchain/openai": "^0.3.0",
    "ansi-styles": "^6.2.1",
    axios: "^1.7.7",
    camelcase: "^8.0.0",
    decamelize: "^6.0.0",
    dotenv: "^16.4.5",
    eventemitter3: "^5.0.1",
    langchain: "^0.3.2",
    langsmith: "^0.1.60",
    "p-finally": "^2.0.1",
    "p-queue": "^8.0.1",
    "p-retry": "^6.2.0",
    "p-timeout": "^6.1.2",
    retry: "^0.13.1",
    semver: "^7.6.3",
    uuid: "^10.0.0",
    zod: "^3.23.8",
    "zod-to-json-schema": "^3.23.3",
  },
  devDependencies: {
    "@semantic-release/changelog": "^6.0.3",
    "@semantic-release/commit-analyzer": "^12.0.0",
    "@semantic-release/git": "^10.0.1",
    "@semantic-release/github": "^10.0.0",
    "@semantic-release/npm": "^12.0.0",
    "@semantic-release/release-notes-generator": "^13.0.0",
    "@swc/cli": "0.3.12",
    "@swc/core": "1.5.7",
    "@swc/jest": "0.2.36",
    "@types/jest": "29.5.12",
    "@types/node": "20.12.12",
    "@typescript-eslint/eslint-plugin": "7.9.0",
    "@typescript-eslint/parser": "7.9.0",
    eslint: "8.57.0",
    "eslint-config-prettier": "9.1.0",
    "eslint-plugin-jest": "27.9.0",
    "eslint-plugin-prettier": "5.1.3",
    husky: "^9.0.0",
    jest: "29.7.0",
    "lint-staged": "^15.0.0",
    nodemon: "3.1.0",
    prettier: "3.2.5",
    "regenerator-runtime": "^0.14.0",
    rimraf: "5.0.7",
    "semantic-release": "^23.0.0",
    "ts-node": "^10.9.1",
    typescript: "5.4.5",
    unbuild: "^2.0.0",
  },
  resolutions: {
    "wrap-ansi": "7.0.0",
  },
  engines: {
    node: ">=20",
    npm: ">=8",
  },
};
153
+
154
// Bundler-emitted helper emulating a class-field declaration: installs an
// own enumerable/configurable/writable property on `obj`. Non-symbol keys
// are coerced to strings, matching class-field key semantics. When the key
// is already present (own or inherited) Object.defineProperty is used;
// otherwise a plain assignment suffices. Returns the assigned value.
var __publicField$1 = (obj, key, value) => {
  const prop = typeof key !== "symbol" ? key + "" : key;
  if (prop in obj) {
    Object.defineProperty(obj, prop, {
      enumerable: true,
      configurable: true,
      writable: true,
      value,
    });
  } else {
    obj[prop] = value;
  }
  return value;
};
160
/**
 * Minimal LangChain-compatible chat model for the Perplexity HTTP API.
 * Only `invoke` is supported; the BaseChatModel batch path (`_generate`)
 * is intentionally unimplemented in this wrapper.
 */
class ChatPerplexity extends BaseChatModel {
  /**
   * @param apiKey Perplexity API key, sent as a Bearer token.
   * @param model  Perplexity model identifier to query.
   */
  constructor({ apiKey, model }) {
    super({});
    // Bundler helpers emulate class-field declarations.
    __publicField$1(this, "apiKey");
    __publicField$1(this, "model");
    this.apiKey = apiKey;
    this.model = model;
  }
  // Required abstract member of BaseChatModel; unsupported here — the error
  // message embeds the serialized arguments for diagnosis.
  _generate(messages, options, runManager) {
    throw new Error(
      "Method not implemented." + JSON.stringify(messages) + JSON.stringify(options) + JSON.stringify(runManager)
    );
  }
  /** LangChain model-type tag for this wrapper. */
  _llmType() {
    return "perplexity";
  }
  /**
   * Invokes the Perplexity model.
   * Maps LangChain messages onto the OpenAI-style wire format ("human"
   * becomes "user"; other role tags pass through) and returns the first
   * choice's content as an AIMessage.
   * @param messages The messages to send to the model.
   * @returns The results of the model.
   * @throws Error carrying the HTTP status text when the API responds with
   *         an error; non-HTTP failures are rethrown unchanged.
   */
  async invoke(messages) {
    try {
      const { data } = await axios.post(
        "https://api.perplexity.ai/chat/completions",
        {
          model: this.model,
          messages: messages.map((m) => ({
            role: m._getType() === "human" ? "user" : m._getType(),
            content: m.content
          }))
        },
        {
          headers: {
            Authorization: `Bearer ${this.apiKey}`
          }
        }
      );
      return new AIMessage(data.choices[0].message.content);
    } catch (error) {
      if (axios.isAxiosError(error) && error.response) {
        throw new Error(`Perplexity API error: ${error.response.statusText}`);
      }
      throw error;
    }
  }
}
207
+
208
/** String identifiers for every LLM provider the router can select. */
const SupportedProvider = {
  OPENAI: "openai",
  ANTHROPIC: "anthropic",
  GOOGLE: "google",
  MISTRAL: "mistral",
  PERPLEXITY: "perplexity",
  COHERE: "cohere",
  TOGETHER: "together"
};
/**
 * Model identifiers accepted by the NotDiamond API. Values are the
 * provider-facing model strings; constants are grouped by provider family
 * below for readability (the object itself is flat).
 */
const SupportedModel = {
  // --- OpenAI ---
  GPT_3_5_TURBO: "gpt-3.5-turbo",
  GPT_3_5_TURBO_0125: "gpt-3.5-turbo-0125",
  GPT_4: "gpt-4",
  GPT_4_0613: "gpt-4-0613",
  GPT_4_1106_PREVIEW: "gpt-4-1106-preview",
  GPT_4_TURBO: "gpt-4-turbo",
  GPT_4_TURBO_PREVIEW: "gpt-4-turbo-preview",
  GPT_4_TURBO_2024_04_09: "gpt-4-turbo-2024-04-09",
  GPT_4O_2024_05_13: "gpt-4o-2024-05-13",
  GPT_4O_2024_08_06: "gpt-4o-2024-08-06",
  GPT_4O: "gpt-4o",
  GPT_4O_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
  GPT_4O_MINI: "gpt-4o-mini",
  GPT_4_0125_PREVIEW: "gpt-4-0125-preview",
  O1_PREVIEW: "o1-preview",
  O1_PREVIEW_2024_09_12: "o1-preview-2024-09-12",
  O1_MINI: "o1-mini",
  O1_MINI_2024_09_12: "o1-mini-2024-09-12",
  // --- Anthropic ---
  CLAUDE_2_1: "claude-2.1",
  CLAUDE_3_OPUS_20240229: "claude-3-opus-20240229",
  CLAUDE_3_SONNET_20240229: "claude-3-sonnet-20240229",
  CLAUDE_3_5_SONNET_20240620: "claude-3-5-sonnet-20240620",
  CLAUDE_3_HAIKU_20240307: "claude-3-haiku-20240307",
  // --- Google ---
  GEMINI_PRO: "gemini-pro",
  GEMINI_1_PRO_LATEST: "gemini-1.0-pro-latest",
  GEMINI_15_PRO_LATEST: "gemini-1.5-pro-latest",
  GEMINI_15_PRO_EXP_0801: "gemini-1.5-pro-exp-0801",
  GEMINI_15_FLASH_LATEST: "gemini-1.5-flash-latest",
  // --- Cohere ---
  COMMAND_R: "command-r",
  COMMAND_R_PLUS: "command-r-plus",
  // --- Mistral ---
  MISTRAL_LARGE_LATEST: "mistral-large-latest",
  MISTRAL_LARGE_2407: "mistral-large-2407",
  MISTRAL_LARGE_2402: "mistral-large-2402",
  MISTRAL_MEDIUM_LATEST: "mistral-medium-latest",
  MISTRAL_SMALL_LATEST: "mistral-small-latest",
  CODESTRAL_LATEST: "codestral-latest",
  OPEN_MISTRAL_7B: "open-mistral-7b",
  OPEN_MIXTRAL_8X7B: "open-mixtral-8x7b",
  OPEN_MIXTRAL_8X22B: "open-mixtral-8x22b",
  // --- Together (note the capitalized, HF-style model strings) ---
  MISTRAL_7B_INSTRUCT_V0_2: "Mistral-7B-Instruct-v0.2",
  MIXTRAL_8X7B_INSTRUCT_V0_1: "Mixtral-8x7B-Instruct-v0.1",
  MIXTRAL_8X22B_INSTRUCT_V0_1: "Mixtral-8x22B-Instruct-v0.1",
  LLAMA_3_70B_CHAT_HF: "Llama-3-70b-chat-hf",
  LLAMA_3_8B_CHAT_HF: "Llama-3-8b-chat-hf",
  QWEN2_72B_INSTRUCT: "Qwen2-72B-Instruct",
  LLAMA_3_1_8B_INSTRUCT_TURBO: "Meta-Llama-3.1-8B-Instruct-Turbo",
  LLAMA_3_1_70B_INSTRUCT_TURBO: "Meta-Llama-3.1-70B-Instruct-Turbo",
  LLAMA_3_1_405B_INSTRUCT_TURBO: "Meta-Llama-3.1-405B-Instruct-Turbo",
  // --- Perplexity ---
  LLAMA_3_1_SONAR_LARGE_128K_ONLINE: "llama-3.1-sonar-large-128k-online",
  // --- Mistral (appended after the Perplexity entry in the original) ---
  OPEN_MISTRAL_NEMO: "open-mistral-nemo"
};
269
// NOTE(review): dead code left behind by bundling/tree-shaking. In the
// original source this provider→models map was presumably bound to a name;
// the binding was dropped, leaving a bare object-literal expression
// statement whose value is discarded. Evaluating it only reads properties
// of the plain SupportedProvider/SupportedModel objects, so it has no
// observable effect. Kept verbatim; safe to delete in a future release.
({
  [SupportedProvider.OPENAI]: [
    SupportedModel.GPT_3_5_TURBO,
    SupportedModel.GPT_3_5_TURBO_0125,
    SupportedModel.GPT_4,
    SupportedModel.GPT_4_0613,
    SupportedModel.GPT_4_1106_PREVIEW,
    SupportedModel.GPT_4_TURBO,
    SupportedModel.GPT_4_TURBO_PREVIEW,
    SupportedModel.GPT_4_TURBO_2024_04_09,
    SupportedModel.GPT_4O_2024_05_13,
    SupportedModel.GPT_4O_2024_08_06,
    SupportedModel.GPT_4O,
    SupportedModel.GPT_4O_MINI_2024_07_18,
    SupportedModel.GPT_4O_MINI,
    SupportedModel.GPT_4_0125_PREVIEW,
    SupportedModel.O1_PREVIEW,
    SupportedModel.O1_PREVIEW_2024_09_12,
    SupportedModel.O1_MINI,
    SupportedModel.O1_MINI_2024_09_12
  ],
  [SupportedProvider.ANTHROPIC]: [
    SupportedModel.CLAUDE_2_1,
    SupportedModel.CLAUDE_3_OPUS_20240229,
    SupportedModel.CLAUDE_3_SONNET_20240229,
    SupportedModel.CLAUDE_3_5_SONNET_20240620,
    SupportedModel.CLAUDE_3_HAIKU_20240307
  ],
  [SupportedProvider.GOOGLE]: [
    SupportedModel.GEMINI_PRO,
    SupportedModel.GEMINI_1_PRO_LATEST,
    SupportedModel.GEMINI_15_PRO_LATEST,
    SupportedModel.GEMINI_15_PRO_EXP_0801,
    SupportedModel.GEMINI_15_FLASH_LATEST
  ],
  [SupportedProvider.MISTRAL]: [
    SupportedModel.MISTRAL_LARGE_LATEST,
    SupportedModel.MISTRAL_LARGE_2407,
    SupportedModel.MISTRAL_LARGE_2402,
    SupportedModel.MISTRAL_MEDIUM_LATEST,
    SupportedModel.MISTRAL_SMALL_LATEST,
    SupportedModel.CODESTRAL_LATEST,
    SupportedModel.OPEN_MISTRAL_7B,
    SupportedModel.OPEN_MIXTRAL_8X7B,
    SupportedModel.OPEN_MIXTRAL_8X22B,
    SupportedModel.OPEN_MISTRAL_NEMO
  ],
  [SupportedProvider.PERPLEXITY]: [
    SupportedModel.LLAMA_3_1_SONAR_LARGE_128K_ONLINE
  ],
  [SupportedProvider.COHERE]: [
    SupportedModel.COMMAND_R,
    SupportedModel.COMMAND_R_PLUS
  ],
  [SupportedProvider.TOGETHER]: [
    SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
    SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
    SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
    SupportedModel.LLAMA_3_70B_CHAT_HF,
    SupportedModel.LLAMA_3_8B_CHAT_HF,
    SupportedModel.QWEN2_72B_INSTRUCT,
    SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO,
    SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO,
    SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO
  ]
});
335
+
336
/**
 * Builds the LangChain chat-model instance for a router-selected provider.
 *
 * Fixes two defects in the previous version:
 * 1. The Cohere and Together branches gave `process.env` precedence over
 *    caller-supplied `llmKeys`, inverted relative to every other provider;
 *    explicit keys now win consistently.
 * 2. Each branch duplicated its constructor call just to apply
 *    `withStructuredOutput`; the wrapper is now applied once at the end.
 *
 * @param provider      `{ provider, model }` pair chosen by model routing.
 * @param llmKeys       Per-provider API keys supplied by the caller; each
 *                      falls back to its conventional environment variable.
 * @param responseModel Optional structured-output schema; when present the
 *                      returned runnable is wrapped with
 *                      `withStructuredOutput(responseModel)`.
 * @returns A LangChain chat model, or a structured-output runnable.
 * @throws Error when `provider.provider` is not a SupportedProvider value.
 */
function getLangChainModel(provider, llmKeys, responseModel) {
  const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHER } = SupportedProvider;
  let model;
  switch (provider.provider) {
    case OPENAI:
      model = new ChatOpenAI({
        modelName: provider.model,
        apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
      });
      break;
    case ANTHROPIC:
      model = new ChatAnthropic({
        modelName: provider.model,
        anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
      });
      break;
    case GOOGLE:
      model = new ChatGoogleGenerativeAI({
        modelName: provider.model,
        apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
      });
      break;
    case MISTRAL:
      model = new ChatMistralAI({
        modelName: provider.model,
        apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
      });
      break;
    case PERPLEXITY:
      model = new ChatPerplexity({
        apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
        model: provider.model
      });
      break;
    case COHERE:
      // Caller-supplied key now takes precedence over the environment,
      // matching the other provider branches.
      model = new ChatCohere({
        apiKey: llmKeys.cohere || process.env.COHERE_API_KEY,
        model: provider.model
      });
      break;
    case TOGETHER:
      // Same precedence fix as the Cohere branch.
      model = new ChatTogetherAI({
        apiKey: llmKeys.together || process.env.TOGETHER_API_KEY,
        model: provider.model
      });
      break;
    default:
      throw new Error(`Unsupported provider: ${provider.provider}`);
  }
  return responseModel ? model.withStructuredOutput(responseModel) : model;
}
420
/**
 * Resolves the routed provider to a concrete LangChain model, sends the
 * conversation (with any configured provider system prompt prepended), and
 * returns the response normalized to plain text.
 */
async function callLLM(provider, options, llmKeys, runtimeArgs) {
  const model = getLangChainModel(provider, llmKeys, options.responseModel);
  const baseMessages = options.messages.map(convertToLangChainMessage);
  const preparedMessages = extendProviderSystemPrompt(baseMessages, options, provider);
  const response = await model.invoke(preparedMessages, runtimeArgs);
  return extractContent(response);
}
430
/**
 * Prepends the provider-specific system prompt, if one is configured on the
 * matching entry of `options.llmProviders`, to the message list.
 *
 * Fix: the previous version mutated the caller's `messages` array in place
 * via `unshift`. All call sites consume the return value, so a fresh array
 * is now returned when a prompt is added; the input is never modified.
 *
 * @param messages LangChain messages for the conversation.
 * @param options  Request options carrying `llmProviders` (each optionally
 *                 with a `systemPrompt`).
 * @param provider The `{ provider, model }` pair that was selected.
 * @returns The message list, with a SystemMessage prepended when configured;
 *          otherwise the original array unchanged.
 */
function extendProviderSystemPrompt(messages, options, provider) {
  const matchingProvider = options.llmProviders.find(
    (p) => p.provider === provider.provider && p.model === provider.model
  );
  if (matchingProvider?.systemPrompt) {
    return [new SystemMessage(matchingProvider.systemPrompt), ...messages];
  }
  return messages;
}
439
/**
 * Maps a NotDiamond `{ role, content }` message onto the matching LangChain
 * message class. Unrecognized roles are treated as user input, exactly like
 * the "user" role itself.
 */
function convertToLangChainMessage(msg) {
  if (msg.role === "assistant") {
    return new AIMessage(msg.content);
  }
  if (msg.role === "system") {
    return new SystemMessage(msg.content);
  }
  // "user" and any unknown role both become a human message.
  return new HumanMessage(msg.content);
}
451
/**
 * Streaming counterpart of callLLM: resolves the routed provider to a
 * LangChain model and yields each stream chunk normalized to plain text.
 */
async function* callLLMStream(provider, options, llmKeys, runtimeArgs) {
  const model = getLangChainModel(provider, llmKeys, options.responseModel);
  const preparedMessages = extendProviderSystemPrompt(
    options.messages.map(convertToLangChainMessage),
    options,
    provider
  );
  const chunkStream = await model.stream(preparedMessages, runtimeArgs);
  for await (const chunk of chunkStream) {
    yield extractContent(chunk);
  }
}
463
/**
 * Normalizes a model response to a string: message-like objects yield their
 * `content` (JSON-serialized when not already a string); anything else is
 * returned as-is when a string, or JSON-serialized otherwise.
 *
 * Fix: the previous version applied `"content" in response` unconditionally,
 * which throws a TypeError when `response` is a primitive (e.g. a bare
 * string) — defeating the explicit string fallback on the final line. The
 * `in` check is now guarded by an object check.
 */
function extractContent(response) {
  if (response !== null && typeof response === "object" && "content" in response) {
    return typeof response.content === "string" ? response.content : JSON.stringify(response.content);
  }
  return typeof response === "string" ? response : JSON.stringify(response);
}
469
+
470
// Bundler helper (per-chunk duplicate of __publicField$1): installs an own
// enumerable/configurable/writable field on `obj`, emulating class-field
// declaration semantics. Non-symbol keys are string-coerced; an existing
// key (own or inherited) goes through Object.defineProperty, a new key
// through plain assignment. Returns the assigned value.
var __publicField = (obj, key, value) => {
  const prop = typeof key !== "symbol" ? key + "" : key;
  if (prop in obj) {
    Object.defineProperty(obj, prop, {
      enumerable: true,
      configurable: true,
      writable: true,
      value,
    });
  } else {
    obj[prop] = value;
  }
  return value;
};
476
// SDK version string sent in the User-Agent header of every API request.
const SDK_VERSION = packageJson.version;
// Load environment variables (NOTDIAMOND_API_KEY, NOTDIAMOND_API_URL,
// provider keys, ...) at module-import time.
dotenv.config();
// Default `timeout` sent in modelSelect requests when the caller supplies
// none — presumably seconds; TODO confirm units against the API docs.
const DEFAULT_TIMEOUT = 5;
// Public NotDiamond API endpoint, overridable via options/env in NotDiamond.
const BASE_URL = "https://api.notdiamond.ai";
480
/**
 * Client for the NotDiamond model-routing API.
 *
 * Wraps the authenticated HTTP endpoints (model selection, feedback,
 * preference-id creation) and, given per-provider LLM keys, can also invoke
 * the selected model directly through LangChain via create/acreate and
 * stream/astream.
 */
class NotDiamond {
  /**
   * @param options.apiKey  NotDiamond API key; falls back to NOTDIAMOND_API_KEY.
   * @param options.apiUrl  Base URL override; falls back to NOTDIAMOND_API_URL,
   *                        then the public endpoint.
   * @param options.llmKeys Per-provider LLM API keys used when this client
   *                        invokes models itself.
   */
  constructor(options = {}) {
    // Bundler helpers emulate class-field declarations.
    __publicField(this, "apiKey");
    __publicField(this, "apiUrl");
    __publicField(this, "modelSelectUrl");
    __publicField(this, "feedbackUrl");
    __publicField(this, "createUrl");
    __publicField(this, "llmKeys");
    this.apiKey = options.apiKey || process.env.NOTDIAMOND_API_KEY || "";
    this.apiUrl = options.apiUrl || process.env.NOTDIAMOND_API_URL || BASE_URL;
    this.llmKeys = options.llmKeys || {};
    this.modelSelectUrl = `${this.apiUrl}/v2/modelRouter/modelSelect`;
    this.feedbackUrl = `${this.apiUrl}/v2/report/metrics/feedback`;
    this.createUrl = `${this.apiUrl}/v2/preferences/userPreferenceCreate`;
  }
  /** Bearer-token Authorization header value for API requests. */
  getAuthHeader() {
    return `Bearer ${this.apiKey}`;
  }
  /**
   * POSTs `body` as JSON to `url` with auth + SDK identification headers.
   * Never rejects: failures resolve to a generic `{ detail }` object.
   * NOTE(review): the HTTP status and response body are discarded on error —
   * consider surfacing them so callers can distinguish failure modes.
   */
  async postRequest(url, body) {
    try {
      const response = await axios.post(url, body, {
        headers: {
          Authorization: this.getAuthHeader(),
          Accept: "application/json",
          "Content-Type": "application/json",
          "User-Agent": `TS-SDK/${SDK_VERSION}`
        }
      });
      return response.data;
    } catch (error) {
      if (axios.isAxiosError(error) && error.response) {
        return { detail: "An error occurred." };
      }
      console.error("error", error);
      return { detail: "An unexpected error occurred." };
    }
  }
  /**
   * Selects the best model for the given messages.
   * Translates the camelCase client options into the snake_case body
   * expected by /v2/modelRouter/modelSelect; optional fields are only
   * included when set.
   * @param options The options for the model.
   * @returns The results of the model.
   */
  async modelSelect(options) {
    const requestBody = {
      messages: options.messages,
      llm_providers: options.llmProviders.map((provider) => ({
        provider: provider.provider,
        model: provider.model,
        ...provider.contextLength !== void 0 && {
          context_length: provider.contextLength
        },
        // NOTE(review): when both a custom* field and its plain variant are
        // set, the plain variant wins — its spread comes later in this
        // literal and overwrites the same snake_case key. Confirm intended.
        ...provider.customInputPrice !== void 0 && {
          input_price: provider.customInputPrice
        },
        ...provider.inputPrice !== void 0 && {
          input_price: provider.inputPrice
        },
        ...provider.customOutputPrice !== void 0 && {
          output_price: provider.customOutputPrice
        },
        ...provider.outputPrice !== void 0 && {
          output_price: provider.outputPrice
        },
        ...provider.customLatency !== void 0 && {
          latency: provider.customLatency
        },
        ...provider.latency !== void 0 && { latency: provider.latency },
        ...provider.isCustom !== void 0 && {
          is_custom: provider.isCustom
        }
      })),
      ...options.tradeoff && {
        tradeoff: options.tradeoff
      },
      ...options.maxModelDepth && {
        max_model_depth: options.maxModelDepth
      },
      ...options.tools && { tools: options.tools },
      ...options.hashContent !== void 0 && {
        hash_content: options.hashContent
      },
      ...options.preferenceId && { preference_id: options.preferenceId },
      // `timeout` is always present: the caller's truthy value, otherwise
      // DEFAULT_TIMEOUT (note: an explicit 0 also falls back to the default).
      ...options.timeout ? { timeout: options.timeout } : {
        timeout: DEFAULT_TIMEOUT
      },
      ...options.default && { default: options.default },
      ...options.previousSession && {
        previous_session: options.previousSession
      },
      ...options.responseModel && {
        response_model: options.responseModel
      }
    };
    return this.postRequest(
      this.modelSelectUrl,
      requestBody
    );
  }
  /**
   * Sends feedback to the NotDiamond API.
   * @param options The options for the feedback.
   * @returns The results of the feedback.
   */
  async feedback(options) {
    return this.postRequest(this.feedbackUrl, {
      session_id: options.sessionId,
      feedback: options.feedback,
      provider: options.provider
    });
  }
  /**
   * Creates a preference id.
   * @returns The preference id.
   * @throws Error when the response has no `preference_id` key — including
   *         the generic `{ detail }` error objects produced by postRequest.
   */
  async createPreferenceId() {
    const response = await this.postRequest(
      this.createUrl,
      {}
    );
    if ("preference_id" in response) {
      return response.preference_id;
    }
    throw new Error("Invalid response: preference_id not found");
  }
  /**
   * Routes the request, then invokes the top-ranked provider's model.
   * @param options The options for the model.
   * @returns A promise that resolves to the results of the model.
   */
  async acreate(options, runtimeArgs = {}) {
    const selectedModel = await this.modelSelect(options);
    // NOTE(review): assumes routing succeeded — an error response carries no
    // `providers`, so `providers[0]` below would throw; astream handles the
    // same case with a gpt-3.5-turbo fallback. Confirm intended asymmetry.
    const { providers } = selectedModel;
    const content = await callLLM(
      providers[0],
      options,
      this.llmKeys,
      runtimeArgs
    );
    return { content, providers };
  }
  /**
   * Callback-or-promise wrapper around acreate.
   * @param options The options for the model.
   * @param callback Optional callback function to handle the result.
   * @returns A promise that resolves to the results of the model or a callback function
   */
  create(options, runtimeArgs = {}, callback) {
    const promise = this.acreate(options, runtimeArgs);
    if (callback) {
      promise.then((result) => callback(null, result)).catch((error) => callback(error));
    } else {
      return promise;
    }
  }
  /**
   * Streams the results of the model asynchronously.
   * @param options The options for the model.
   * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
   */
  async astream(options, runtimeArgs = {}) {
    const selectedModel = await this.modelSelect(options);
    const { providers } = selectedModel;
    // NOTE(review): Promise.resolve is a no-op here — the async-generator
    // call returns its iterable synchronously. Falls back to OpenAI
    // gpt-3.5-turbo when the routing response has no providers.
    const stream = await Promise.resolve(
      callLLMStream(
        providers?.[0] || {
          provider: "openai",
          model: "gpt-3.5-turbo"
        },
        options,
        this.llmKeys,
        runtimeArgs
      )
    );
    return {
      provider: providers?.[0] || {
        provider: "openai",
        model: "gpt-3.5-turbo"
      },
      stream
    };
  }
  /**
   * Streams the results of the model.
   * @param options The options for the model.
   * @param callback Optional callback function to handle each chunk of the stream.
   * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
   * @throws Error synchronously when `options.llmProviders` is missing or empty.
   */
  stream(options, runtimeArgs = {}, callback) {
    if (!options.llmProviders || options.llmProviders.length === 0) {
      throw new Error("No LLM providers specified");
    }
    const promise = this.astream(options, runtimeArgs);
    if (callback) {
      // Drains the stream, invoking the callback once per chunk.
      promise.then(async ({ provider, stream }) => {
        for await (const chunk of stream) {
          callback(null, { provider, chunk });
        }
      }).catch((error) => callback(error));
    } else {
      return promise;
    }
  }
}
683
+
684
+ export { NotDiamond, SupportedModel, SupportedProvider };
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "notdiamond",
3
3
  "type": "module",
4
- "version": "0.3.11",
4
+ "version": "1.0.1",
5
5
  "author": "not-diamond",
6
6
  "license": "MIT",
7
7
  "description": "TS/JS client for the NotDiamond API",