notdiamond 1.1.2 → 1.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs DELETED
@@ -1,562 +0,0 @@
- import * as dotenv from 'dotenv';
- import { ChatOpenAI } from '@langchain/openai';
- import { AIMessage, SystemMessage, HumanMessage } from '@langchain/core/messages';
- import { ChatAnthropic } from '@langchain/anthropic';
- import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
- import { ChatMistralAI } from '@langchain/mistralai';
- import { BaseChatModel } from '@langchain/core/language_models/chat_models';
- import axios from 'axios';
- import { ChatCohere } from '@langchain/cohere';
- import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
-
- const version = "1.1.1";
- const packageJson = {
-   version: version};
-
- class ChatPerplexity extends BaseChatModel {
-   _generate(messages, options, runManager) {
-     throw new Error(
-       "Method not implemented." + JSON.stringify(messages) + JSON.stringify(options) + JSON.stringify(runManager)
-     );
-   }
-   apiKey;
-   model;
-   constructor({ apiKey, model }) {
-     super({});
-     this.apiKey = apiKey;
-     this.model = model;
-   }
-   _llmType() {
-     return "perplexity";
-   }
-   /**
-    * Invokes the Perplexity model.
-    * @param messages The messages to send to the model.
-    * @returns The results of the model.
-    */
-   async invoke(messages) {
-     try {
-       const { data } = await axios.post(
-         "https://api.perplexity.ai/chat/completions",
-         {
-           model: this.model,
-           messages: messages.map((m) => ({
-             role: m._getType() === "human" ? "user" : m._getType(),
-             content: m.content
-           }))
-         },
-         {
-           headers: {
-             Authorization: `Bearer ${this.apiKey}`
-           }
-         }
-       );
-       return new AIMessage(data.choices[0].message.content);
-     } catch (error) {
-       if (axios.isAxiosError(error) && error.response) {
-         throw new Error(`Perplexity API error: ${error.response.statusText}`);
-       }
-       throw error;
-     }
-   }
- }
-
- const SupportedProvider = {
-   OPENAI: "openai",
-   ANTHROPIC: "anthropic",
-   GOOGLE: "google",
-   MISTRAL: "mistral",
-   PERPLEXITY: "perplexity",
-   COHERE: "cohere",
-   TOGETHERAI: "togetherai"
- };
- const SupportedModel = {
-   GPT_3_5_TURBO: "gpt-3.5-turbo",
-   GPT_3_5_TURBO_0125: "gpt-3.5-turbo-0125",
-   GPT_4: "gpt-4",
-   GPT_4_0613: "gpt-4-0613",
-   GPT_4_1106_PREVIEW: "gpt-4-1106-preview",
-   GPT_4_TURBO: "gpt-4-turbo",
-   GPT_4_TURBO_PREVIEW: "gpt-4-turbo-preview",
-   GPT_4_TURBO_2024_04_09: "gpt-4-turbo-2024-04-09",
-   GPT_4O_2024_05_13: "gpt-4o-2024-05-13",
-   GPT_4O_2024_08_06: "gpt-4o-2024-08-06",
-   GPT_4O: "gpt-4o",
-   GPT_4O_MINI_2024_07_18: "gpt-4o-mini-2024-07-18",
-   GPT_4O_MINI: "gpt-4o-mini",
-   GPT_4_0125_PREVIEW: "gpt-4-0125-preview",
-   GPT_4_5_PREVIEW: "gpt-4.5-preview",
-   GPT_4_5_PREVIEW_2025_02_27: "gpt-4.5-preview-2025-02-27",
-   CHATGPT_4O_LATEST: "chatgpt-4o-latest",
-   O1_PREVIEW: "o1-preview",
-   O1_PREVIEW_2024_09_12: "o1-preview-2024-09-12",
-   O1_MINI: "o1-mini",
-   O1_MINI_2024_09_12: "o1-mini-2024-09-12",
-   CLAUDE_2_1: "claude-2.1",
-   CLAUDE_3_OPUS_20240229: "claude-3-opus-20240229",
-   CLAUDE_3_SONNET_20240229: "claude-3-sonnet-20240229",
-   CLAUDE_3_5_SONNET_20240620: "claude-3-5-sonnet-20240620",
-   CLAUDE_3_5_SONNET_20241022: "claude-3-5-sonnet-20241022",
-   CLAUDE_3_5_SONNET_LATEST: "claude-3-5-sonnet-latest",
-   CLAUDE_3_HAIKU_20240307: "claude-3-haiku-20240307",
-   CLAUDE_3_5_HAIKU_20241022: "claude-3-5-haiku-20241022",
-   CLAUDE_3_7_SONNET_LATEST: "claude-3-7-sonnet-latest",
-   CLAUDE_3_7_SONNET_20250219: "claude-3-7-sonnet-20250219",
-   GEMINI_PRO: "gemini-pro",
-   GEMINI_1_PRO_LATEST: "gemini-1.0-pro-latest",
-   GEMINI_15_PRO_LATEST: "gemini-1.5-pro-latest",
-   GEMINI_15_PRO_EXP_0801: "gemini-1.5-pro-exp-0801",
-   GEMINI_15_FLASH_LATEST: "gemini-1.5-flash-latest",
-   GEMINI_2_0_FLASH: "gemini-2.0-flash",
-   GEMINI_2_0_FLASH_001: "gemini-2.0-flash-001",
-   COMMAND_R: "command-r",
-   COMMAND_R_PLUS: "command-r-plus",
-   MISTRAL_LARGE_LATEST: "mistral-large-latest",
-   MISTRAL_LARGE_2407: "mistral-large-2407",
-   MISTRAL_LARGE_2402: "mistral-large-2402",
-   MISTRAL_MEDIUM_LATEST: "mistral-medium-latest",
-   MISTRAL_SMALL_LATEST: "mistral-small-latest",
-   CODESTRAL_LATEST: "codestral-latest",
-   OPEN_MISTRAL_7B: "open-mistral-7b",
-   OPEN_MIXTRAL_8X7B: "open-mixtral-8x7b",
-   OPEN_MIXTRAL_8X22B: "open-mixtral-8x22b",
-   MISTRAL_7B_INSTRUCT_V0_2: "Mistral-7B-Instruct-v0.2",
-   MIXTRAL_8X7B_INSTRUCT_V0_1: "Mixtral-8x7B-Instruct-v0.1",
-   MIXTRAL_8X22B_INSTRUCT_V0_1: "Mixtral-8x22B-Instruct-v0.1",
-   LLAMA_3_70B_CHAT_HF: "Llama-3-70b-chat-hf",
-   LLAMA_3_8B_CHAT_HF: "Llama-3-8b-chat-hf",
-   QWEN2_72B_INSTRUCT: "Qwen2-72B-Instruct",
-   LLAMA_3_1_8B_INSTRUCT_TURBO: "Meta-Llama-3.1-8B-Instruct-Turbo",
-   LLAMA_3_1_70B_INSTRUCT_TURBO: "Meta-Llama-3.1-70B-Instruct-Turbo",
-   LLAMA_3_1_405B_INSTRUCT_TURBO: "Meta-Llama-3.1-405B-Instruct-Turbo",
-   PERPLEXITY_SONAR: "sonar",
-   OPEN_MISTRAL_NEMO: "open-mistral-nemo",
-   DEEPSEEK_R1: "DeepSeek-R1"
- };
- ({
-   [SupportedProvider.OPENAI]: [
-     SupportedModel.GPT_3_5_TURBO,
-     SupportedModel.GPT_3_5_TURBO_0125,
-     SupportedModel.GPT_4,
-     SupportedModel.GPT_4_0613,
-     SupportedModel.GPT_4_1106_PREVIEW,
-     SupportedModel.GPT_4_TURBO,
-     SupportedModel.GPT_4_TURBO_PREVIEW,
-     SupportedModel.GPT_4_TURBO_2024_04_09,
-     SupportedModel.GPT_4O_2024_05_13,
-     SupportedModel.GPT_4O_2024_08_06,
-     SupportedModel.GPT_4O,
-     SupportedModel.GPT_4O_MINI_2024_07_18,
-     SupportedModel.GPT_4O_MINI,
-     SupportedModel.GPT_4_0125_PREVIEW,
-     SupportedModel.O1_PREVIEW,
-     SupportedModel.O1_PREVIEW_2024_09_12,
-     SupportedModel.O1_MINI,
-     SupportedModel.O1_MINI_2024_09_12,
-     SupportedModel.CHATGPT_4O_LATEST,
-     SupportedModel.GPT_4_5_PREVIEW,
-     SupportedModel.GPT_4_5_PREVIEW_2025_02_27
-   ],
-   [SupportedProvider.ANTHROPIC]: [
-     SupportedModel.CLAUDE_2_1,
-     SupportedModel.CLAUDE_3_OPUS_20240229,
-     SupportedModel.CLAUDE_3_SONNET_20240229,
-     SupportedModel.CLAUDE_3_5_SONNET_20240620,
-     SupportedModel.CLAUDE_3_5_SONNET_20241022,
-     SupportedModel.CLAUDE_3_5_SONNET_LATEST,
-     SupportedModel.CLAUDE_3_HAIKU_20240307,
-     SupportedModel.CLAUDE_3_5_HAIKU_20241022,
-     SupportedModel.CLAUDE_3_7_SONNET_LATEST,
-     SupportedModel.CLAUDE_3_7_SONNET_20250219
-   ],
-   [SupportedProvider.GOOGLE]: [
-     SupportedModel.GEMINI_PRO,
-     SupportedModel.GEMINI_1_PRO_LATEST,
-     SupportedModel.GEMINI_15_PRO_LATEST,
-     SupportedModel.GEMINI_15_PRO_EXP_0801,
-     SupportedModel.GEMINI_15_FLASH_LATEST,
-     SupportedModel.GEMINI_2_0_FLASH,
-     SupportedModel.GEMINI_2_0_FLASH_001
-   ],
-   [SupportedProvider.MISTRAL]: [
-     SupportedModel.MISTRAL_LARGE_LATEST,
-     SupportedModel.MISTRAL_LARGE_2407,
-     SupportedModel.MISTRAL_LARGE_2402,
-     SupportedModel.MISTRAL_MEDIUM_LATEST,
-     SupportedModel.MISTRAL_SMALL_LATEST,
-     SupportedModel.CODESTRAL_LATEST,
-     SupportedModel.OPEN_MISTRAL_7B,
-     SupportedModel.OPEN_MIXTRAL_8X7B,
-     SupportedModel.OPEN_MIXTRAL_8X22B,
-     SupportedModel.OPEN_MISTRAL_NEMO
-   ],
-   [SupportedProvider.PERPLEXITY]: [
-     SupportedModel.PERPLEXITY_SONAR
-   ],
-   [SupportedProvider.COHERE]: [
-     SupportedModel.COMMAND_R,
-     SupportedModel.COMMAND_R_PLUS
-   ],
-   [SupportedProvider.TOGETHERAI]: [
-     SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
-     SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
-     SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
-     SupportedModel.LLAMA_3_70B_CHAT_HF,
-     SupportedModel.LLAMA_3_8B_CHAT_HF,
-     SupportedModel.QWEN2_72B_INSTRUCT,
-     SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO,
-     SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO,
-     SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO,
-     SupportedModel.DEEPSEEK_R1
-   ]
- });
-
- function getLangChainModel(provider, llmKeys, responseModel) {
-   const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHERAI } = SupportedProvider;
-   switch (provider.provider) {
-     case OPENAI:
-       if (responseModel) {
-         return new ChatOpenAI({
-           modelName: provider.model,
-           apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatOpenAI({
-         modelName: provider.model,
-         apiKey: llmKeys.openai || process.env.OPENAI_API_KEY
-       });
-     case ANTHROPIC:
-       if (responseModel) {
-         return new ChatAnthropic({
-           modelName: provider.model,
-           anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatAnthropic({
-         modelName: provider.model,
-         anthropicApiKey: llmKeys.anthropic || process.env.ANTHROPIC_API_KEY
-       });
-     case GOOGLE:
-       if (responseModel) {
-         return new ChatGoogleGenerativeAI({
-           modelName: provider.model,
-           apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatGoogleGenerativeAI({
-         modelName: provider.model,
-         apiKey: llmKeys.google || process.env.GOOGLE_API_KEY
-       });
-     case MISTRAL:
-       if (responseModel) {
-         return new ChatMistralAI({
-           modelName: provider.model,
-           apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatMistralAI({
-         modelName: provider.model,
-         apiKey: llmKeys.mistral || process.env.MISTRAL_API_KEY
-       });
-     case PERPLEXITY:
-       if (responseModel) {
-         return new ChatPerplexity({
-           apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatPerplexity({
-         apiKey: llmKeys.perplexity || process.env.PPLX_API_KEY || "",
-         model: provider.model
-       });
-     case COHERE:
-       if (responseModel) {
-         return new ChatCohere({
-           apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-           model: provider.model
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatCohere({
-         apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
-         model: provider.model
-       });
-     case TOGETHERAI:
-       if (responseModel) {
-         return new ChatTogetherAI({
-           apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
-           model: getTogetheraiModel(provider.model)
-         }).withStructuredOutput(responseModel);
-       }
-       return new ChatTogetherAI({
-         apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
-         model: getTogetheraiModel(provider.model)
-       });
-     default:
-       throw new Error(`Unsupported provider: ${provider.provider}`);
-   }
- }
- const getTogetheraiModel = (model) => {
-   if (model === SupportedModel.MISTRAL_7B_INSTRUCT_V0_2 || model === SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1 || model === SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1) {
-     return `mistralai/${model}`;
-   }
-   if (model === SupportedModel.LLAMA_3_70B_CHAT_HF || model === SupportedModel.LLAMA_3_8B_CHAT_HF || model === SupportedModel.LLAMA_3_1_8B_INSTRUCT_TURBO || model === SupportedModel.LLAMA_3_1_70B_INSTRUCT_TURBO || model === SupportedModel.LLAMA_3_1_405B_INSTRUCT_TURBO) {
-     return `meta-llama/${model}`;
-   }
-   if (model === SupportedModel.QWEN2_72B_INSTRUCT) {
-     return `Qwen/${model}`;
-   }
-   return model;
- };
- async function callLLM(provider, options, llmKeys, runtimeArgs) {
-   const model = getLangChainModel(provider, llmKeys, options.responseModel);
-   const langChainMessages = extendProviderSystemPrompt(
-     options.messages.map(convertToLangChainMessage),
-     options,
-     provider
-   );
-   const response = await model.invoke(langChainMessages, runtimeArgs);
-   return extractContent(response);
- }
- function extendProviderSystemPrompt(messages, options, provider) {
-   const matchingProvider = options.llmProviders.find(
-     (p) => p.provider === provider.provider && p.model === provider.model
-   );
-   if (matchingProvider && matchingProvider.systemPrompt) {
-     messages.unshift(new SystemMessage(matchingProvider.systemPrompt));
-   }
-   return messages;
- }
- function convertToLangChainMessage(msg) {
-   switch (msg.role) {
-     case "user":
-       return new HumanMessage(msg.content);
-     case "assistant":
-       return new AIMessage(msg.content);
-     case "system":
-       return new SystemMessage(msg.content);
-     default:
-       return new HumanMessage(msg.content);
-   }
- }
- async function* callLLMStream(provider, options, llmKeys, runtimeArgs) {
-   const model = getLangChainModel(provider, llmKeys, options.responseModel);
-   const langChainMessages = extendProviderSystemPrompt(
-     options.messages.map(convertToLangChainMessage),
-     options,
-     provider
-   );
-   const stream = await model.stream(langChainMessages, runtimeArgs);
-   for await (const chunk of stream) {
-     yield extractContent(chunk);
-   }
- }
- function extractContent(response) {
-   if ("content" in response) {
-     return typeof response.content === "string" ? response.content : JSON.stringify(response.content);
-   }
-   return typeof response === "string" ? response : JSON.stringify(response);
- }
-
- const SDK_VERSION = packageJson.version;
- dotenv.config();
- const DEFAULT_TIMEOUT = 5;
- const BASE_URL = "https://api.notdiamond.ai";
- class NotDiamond {
-   apiKey;
-   apiUrl;
-   modelSelectUrl;
-   feedbackUrl;
-   createUrl;
-   llmKeys;
-   constructor(options = {}) {
-     this.apiKey = options.apiKey || process.env.NOTDIAMOND_API_KEY || "";
-     this.apiUrl = options.apiUrl || process.env.NOTDIAMOND_API_URL || BASE_URL;
-     this.llmKeys = options.llmKeys || {};
-     this.modelSelectUrl = `${this.apiUrl}/v2/modelRouter/modelSelect`;
-     this.feedbackUrl = `${this.apiUrl}/v2/report/metrics/feedback`;
-     this.createUrl = `${this.apiUrl}/v2/preferences/userPreferenceCreate`;
-   }
-   getAuthHeader() {
-     return `Bearer ${this.apiKey}`;
-   }
-   async postRequest(url, body) {
-     try {
-       const response = await axios.post(url, body, {
-         headers: {
-           Authorization: this.getAuthHeader(),
-           Accept: "application/json",
-           "Content-Type": "application/json",
-           "User-Agent": `TS-SDK/${SDK_VERSION}`
-         }
-       });
-       return response.data;
-     } catch (error) {
-       if (axios.isAxiosError(error) && error.response) {
-         return { detail: "An error occurred." };
-       }
-       console.error("error", error);
-       return { detail: "An unexpected error occurred." };
-     }
-   }
-   /**
-    * Selects the best model for the given messages.
-    * @param options The options for the model.
-    * @returns The results of the model.
-    */
-   async modelSelect(options) {
-     const requestBody = {
-       messages: options.messages,
-       llm_providers: options.llmProviders.map((provider) => ({
-         provider: provider.provider,
-         model: provider.model,
-         ...provider.contextLength !== void 0 && {
-           context_length: provider.contextLength
-         },
-         ...provider.customInputPrice !== void 0 && {
-           input_price: provider.customInputPrice
-         },
-         ...provider.inputPrice !== void 0 && {
-           input_price: provider.inputPrice
-         },
-         ...provider.customOutputPrice !== void 0 && {
-           output_price: provider.customOutputPrice
-         },
-         ...provider.outputPrice !== void 0 && {
-           output_price: provider.outputPrice
-         },
-         ...provider.customLatency !== void 0 && {
-           latency: provider.customLatency
-         },
-         ...provider.latency !== void 0 && { latency: provider.latency },
-         ...provider.isCustom !== void 0 && {
-           is_custom: provider.isCustom
-         }
-       })),
-       ...options.tradeoff && {
-         tradeoff: options.tradeoff
-       },
-       ...options.maxModelDepth && {
-         max_model_depth: options.maxModelDepth
-       },
-       ...options.tools && { tools: options.tools },
-       ...options.hashContent !== void 0 && {
-         hash_content: options.hashContent
-       },
-       ...options.preferenceId && { preference_id: options.preferenceId },
-       ...options.timeout ? { timeout: options.timeout } : {
-         timeout: DEFAULT_TIMEOUT
-       },
-       ...options.default && { default: options.default },
-       ...options.previousSession && {
-         previous_session: options.previousSession
-       },
-       ...options.responseModel && {
-         response_model: options.responseModel
-       }
-     };
-     return this.postRequest(
-       this.modelSelectUrl,
-       requestBody
-     );
-   }
-   /**
-    * Sends feedback to the NotDiamond API.
-    * @param options The options for the feedback.
-    * @returns The results of the feedback.
-    */
-   async feedback(options) {
-     return this.postRequest(this.feedbackUrl, {
-       session_id: options.sessionId,
-       feedback: options.feedback,
-       provider: options.provider
-     });
-   }
-   /**
-    * Creates a preference id.
-    * @returns The preference id.
-    */
-   async createPreferenceId() {
-     const response = await this.postRequest(
-       this.createUrl,
-       {}
-     );
-     if ("preference_id" in response) {
-       return response.preference_id;
-     }
-     throw new Error("Invalid response: preference_id not found");
-   }
-   /**
-    *
-    * @param options The options for the model.
-    * @returns A promise that resolves to the results of the model.
-    */
-   async acreate(options, runtimeArgs = {}) {
-     const selectedModel = await this.modelSelect(options);
-     const { providers } = selectedModel;
-     const content = await callLLM(
-       providers[0],
-       options,
-       this.llmKeys,
-       runtimeArgs
-     );
-     return { content, providers };
-   }
-   /**
-    *
-    * @param options The options for the model.
-    * @param callback Optional callback function to handle the result.
-    * @returns A promise that resolves to the results of the model or a callback function
-    */
-   create(options, runtimeArgs = {}, callback) {
-     const promise = this.acreate(options, runtimeArgs);
-     if (callback) {
-       promise.then((result) => callback(null, result)).catch((error) => callback(error));
-     } else {
-       return promise;
-     }
-   }
-   /**
-    * Streams the results of the model asynchronously.
-    * @param options The options for the model.
-    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
-    */
-   async astream(options, runtimeArgs = {}) {
-     const selectedModel = await this.modelSelect(options);
-     const { providers } = selectedModel;
-     const stream = await Promise.resolve(
-       callLLMStream(
-         providers?.[0] || options.default,
-         options,
-         this.llmKeys,
-         runtimeArgs
-       )
-     );
-     return {
-       provider: providers?.[0] || options.default,
-       stream
-     };
-   }
-   /**
-    * Streams the results of the model.
-    * @param options The options for the model.
-    * @param callback Optional callback function to handle each chunk of the stream.
-    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
-    */
-   stream(options, runtimeArgs = {}, callback) {
-     if (!options.llmProviders || options.llmProviders.length === 0) {
-       throw new Error("No LLM providers specified");
-     }
-     const promise = this.astream(options, runtimeArgs);
-     if (callback) {
-       promise.then(async ({ provider, stream }) => {
-         for await (const chunk of stream) {
-           callback(null, { provider, chunk });
-         }
-       }).catch((error) => callback(error));
-     } else {
-       return promise;
-     }
-   }
- }
-
- export { NotDiamond, SupportedModel, SupportedProvider };
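
For context on what this removal takes away, here is a minimal sketch of how the deleted dist/index.mjs entry point could be consumed, reconstructed from the exports shown above (NotDiamond, SupportedModel, SupportedProvider). All key values are placeholders, and option names follow the deleted source (create/acreate return { content, providers }; stream/astream resolve to { provider, stream }); the 'cost' tradeoff value is an assumption, since the source merely forwards options.tradeoff to the modelSelect endpoint.

    import { NotDiamond, SupportedModel, SupportedProvider } from 'notdiamond';

    // Keys may also come from NOTDIAMOND_API_KEY / OPENAI_API_KEY / ANTHROPIC_API_KEY
    // environment variables, per the constructor and getLangChainModel fallbacks above.
    const notDiamond = new NotDiamond({
      apiKey: 'nd-...', // placeholder
      llmKeys: { openai: 'sk-...', anthropic: 'sk-ant-...' } // placeholders
    });

    // create() calls modelSelect, then invokes the top-ranked provider via LangChain.
    const result = await notDiamond.create({
      messages: [{ role: 'user', content: 'Hello!' }],
      llmProviders: [
        { provider: SupportedProvider.OPENAI, model: SupportedModel.GPT_4O },
        { provider: SupportedProvider.ANTHROPIC, model: SupportedModel.CLAUDE_3_5_SONNET_LATEST }
      ],
      tradeoff: 'cost' // assumed value; forwarded as `tradeoff` in the modelSelect body
    });
    console.log(result.content, result.providers);

    // stream() resolves to the selected provider plus an async iterable of string chunks
    // produced by callLLMStream.
    const { provider, stream } = await notDiamond.stream({
      messages: [{ role: 'user', content: 'Hello!' }],
      llmProviders: [{ provider: SupportedProvider.OPENAI, model: SupportedModel.GPT_4O_MINI }]
    });
    console.log('routed to', provider.provider);
    for await (const chunk of stream) process.stdout.write(chunk);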