notdiamond 0.3.12 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -30,7 +30,7 @@ const axios__default = /*#__PURE__*/_interopDefaultCompat(axios);
 
 const name = "notdiamond";
 const type = "module";
-const version = "1.0.0";
+const version = "1.0.2";
 const author = "not-diamond";
 const license = "MIT";
 const description = "TS/JS client for the NotDiamond API";
@@ -231,7 +231,7 @@ const SupportedProvider = {
   MISTRAL: "mistral",
   PERPLEXITY: "perplexity",
   COHERE: "cohere",
-  TOGETHER: "together"
+  TOGETHERAI: "togetherai"
 };
 const SupportedModel = {
   GPT_3_5_TURBO: "gpt-3.5-turbo",
@@ -339,7 +339,7 @@ const SupportedModel = {
     SupportedModel.COMMAND_R,
     SupportedModel.COMMAND_R_PLUS
   ],
-  [SupportedProvider.TOGETHER]: [
+  [SupportedProvider.TOGETHERAI]: [
     SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
     SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
     SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
@@ -353,7 +353,7 @@ const SupportedModel = {
 });
 
 function getLangChainModel(provider, llmKeys, responseModel) {
-  const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHER } = SupportedProvider;
+  const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHERAI } = SupportedProvider;
   switch (provider.provider) {
     case OPENAI:
       if (responseModel) {
@@ -421,29 +421,29 @@ function getLangChainModel(provider, llmKeys, responseModel) {
         apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
         model: provider.model
       });
-    case TOGETHER:
+    case TOGETHERAI:
       if (responseModel) {
         return new togetherai.ChatTogetherAI({
-          apiKey: process.env.TOGETHER_API_KEY || llmKeys.together,
+          apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
           model: provider.model
         }).withStructuredOutput(responseModel);
       }
       return new togetherai.ChatTogetherAI({
-        apiKey: process.env.TOGETHER_API_KEY || llmKeys.together,
+        apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
         model: provider.model
       });
     default:
       throw new Error(`Unsupported provider: ${provider.provider}`);
   }
 }
-async function callLLM(provider, options, llmKeys) {
+async function callLLM(provider, options, llmKeys, runtimeArgs) {
   const model = getLangChainModel(provider, llmKeys, options.responseModel);
   const langChainMessages = extendProviderSystemPrompt(
     options.messages.map(convertToLangChainMessage),
     options,
     provider
   );
-  const response = await model.invoke(langChainMessages);
+  const response = await model.invoke(langChainMessages, runtimeArgs);
   return extractContent(response);
 }
 function extendProviderSystemPrompt(messages$1, options, provider) {
@@ -467,14 +467,14 @@ function convertToLangChainMessage(msg) {
       return new messages.HumanMessage(msg.content);
   }
 }
-async function* callLLMStream(provider, options, llmKeys) {
+async function* callLLMStream(provider, options, llmKeys, runtimeArgs) {
   const model = getLangChainModel(provider, llmKeys, options.responseModel);
   const langChainMessages = extendProviderSystemPrompt(
     options.messages.map(convertToLangChainMessage),
     options,
     provider
   );
-  const stream = await model.stream(langChainMessages);
+  const stream = await model.stream(langChainMessages, runtimeArgs);
   for await (const chunk of stream) {
     yield extractContent(chunk);
   }
@@ -625,10 +625,15 @@ class NotDiamond {
    * @param options The options for the model.
    * @returns A promise that resolves to the results of the model.
    */
-  async acreate(options) {
+  async acreate(options, runtimeArgs = {}) {
     const selectedModel = await this.modelSelect(options);
     const { providers } = selectedModel;
-    const content = await callLLM(providers[0], options, this.llmKeys);
+    const content = await callLLM(
+      providers[0],
+      options,
+      this.llmKeys,
+      runtimeArgs
+    );
     return { content, providers };
   }
   /**
@@ -637,8 +642,8 @@ class NotDiamond {
    * @param callback Optional callback function to handle the result.
    * @returns A promise that resolves to the results of the model or a callback function
    */
-  create(options, callback) {
-    const promise = this.acreate(options);
+  create(options, runtimeArgs = {}, callback) {
+    const promise = this.acreate(options, runtimeArgs);
     if (callback) {
       promise.then((result) => callback(null, result)).catch((error) => callback(error));
     } else {
@@ -650,7 +655,7 @@ class NotDiamond {
    * @param options The options for the model.
    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
    */
-  async astream(options) {
+  async astream(options, runtimeArgs = {}) {
     const selectedModel = await this.modelSelect(options);
     const { providers } = selectedModel;
     const stream = await Promise.resolve(
@@ -660,7 +665,8 @@ class NotDiamond {
         model: "gpt-3.5-turbo"
       },
       options,
-      this.llmKeys
+      this.llmKeys,
+      runtimeArgs
       )
     );
     return {
@@ -677,11 +683,11 @@ class NotDiamond {
    * @param callback Optional callback function to handle each chunk of the stream.
    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
    */
-  stream(options, callback) {
+  stream(options, runtimeArgs = {}, callback) {
     if (!options.llmProviders || options.llmProviders.length === 0) {
       throw new Error("No LLM providers specified");
     }
-    const promise = this.astream(options);
+    const promise = this.astream(options, runtimeArgs);
     if (callback) {
       promise.then(async ({ provider, stream }) => {
         for await (const chunk of stream) {
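
The compiled changes above thread a new optional runtimeArgs parameter from acreate/create through callLLM into LangChain's model.invoke. A minimal sketch of the new calling convention in TypeScript; the constructor fields are assumptions (the constructor is not part of this diff), and the message and model values are illustrative:

import { NotDiamond } from 'notdiamond';

const notDiamond = new NotDiamond({
  apiKey: process.env.NOTDIAMOND_API_KEY ?? '',          // assumed field, not shown in this diff
  llmKeys: { openai: process.env.OPENAI_API_KEY ?? '' }, // matches this.llmKeys in the bundle
});

// runtimeArgs (new in 1.x) is forwarded verbatim as the second argument of
// LangChain's model.invoke(langChainMessages, runtimeArgs); which keys are
// meaningful depends on the LangChain chat model that ends up selected.
const runtimeArgs: Record<string, string> = {};

const { content, providers } = await notDiamond.acreate(
  {
    messages: [{ role: 'user', content: 'Hello!' }],
    llmProviders: [{ provider: 'openai', model: 'gpt-3.5-turbo' }],
  },
  runtimeArgs,
);
console.log(providers[0], content);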
package/dist/index.d.cts CHANGED
@@ -7,7 +7,7 @@ declare const SupportedProvider: {
     readonly MISTRAL: "mistral";
     readonly PERPLEXITY: "perplexity";
     readonly COHERE: "cohere";
-    readonly TOGETHER: "together";
+    readonly TOGETHERAI: "togetherai";
 };
 declare const SupportedModel: {
     readonly GPT_3_5_TURBO: "gpt-3.5-turbo";
@@ -68,7 +68,7 @@ declare const ProviderModelMap: {
     readonly mistral: readonly ["mistral-large-latest", "mistral-large-2407", "mistral-large-2402", "mistral-medium-latest", "mistral-small-latest", "codestral-latest", "open-mistral-7b", "open-mixtral-8x7b", "open-mixtral-8x22b", "open-mistral-nemo"];
     readonly perplexity: readonly ["llama-3.1-sonar-large-128k-online"];
     readonly cohere: readonly ["command-r", "command-r-plus"];
-    readonly together: readonly ["Mistral-7B-Instruct-v0.2", "Mixtral-8x7B-Instruct-v0.1", "Mixtral-8x22B-Instruct-v0.1", "Llama-3-70b-chat-hf", "Llama-3-8b-chat-hf", "Qwen2-72B-Instruct", "Meta-Llama-3.1-8B-Instruct-Turbo", "Meta-Llama-3.1-70B-Instruct-Turbo", "Meta-Llama-3.1-405B-Instruct-Turbo"];
+    readonly togetherai: readonly ["Mistral-7B-Instruct-v0.2", "Mixtral-8x7B-Instruct-v0.1", "Mixtral-8x22B-Instruct-v0.1", "Llama-3-70b-chat-hf", "Llama-3-8b-chat-hf", "Qwen2-72B-Instruct", "Meta-Llama-3.1-8B-Instruct-Turbo", "Meta-Llama-3.1-70B-Instruct-Turbo", "Meta-Llama-3.1-405B-Instruct-Turbo"];
 };
 type ProviderModelMapType = typeof ProviderModelMap;
 type SupportedProviderType = keyof ProviderModelMapType;
@@ -176,12 +176,12 @@ declare class NotDiamond {
      * @param callback Optional callback function to handle the result.
      * @returns A promise that resolves to the results of the model or a callback function
      */
-    create(options: ModelSelectOptions, callback?: (error: Error | null, result?: {
+    create(options: ModelSelectOptions, runtimeArgs?: Record<string, string>, callback?: (error: Error | null, result?: {
         content: string;
         providers: Provider[];
     }) => void): Promise<{
         content: string;
-        providers: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "together">[];
+        providers: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "togetherai">[];
     }> | undefined;
     /**
      * Streams the results of the model asynchronously.
@@ -195,11 +195,11 @@ declare class NotDiamond {
      * @param callback Optional callback function to handle each chunk of the stream.
      * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
      */
-    stream(options: ModelSelectOptions, callback?: (error: Error | null, result?: {
+    stream(options: ModelSelectOptions, runtimeArgs?: Record<string, string>, callback?: (error: Error | null, result?: {
         provider: Provider;
         chunk?: string;
     }) => void): Promise<{
-        provider: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "together">;
+        provider: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "togetherai">;
         stream: AsyncIterable<string>;
     }> | undefined;
 }
package/dist/index.d.mts CHANGED
@@ -7,7 +7,7 @@ declare const SupportedProvider: {
     readonly MISTRAL: "mistral";
     readonly PERPLEXITY: "perplexity";
     readonly COHERE: "cohere";
-    readonly TOGETHER: "together";
+    readonly TOGETHERAI: "togetherai";
 };
 declare const SupportedModel: {
     readonly GPT_3_5_TURBO: "gpt-3.5-turbo";
@@ -68,7 +68,7 @@ declare const ProviderModelMap: {
     readonly mistral: readonly ["mistral-large-latest", "mistral-large-2407", "mistral-large-2402", "mistral-medium-latest", "mistral-small-latest", "codestral-latest", "open-mistral-7b", "open-mixtral-8x7b", "open-mixtral-8x22b", "open-mistral-nemo"];
     readonly perplexity: readonly ["llama-3.1-sonar-large-128k-online"];
     readonly cohere: readonly ["command-r", "command-r-plus"];
-    readonly together: readonly ["Mistral-7B-Instruct-v0.2", "Mixtral-8x7B-Instruct-v0.1", "Mixtral-8x22B-Instruct-v0.1", "Llama-3-70b-chat-hf", "Llama-3-8b-chat-hf", "Qwen2-72B-Instruct", "Meta-Llama-3.1-8B-Instruct-Turbo", "Meta-Llama-3.1-70B-Instruct-Turbo", "Meta-Llama-3.1-405B-Instruct-Turbo"];
+    readonly togetherai: readonly ["Mistral-7B-Instruct-v0.2", "Mixtral-8x7B-Instruct-v0.1", "Mixtral-8x22B-Instruct-v0.1", "Llama-3-70b-chat-hf", "Llama-3-8b-chat-hf", "Qwen2-72B-Instruct", "Meta-Llama-3.1-8B-Instruct-Turbo", "Meta-Llama-3.1-70B-Instruct-Turbo", "Meta-Llama-3.1-405B-Instruct-Turbo"];
 };
 type ProviderModelMapType = typeof ProviderModelMap;
 type SupportedProviderType = keyof ProviderModelMapType;
@@ -176,12 +176,12 @@ declare class NotDiamond {
      * @param callback Optional callback function to handle the result.
      * @returns A promise that resolves to the results of the model or a callback function
      */
-    create(options: ModelSelectOptions, callback?: (error: Error | null, result?: {
+    create(options: ModelSelectOptions, runtimeArgs?: Record<string, string>, callback?: (error: Error | null, result?: {
         content: string;
         providers: Provider[];
     }) => void): Promise<{
         content: string;
-        providers: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "together">[];
+        providers: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "togetherai">[];
     }> | undefined;
     /**
      * Streams the results of the model asynchronously.
@@ -195,11 +195,11 @@ declare class NotDiamond {
      * @param callback Optional callback function to handle each chunk of the stream.
      * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
      */
-    stream(options: ModelSelectOptions, callback?: (error: Error | null, result?: {
+    stream(options: ModelSelectOptions, runtimeArgs?: Record<string, string>, callback?: (error: Error | null, result?: {
         provider: Provider;
         chunk?: string;
     }) => void): Promise<{
-        provider: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "together">;
+        provider: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "togetherai">;
         stream: AsyncIterable<string>;
     }> | undefined;
 }
package/dist/index.d.ts CHANGED
@@ -7,7 +7,7 @@ declare const SupportedProvider: {
     readonly MISTRAL: "mistral";
     readonly PERPLEXITY: "perplexity";
     readonly COHERE: "cohere";
-    readonly TOGETHER: "together";
+    readonly TOGETHERAI: "togetherai";
 };
 declare const SupportedModel: {
     readonly GPT_3_5_TURBO: "gpt-3.5-turbo";
@@ -68,7 +68,7 @@ declare const ProviderModelMap: {
     readonly mistral: readonly ["mistral-large-latest", "mistral-large-2407", "mistral-large-2402", "mistral-medium-latest", "mistral-small-latest", "codestral-latest", "open-mistral-7b", "open-mixtral-8x7b", "open-mixtral-8x22b", "open-mistral-nemo"];
     readonly perplexity: readonly ["llama-3.1-sonar-large-128k-online"];
     readonly cohere: readonly ["command-r", "command-r-plus"];
-    readonly together: readonly ["Mistral-7B-Instruct-v0.2", "Mixtral-8x7B-Instruct-v0.1", "Mixtral-8x22B-Instruct-v0.1", "Llama-3-70b-chat-hf", "Llama-3-8b-chat-hf", "Qwen2-72B-Instruct", "Meta-Llama-3.1-8B-Instruct-Turbo", "Meta-Llama-3.1-70B-Instruct-Turbo", "Meta-Llama-3.1-405B-Instruct-Turbo"];
+    readonly togetherai: readonly ["Mistral-7B-Instruct-v0.2", "Mixtral-8x7B-Instruct-v0.1", "Mixtral-8x22B-Instruct-v0.1", "Llama-3-70b-chat-hf", "Llama-3-8b-chat-hf", "Qwen2-72B-Instruct", "Meta-Llama-3.1-8B-Instruct-Turbo", "Meta-Llama-3.1-70B-Instruct-Turbo", "Meta-Llama-3.1-405B-Instruct-Turbo"];
 };
 type ProviderModelMapType = typeof ProviderModelMap;
 type SupportedProviderType = keyof ProviderModelMapType;
@@ -176,12 +176,12 @@ declare class NotDiamond {
      * @param callback Optional callback function to handle the result.
      * @returns A promise that resolves to the results of the model or a callback function
      */
-    create(options: ModelSelectOptions, callback?: (error: Error | null, result?: {
+    create(options: ModelSelectOptions, runtimeArgs?: Record<string, string>, callback?: (error: Error | null, result?: {
         content: string;
         providers: Provider[];
     }) => void): Promise<{
         content: string;
-        providers: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "together">[];
+        providers: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "togetherai">[];
     }> | undefined;
     /**
      * Streams the results of the model asynchronously.
@@ -195,11 +195,11 @@ declare class NotDiamond {
      * @param callback Optional callback function to handle each chunk of the stream.
      * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
      */
-    stream(options: ModelSelectOptions, callback?: (error: Error | null, result?: {
+    stream(options: ModelSelectOptions, runtimeArgs?: Record<string, string>, callback?: (error: Error | null, result?: {
         provider: Provider;
         chunk?: string;
     }) => void): Promise<{
-        provider: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "together">;
+        provider: Provider<"openai" | "anthropic" | "perplexity" | "google" | "mistral" | "cohere" | "togetherai">;
         stream: AsyncIterable<string>;
     }> | undefined;
 }
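
The same signature change is repeated across all three declaration outputs: runtimeArgs?: Record<string, string> now sits between options and the optional callback in both create and stream, so callback-style callers from 0.3.x need a positional placeholder for runtimeArgs. A sketch of the updated callback form, reusing the notDiamond instance from the earlier example; option values are illustrative:

notDiamond.stream(
  {
    messages: [{ role: 'user', content: 'Stream a short reply.' }],
    llmProviders: [
      { provider: 'togetherai', model: 'Meta-Llama-3.1-8B-Instruct-Turbo' },
    ],
  },
  {}, // runtimeArgs: pass an empty object when no LangChain call options are needed
  (error, result) => {
    if (error) {
      console.error(error);
      return;
    }
    // Per the declarations above, the callback result carries the selected
    // provider and an optional chunk of streamed text.
    if (result?.chunk) process.stdout.write(result.chunk);
  },
);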
package/dist/index.mjs CHANGED
@@ -11,7 +11,7 @@ import { ChatTogetherAI } from '@langchain/community/chat_models/togetherai';
 
 const name = "notdiamond";
 const type = "module";
-const version = "1.0.0";
+const version = "1.0.2";
 const author = "not-diamond";
 const license = "MIT";
 const description = "TS/JS client for the NotDiamond API";
@@ -212,7 +212,7 @@ const SupportedProvider = {
   MISTRAL: "mistral",
   PERPLEXITY: "perplexity",
   COHERE: "cohere",
-  TOGETHER: "together"
+  TOGETHERAI: "togetherai"
 };
 const SupportedModel = {
   GPT_3_5_TURBO: "gpt-3.5-turbo",
@@ -320,7 +320,7 @@ const SupportedModel = {
     SupportedModel.COMMAND_R,
     SupportedModel.COMMAND_R_PLUS
   ],
-  [SupportedProvider.TOGETHER]: [
+  [SupportedProvider.TOGETHERAI]: [
     SupportedModel.MISTRAL_7B_INSTRUCT_V0_2,
     SupportedModel.MIXTRAL_8X7B_INSTRUCT_V0_1,
     SupportedModel.MIXTRAL_8X22B_INSTRUCT_V0_1,
@@ -334,7 +334,7 @@ const SupportedModel = {
 });
 
 function getLangChainModel(provider, llmKeys, responseModel) {
-  const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHER } = SupportedProvider;
+  const { OPENAI, ANTHROPIC, GOOGLE, MISTRAL, PERPLEXITY, COHERE, TOGETHERAI } = SupportedProvider;
   switch (provider.provider) {
     case OPENAI:
       if (responseModel) {
@@ -402,29 +402,29 @@ function getLangChainModel(provider, llmKeys, responseModel) {
         apiKey: process.env.COHERE_API_KEY || llmKeys.cohere,
         model: provider.model
       });
-    case TOGETHER:
+    case TOGETHERAI:
       if (responseModel) {
         return new ChatTogetherAI({
-          apiKey: process.env.TOGETHER_API_KEY || llmKeys.together,
+          apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
           model: provider.model
         }).withStructuredOutput(responseModel);
       }
       return new ChatTogetherAI({
-        apiKey: process.env.TOGETHER_API_KEY || llmKeys.together,
+        apiKey: process.env.TOGETHERAI_API_KEY || llmKeys.togetherai,
         model: provider.model
      });
    default:
      throw new Error(`Unsupported provider: ${provider.provider}`);
  }
 }
-async function callLLM(provider, options, llmKeys) {
+async function callLLM(provider, options, llmKeys, runtimeArgs) {
   const model = getLangChainModel(provider, llmKeys, options.responseModel);
   const langChainMessages = extendProviderSystemPrompt(
     options.messages.map(convertToLangChainMessage),
     options,
     provider
   );
-  const response = await model.invoke(langChainMessages);
+  const response = await model.invoke(langChainMessages, runtimeArgs);
   return extractContent(response);
 }
 function extendProviderSystemPrompt(messages, options, provider) {
@@ -448,14 +448,14 @@ function convertToLangChainMessage(msg) {
       return new HumanMessage(msg.content);
   }
 }
-async function* callLLMStream(provider, options, llmKeys) {
+async function* callLLMStream(provider, options, llmKeys, runtimeArgs) {
   const model = getLangChainModel(provider, llmKeys, options.responseModel);
   const langChainMessages = extendProviderSystemPrompt(
     options.messages.map(convertToLangChainMessage),
     options,
     provider
   );
-  const stream = await model.stream(langChainMessages);
+  const stream = await model.stream(langChainMessages, runtimeArgs);
   for await (const chunk of stream) {
     yield extractContent(chunk);
   }
@@ -606,10 +606,15 @@ class NotDiamond {
    * @param options The options for the model.
    * @returns A promise that resolves to the results of the model.
    */
-  async acreate(options) {
+  async acreate(options, runtimeArgs = {}) {
     const selectedModel = await this.modelSelect(options);
     const { providers } = selectedModel;
-    const content = await callLLM(providers[0], options, this.llmKeys);
+    const content = await callLLM(
+      providers[0],
+      options,
+      this.llmKeys,
+      runtimeArgs
+    );
     return { content, providers };
   }
   /**
@@ -618,8 +623,8 @@ class NotDiamond {
    * @param callback Optional callback function to handle the result.
    * @returns A promise that resolves to the results of the model or a callback function
    */
-  create(options, callback) {
-    const promise = this.acreate(options);
+  create(options, runtimeArgs = {}, callback) {
+    const promise = this.acreate(options, runtimeArgs);
     if (callback) {
       promise.then((result) => callback(null, result)).catch((error) => callback(error));
     } else {
@@ -631,7 +636,7 @@ class NotDiamond {
    * @param options The options for the model.
    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings.
    */
-  async astream(options) {
+  async astream(options, runtimeArgs = {}) {
     const selectedModel = await this.modelSelect(options);
     const { providers } = selectedModel;
     const stream = await Promise.resolve(
@@ -641,7 +646,8 @@ class NotDiamond {
         model: "gpt-3.5-turbo"
       },
       options,
-      this.llmKeys
+      this.llmKeys,
+      runtimeArgs
       )
     );
     return {
@@ -658,11 +664,11 @@ class NotDiamond {
    * @param callback Optional callback function to handle each chunk of the stream.
    * @returns A promise that resolves to an object containing the provider and an AsyncIterable of strings or a callback function
    */
-  stream(options, callback) {
+  stream(options, runtimeArgs = {}, callback) {
     if (!options.llmProviders || options.llmProviders.length === 0) {
       throw new Error("No LLM providers specified");
     }
-    const promise = this.astream(options);
+    const promise = this.astream(options, runtimeArgs);
     if (callback) {
       promise.then(async ({ provider, stream }) => {
         for await (const chunk of stream) {
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "notdiamond",
   "type": "module",
-  "version": "0.3.12",
+  "version": "1.0.2",
   "author": "not-diamond",
   "license": "MIT",
   "description": "TS/JS client for the NotDiamond API",