ai 3.0.26 → 3.0.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
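This version relaxes the sampling-parameter handling: the documented 0 to 1 bounds for `temperature` and `topP` and the -1 to 1 bounds for `presencePenalty` and `frequencyPenalty` are dropped, the corresponding runtime range checks are removed, and the values are now passed through to the provider, whose accepted ranges depend on the model. Below is a minimal call-site sketch under the `generateText` API from `ai` 3.0; the `model` value and the prompt are placeholders, and whether a value such as `temperature: 1.5` is accepted now depends on the provider and model rather than on the SDK:

```ts
import { generateText } from "ai";

// Placeholder: any language model instance from a provider package;
// not part of this diff.
declare const model: Parameters<typeof generateText>[0]["model"];

async function main() {
  const { text } = await generateText({
    model,
    prompt: "Invent a new holiday and describe its traditions.",
    // 3.0.26 threw InvalidArgumentError here for temperature > 1.
    // 3.0.27 forwards the value to the provider, which decides whether
    // it is valid for the chosen model.
    temperature: 1.5,
  });
  console.log(text);
}
```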
package/dist/index.d.mts CHANGED
@@ -230,19 +230,17 @@ This function does not stream the output. If you want to stream the output, use
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -330,19 +328,17 @@ This function streams the output. If you do not want to stream the output, use `
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -510,19 +506,17 @@ This function does not stream the output. If you want to stream the output, use
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -595,19 +589,17 @@ This function streams the output. If you do not want to stream the output, use `
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
package/dist/index.d.ts CHANGED
@@ -230,19 +230,17 @@ This function does not stream the output. If you want to stream the output, use
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -330,19 +328,17 @@ This function streams the output. If you do not want to stream the output, use `
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -510,19 +506,17 @@ This function does not stream the output. If you want to stream the output, use
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
@@ -595,19 +589,17 @@ This function streams the output. If you do not want to stream the output, use `
 
 @param maxTokens - Maximum number of tokens to generate.
 @param temperature - Temperature setting.
-This is a number between 0 (almost no randomness) and 1 (very random).
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
-@param topP - Nucleus sampling. This is a number between 0 and 1.
-E.g. 0.1 would mean that only tokens with the top 10% probability mass are considered.
+@param topP - Nucleus sampling.
+The value is passed through to the provider. The range depends on the provider and model.
 It is recommended to set either `temperature` or `topP`, but not both.
 @param presencePenalty - Presence penalty setting.
 It affects the likelihood of the model to repeat information that is already in the prompt.
-The presence penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param frequencyPenalty - Frequency penalty setting.
 It affects the likelihood of the model to repeatedly use the same words or phrases.
-The frequency penalty is a number between -1 (increase repetition) and 1 (maximum penalty, decrease repetition).
-0 means no penalty.
+The value is passed through to the provider. The range depends on the provider and model.
 @param seed - The seed (integer) to use for random sampling.
 If set and supported by the model, calls will generate deterministic results.
 
package/dist/index.js CHANGED
@@ -277,13 +277,6 @@ function prepareCallSettings({
         message: "temperature must be a number"
       });
     }
-    if (temperature < 0 || temperature > 1) {
-      throw new import_provider3.InvalidArgumentError({
-        parameter: "temperature",
-        value: temperature,
-        message: "temperature must be between 0 and 1 (inclusive)"
-      });
-    }
   }
   if (topP != null) {
     if (typeof topP !== "number") {
@@ -293,13 +286,6 @@ function prepareCallSettings({
         message: "topP must be a number"
      });
     }
-    if (topP < 0 || topP > 1) {
-      throw new import_provider3.InvalidArgumentError({
-        parameter: "topP",
-        value: topP,
-        message: "topP must be between 0 and 1 (inclusive)"
-      });
-    }
   }
   if (presencePenalty != null) {
     if (typeof presencePenalty !== "number") {
@@ -309,13 +295,6 @@ function prepareCallSettings({
         message: "presencePenalty must be a number"
      });
     }
-    if (presencePenalty < -1 || presencePenalty > 1) {
-      throw new import_provider3.InvalidArgumentError({
-        parameter: "presencePenalty",
-        value: presencePenalty,
-        message: "presencePenalty must be between -1 and 1 (inclusive)"
-      });
-    }
   }
   if (frequencyPenalty != null) {
     if (typeof frequencyPenalty !== "number") {
@@ -325,13 +304,6 @@ function prepareCallSettings({
         message: "frequencyPenalty must be a number"
      });
     }
-    if (frequencyPenalty < -1 || frequencyPenalty > 1) {
-      throw new import_provider3.InvalidArgumentError({
-        parameter: "frequencyPenalty",
-        value: frequencyPenalty,
-        message: "frequencyPenalty must be between -1 and 1 (inclusive)"
-      });
-    }
   }
   if (seed != null) {
     if (!Number.isInteger(seed)) {
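After these removals, `prepareCallSettings` keeps only the type checks for these settings. A condensed, self-contained sketch of the logic that remains for `temperature` follows; the error class below is a hypothetical stand-in for the bundle's `import_provider3.InvalidArgumentError`, whose import is not shown in this diff, and `topP`, `presencePenalty`, and `frequencyPenalty` follow the same pattern:

```ts
// Hypothetical stand-in for the SDK's InvalidArgumentError
// (import_provider3.InvalidArgumentError in the bundle).
class InvalidArgumentError extends Error {
  constructor(opts: { parameter: string; value: unknown; message: string }) {
    super(`${opts.parameter}: ${opts.message}`);
  }
}

// Condensed sketch of the check that remains in 3.0.27.
function checkTemperature(temperature: unknown): number | undefined {
  if (temperature == null) {
    return undefined;
  }
  if (typeof temperature !== "number") {
    throw new InvalidArgumentError({
      parameter: "temperature",
      value: temperature,
      message: "temperature must be a number",
    });
  }
  // The 0-1 range check from 3.0.26 is gone: any number is forwarded to the
  // provider, which decides whether it is acceptable for the chosen model.
  return temperature;
}
```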