@corbat-tech/coco 2.13.0 → 2.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -12300,6 +12300,15 @@ var MODELS_WITH_THINKING_MODE = ["kimi-k2.5", "kimi-k2-0324", "kimi-latest"];
12300
12300
/**
 * Determine whether a model must be called through the OpenAI Responses API
 * rather than the classic Chat Completions API.
 * @param {string} model - Model identifier string.
 * @returns {boolean} True when the Responses API is required for this model.
 */
function needsResponsesApi(model) {
  // Codex variants can appear anywhere in the id; the rest are prefix families.
  if (model.includes("codex")) {
    return true;
  }
  const responsesOnlyPrefixes = ["gpt-5", "o4-", "o3-"];
  return responsesOnlyPrefixes.some((prefix) => model.startsWith(prefix));
}
12303
/**
 * Check whether a model rejects the legacy `max_tokens` request field and
 * requires `max_completion_tokens` instead.
 * @param {string} model - Model identifier string.
 * @returns {boolean} True when `max_completion_tokens` must be sent.
 */
function needsMaxCompletionTokens(model) {
  // Model families (matched by id prefix) that only accept max_completion_tokens.
  const restrictedFamilies = [
    "o1",
    "o3",
    "o4",
    "gpt-4o",
    "gpt-4.1",
    "gpt-5",
    "chatgpt-4o",
  ];
  return restrictedFamilies.some((prefix) => model.startsWith(prefix));
}
12306
/**
 * Build the token-limit parameter object for a chat completion request,
 * choosing whichever field name the target model accepts.
 * @param {string} model - Model identifier string.
 * @param {number} maxTokens - Upper bound on generated tokens.
 * @returns {{max_completion_tokens: number} | {max_tokens: number}} Spreadable
 *   request-parameter fragment.
 */
function buildMaxTokensParam(model, maxTokens) {
  const field = needsMaxCompletionTokens(model)
    ? "max_completion_tokens"
    : "max_tokens";
  return { [field]: maxTokens };
}
12303
12312
  var OpenAIProvider = class {
12304
12313
  id;
12305
12314
  name;
@@ -12376,9 +12385,10 @@ var OpenAIProvider = class {
12376
12385
  return withRetry(async () => {
12377
12386
  try {
12378
12387
  const supportsTemp = this.supportsTemperature(model);
12388
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
12379
12389
  const response = await this.client.chat.completions.create({
12380
12390
  model,
12381
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
12391
+ ...buildMaxTokensParam(model, maxTokens),
12382
12392
  messages: this.convertMessages(messages, options?.system),
12383
12393
  stop: options?.stopSequences,
12384
12394
  ...supportsTemp && {
@@ -12414,9 +12424,10 @@ var OpenAIProvider = class {
12414
12424
  try {
12415
12425
  const supportsTemp = this.supportsTemperature(model);
12416
12426
  const extraBody = this.getExtraBody(model);
12427
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
12417
12428
  const requestParams = {
12418
12429
  model,
12419
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
12430
+ ...buildMaxTokensParam(model, maxTokens),
12420
12431
  messages: this.convertMessages(messages, options?.system),
12421
12432
  tools: this.convertTools(options.tools),
12422
12433
  tool_choice: this.convertToolChoice(options.toolChoice)
@@ -12460,9 +12471,10 @@ var OpenAIProvider = class {
12460
12471
  }
12461
12472
  try {
12462
12473
  const supportsTemp = this.supportsTemperature(model);
12474
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
12463
12475
  const stream = await this.client.chat.completions.create({
12464
12476
  model,
12465
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
12477
+ ...buildMaxTokensParam(model, maxTokens),
12466
12478
  messages: this.convertMessages(messages, options?.system),
12467
12479
  stream: true,
12468
12480
  ...supportsTemp && { temperature: options?.temperature ?? this.config.temperature ?? 0 }
@@ -12496,9 +12508,10 @@ var OpenAIProvider = class {
12496
12508
  try {
12497
12509
  const supportsTemp = this.supportsTemperature(model);
12498
12510
  const extraBody = this.getExtraBody(model);
12511
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
12499
12512
  const requestParams = {
12500
12513
  model,
12501
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
12514
+ ...buildMaxTokensParam(model, maxTokens),
12502
12515
  messages: this.convertMessages(messages, options?.system),
12503
12516
  tools: this.convertTools(options.tools),
12504
12517
  tool_choice: this.convertToolChoice(options.toolChoice),
@@ -12760,11 +12773,20 @@ var OpenAIProvider = class {
12760
12773
  } catch {
12761
12774
  try {
12762
12775
  const model = this.config.model || DEFAULT_MODEL2;
12763
- await this.client.chat.completions.create({
12764
- model,
12765
- messages: [{ role: "user", content: "Hi" }],
12766
- max_tokens: 1
12767
- });
12776
+ if (needsResponsesApi(model)) {
12777
+ await this.client.responses.create({
12778
+ model,
12779
+ input: [{ role: "user", content: [{ type: "input_text", text: "Hi" }] }],
12780
+ max_output_tokens: 1,
12781
+ store: false
12782
+ });
12783
+ } else {
12784
+ await this.client.chat.completions.create({
12785
+ model,
12786
+ messages: [{ role: "user", content: "Hi" }],
12787
+ ...buildMaxTokensParam(model, 1)
12788
+ });
12789
+ }
12768
12790
  return true;
12769
12791
  } catch {
12770
12792
  return false;