@corbat-tech/coco 2.13.0 → 2.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -1397,6 +1397,15 @@ var init_anthropic = __esm({
1397
1397
// True when the model must be driven through the OpenAI Responses API
// rather than Chat Completions: codex variants and the newer
// gpt-5 / o4- / o3- reasoning model families.
function needsResponsesApi(model) {
  if (model.includes("codex")) {
    return true;
  }
  const responsesPrefixes = ["gpt-5", "o4-", "o3-"];
  return responsesPrefixes.some((prefix) => model.startsWith(prefix));
}
1400
// True for model families that reject the legacy `max_tokens` field and
// require `max_completion_tokens` on Chat Completions requests
// (o-series reasoning models and the gpt-4o/4.1/5 generations).
function needsMaxCompletionTokens(model) {
  const prefixes = ["o1", "o3", "o4", "gpt-4o", "gpt-4.1", "gpt-5", "chatgpt-4o"];
  return prefixes.some((prefix) => model.startsWith(prefix));
}
1403
// Build the token-limit fragment for a Chat Completions request body,
// selecting `max_completion_tokens` for the newer model families and the
// legacy `max_tokens` key otherwise. Intended to be spread into the
// request params object.
function buildMaxTokensParam(model, maxTokens) {
  const key = needsMaxCompletionTokens(model) ? "max_completion_tokens" : "max_tokens";
  return { [key]: maxTokens };
}
1400
1409
  function createOpenAIProvider(config) {
1401
1410
  const provider = new OpenAIProvider();
1402
1411
  if (config) {
@@ -1605,9 +1614,10 @@ var init_openai = __esm({
1605
1614
  return withRetry(async () => {
1606
1615
  try {
1607
1616
  const supportsTemp = this.supportsTemperature(model);
1617
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
1608
1618
  const response = await this.client.chat.completions.create({
1609
1619
  model,
1610
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
1620
+ ...buildMaxTokensParam(model, maxTokens),
1611
1621
  messages: this.convertMessages(messages, options?.system),
1612
1622
  stop: options?.stopSequences,
1613
1623
  ...supportsTemp && {
@@ -1643,9 +1653,10 @@ var init_openai = __esm({
1643
1653
  try {
1644
1654
  const supportsTemp = this.supportsTemperature(model);
1645
1655
  const extraBody = this.getExtraBody(model);
1656
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
1646
1657
  const requestParams = {
1647
1658
  model,
1648
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
1659
+ ...buildMaxTokensParam(model, maxTokens),
1649
1660
  messages: this.convertMessages(messages, options?.system),
1650
1661
  tools: this.convertTools(options.tools),
1651
1662
  tool_choice: this.convertToolChoice(options.toolChoice)
@@ -1689,9 +1700,10 @@ var init_openai = __esm({
1689
1700
  }
1690
1701
  try {
1691
1702
  const supportsTemp = this.supportsTemperature(model);
1703
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
1692
1704
  const stream = await this.client.chat.completions.create({
1693
1705
  model,
1694
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
1706
+ ...buildMaxTokensParam(model, maxTokens),
1695
1707
  messages: this.convertMessages(messages, options?.system),
1696
1708
  stream: true,
1697
1709
  ...supportsTemp && { temperature: options?.temperature ?? this.config.temperature ?? 0 }
@@ -1725,9 +1737,10 @@ var init_openai = __esm({
1725
1737
  try {
1726
1738
  const supportsTemp = this.supportsTemperature(model);
1727
1739
  const extraBody = this.getExtraBody(model);
1740
+ const maxTokens = options?.maxTokens ?? this.config.maxTokens ?? 8192;
1728
1741
  const requestParams = {
1729
1742
  model,
1730
- max_tokens: options?.maxTokens ?? this.config.maxTokens ?? 8192,
1743
+ ...buildMaxTokensParam(model, maxTokens),
1731
1744
  messages: this.convertMessages(messages, options?.system),
1732
1745
  tools: this.convertTools(options.tools),
1733
1746
  tool_choice: this.convertToolChoice(options.toolChoice),
@@ -1989,11 +2002,20 @@ var init_openai = __esm({
1989
2002
  } catch {
1990
2003
  try {
1991
2004
  const model = this.config.model || DEFAULT_MODEL2;
1992
- await this.client.chat.completions.create({
1993
- model,
1994
- messages: [{ role: "user", content: "Hi" }],
1995
- max_tokens: 1
1996
- });
2005
+ if (needsResponsesApi(model)) {
2006
+ await this.client.responses.create({
2007
+ model,
2008
+ input: [{ role: "user", content: [{ type: "input_text", text: "Hi" }] }],
2009
+ max_output_tokens: 1,
2010
+ store: false
2011
+ });
2012
+ } else {
2013
+ await this.client.chat.completions.create({
2014
+ model,
2015
+ messages: [{ role: "user", content: "Hi" }],
2016
+ ...buildMaxTokensParam(model, 1)
2017
+ });
2018
+ }
1997
2019
  return true;
1998
2020
  } catch {
1999
2021
  return false;