@ax-llm/ax 11.0.51 → 11.0.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.d.cts CHANGED
@@ -719,7 +719,7 @@ type AxAIOpenAIChatRequest<TModel> = {
   response_format?: {
     type: string;
   };
-  max_completion_tokens: number;
+  max_completion_tokens?: number;
   temperature?: number;
   top_p?: number;
   n?: number;
@@ -1857,7 +1857,7 @@ type AxAIRekaChatRequest = {
   response_format?: {
     type: string;
   };
-  max_tokens: number;
+  max_tokens?: number;
   temperature?: number;
   top_p?: number;
   top_k?: number;
package/index.d.ts CHANGED
@@ -719,7 +719,7 @@ type AxAIOpenAIChatRequest<TModel> = {
   response_format?: {
     type: string;
   };
-  max_completion_tokens: number;
+  max_completion_tokens?: number;
   temperature?: number;
   top_p?: number;
   n?: number;
@@ -1857,7 +1857,7 @@ type AxAIRekaChatRequest = {
   response_format?: {
     type: string;
   };
-  max_tokens: number;
+  max_tokens?: number;
   temperature?: number;
   top_p?: number;
   top_k?: number;
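
Note: in both the CJS and ESM declaration files, the only change is that the OpenAI request's max_completion_tokens and the Reka request's max_tokens become optional. A minimal TypeScript sketch of what the loosened types permit; the reduced ChatRequestSketch type below is illustrative, not the full request shape from the package:

// Reduced sketch of only the changed fields (hypothetical type name):
type ChatRequestSketch = {
  response_format?: { type: string };
  max_completion_tokens?: number; // was required before 11.0.52
  temperature?: number;
};

// The token cap may now be omitted entirely...
const reqA: ChatRequestSketch = { temperature: 0 };
// ...or still supplied explicitly:
const reqB: ChatRequestSketch = { temperature: 0, max_completion_tokens: 1024 };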
package/index.js CHANGED
@@ -720,13 +720,11 @@ var logResponseDelta = (delta) => {
 
 // ai/base.ts
 var axBaseAIDefaultConfig = () => structuredClone({
-  maxTokens: 2e3,
   temperature: 0,
   topK: 40,
   topP: 0.9
 });
 var axBaseAIDefaultCreativeConfig = () => structuredClone({
-  maxTokens: 2e3,
   temperature: 0.4,
   topP: 0.7,
   frequencyPenalty: 0.2
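
Note: maxTokens: 2e3 is dropped from both default-config factories, so configs produced by axBaseAIDefaultConfig() and axBaseAIDefaultCreativeConfig() no longer carry a token cap unless the caller sets one. A hedged TypeScript sketch of the resulting behavior (the factory shape is taken from the diff; the opt-in spread is illustrative):

// As of 11.0.52 the base default config has no maxTokens entry:
const cfg = structuredClone({ temperature: 0, topK: 40, topP: 0.9 });
// Callers that relied on the previous implicit 2000-token cap
// must now opt in explicitly, for example:
const cfgWithCap = { ...cfg, maxTokens: 2000 };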
@@ -949,14 +947,14 @@ var AxBaseAI = class {
     [axSpanAttributes.LLM_SYSTEM]: this.name,
     [axSpanAttributes.LLM_OPERATION_NAME]: "chat",
     [axSpanAttributes.LLM_REQUEST_MODEL]: model,
-    [axSpanAttributes.LLM_REQUEST_MAX_TOKENS]: modelConfig.maxTokens,
+    [axSpanAttributes.LLM_REQUEST_MAX_TOKENS]: modelConfig.maxTokens ?? "Not set",
     [axSpanAttributes.LLM_REQUEST_TEMPERATURE]: modelConfig.temperature,
-    [axSpanAttributes.LLM_REQUEST_TOP_P]: modelConfig.topP,
-    [axSpanAttributes.LLM_REQUEST_TOP_K]: modelConfig.topK,
-    [axSpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY]: modelConfig.frequencyPenalty,
-    [axSpanAttributes.LLM_REQUEST_PRESENCE_PENALTY]: modelConfig.presencePenalty,
-    [axSpanAttributes.LLM_REQUEST_STOP_SEQUENCES]: modelConfig.stopSequences?.join(", "),
-    [axSpanAttributes.LLM_REQUEST_LLM_IS_STREAMING]: modelConfig.stream
+    [axSpanAttributes.LLM_REQUEST_TOP_P]: modelConfig.topP ?? "Not set",
+    [axSpanAttributes.LLM_REQUEST_TOP_K]: modelConfig.topK ?? "Not set",
+    [axSpanAttributes.LLM_REQUEST_FREQUENCY_PENALTY]: modelConfig.frequencyPenalty ?? "Not set",
+    [axSpanAttributes.LLM_REQUEST_PRESENCE_PENALTY]: modelConfig.presencePenalty ?? "Not set",
+    [axSpanAttributes.LLM_REQUEST_STOP_SEQUENCES]: modelConfig.stopSequences?.join(", ") ?? "Not set",
+    [axSpanAttributes.LLM_REQUEST_LLM_IS_STREAMING]: modelConfig.stream ?? "Not set"
   }
 },
 options?.traceContext ?? context.active(),
@@ -2055,7 +2053,7 @@ var AxAIOpenAIImpl = class {
   response_format: this.config?.responseFormat ? { type: this.config.responseFormat } : void 0,
   tools,
   tool_choice: toolsChoice,
-  max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 500,
+  max_completion_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
   temperature: req.modelConfig?.temperature ?? this.config.temperature,
   top_p: req.modelConfig?.topP ?? this.config.topP ?? 1,
   n: req.modelConfig?.n ?? this.config.n,
@@ -4611,7 +4609,6 @@ var axAIOpenAIResponsesDefaultConfig = () => ({
   model: "gpt-4o" /* GPT4O */,
   embedModel: "text-embedding-ada-002" /* TextEmbeddingAda002 */,
   temperature: 0.7,
-  maxTokens: 2048,
   topP: 1,
   stream: true
   // reasoningEffort: 'medium',
@@ -4778,7 +4775,7 @@ var AxAIRekaImpl = class {
 const reqValue = {
   model,
   messages,
-  max_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens ?? 500,
+  max_tokens: req.modelConfig?.maxTokens ?? this.config.maxTokens,
   temperature: req.modelConfig?.temperature ?? this.config.temperature,
   top_k: req.modelConfig?.n ?? this.config.n,
   top_p: req.modelConfig?.topP ?? this.config.topP ?? 1,
@@ -5178,7 +5175,7 @@ var AxAIGrok = class extends AxAIOpenAIBase {
 };
 
 // dsp/generate.ts
-import { ReadableStream as ReadableStream3 } from "node:stream/web";
+import { ReadableStream as ReadableStream3 } from "stream/web";
 import {
   context as context2,
   SpanKind as SpanKind2,
@@ -8519,7 +8516,7 @@ function pick(obj, keys) {
 }
 
 // docs/tika.ts
-import { createReadStream } from "node:fs";
+import { createReadStream } from "fs";
 var AxApacheTika = class {
   tikaUrl;
   fetch;
@@ -10057,13 +10054,13 @@ var AxHFDataLoader = class {
 };
 
 // funcs/code.ts
-import * as _crypto from "node:crypto";
-import * as _fs from "node:fs";
-import * as _http from "node:http";
-import * as _https from "node:https";
-import * as _os from "node:os";
-import * as _process from "node:process";
-import { runInNewContext } from "node:vm";
+import * as _crypto from "crypto";
+import * as _fs from "fs";
+import * as _http from "http";
+import * as _https from "https";
+import * as _os from "os";
+import * as _process from "process";
+import { runInNewContext } from "vm";
 var AxJSInterpreterPermission = /* @__PURE__ */ ((AxJSInterpreterPermission2) => {
   AxJSInterpreterPermission2["FS"] = "node:fs";
   AxJSInterpreterPermission2["NET"] = "net";
@@ -12915,8 +12912,8 @@ ${JSON.stringify(res, null, 2)}`
 };
 
 // mcp/stdioTransport.ts
-import { spawn } from "node:child_process";
-import readline from "node:readline";
+import { spawn } from "child_process";
+import readline from "readline";
 var AxMCPStdioTransport = class {
   process;
   rl;
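
Note: the remaining hunks uniformly rewrite Node built-in imports from node:-prefixed specifiers to bare ones across dsp/generate.ts, docs/tika.ts, funcs/code.ts, and mcp/stdioTransport.ts. On Node.js both forms resolve to the same built-in module; the bare form is also resolvable by bundlers and runtimes that do not understand the node: scheme, though the diff itself does not state the motivation. A quick equivalence check in TypeScript:

// Both specifiers load the same built-in on Node.js:
import { createReadStream as viaBare } from 'fs';
import { createReadStream as viaPrefix } from 'node:fs';
console.log(viaBare === viaPrefix); // true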