@probeo/anymodel 0.4.0 → 0.5.1

package/dist/index.cjs CHANGED
@@ -39,12 +39,15 @@ __export(src_exports, {
  configureFsIO: () => configureFsIO,
  createAnthropicBatchAdapter: () => createAnthropicBatchAdapter,
  createAnyModelServer: () => createAnyModelServer,
+ createGoogleBatchAdapter: () => createGoogleBatchAdapter,
  createOpenAIBatchAdapter: () => createOpenAIBatchAdapter,
  ensureDir: () => ensureDir,
+ estimateTokenCount: () => estimateTokenCount,
  getFsQueueStatus: () => getFsQueueStatus,
  joinPath: () => joinPath,
  readFileQueued: () => readFileQueued,
  resolveConfig: () => resolveConfig,
+ resolveMaxTokens: () => resolveMaxTokens,
  startServer: () => startServer,
  waitForFsQueuesIdle: () => waitForFsQueuesIdle,
  writeFileFlushedQueued: () => writeFileFlushedQueued,
@@ -534,6 +537,25 @@ var Router = class {
  }
  };

+ // src/utils/fetch-with-timeout.ts
+ var _defaultTimeout = 12e4;
+ var _flexTimeout = 6e5;
+ function setDefaultTimeout(ms) {
+ _defaultTimeout = ms;
+ }
+ function getFlexTimeout() {
+ return _flexTimeout;
+ }
+ function fetchWithTimeout(url, init, timeoutMs) {
+ const ms = timeoutMs ?? _defaultTimeout;
+ const signal = AbortSignal.timeout(ms);
+ if (init?.signal) {
+ const combined = AbortSignal.any([signal, init.signal]);
+ return fetch(url, { ...init, signal: combined });
+ }
+ return fetch(url, { ...init, signal });
+ }
+
  // src/providers/openai.ts
  var OPENAI_API_BASE = "https://api.openai.com/v1";
  var SUPPORTED_PARAMS = /* @__PURE__ */ new Set([
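
The new `fetchWithTimeout` helper composes two Web-platform signal primitives: `AbortSignal.timeout(ms)` aborts on its own after `ms` milliseconds, and `AbortSignal.any([...])` aborts as soon as any of its inputs does, so a caller-supplied signal can still cancel before the deadline. A minimal standalone sketch of that composition (not package code; `AbortSignal.any` needs a recent runtime, e.g. Node 20+):

```js
// Sketch only: the same signal composition fetchWithTimeout uses above.
async function fetchWithDeadline(url, callerSignal, ms = 12e4) {
  const deadline = AbortSignal.timeout(ms); // fires after ms (default 2 min)
  const signal = callerSignal
    ? AbortSignal.any([deadline, callerSignal]) // whichever aborts first wins
    : deadline;
  return fetch(url, { signal }); // rejects with a TimeoutError on expiry
}
```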
@@ -551,19 +573,20 @@ var SUPPORTED_PARAMS = /* @__PURE__ */ new Set([
  "tools",
  "tool_choice",
  "user",
- "logit_bias"
+ "logit_bias",
+ "service_tier"
  ]);
  function createOpenAIAdapter(apiKey, baseURL) {
  const base = baseURL || OPENAI_API_BASE;
- async function makeRequest(path2, body, method = "POST") {
- const res = await fetch(`${base}${path2}`, {
+ async function makeRequest(path2, body, method = "POST", timeoutMs) {
+ const res = await fetchWithTimeout(`${base}${path2}`, {
  method,
  headers: {
  "Content-Type": "application/json",
  "Authorization": `Bearer ${apiKey}`
  },
  body: body ? JSON.stringify(body) : void 0
- });
+ }, timeoutMs);
  if (!res.ok) {
  let errorBody;
  try {
@@ -611,6 +634,7 @@ function createOpenAIAdapter(apiKey, baseURL) {
  if (request.tools !== void 0) body.tools = request.tools;
  if (request.tool_choice !== void 0) body.tool_choice = request.tool_choice;
  if (request.user !== void 0) body.user = request.user;
+ if (request.service_tier !== void 0) body.service_tier = request.service_tier;
  return body;
  }
  const adapter = {
@@ -712,13 +736,15 @@ function createOpenAIAdapter(apiKey, baseURL) {
  },
  async sendRequest(request) {
  const body = buildRequestBody(request);
- const res = await makeRequest("/chat/completions", body);
+ const timeout = request.service_tier === "flex" ? getFlexTimeout() : void 0;
+ const res = await makeRequest("/chat/completions", body, "POST", timeout);
  const json = await res.json();
  return adapter.translateResponse(json);
  },
  async sendStreamingRequest(request) {
  const body = buildRequestBody({ ...request, stream: true });
- const res = await makeRequest("/chat/completions", body);
+ const timeout = request.service_tier === "flex" ? getFlexTimeout() : void 0;
+ const res = await makeRequest("/chat/completions", body, "POST", timeout);
  if (!res.body) {
  throw new AnyModelError(502, "No response body for streaming request", {
  provider_name: "openai"
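
Both send paths now pick their timeout from the request: OpenAI's flex service tier processes requests more slowly in exchange for lower cost, so flex requests get `_flexTimeout` (6e5 ms, 10 minutes) instead of the 2-minute default. A usage sketch, assuming an adapter built with `createOpenAIAdapter`:

```js
// Sketch: service_tier is both forwarded in the request body (it is now in
// SUPPORTED_PARAMS) and used to select the longer flex timeout.
async function summarizeOnFlex(apiKey) {
  const adapter = createOpenAIAdapter(apiKey);
  return adapter.sendRequest({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "Summarize this document." }],
    service_tier: "flex" // 10-minute timeout instead of 2 minutes
  });
}
```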
@@ -765,7 +791,7 @@ var FALLBACK_MODELS = [
  ];
  function createAnthropicAdapter(apiKey) {
  async function makeRequest(path2, body, stream = false) {
- const res = await fetch(`${ANTHROPIC_API_BASE}${path2}`, {
+ const res = await fetchWithTimeout(`${ANTHROPIC_API_BASE}${path2}`, {
  method: "POST",
  headers: {
  "Content-Type": "application/json",
@@ -1022,7 +1048,7 @@ ${body.system}` : jsonInstruction;
  },
  async listModels() {
  try {
- const res = await fetch(`${ANTHROPIC_API_BASE}/models`, {
+ const res = await fetchWithTimeout(`${ANTHROPIC_API_BASE}/models`, {
  method: "GET",
  headers: {
  "x-api-key": apiKey,
@@ -1307,7 +1333,7 @@ function createGoogleAdapter(apiKey) {
  },
  async listModels() {
  try {
- const res = await fetch(`${GEMINI_API_BASE}/models?key=${apiKey}`);
+ const res = await fetchWithTimeout(`${GEMINI_API_BASE}/models?key=${apiKey}`);
  if (!res.ok) return FALLBACK_MODELS2;
  const data = await res.json();
  const models = data.models || [];
@@ -1342,12 +1368,12 @@ function createGoogleAdapter(apiKey) {
  return SUPPORTED_PARAMS3.has(param);
  },
  supportsBatch() {
- return false;
+ return true;
  },
  async sendRequest(request) {
  const body = translateRequest(request);
  const url = getModelEndpoint(request.model, false);
- const res = await fetch(url, {
+ const res = await fetchWithTimeout(url, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(body)
@@ -1370,7 +1396,7 @@ function createGoogleAdapter(apiKey) {
  async sendStreamingRequest(request) {
  const body = translateRequest(request);
  const url = getModelEndpoint(request.model, true);
- const res = await fetch(url, {
+ const res = await fetchWithTimeout(url, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(body)
@@ -1420,7 +1446,7 @@ var MODELS = [
  ];
  function createPerplexityAdapter(apiKey) {
  async function makeRequest(path2, body, method = "POST") {
- const res = await fetch(`${PERPLEXITY_API_BASE}${path2}`, {
+ const res = await fetchWithTimeout(`${PERPLEXITY_API_BASE}${path2}`, {
  method,
  headers: {
  "Content-Type": "application/json",
@@ -1984,6 +2010,17 @@ var BatchStore = class {
  const entries = await readDirQueued(this.dir);
  return entries.filter((d) => d.isDirectory()).map((d) => d.name).sort();
  }
+ /**
+ * Stream requests from JSONL one line at a time (memory-efficient).
+ */
+ async *streamRequests(id) {
+ const p = joinPath(this.batchDir(id), "requests.jsonl");
+ if (!await fileExistsQueued(p)) return;
+ const raw = await readFileQueued(p, "utf8");
+ for (const line of raw.split("\n")) {
+ if (line.trim()) yield JSON.parse(line);
+ }
+ }
  /**
  * Check if a batch exists.
  */
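
`streamRequests` is an async generator: callers pull one parsed JSONL record at a time with `for await`. (As written it still reads the whole file via `readFileQueued` before splitting, so the memory saving is on the consumer side.) A consumption sketch, assuming a `BatchStore` instance:

```js
// Sketch: iterate a batch's stored requests without materializing an array.
async function countStoredRequests(store, batchId) {
  let n = 0;
  for await (const req of store.streamRequests(batchId)) {
    n += 1; // each `req` is one parsed line of requests.jsonl
  }
  return n;
}
```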
@@ -2048,7 +2085,7 @@ var BatchManager = class {
  this.processNativeBatch(id, request, native.adapter).catch(() => {
  });
  } else {
- this.processConcurrentBatch(id, request).catch(() => {
+ this.processConcurrentBatch(id, request.model, request.options).catch(() => {
  });
  }
  return batch;
@@ -2228,28 +2265,28 @@ var BatchManager = class {
  }
  /**
  * Process batch requests concurrently (fallback path).
+ * Streams requests from disk to avoid holding them all in memory.
  */
- async processConcurrentBatch(batchId, request) {
+ async processConcurrentBatch(batchId, model, options) {
  const batch = await this.store.getMeta(batchId);
  if (!batch) return;
  batch.status = "processing";
  await this.store.updateMeta(batch);
- const items = request.requests;
  const active = /* @__PURE__ */ new Set();
  const processItem = async (item) => {
  const current = await this.store.getMeta(batchId);
  if (current?.status === "cancelled") return;
  const chatRequest = {
- model: request.model,
+ model,
  messages: item.messages,
- max_tokens: item.max_tokens ?? request.options?.max_tokens,
- temperature: item.temperature ?? request.options?.temperature,
- top_p: item.top_p ?? request.options?.top_p,
- top_k: item.top_k ?? request.options?.top_k,
- stop: item.stop ?? request.options?.stop,
- response_format: item.response_format ?? request.options?.response_format,
- tools: item.tools ?? request.options?.tools,
- tool_choice: item.tool_choice ?? request.options?.tool_choice
+ max_tokens: item.max_tokens ?? options?.max_tokens,
+ temperature: item.temperature ?? options?.temperature,
+ top_p: item.top_p ?? options?.top_p,
+ top_k: item.top_k ?? options?.top_k,
+ stop: item.stop ?? options?.stop,
+ response_format: item.response_format ?? options?.response_format,
+ tools: item.tools ?? options?.tools,
+ tool_choice: item.tool_choice ?? options?.tool_choice
  };
  let result;
  try {
@@ -2280,7 +2317,7 @@ var BatchManager = class {
  await this.store.updateMeta(meta);
  }
  };
- for (const item of items) {
+ for await (const item of this.store.streamRequests(batchId)) {
  const current = await this.store.getMeta(batchId);
  if (current?.status === "cancelled") break;
  if (active.size >= this.concurrencyLimit) {
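
The loop above is a standard bounded-concurrency pattern: a `Set` of in-flight promises is capped at `concurrencyLimit`. The hunk cuts off just before the wait step, so the package's exact completion logic is not shown in this diff; the general shape, as a standalone sketch (`Promise.race` is the conventional choice for the wait):

```js
// Sketch of the pattern: never more than `limit` items in flight at once.
async function runBounded(items, limit, worker) {
  const active = new Set();
  for (const item of items) {
    if (active.size >= limit) {
      await Promise.race(active); // block until one in-flight task settles
    }
    const p = worker(item).finally(() => active.delete(p));
    active.add(p);
  }
  await Promise.all(active); // drain whatever is still running
}
```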
@@ -2301,6 +2338,54 @@ var BatchManager = class {
  }
  };

+ // src/utils/token-estimate.ts
+ var CHARS_PER_TOKEN2 = 4;
+ function estimateTokenCount(text) {
+ return Math.ceil(text.length / CHARS_PER_TOKEN2);
+ }
+ var MODEL_LIMITS = [
+ // OpenAI
+ { pattern: "gpt-4o-mini", limit: { contextLength: 128e3, maxCompletionTokens: 16384 } },
+ { pattern: "gpt-4o", limit: { contextLength: 128e3, maxCompletionTokens: 16384 } },
+ { pattern: "gpt-4-turbo", limit: { contextLength: 128e3, maxCompletionTokens: 4096 } },
+ { pattern: "gpt-3.5-turbo", limit: { contextLength: 16385, maxCompletionTokens: 4096 } },
+ { pattern: "o1", limit: { contextLength: 2e5, maxCompletionTokens: 1e5 } },
+ { pattern: "o3", limit: { contextLength: 2e5, maxCompletionTokens: 1e5 } },
+ { pattern: "o4-mini", limit: { contextLength: 2e5, maxCompletionTokens: 1e5 } },
+ // Anthropic
+ { pattern: "claude-opus-4", limit: { contextLength: 2e5, maxCompletionTokens: 32768 } },
+ { pattern: "claude-sonnet-4", limit: { contextLength: 2e5, maxCompletionTokens: 16384 } },
+ { pattern: "claude-haiku-4", limit: { contextLength: 2e5, maxCompletionTokens: 8192 } },
+ { pattern: "claude-3.5-sonnet", limit: { contextLength: 2e5, maxCompletionTokens: 8192 } },
+ { pattern: "claude-3-opus", limit: { contextLength: 2e5, maxCompletionTokens: 4096 } },
+ // Google
+ { pattern: "gemini-2.5-pro", limit: { contextLength: 1048576, maxCompletionTokens: 65536 } },
+ { pattern: "gemini-2.5-flash", limit: { contextLength: 1048576, maxCompletionTokens: 65536 } },
+ { pattern: "gemini-2.0-flash", limit: { contextLength: 1048576, maxCompletionTokens: 65536 } },
+ { pattern: "gemini-1.5-pro", limit: { contextLength: 2097152, maxCompletionTokens: 8192 } },
+ { pattern: "gemini-1.5-flash", limit: { contextLength: 1048576, maxCompletionTokens: 8192 } }
+ ];
+ var DEFAULT_LIMIT = { contextLength: 128e3, maxCompletionTokens: 4096 };
+ function getModelLimits(model) {
+ const bare = model.includes("/") ? model.slice(model.indexOf("/") + 1) : model;
+ for (const entry of MODEL_LIMITS) {
+ if (bare.startsWith(entry.pattern) || bare.includes(entry.pattern)) {
+ return entry.limit;
+ }
+ }
+ return DEFAULT_LIMIT;
+ }
+ function resolveMaxTokens(model, messages, userMaxTokens) {
+ if (userMaxTokens !== void 0) return userMaxTokens;
+ const inputChars = JSON.stringify(messages).length;
+ const estimatedInput = Math.ceil(inputChars / CHARS_PER_TOKEN2);
+ const estimatedWithMargin = Math.ceil(estimatedInput * 1.05);
+ const limits = getModelLimits(model);
+ const available = limits.contextLength - estimatedWithMargin;
+ const result = Math.min(limits.maxCompletionTokens, available);
+ return Math.max(1, result);
+ }
+
  // src/providers/openai-batch.ts
  var OPENAI_API_BASE2 = "https://api.openai.com/v1";
  function createOpenAIBatchAdapter(apiKey) {
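
Since `estimateTokenCount` and `resolveMaxTokens` are now exported, the fallback arithmetic is easy to check by hand: the estimate is `ceil(chars / 4)` with a 5% safety margin, clamped by the matched entry in `MODEL_LIMITS`. A worked example:

```js
// Worked example of the fallback (limits taken from MODEL_LIMITS above).
const messages = [{ role: "user", content: "x".repeat(2000) }];
// JSON.stringify(messages).length = 2030 chars -> ceil(2030 / 4) = 508 tokens
// 5% margin: ceil(508 * 1.05) = 534
// "openai/gpt-4o" matches the "gpt-4o" entry: 128000 context, 16384 completion
// available = 128000 - 534 = 127466 -> min(16384, 127466) = 16384
console.log(resolveMaxTokens("openai/gpt-4o", messages)); // 16384
console.log(resolveMaxTokens("openai/gpt-4o", messages, 512)); // 512 (user value wins)
```

Pattern order matters here: `gpt-4o-mini` is listed before `gpt-4o`, so mini models match their own entry rather than the broader prefix.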
@@ -2315,7 +2400,7 @@ function createOpenAIBatchAdapter(apiKey) {
  headers["Content-Type"] = "application/json";
  fetchBody = JSON.stringify(options.body);
  }
- const res = await fetch(`${OPENAI_API_BASE2}${path2}`, {
+ const res = await fetchWithTimeout(`${OPENAI_API_BASE2}${path2}`, {
  method: options.method || "GET",
  headers,
  body: fetchBody
@@ -2341,7 +2426,7 @@ function createOpenAIBatchAdapter(apiKey) {
  model,
  messages: req.messages
  };
- if (req.max_tokens !== void 0) body.max_tokens = req.max_tokens;
+ body.max_tokens = req.max_tokens !== void 0 ? req.max_tokens : resolveMaxTokens(model, req.messages);
  if (req.temperature !== void 0) body.temperature = req.temperature;
  if (req.top_p !== void 0) body.top_p = req.top_p;
  if (req.stop !== void 0) body.stop = req.stop;
@@ -2500,7 +2585,7 @@ function createAnthropicBatchAdapter(apiKey) {
  "anthropic-version": ANTHROPIC_VERSION2,
  "Content-Type": "application/json"
  };
- const res = await fetch(`${ANTHROPIC_API_BASE2}${path2}`, {
+ const res = await fetchWithTimeout(`${ANTHROPIC_API_BASE2}${path2}`, {
  method: options.method || "GET",
  headers,
  body: options.body ? JSON.stringify(options.body) : void 0
@@ -2523,7 +2608,7 @@ function createAnthropicBatchAdapter(apiKey) {
  function translateToAnthropicParams(model, req) {
  const params = {
  model,
- max_tokens: req.max_tokens || DEFAULT_MAX_TOKENS2
+ max_tokens: resolveMaxTokens(model, req.messages, req.max_tokens || DEFAULT_MAX_TOKENS2)
  };
  const systemMessages = req.messages.filter((m) => m.role === "system");
  const nonSystemMessages = req.messages.filter((m) => m.role !== "system");
@@ -2697,6 +2782,284 @@ ${params.system}` : jsonInstruction;
  };
  }

+ // src/providers/google-batch.ts
+ var GEMINI_API_BASE2 = "https://generativelanguage.googleapis.com/v1beta";
+ function createGoogleBatchAdapter(apiKey) {
+ async function apiRequest(path2, options = {}) {
+ const headers = {
+ "Content-Type": "application/json",
+ "x-goog-api-key": apiKey
+ };
+ const res = await fetchWithTimeout(`${GEMINI_API_BASE2}${path2}`, {
+ method: options.method || "GET",
+ headers,
+ body: options.body ? JSON.stringify(options.body) : void 0
+ });
+ if (!res.ok) {
+ let errorBody;
+ try {
+ errorBody = await res.json();
+ } catch {
+ errorBody = { message: res.statusText };
+ }
+ const msg = errorBody?.error?.message || errorBody?.message || res.statusText;
+ throw new AnyModelError(res.status >= 500 ? 502 : res.status, msg, {
+ provider_name: "google",
+ raw: errorBody
+ });
+ }
+ return res;
+ }
+ function translateRequestToGemini(model, req) {
+ const body = {};
+ const systemMessages = req.messages.filter((m) => m.role === "system");
+ const nonSystemMessages = req.messages.filter((m) => m.role !== "system");
+ if (systemMessages.length > 0) {
+ body.systemInstruction = {
+ parts: [{ text: systemMessages.map((m) => typeof m.content === "string" ? m.content : "").join("\n") }]
+ };
+ }
+ body.contents = nonSystemMessages.map((m) => ({
+ role: m.role === "assistant" ? "model" : "user",
+ parts: typeof m.content === "string" ? [{ text: m.content }] : Array.isArray(m.content) ? m.content.map((p) => p.type === "text" ? { text: p.text } : { text: "" }) : [{ text: "" }]
+ }));
+ const generationConfig = {};
+ if (req.temperature !== void 0) generationConfig.temperature = req.temperature;
+ generationConfig.maxOutputTokens = req.max_tokens !== void 0 ? req.max_tokens : resolveMaxTokens(model, req.messages);
+ if (req.top_p !== void 0) generationConfig.topP = req.top_p;
+ if (req.top_k !== void 0) generationConfig.topK = req.top_k;
+ if (req.stop !== void 0) {
+ generationConfig.stopSequences = Array.isArray(req.stop) ? req.stop : [req.stop];
+ }
+ if (req.response_format) {
+ if (req.response_format.type === "json_object") {
+ generationConfig.responseMimeType = "application/json";
+ } else if (req.response_format.type === "json_schema") {
+ generationConfig.responseMimeType = "application/json";
+ generationConfig.responseSchema = req.response_format.json_schema?.schema;
+ }
+ }
+ if (Object.keys(generationConfig).length > 0) {
+ body.generationConfig = generationConfig;
+ }
+ if (req.tools && req.tools.length > 0) {
+ body.tools = [{
+ functionDeclarations: req.tools.map((t) => ({
+ name: t.function.name,
+ description: t.function.description || "",
+ parameters: t.function.parameters || {}
+ }))
+ }];
+ if (req.tool_choice) {
+ if (req.tool_choice === "auto") {
+ body.toolConfig = { functionCallingConfig: { mode: "AUTO" } };
+ } else if (req.tool_choice === "required") {
+ body.toolConfig = { functionCallingConfig: { mode: "ANY" } };
+ } else if (req.tool_choice === "none") {
+ body.toolConfig = { functionCallingConfig: { mode: "NONE" } };
+ } else if (typeof req.tool_choice === "object") {
+ body.toolConfig = {
+ functionCallingConfig: {
+ mode: "ANY",
+ allowedFunctionNames: [req.tool_choice.function.name]
+ }
+ };
+ }
+ }
+ }
+ return body;
+ }
+ function mapFinishReason(reason) {
+ switch (reason) {
+ case "STOP":
+ return "stop";
+ case "MAX_TOKENS":
+ return "length";
+ case "SAFETY":
+ return "content_filter";
+ case "RECITATION":
+ return "content_filter";
+ default:
+ return "stop";
+ }
+ }
+ function translateGeminiResponse(response, model) {
+ const candidate = response.candidates?.[0];
+ let content = "";
+ const toolCalls = [];
+ for (const part of candidate?.content?.parts || []) {
+ if (part.text) {
+ content += part.text;
+ } else if (part.functionCall) {
+ toolCalls.push({
+ id: generateId("call"),
+ type: "function",
+ function: {
+ name: part.functionCall.name,
+ arguments: JSON.stringify(part.functionCall.args || {})
+ }
+ });
+ }
+ }
+ const message = { role: "assistant", content };
+ if (toolCalls.length > 0) {
+ message.tool_calls = toolCalls;
+ }
+ const finishReason = toolCalls.length > 0 ? "tool_calls" : mapFinishReason(candidate?.finishReason || "STOP");
+ return {
+ id: generateId(),
+ object: "chat.completion",
+ created: Math.floor(Date.now() / 1e3),
+ model: `google/${model}`,
+ choices: [{ index: 0, message, finish_reason: finishReason }],
+ usage: {
+ prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
+ completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
+ total_tokens: response.usageMetadata?.totalTokenCount || 0
+ }
+ };
+ }
+ function mapBatchState(state) {
+ switch (state) {
+ case "JOB_STATE_PENDING":
+ return "pending";
+ case "JOB_STATE_RUNNING":
+ return "processing";
+ case "JOB_STATE_SUCCEEDED":
+ return "completed";
+ case "JOB_STATE_FAILED":
+ return "failed";
+ case "JOB_STATE_CANCELLED":
+ return "cancelled";
+ case "JOB_STATE_EXPIRED":
+ return "failed";
+ default:
+ return "pending";
+ }
+ }
+ return {
+ async createBatch(model, requests, _options) {
+ const batchRequests = requests.map((req) => ({
+ request: translateRequestToGemini(model, req),
+ metadata: { key: req.custom_id }
+ }));
+ const res = await apiRequest(`/models/${model}:batchGenerateContent`, {
+ method: "POST",
+ body: {
+ batch: {
+ display_name: `anymodel-batch-${Date.now()}`,
+ input_config: {
+ requests: {
+ requests: batchRequests
+ }
+ }
+ }
+ }
+ });
+ const data = await res.json();
+ const batchName = data.name || data.batch?.name;
+ if (!batchName) {
+ throw new AnyModelError(502, "No batch name in Google response", {
+ provider_name: "google",
+ raw: data
+ });
+ }
+ return {
+ providerBatchId: batchName,
+ metadata: {
+ model,
+ total_requests: requests.length
+ }
+ };
+ },
+ async pollBatch(providerBatchId) {
+ const res = await apiRequest(`/${providerBatchId}`);
+ const data = await res.json();
+ const state = data.state || "JOB_STATE_PENDING";
+ const status = mapBatchState(state);
+ const totalCount = data.totalCount || data.metadata?.total_requests || 0;
+ const successCount = data.succeededCount || 0;
+ const failedCount = data.failedCount || 0;
+ return {
+ status,
+ total: totalCount || successCount + failedCount,
+ completed: successCount,
+ failed: failedCount
+ };
+ },
+ async getBatchResults(providerBatchId) {
+ const batchRes = await apiRequest(`/${providerBatchId}`);
+ const batchData = await batchRes.json();
+ const results = [];
+ const model = batchData.metadata?.model || "unknown";
+ if (batchData.response?.inlinedResponses) {
+ for (const item of batchData.response.inlinedResponses) {
+ const customId = item.metadata?.key || `request-${results.length}`;
+ if (item.response) {
+ results.push({
+ custom_id: customId,
+ status: "success",
+ response: translateGeminiResponse(item.response, model),
+ error: null
+ });
+ } else if (item.error) {
+ results.push({
+ custom_id: customId,
+ status: "error",
+ response: null,
+ error: {
+ code: item.error.code || 500,
+ message: item.error.message || "Batch item failed"
+ }
+ });
+ }
+ }
+ return results;
+ }
+ const responsesFile = batchData.response?.responsesFileName || batchData.outputConfig?.file_name;
+ if (responsesFile) {
+ const downloadUrl = `${GEMINI_API_BASE2}/${responsesFile}:download?alt=media`;
+ const fileRes = await fetchWithTimeout(downloadUrl, {
+ headers: { "x-goog-api-key": apiKey }
+ });
+ if (!fileRes.ok) {
+ throw new AnyModelError(502, "Failed to download batch results file", {
+ provider_name: "google"
+ });
+ }
+ const text = await fileRes.text();
+ for (const line of text.trim().split("\n")) {
+ if (!line) continue;
+ const item = JSON.parse(line);
+ const customId = item.key || item.metadata?.key || `request-${results.length}`;
+ if (item.response) {
+ results.push({
+ custom_id: customId,
+ status: "success",
+ response: translateGeminiResponse(item.response, model),
+ error: null
+ });
+ } else if (item.error) {
+ results.push({
+ custom_id: customId,
+ status: "error",
+ response: null,
+ error: {
+ code: item.error.code || 500,
+ message: item.error.message || "Batch item failed"
+ }
+ });
+ }
+ }
+ }
+ return results;
+ },
+ async cancelBatch(providerBatchId) {
+ await apiRequest(`/${providerBatchId}:cancel`, { method: "POST" });
+ }
+ };
+ }
+
  // src/client.ts
  var AnyModel = class {
  registry;
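
Taken together, the new adapter implements a create/poll/fetch/cancel lifecycle against Gemini's `batchGenerateContent` long-running operation, with `metadata.key` carrying each request's `custom_id` round-trip. A lifecycle sketch (the 30-second polling interval is an assumption; the batch name Google returns is used verbatim as `providerBatchId`):

```js
// Sketch: drive the Google batch adapter end to end.
async function runGeminiBatch(apiKey) {
  const adapter = createGoogleBatchAdapter(apiKey);
  const { providerBatchId } = await adapter.createBatch("gemini-2.5-flash", [
    { custom_id: "req-1", messages: [{ role: "user", content: "Hello" }] }
  ]);
  let status;
  do {
    await new Promise((resolve) => setTimeout(resolve, 30_000)); // assumed interval
    ({ status } = await adapter.pollBatch(providerBatchId));
  } while (status === "pending" || status === "processing");
  if (status !== "completed") return [];
  return adapter.getBatchResults(providerBatchId); // [{ custom_id, status, ... }]
}
```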
@@ -2712,6 +3075,7 @@ var AnyModel = class {
  constructor(config = {}) {
  this.config = resolveConfig(config);
  this.registry = new ProviderRegistry();
+ setDefaultTimeout((this.config.defaults?.timeout ?? 120) * 1e3);
  if (this.config.io) {
  configureFsIO(this.config.io);
  }
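
The constructor wiring implies `defaults.timeout` is expressed in seconds (it is multiplied by 1e3 before reaching `setDefaultTimeout`), with 120 s as the fallback. A configuration sketch:

```js
// Sketch: raise the global per-request timeout from 120 s to 5 minutes.
const client = new AnyModel({ defaults: { timeout: 300 } });
// internally: setDefaultTimeout(300 * 1e3) -> every non-flex fetch gets 300000 ms
```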
@@ -2832,6 +3196,10 @@ var AnyModel = class {
  if (anthropicKey) {
  this.batchManager.registerBatchAdapter("anthropic", createAnthropicBatchAdapter(anthropicKey));
  }
+ const googleKey = config.google?.apiKey || process.env.GOOGLE_API_KEY;
+ if (googleKey) {
+ this.batchManager.registerBatchAdapter("google", createGoogleBatchAdapter(googleKey));
+ }
  }
  applyDefaults(request) {
  const defaults = this.config.defaults;
@@ -3011,12 +3379,15 @@ function startServer(options = {}) {
  configureFsIO,
  createAnthropicBatchAdapter,
  createAnyModelServer,
+ createGoogleBatchAdapter,
  createOpenAIBatchAdapter,
  ensureDir,
+ estimateTokenCount,
  getFsQueueStatus,
  joinPath,
  readFileQueued,
  resolveConfig,
+ resolveMaxTokens,
  startServer,
  waitForFsQueuesIdle,
  writeFileFlushedQueued,
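
For consumers of the CJS build shown here, the net new public surface in 0.5.1 is `createGoogleBatchAdapter`, `estimateTokenCount`, and `resolveMaxTokens`. A require-side sketch:

```js
// Sketch: the three exports added in this release, consumed from the dist above.
const {
  createGoogleBatchAdapter,
  estimateTokenCount,
  resolveMaxTokens
} = require("@probeo/anymodel");

console.log(estimateTokenCount("hello world")); // ceil(11 / 4) = 3
```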