openclaw-autoproxy 1.0.3 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -59,6 +59,9 @@ export interface GatewayConfig {
59
59
  timeoutMs: number;
60
60
  upstreamBaseUrl: string;
61
61
  upstreamApiKey: string;
62
+ upstreamMaxConnections: number;
63
+ upstreamKeepAliveTimeoutMs: number;
64
+ upstreamKeepAliveMaxTimeoutMs: number;
62
65
  retryStatusCodes: Set<number>;
63
66
  globalFallbackModels: string[];
64
67
  modelFallbackMap: Record<string, string[]>;
@@ -81,6 +84,20 @@ function parseCsvList(value: string | undefined): string[] {
81
84
  .filter(Boolean);
82
85
  }
83
86
 
87
+ function parsePositiveInteger(value: string | undefined, fallback: number): number {
88
+ if (!value) {
89
+ return fallback;
90
+ }
91
+
92
+ const parsed = Number.parseInt(value, 10);
93
+
94
+ if (!Number.isInteger(parsed) || parsed <= 0) {
95
+ return fallback;
96
+ }
97
+
98
+ return parsed;
99
+ }
100
+
84
101
  function parseRetryCodes(value: string | undefined): Set<number> {
85
102
  const defaults = new Set([412, 429, 500, 502, 503, 504]);
86
103
 
@@ -404,6 +421,15 @@ function loadRouteFileConfig(): ParsedRouteFileConfig {
404
421
  const host = process.env.HOST ?? "0.0.0.0";
405
422
  const port = Number.parseInt(process.env.PORT ?? "8787", 10);
406
423
  const timeoutMs = Number.parseInt(process.env.REQUEST_TIMEOUT_MS ?? "60000", 10);
424
+ const upstreamMaxConnections = parsePositiveInteger(process.env.UPSTREAM_MAX_CONNECTIONS, 200);
425
+ const upstreamKeepAliveTimeoutMs = parsePositiveInteger(
426
+ process.env.UPSTREAM_KEEPALIVE_TIMEOUT_MS,
427
+ 60_000,
428
+ );
429
+ const upstreamKeepAliveMaxTimeoutMs = parsePositiveInteger(
430
+ process.env.UPSTREAM_KEEPALIVE_MAX_TIMEOUT_MS,
431
+ 300_000,
432
+ );
407
433
  const upstreamBaseUrl = (process.env.UPSTREAM_BASE_URL ?? "https://api.openai.com").replace(
408
434
  /\/+$/,
409
435
  "",
@@ -424,6 +450,9 @@ export const config: GatewayConfig = {
424
450
  timeoutMs,
425
451
  upstreamBaseUrl,
426
452
  upstreamApiKey: process.env.UPSTREAM_API_KEY ?? "",
453
+ upstreamMaxConnections,
454
+ upstreamKeepAliveTimeoutMs,
455
+ upstreamKeepAliveMaxTimeoutMs,
427
456
  retryStatusCodes: routeFileConfig.retryStatusCodes ?? parseRetryCodes(process.env.RETRY_STATUS_CODES),
428
457
  globalFallbackModels: parseCsvList(process.env.GLOBAL_FALLBACK_MODELS),
429
458
  modelFallbackMap: parseModelFallbackMap(process.env.MODEL_FALLBACK_MAP),
@@ -0,0 +1,192 @@
1
// Rolling retention window for per-model request samples: 12 hours.
const DEFAULT_WINDOW_MS = 12 * 60 * 60 * 1000;
// Hard cap on samples kept per model, to bound memory use.
const DEFAULT_MAX_SAMPLES_PER_MODEL = 5000;

// Public alias of the retention window for callers of this module.
export const DEFAULT_MODEL_HEALTH_WINDOW_MS = DEFAULT_WINDOW_MS;
5
+
6
/** One recorded request outcome for a model, used for rolling health stats. */
interface ModelRequestSample {
  /** Epoch milliseconds when the sample was recorded. */
  at: number;
  /** Whether the request was considered successful. */
  ok: boolean;
  /** Observed response time in milliseconds. */
  responseMs: number;
  /** HTTP status code of the response, or null when not available. */
  statusCode: number | null;
}
12
+
13
/** Aggregated health metrics for a single model over the sampled window. */
export interface ModelHealthSummary {
  /** Model identifier the summary belongs to. */
  model: string;
  /** Number of samples (requests) in the window. */
  accessCount: number;
  /** Mean response time in ms, rounded to two decimal places. */
  avgResponseMs: number;
  /** Response time of the most recent sample, in ms. */
  lastResponseMs: number;
  /** ISO-8601 timestamp of the most recent sample. */
  lastSeenAt: string;
  /** Status code of the most recent sample, or null when unknown. */
  lastStatusCode: number | null;
  /** Number of successful samples in the window. */
  successCount: number;
  /** successCount / accessCount as a percentage, rounded to two decimals. */
  successRatePct: number;
}
23
+
24
// In-memory store: model name -> chronologically ordered samples (oldest first).
const modelSamples = new Map<string, ModelRequestSample[]>();
25
+
26
+ function roundMs(value: number): number {
27
+ return Math.round(value * 100) / 100;
28
+ }
29
+
30
+ function pruneModelSamples(samples: ModelRequestSample[], cutoffAt: number): ModelRequestSample[] {
31
+ let startIndex = 0;
32
+
33
+ while (startIndex < samples.length && samples[startIndex] && samples[startIndex].at < cutoffAt) {
34
+ startIndex += 1;
35
+ }
36
+
37
+ if (startIndex <= 0) {
38
+ return samples;
39
+ }
40
+
41
+ return samples.slice(startIndex);
42
+ }
43
+
44
+ function pruneExpiredSamples(cutoffAt: number): void {
45
+ for (const [model, samples] of modelSamples.entries()) {
46
+ const pruned = pruneModelSamples(samples, cutoffAt);
47
+
48
+ if (pruned.length === 0) {
49
+ modelSamples.delete(model);
50
+ continue;
51
+ }
52
+
53
+ if (pruned !== samples) {
54
+ modelSamples.set(model, pruned);
55
+ }
56
+ }
57
+ }
58
+
59
+ export function recordModelRequestSample(
60
+ model: string | null,
61
+ params: {
62
+ ok: boolean;
63
+ responseMs: number;
64
+ statusCode?: number | null;
65
+ },
66
+ ): void {
67
+ if (!model) {
68
+ return;
69
+ }
70
+
71
+ if (!Number.isFinite(params.responseMs) || params.responseMs < 0) {
72
+ return;
73
+ }
74
+
75
+ const now = Date.now();
76
+ const sample: ModelRequestSample = {
77
+ at: now,
78
+ ok: params.ok,
79
+ responseMs: params.responseMs,
80
+ statusCode: params.statusCode ?? null,
81
+ };
82
+
83
+ const existing = modelSamples.get(model) ?? [];
84
+ existing.push(sample);
85
+
86
+ if (existing.length > DEFAULT_MAX_SAMPLES_PER_MODEL) {
87
+ existing.splice(0, existing.length - DEFAULT_MAX_SAMPLES_PER_MODEL);
88
+ }
89
+
90
+ modelSamples.set(model, existing);
91
+
92
+ const cutoffAt = now - DEFAULT_WINDOW_MS;
93
+ pruneExpiredSamples(cutoffAt);
94
+ }
95
+
96
+ export function recordModelLoadSample(model: string | null, loadMs: number): void {
97
+ recordModelRequestSample(model, {
98
+ ok: true,
99
+ responseMs: loadMs,
100
+ statusCode: 200,
101
+ });
102
+ }
103
+
104
+ function summarizeModel(model: string, samples: ModelRequestSample[]): ModelHealthSummary | null {
105
+ if (samples.length === 0) {
106
+ return null;
107
+ }
108
+
109
+ const accessCount = samples.length;
110
+ const successCount = samples.reduce((count, sample) => count + (sample.ok ? 1 : 0), 0);
111
+ const totalResponseMs = samples.reduce((total, sample) => total + sample.responseMs, 0);
112
+ const lastSample = samples[samples.length - 1] ?? null;
113
+ const avgResponseMs = totalResponseMs / accessCount;
114
+ const successRatePct = accessCount > 0 ? (successCount / accessCount) * 100 : 0;
115
+
116
+ return {
117
+ model,
118
+ accessCount,
119
+ avgResponseMs: roundMs(avgResponseMs),
120
+ lastResponseMs: roundMs(lastSample?.responseMs ?? 0),
121
+ lastSeenAt: new Date(lastSample?.at ?? Date.now()).toISOString(),
122
+ lastStatusCode: lastSample?.statusCode ?? null,
123
+ successCount,
124
+ successRatePct: roundMs(successRatePct),
125
+ };
126
+ }
127
+
128
+ export function getModelHealthWindow(windowMs = DEFAULT_WINDOW_MS): {
129
+ windowHours: number;
130
+ models: Array<ModelHealthSummary & { rank: number }>;
131
+ } {
132
+ const normalizedWindowMs = Number.isFinite(windowMs) && windowMs > 0 ? windowMs : DEFAULT_WINDOW_MS;
133
+ const cutoffAt = Date.now() - normalizedWindowMs;
134
+
135
+ pruneExpiredSamples(cutoffAt);
136
+
137
+ const summaries: ModelHealthSummary[] = [];
138
+
139
+ for (const [model, samples] of modelSamples.entries()) {
140
+ const filtered = pruneModelSamples(samples, cutoffAt);
141
+
142
+ if (filtered.length === 0) {
143
+ continue;
144
+ }
145
+
146
+ if (filtered !== samples) {
147
+ modelSamples.set(model, filtered);
148
+ }
149
+
150
+ const summary = summarizeModel(model, filtered);
151
+
152
+ if (summary) {
153
+ summaries.push(summary);
154
+ }
155
+ }
156
+
157
+ summaries.sort((a, b) => {
158
+ if (a.accessCount !== b.accessCount) {
159
+ return b.accessCount - a.accessCount;
160
+ }
161
+
162
+ if (a.successRatePct !== b.successRatePct) {
163
+ return b.successRatePct - a.successRatePct;
164
+ }
165
+
166
+ if (a.avgResponseMs !== b.avgResponseMs) {
167
+ return a.avgResponseMs - b.avgResponseMs;
168
+ }
169
+
170
+ return a.model.localeCompare(b.model);
171
+ });
172
+
173
+ return {
174
+ windowHours: roundMs(normalizedWindowMs / (60 * 60 * 1000)),
175
+ models: summaries.map((entry, index) => ({
176
+ rank: index + 1,
177
+ ...entry,
178
+ })),
179
+ };
180
+ }
181
+
182
+ export function getModelLoadRankingHealth(windowMs = DEFAULT_WINDOW_MS): {
183
+ windowHours: number;
184
+ rankedModels: Array<ModelHealthSummary & { rank: number }>;
185
+ } {
186
+ const health = getModelHealthWindow(windowMs);
187
+
188
+ return {
189
+ windowHours: health.windowHours,
190
+ rankedModels: health.models,
191
+ };
192
+ }