kimi-proxy 0.1.4 → 0.1.5

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
package/README.md CHANGED
@@ -137,6 +137,15 @@ The API runs on `http://127.0.0.1:8000` and serves the dashboard (built assets)
137
137
 
138
138
  ## Configuration
139
139
 
140
+ ### Dashboard & LiveStore
141
+
142
+ Control LiveStore sync behavior via environment variables:
143
+
144
+ | Variable | Default | Description |
145
+ | ----------------------- | ------- | ----------------------------------------------------------------------------------------------- |
146
+ | `LIVESTORE_BATCH` | 50 | Batch size for dashboard sync (range: 1-500) |
147
+ | `LIVESTORE_MAX_RECORDS` | 500 | Memory sliding window - max records to keep in LiveStore. Set to 0 to disable (not recommended) |
148
+
140
149
  ### Providers
141
150
 
142
151
  Set environment variables in `.env`:
@@ -169,6 +178,15 @@ models:
169
178
 
170
179
  The web dashboard shows request/response logs and metrics. Access it at the root path when running the proxy. LiveStore metadata sync pulls from `/api/livestore/pull` in batches (size controlled by `LIVESTORE_BATCH`) and lazily fetches blobs on expansion. Build the dashboard with `bun run build:all` to serve static assets from the backend.
171
180
 
181
+ ### Performance Features
182
+
183
+ - **Reverse-chronological loading**: Data loads from newest to oldest, providing immediate access to recent logs
184
+ - **Memory-efficient virtualization**: Uses TanStack Virtual to render only visible rows
185
+ - **Configurable sliding window**: Limit browser memory usage by setting `LIVESTORE_MAX_RECORDS` (see `.env.example`)
186
+ - **Automatic garbage collection**: Old records beyond the window limit are automatically purged
187
+
188
+ The dashboard uses reactive queries with TanStack Table and TanStack Virtual for fast, efficient rendering of large datasets.
189
+
172
190
  ## Development
173
191
 
174
192
  ```bash
package/dist/config.d.ts CHANGED
@@ -17,6 +17,7 @@ export interface AppConfig {
17
17
  };
18
18
  livestore: {
19
19
  batchSize: number;
20
+ maxRecords?: number;
20
21
  };
21
22
  providers: {
22
23
  openai?: OpenAIConfig;
package/dist/index.js CHANGED
@@ -91,6 +91,7 @@ function loadConfig() {
91
91
  const streamDelay = Number(process.env.STREAM_DELAY ?? "10");
92
92
  const streamChunkSize = Number(process.env.STREAM_CHUNK_SIZE ?? "5");
93
93
  const livestoreBatch = Number(process.env.LIVESTORE_BATCH ?? "50");
94
+ const livestoreMaxRecords = process.env.LIVESTORE_MAX_RECORDS ? Number(process.env.LIVESTORE_MAX_RECORDS) : 500;
94
95
  const openai = resolveOpenAI();
95
96
  const anthropic = resolveAnthropic();
96
97
  const openrouter = resolveOpenRouter();
@@ -116,7 +117,10 @@ function loadConfig() {
116
117
  server: { host, port },
117
118
  logging: { dbPath, blobRoot },
118
119
  streaming: { delay: streamDelay, chunkSize: streamChunkSize },
119
- livestore: { batchSize: Math.max(1, Math.min(500, livestoreBatch)) },
120
+ livestore: {
121
+ batchSize: Math.max(1, Math.min(500, livestoreBatch)),
122
+ maxRecords: livestoreMaxRecords
123
+ },
120
124
  providers: { openai, anthropic, openrouter, vertex },
121
125
  models: modelRegistry
122
126
  };
@@ -768,12 +772,12 @@ class HybridLogStore {
768
772
  const clauses = [];
769
773
  const params = { limit };
770
774
  if (checkpoint.timestamp) {
771
- clauses.push(`(timestamp > @ts OR (timestamp = @ts AND id > @id))`);
775
+ clauses.push(`(timestamp < @ts OR (timestamp = @ts AND id < @id))`);
772
776
  params.ts = checkpoint.timestamp;
773
- params.id = checkpoint.id ?? 0;
777
+ params.id = checkpoint.id ?? Number.MAX_SAFE_INTEGER;
774
778
  }
775
779
  const where = clauses.length ? `WHERE ${clauses.join(" AND ")}` : "";
776
- const rows = this.db.prepare(`SELECT * FROM logs ${where} ORDER BY datetime(timestamp), id LIMIT @limit`).all(params);
780
+ const rows = this.db.prepare(`SELECT * FROM logs ${where} ORDER BY datetime(timestamp) DESC, id DESC LIMIT @limit`).all(params);
777
781
  return { items: rows, total: rows.length, page: 1, pageSize: rows.length };
778
782
  }
779
783
  resolveBlobPath(record, kind) {
@@ -2663,7 +2667,7 @@ var TOOL_SECTION_END = "<|tool_calls_section_end|>";
2663
2667
  function cleanText(text) {
2664
2668
  if (!text)
2665
2669
  return "";
2666
- return text.replaceAll("(no content)", "").replace(/\n\s*\n\s*\n+/g, `
2670
+ return text.replaceAll("(no content)", "").replace(/<tool_call>[a-zA-Z0-9_:-]+/g, "").replace(/\n\s*\n\s*\n+/g, `
2667
2671
 
2668
2672
  `).trim();
2669
2673
  }
@@ -2789,6 +2793,18 @@ function fixKimiResponse(response, request) {
2789
2793
  const message = choice.message !== undefined && isJsonObject(choice.message) ? choice.message : choice.message = {};
2790
2794
  const rawToolCalls = message.tool_calls;
2791
2795
  let aggregatedToolCalls = Array.isArray(rawToolCalls) ? [...rawToolCalls] : [];
2796
+ if (typeof message.reasoning === "string" && !message.reasoning_content) {
2797
+ message.reasoning_content = message.reasoning;
2798
+ }
2799
+ if (Array.isArray(message.reasoning_details) && !message.reasoning_content) {
2800
+ const details = message.reasoning_details;
2801
+ const text = details.filter((d) => d.type === "reasoning.text" && typeof d.text === "string").map((d) => d.text).join(`
2802
+
2803
+ `);
2804
+ if (text) {
2805
+ message.reasoning_content = text;
2806
+ }
2807
+ }
2792
2808
  if (typeof message.reasoning_content === "string") {
2793
2809
  const original = message.reasoning_content;
2794
2810
  const { cleanedText, extracted } = extractToolCallSections(original);
@@ -5547,16 +5563,16 @@ async function createLiveStoreRuntime(options) {
5547
5563
  const clauses = [];
5548
5564
  const params = { limit };
5549
5565
  if (checkpoint.timestamp) {
5550
- clauses.push("(timestamp > $ts OR (timestamp = $ts AND numeric_id > $id))");
5566
+ clauses.push("(timestamp < $ts OR (timestamp = $ts AND numeric_id < $id))");
5551
5567
  params.ts = checkpoint.timestamp;
5552
- params.id = checkpoint.id ?? 0;
5568
+ params.id = checkpoint.id ?? Number.MAX_SAFE_INTEGER;
5553
5569
  }
5554
5570
  const where = clauses.length ? `WHERE ${clauses.join(" AND ")}` : "";
5555
5571
  const rows = store.query({
5556
5572
  query: `
5557
5573
  SELECT * FROM logs
5558
5574
  ${where}
5559
- ORDER BY timestamp, numeric_id
5575
+ ORDER BY timestamp DESC, numeric_id DESC
5560
5576
  LIMIT $limit
5561
5577
  `,
5562
5578
  bindValues: params
@@ -5577,6 +5593,29 @@ async function createLiveStoreRuntime(options) {
5577
5593
  return;
5578
5594
  return { timestamp: latest.timestamp, id: latest.numeric_id };
5579
5595
  },
5596
+ async trim(maxRecords) {
5597
+ const countResult = store.query({
5598
+ query: `SELECT COUNT(*) as count FROM logs`,
5599
+ bindValues: {}
5600
+ });
5601
+ const currentCount = countResult[0]?.count ?? 0;
5602
+ if (currentCount <= maxRecords)
5603
+ return 0;
5604
+ const toDelete = currentCount - maxRecords;
5605
+ const deleteResult = store.query({
5606
+ query: `
5607
+ DELETE FROM logs
5608
+ WHERE id IN (
5609
+ SELECT id FROM logs
5610
+ ORDER BY timestamp ASC, numeric_id ASC
5611
+ LIMIT @limit
5612
+ )
5613
+ RETURNING COUNT(*) as deleted
5614
+ `,
5615
+ bindValues: { limit: toDelete }
5616
+ });
5617
+ return deleteResult[0]?.deleted ?? 0;
5618
+ },
5580
5619
  async close() {
5581
5620
  await store.shutdown();
5582
5621
  }
@@ -5624,6 +5663,21 @@ async function createServer(config) {
5624
5663
  batchSize: config.livestore.batchSize
5625
5664
  });
5626
5665
  logger.info({ seeded }, "Seeded LiveStore log mirror");
5666
+ if (config.livestore.maxRecords && config.livestore.maxRecords > 0) {
5667
+ const trimInterval = setInterval(async () => {
5668
+ try {
5669
+ const deleted = await liveStoreRuntime.trim(config.livestore.maxRecords);
5670
+ if (deleted > 0) {
5671
+ logger.debug({ deleted, maxRecords: config.livestore.maxRecords }, "LiveStore trimmed old records");
5672
+ }
5673
+ } catch (error) {
5674
+ logger.error({ err: error }, "Failed to trim LiveStore records");
5675
+ }
5676
+ }, 30000);
5677
+ server.addHook("onClose", async () => {
5678
+ clearInterval(trimInterval);
5679
+ });
5680
+ }
5627
5681
  server.addHook("onClose", async () => {
5628
5682
  await liveStoreRuntime.close();
5629
5683
  });
@@ -6146,5 +6200,5 @@ async function bootstrap() {
6146
6200
  }
6147
6201
  bootstrap();
6148
6202
 
6149
- //# debugId=58CB00416A249CAB64756E2164756E21
6203
+ //# debugId=BB1E97A73BB51B4864756E2164756E21
6150
6204
  //# sourceMappingURL=index.js.map