mpd-llm-cli 0.1.36 → 0.1.37

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
package/bundle/api.cjs CHANGED
@@ -27594,12 +27594,19 @@ var init_custom_llm = __esm({
   temperature = Number(process.env.CUSTOM_LLM_TEMPERATURE || 0);
   maxTokens = Number(process.env.CUSTOM_LLM_MAX_TOKENS || 8192);
   topP = Number(process.env.CUSTOM_LLM_TOP_P || 1);
-  config = {
-    model: this.modelName,
-    temperature: this.temperature,
-    max_tokens: this.maxTokens,
-    top_p: this.topP
-  };
+  getConfig() {
+    const config2 = {
+      model: this.modelName,
+      max_tokens: this.maxTokens
+    };
+    if (this.temperature !== 0) {
+      config2.temperature = this.temperature;
+    }
+    if (this.topP !== 0) {
+      config2.top_p = this.topP;
+    }
+    return config2;
+  }
   constructor() {
     this.model = new OpenAI({
       apiKey: this.apiKey,
@@ -27625,7 +27632,7 @@ var init_custom_llm = __esm({
       stream: true,
       tools,
       stream_options: { include_usage: true },
-      ...this.config
+      ...this.getConfig()
     });
     const map2 = /* @__PURE__ */ new Map();
     return async function* () {
@@ -27651,7 +27658,7 @@ var init_custom_llm = __esm({
     const completion = await this.model.chat.completions.create({
       messages,
       stream: false,
-      ...this.config
+      ...this.getConfig()
     });
     return ModelConverter.toGeminiResponse(completion);
   }
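
Note: the substantive change in this release, applied identically in all three bundles (api.cjs, api.js, gemini.js), replaces a request-config object captured as a class field with a getConfig() method evaluated at each call site. The method drops temperature from the request when it is 0 (the env-var default) and drops top_p only when it is explicitly 0, so a default-configured install no longer forces temperature: 0 on the provider; presumably this accommodates OpenAI-compatible endpoints that reject or mishandle certain explicit sampling parameters. A minimal TypeScript sketch of the pattern follows; the env-var names and the getConfig() logic come from the diff, while the class shape, the client field, and the complete() wrapper are assumptions for illustration:

import OpenAI from "openai";

// Minimal sketch of the 0.1.37 pattern (class shape and complete() assumed).
class CustomLLM {
  modelName = process.env.CUSTOM_LLM_MODEL_NAME ?? "gemini";
  temperature = Number(process.env.CUSTOM_LLM_TEMPERATURE || 0);
  maxTokens = Number(process.env.CUSTOM_LLM_MAX_TOKENS || 8192);
  topP = Number(process.env.CUSTOM_LLM_TOP_P || 1);
  private client = new OpenAI({ apiKey: process.env.CUSTOM_LLM_API_KEY });

  // Built per request; sampling parameters are omitted when set to 0 so the
  // provider's own defaults apply instead.
  getConfig() {
    const config: { model: string; max_tokens: number; temperature?: number; top_p?: number } = {
      model: this.modelName,
      max_tokens: this.maxTokens
    };
    if (this.temperature !== 0) config.temperature = this.temperature;
    if (this.topP !== 0) config.top_p = this.topP;
    return config;
  }

  async complete(messages: OpenAI.Chat.ChatCompletionMessageParam[]) {
    // Mirrors the non-streaming call site: fixed keys plus the spread config.
    return this.client.chat.completions.create({ messages, stream: false, ...this.getConfig() });
  }
}

Because the keys returned by getConfig() (model, max_tokens, optionally temperature and top_p) do not collide with the fixed keys at either call site, the position of the spread is cosmetic.
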
package/bundle/api.js CHANGED
@@ -27598,12 +27598,19 @@ var init_custom_llm = __esm({
   temperature = Number(process.env.CUSTOM_LLM_TEMPERATURE || 0);
   maxTokens = Number(process.env.CUSTOM_LLM_MAX_TOKENS || 8192);
   topP = Number(process.env.CUSTOM_LLM_TOP_P || 1);
-  config = {
-    model: this.modelName,
-    temperature: this.temperature,
-    max_tokens: this.maxTokens,
-    top_p: this.topP
-  };
+  getConfig() {
+    const config2 = {
+      model: this.modelName,
+      max_tokens: this.maxTokens
+    };
+    if (this.temperature !== 0) {
+      config2.temperature = this.temperature;
+    }
+    if (this.topP !== 0) {
+      config2.top_p = this.topP;
+    }
+    return config2;
+  }
   constructor() {
     this.model = new OpenAI({
       apiKey: this.apiKey,
@@ -27629,7 +27636,7 @@ var init_custom_llm = __esm({
       stream: true,
       tools,
       stream_options: { include_usage: true },
-      ...this.config
+      ...this.getConfig()
     });
     const map2 = /* @__PURE__ */ new Map();
     return async function* () {
@@ -27655,7 +27662,7 @@ var init_custom_llm = __esm({
     const completion = await this.model.chat.completions.create({
       messages,
       stream: false,
-      ...this.config
+      ...this.getConfig()
     });
     return ModelConverter.toGeminiResponse(completion);
   }
@@ -27717,7 +27724,7 @@ async function createContentGeneratorConfig(model, authType) {
   return contentGeneratorConfig;
 }
 async function createContentGenerator(config2, sessionId2) {
-  const version2 = "0.1.36";
+  const version2 = "0.1.37";
   const httpOptions = {
     headers: {
       "User-Agent": `GeminiCLI/${version2} (${process.platform}; ${process.arch})`
@@ -166405,7 +166412,7 @@ async function getPackageJson() {
 // packages/cli/src/utils/version.ts
 async function getCliVersion() {
   const pkgJson = await getPackageJson();
-  return "0.1.36";
+  return "0.1.37";
 }

 // packages/cli/src/config/sandboxConfig.ts
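
Note: getCliVersion() awaits getPackageJson() but returns a string literal, so the version appears to be inlined at bundle time; the repeated 0.1.36 → 0.1.37 literal edits throughout these bundles are consistent with that. A hypothetical reconstruction of the pre-bundle shape (assumed, not shown in this diff):

import { readFile } from "node:fs/promises";
import { fileURLToPath } from "node:url";
import path from "node:path";

// Hypothetical pre-bundle version.ts: read the package's own package.json
// and fall back when the field is missing. Paths are assumptions.
async function getPackageJson(): Promise<{ version?: string } | undefined> {
  try {
    const pkgPath = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "../../package.json");
    return JSON.parse(await readFile(pkgPath, "utf-8"));
  } catch {
    return undefined;
  }
}

export async function getCliVersion(): Promise<string> {
  const pkgJson = await getPackageJson();
  return pkgJson?.version ?? "unknown"; // the bundler appears to replace this with the release literal
}
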
package/bundle/gemini.js CHANGED
@@ -62225,12 +62225,19 @@ var init_custom_llm = __esm({
   temperature = Number(process.env.CUSTOM_LLM_TEMPERATURE || 0);
   maxTokens = Number(process.env.CUSTOM_LLM_MAX_TOKENS || 8192);
   topP = Number(process.env.CUSTOM_LLM_TOP_P || 1);
-  config = {
-    model: this.modelName,
-    temperature: this.temperature,
-    max_tokens: this.maxTokens,
-    top_p: this.topP
-  };
+  getConfig() {
+    const config2 = {
+      model: this.modelName,
+      max_tokens: this.maxTokens
+    };
+    if (this.temperature !== 0) {
+      config2.temperature = this.temperature;
+    }
+    if (this.topP !== 0) {
+      config2.top_p = this.topP;
+    }
+    return config2;
+  }
   constructor() {
     this.model = new OpenAI({
       apiKey: this.apiKey,
@@ -62256,7 +62263,7 @@ var init_custom_llm = __esm({
       stream: true,
       tools,
       stream_options: { include_usage: true },
-      ...this.config
+      ...this.getConfig()
     });
     const map2 = /* @__PURE__ */ new Map();
     return async function* () {
@@ -62282,7 +62289,7 @@ var init_custom_llm = __esm({
     const completion = await this.model.chat.completions.create({
       messages,
       stream: false,
-      ...this.config
+      ...this.getConfig()
     });
     return ModelConverter.toGeminiResponse(completion);
   }
@@ -62344,7 +62351,7 @@ async function createContentGeneratorConfig(model, authType) {
   return contentGeneratorConfig;
 }
 async function createContentGenerator(config2, sessionId2) {
-  const version3 = "0.1.36";
+  const version3 = "0.1.37";
   const httpOptions = {
     headers: {
       "User-Agent": `GeminiCLI/${version3} (${process.platform}; ${process.arch})`
@@ -199404,7 +199411,7 @@ var init_langfuseClient = __esm({
       userId,
       metadata: {
         ...safeMetadata,
-        cli_version: this.safeString("0.1.36", "unknown"),
+        cli_version: this.safeString("0.1.37", "unknown"),
         model: this.safeString(process.env.CUSTOM_LLM_MODEL_NAME, "gemini"),
         auth_type: process.env.USE_CUSTOM_LLM ? "custom_llm" : "google_oauth",
         environment: this.safeString(this.configManager.getConfig()?.environment, "unknown")
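
Note: safeString is not defined anywhere in this diff; from the call sites above it appears to coerce a possibly-missing value into a string with a fallback. A hypothetical one-liner consistent with that usage:

// Hypothetical helper inferred from the call sites; not shown in the diff.
// Returns the value when it is a non-empty string, otherwise the fallback.
function safeString(value: unknown, fallback: string): string {
  return typeof value === "string" && value.trim() !== "" ? value : fallback;
}
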
@@ -200571,7 +200578,7 @@ var init_langfuseIntegration = __esm({
       const metadata = {
         model: this.config.getModel(),
         auth_type: this.config.getContentGeneratorConfig()?.authType,
-        cli_version: "0.1.36",
+        cli_version: "0.1.37",
         start_time: (/* @__PURE__ */ new Date()).toISOString(),
         session_id: this.sessionId
       };
@@ -200630,7 +200637,7 @@ var init_langfuseIntegration = __esm({
         totalCachedTokens: sessionStats.totalCachedTokens,
         totalPromptTokens: sessionStats.totalPromptTokens,
         metadata: {
-          cli_version: "0.1.36",
+          cli_version: "0.1.37",
           auth_type: this.config.getContentGeneratorConfig()?.authType,
           session_end_time: (/* @__PURE__ */ new Date()).toISOString()
         }
@@ -200702,7 +200709,7 @@ var init_langfuseIntegration = __esm({
         error,
         metadata: {
           session_id: this.sessionId,
-          cli_version: "0.1.36",
+          cli_version: "0.1.37",
           auth_type: this.config.getContentGeneratorConfig()?.authType
         }
       });
@@ -276124,7 +276131,7 @@ var require_package7 = __commonJS({
   "packages/router/package.json"(exports2, module2) {
     module2.exports = {
       name: "@mpdai/router",
-      version: "0.1.1",
+      version: "0.1.2",
       description: "MPD AI Router - Route Claude Code to other LLM providers",
       type: "module",
       main: "dist/index.js",
@@ -310107,7 +310114,7 @@ import { promises as fs36 } from "fs";
 import path40 from "path";

 // packages/cli/src/generated/git-commit.ts
-var GIT_COMMIT_INFO = "cc090d6 (local modifications)";
+var GIT_COMMIT_INFO = "fe41993 (local modifications)";

 // node_modules/read-package-up/index.js
 import path35 from "node:path";
@@ -310320,7 +310327,7 @@ async function getPackageJson() {
 // packages/cli/src/utils/version.ts
 async function getCliVersion() {
   const pkgJson = await getPackageJson();
-  return "0.1.36";
+  return "0.1.37";
 }

 // packages/cli/src/ui/commands/memoryCommand.ts
@@ -310584,45 +310591,6 @@ async function waitForService(timeout2 = 1e4, initialDelay = 1e3) {
   }
   return false;
 }
-async function handleRouterStart() {
-  await run();
-}
-async function handleRouterStop() {
-  try {
-    const pid = parseInt(readFileSync16(PID_FILE, "utf-8"));
-    process.kill(pid);
-    cleanupPidFile();
-    if (existsSync17(REFERENCE_COUNT_FILE)) {
-      try {
-        unlinkSync2(REFERENCE_COUNT_FILE);
-      } catch (e2) {
-      }
-    }
-    console.log("MPD AI router service has been successfully stopped.");
-  } catch (e2) {
-    console.log("Failed to stop the service. It may have already been stopped.");
-    cleanupPidFile();
-  }
-}
-async function handleRouterRestart() {
-  try {
-    const pid = parseInt(readFileSync16(PID_FILE, "utf-8"));
-    process.kill(pid);
-    cleanupPidFile();
-    if (existsSync17(REFERENCE_COUNT_FILE)) {
-      try {
-        unlinkSync2(REFERENCE_COUNT_FILE);
-      } catch (e2) {
-      }
-    }
-    console.log("MPD AI router service has been stopped.");
-  } catch (e2) {
-    console.log("Service was not running or failed to stop.");
-    cleanupPidFile();
-  }
-  console.log("Starting MPD AI router service...");
-  await run();
-}
 async function handleRouterStatus() {
   await showStatus();
 }
@@ -310643,34 +310611,10 @@ async function handleRouterCode(args) {
     executeCodeCommand(codeArgs);
   }
 }
-async function handleRouterModel() {
-  await runModelSelector();
-}
 var routerCommand = {
   name: "router",
   description: "Router commands for managing the MPD AI router service",
   subCommands: [
-    {
-      name: "start",
-      description: "Start the router service",
-      action: async (_context, _args) => {
-        await handleRouterStart();
-      }
-    },
-    {
-      name: "stop",
-      description: "Stop the router service",
-      action: async (_context, _args) => {
-        await handleRouterStop();
-      }
-    },
-    {
-      name: "restart",
-      description: "Restart the router service",
-      action: async (_context, _args) => {
-        await handleRouterRestart();
-      }
-    },
     {
       name: "status",
       description: "Show router service status",
@@ -310684,13 +310628,6 @@ var routerCommand = {
       action: async (_context, args) => {
         await handleRouterCode(args);
       }
-    },
-    {
-      name: "model",
-      description: "Interactive model selection and configuration",
-      action: async (_context, _args) => {
-        await handleRouterModel();
-      }
     }
   ]
 };
@@ -331659,7 +331596,7 @@ var DataCollector = class {
   // Extract metadata
   extractMetadata(data) {
     return {
-      cli_version: "0.1.36",
+      cli_version: "0.1.37",
       model: process.env.CUSTOM_LLM_MODEL_NAME || "gemini",
       auth_type: process.env.USE_CUSTOM_LLM ? "custom_llm" : "google_oauth",
       project_path: data.projectPath,
@@ -339114,17 +339051,12 @@ async function handleRouterCommand(command, args) {
 Usage: mpdai router [command]

 Commands:
-  start      Start server
-  stop       Stop server
-  restart    Restart server
   status     Show server status
   code       Execute claude command
-  model      Interactive model selection and configuration

 Example:
-  mpdai router start
+  mpdai router status
   mpdai router code "Write a Hello World"
-  mpdai router model
 `);
   return true;
 }
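
Note: taken together, the three hunks above remove the start, stop, restart, and model router subcommands (and their PID-file handling) in 0.1.37, leaving only status and code. A sketch of the resulting command table, reconstructed from the surviving diff context; the stub implementations and parameter types are assumptions:

// Stubs standing in for the bundle's real implementations (assumed signatures).
async function showStatus(): Promise<void> { /* prints router service status */ }
async function handleRouterCode(args: string): Promise<void> { /* forwards to executeCodeCommand */ }

// The trimmed command table after this release, per the diff above.
const routerCommand = {
  name: "router",
  description: "Router commands for managing the MPD AI router service",
  subCommands: [
    {
      name: "status",
      description: "Show router service status",
      action: async (_context: unknown, _args: string) => {
        await showStatus();
      }
    },
    {
      name: "code",
      description: "Execute claude command",
      action: async (_context: unknown, args: string) => {
        await handleRouterCode(args);
      }
    }
  ]
};
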
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "mpd-llm-cli",
-  "version": "0.1.36",
+  "version": "0.1.37",
   "engines": {
     "node": ">=20.0.0"
   },
@@ -13,7 +13,7 @@
     "url": "git+https://git.rakuten-it.com/scm/mpd-ai/mpd-llm-cli.git"
   },
   "config": {
-    "sandboxImageUri": "xx:0.1.31"
+    "sandboxImageUri": "xx:0.1.37"
   },
   "scripts": {
     "start": "node scripts/start.js",