@flotorch/loadtest 0.1.2 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +12 -5
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -455,9 +455,10 @@ var cyan = wrap("36", "39");
 var magenta = wrap("35", "39");
 
 // src/cli/args.ts
+var VERSION = true ? "0.2.1" : "dev";
 var VALID_COMMANDS = /* @__PURE__ */ new Set(["run", "generate", "bench", "report", "init"]);
 var HELP_TEXT = `
-${bold("FLOTorch Load Tester")}
+${bold("FLOTorch Load Tester")} ${dim(`v${VERSION}`)}
 
 ${yellow("USAGE")}
   flotorch ${dim("<command>")} ${dim("[options]")}
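With VERSION inlined into the bundle (the `true ? "0.2.1" : "dev"` shape is typical bundler output for a compile-time define), the help banner now interpolates it. The first lines of `flotorch --help` should therefore read roughly as follows (colors and exact spacing omitted):

```
FLOTorch Load Tester v0.2.1

USAGE
  flotorch <command> [options]
```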
@@ -480,6 +481,7 @@ ${yellow("OPTIONS")}
   ${cyan("--base-url")} ${dim("<url>")} Override provider.baseURL
   ${cyan("--streaming")} Enable streaming
   ${cyan("--no-streaming")} Disable streaming
+  ${cyan("-v, --version")} Show version number
   ${cyan("-h, --help")} Show this help message
 `.trimStart();
 function parseCliArgs(argv) {
@@ -501,10 +503,15 @@ function parseCliArgs(argv) {
       "base-url": { type: "string" },
       streaming: { type: "boolean" },
       "no-streaming": { type: "boolean" },
+      version: { type: "boolean", short: "v" },
       help: { type: "boolean", short: "h" }
     },
     allowPositionals: true
   });
+  if (values.version) {
+    console.log(VERSION);
+    process.exit(0);
+  }
   if (values.help) {
     console.log(HELP_TEXT);
     process.exit(0);
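Because the version check runs before the help check, either spelling prints the bare version string and exits 0. Expected usage, assuming the flag is reached through the published `flotorch` bin:

```
$ flotorch --version
0.2.1
$ flotorch -v
0.2.1
```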
@@ -621,8 +628,8 @@ async function runInit(outputPath) {
   }
   let baseURL;
   if (adapter === "openai") {
-    const url = await prompt(rl, "Base URL", "https://api.openai.com/v1");
-    if (url !== "https://api.openai.com/v1") {
+    const url = await prompt(rl, "Base URL", "https://api.openai.com/v1/chat/completions");
+    if (url !== "https://api.openai.com/v1/chat/completions") {
       baseURL = url;
     }
   }
@@ -861,11 +868,11 @@ var OpenAIBackend = class _OpenAIBackend {
   apiKey;
   static create(baseURL) {
     const env = validateEnv(EnvSchema, "openai");
-    const url = baseURL ?? "https://api.openai.com/v1";
+    const url = baseURL ?? "https://api.openai.com/v1/chat/completions";
     return new _OpenAIBackend(url, env.OPENAI_API_KEY);
   }
   constructor(baseURL, apiKey) {
-    this.url = baseURL.endsWith("/chat/completions") ? baseURL : `${baseURL.replace(/\/+$/, "")}/chat/completions`;
+    this.url = baseURL;
     this.apiKey = apiKey;
   }
   async request(prompt2, model, maxTokens, systemPrompt, params, streaming, signal) {
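The URL handling in OpenAIBackend is effectively inverted: 0.1.2 normalized any base URL by appending `/chat/completions`, while 0.2.1 uses the URL verbatim and instead ships the full endpoint as the default (matching the new `init` prompt above). A minimal sketch of the difference and its implication for `--base-url`; the helper names are illustrative, not part of the package:

```js
// 0.1.2 behavior: any base URL was rewritten to end in /chat/completions.
const legacyResolveUrl = (baseURL) =>
  baseURL.endsWith("/chat/completions")
    ? baseURL
    : `${baseURL.replace(/\/+$/, "")}/chat/completions`;

// 0.2.1 behavior: the URL is taken as-is, so --base-url (and the init prompt)
// must now point at the complete chat-completions endpoint.
const currentResolveUrl = (baseURL) => baseURL;

console.log(legacyResolveUrl("https://api.openai.com/v1"));
// -> https://api.openai.com/v1/chat/completions
console.log(currentResolveUrl("https://api.openai.com/v1"));
// -> https://api.openai.com/v1 (no longer rewritten; requests would hit the wrong path)
```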
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@flotorch/loadtest",
-  "version": "0.1.2",
+  "version": "0.2.1",
   "description": "LLM inference load testing and benchmarking tool",
   "license": "MIT",
   "repository": {