@flotorch/loadtest 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -41,7 +41,7 @@ This launches an interactive wizard that asks for:
 
 - **Provider adapter** — `openai` or `sagemaker` (default: `openai`)
 - **Model name** — the model identifier your endpoint expects
-- **Base URL** — API endpoint (default: `https://api.openai.com/v1/chat/completions`)
+- **Base URL** — API endpoint (default: `https://api.openai.com/v1`)
 - **Concurrency** — number of parallel requests (default: `10`)
 - **Input tokens mean** — average input token count per request (default: `512`)
 - **Output tokens mean** — average output token count per request (default: `256`)
@@ -129,7 +129,7 @@ The config file is JSON with four sections:
   "provider": {
     "adapter": "openai", // "openai" | "sagemaker"
     "model": "gpt-4o", // model identifier (required)
-    "baseURL": "https://api.openai.com/v1/chat/completions", // API endpoint
+    "baseURL": "https://api.openai.com/v1", // API endpoint
     "systemPrompt": "You are a helpful assistant.", // optional system message
     "config": {}, // backend-specific options
   },
@@ -199,7 +199,7 @@ The config file is JSON with four sections:
   "provider": {
     "adapter": "openai",
     "model": "gpt-4o",
-    "baseURL": "https://api.openai.com/v1/chat/completions"
+    "baseURL": "https://api.openai.com/v1"
   },
   "benchmark": {
     "concurrency": 20,
@@ -239,7 +239,7 @@ The config file is JSON with four sections:
   "provider": {
     "adapter": "openai",
     "model": "gpt-4o-mini",
-    "baseURL": "https://api.openai.com/v1/chat/completions"
+    "baseURL": "https://api.openai.com/v1"
   },
   "benchmark": {
     "concurrency": 10,
package/dist/index.js CHANGED
@@ -382,8 +382,7 @@ init_esm_shims();
 init_esm_shims();
 import { parseArgs } from "util";
 import { readFileSync, writeFileSync, existsSync, mkdirSync } from "fs";
-import { join, dirname } from "path";
-import { fileURLToPath as fileURLToPath2 } from "url";
+import { join } from "path";
 
 // src/schemas/config.zod.ts
 init_esm_shims();
@@ -456,11 +455,10 @@ var cyan = wrap("36", "39");
 var magenta = wrap("35", "39");
 
 // src/cli/args.ts
-var __dirname2 = dirname(fileURLToPath2(import.meta.url));
-var pkg = JSON.parse(readFileSync(join(__dirname2, "../../package.json"), "utf-8"));
+var VERSION = true ? "0.2.2" : "dev";
 var VALID_COMMANDS = /* @__PURE__ */ new Set(["run", "generate", "bench", "report", "init"]);
 var HELP_TEXT = `
-${bold("FLOTorch Load Tester")} ${dim(`v${pkg.version}`)}
+${bold("FLOTorch Load Tester")} ${dim(`v${VERSION}`)}
 
 ${yellow("USAGE")}
   flotorch ${dim("<command>")} ${dim("[options]")}
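The `true ? "0.2.2" : "dev"` expression is the typical footprint of a build-time define: the bundler replaces a compile-time flag with a literal and leaves the dead branch for the minifier to fold away. This removes the runtime `package.json` lookup, which is also why the `dirname` and `fileURLToPath` imports disappear in the hunk above. A minimal sketch of how such a constant could be injected with esbuild's `define` option; the flag names `__PROD__` and `__PKG_VERSION__` are illustrative, since the package's real build setup is not part of this diff:

```ts
// build.ts: hypothetical build script, not the package's actual configuration.
import { build } from "esbuild";
import { readFileSync } from "fs";

const pkg = JSON.parse(readFileSync("package.json", "utf-8"));

await build({
  entryPoints: ["src/index.ts"],
  bundle: true,
  platform: "node",
  format: "esm",
  outfile: "dist/index.js",
  define: {
    // In source: const VERSION = __PROD__ ? __PKG_VERSION__ : "dev";
    // After substitution the bundle reads: var VERSION = true ? "0.2.2" : "dev";
    __PROD__: "true",
    __PKG_VERSION__: JSON.stringify(pkg.version),
  },
});
```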
@@ -511,7 +509,7 @@ function parseCliArgs(argv) {
     allowPositionals: true
   });
   if (values.version) {
-    console.log(pkg.version);
+    console.log(VERSION);
     process.exit(0);
   }
   if (values.help) {
@@ -630,8 +628,8 @@ async function runInit(outputPath) {
   }
   let baseURL;
   if (adapter === "openai") {
-    const url = await prompt(rl, "Base URL", "https://api.openai.com/v1/chat/completions");
-    if (url !== "https://api.openai.com/v1/chat/completions") {
+    const url = await prompt(rl, "Base URL", "https://api.openai.com/v1");
+    if (url !== "https://api.openai.com/v1") {
       baseURL = url;
     }
   }
@@ -870,11 +868,11 @@ var OpenAIBackend = class _OpenAIBackend {
   apiKey;
   static create(baseURL) {
     const env = validateEnv(EnvSchema, "openai");
-    const url = baseURL ?? "https://api.openai.com/v1/chat/completions";
+    const url = baseURL ?? "https://api.openai.com/v1";
     return new _OpenAIBackend(url, env.OPENAI_API_KEY);
   }
   constructor(baseURL, apiKey) {
-    this.url = baseURL;
+    this.url = baseURL.endsWith("/") ? baseURL : `${baseURL.replace(/\/+$/, "")}/chat/completions`;
     this.apiKey = apiKey;
   }
   async request(prompt2, model, maxTokens, systemPrompt, params, streaming, signal) {
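The new constructor normalizes the endpoint: a `baseURL` without a trailing slash is treated as an API root and gets `/chat/completions` appended, while one ending in `/` is used verbatim, which reads as an escape hatch for supplying a complete custom endpoint. A standalone sketch of that branch, with illustrative inputs:

```ts
// Hypothetical reimplementation of the 0.2.2 URL handling, for illustration only;
// the package's actual class is shown in the diff above.
function resolveEndpoint(baseURL: string): string {
  // No trailing slash: treat the value as an API root and append the path.
  // Trailing slash: pass the value through unchanged.
  return baseURL.endsWith("/")
    ? baseURL
    : `${baseURL.replace(/\/+$/, "")}/chat/completions`;
}

resolveEndpoint("https://api.openai.com/v1");
// -> "https://api.openai.com/v1/chat/completions"
resolveEndpoint("https://proxy.example.com/custom/chat/completions/");
// -> "https://proxy.example.com/custom/chat/completions/" (unchanged)
```

One consequence worth noting: a root URL written with a trailing slash, such as `https://api.openai.com/v1/`, is also passed through unchanged, so it would never receive the `/chat/completions` suffix.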
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@flotorch/loadtest",
-  "version": "0.2.0",
+  "version": "0.2.2",
   "description": "LLM inference load testing and benchmarking tool",
   "license": "MIT",
   "repository": {