@flotorch/loadtest 0.2.2 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +18 -14
- package/package.json +1 -1
package/dist/index.js
CHANGED
|
@@ -455,7 +455,7 @@ var cyan = wrap("36", "39");
|
|
|
455
455
|
var magenta = wrap("35", "39");
|
|
456
456
|
|
|
457
457
|
// src/cli/args.ts
|
|
458
|
-
var VERSION = true ? "0.2.2" : "dev";
|
|
458
|
+
var VERSION = true ? "0.2.3" : "dev";
|
|
459
459
|
var VALID_COMMANDS = /* @__PURE__ */ new Set(["run", "generate", "bench", "report", "init"]);
|
|
460
460
|
var HELP_TEXT = `
|
|
461
461
|
${bold("FLOTorch Load Tester")} ${dim(`v${VERSION}`)}
|
|
@@ -882,12 +882,13 @@ var OpenAIBackend = class _OpenAIBackend {
|
|
|
882
882
|
}
|
|
883
883
|
messages.push({ role: "user", content: prompt2 });
|
|
884
884
|
const body = {
|
|
885
|
-
model,
|
|
886
885
|
messages,
|
|
887
886
|
stream: streaming,
|
|
888
|
-
...params
|
|
887
|
+
...params,
|
|
888
|
+
model,
|
|
889
|
+
max_tokens: maxTokens
|
|
889
890
|
};
|
|
890
|
-
if (
|
|
891
|
+
if (this.isOpenAIHost()) {
|
|
891
892
|
body.max_completion_tokens = body.max_tokens;
|
|
892
893
|
delete body.max_tokens;
|
|
893
894
|
}
|
|
@@ -1017,7 +1018,7 @@ var SageMakerBackend = class _SageMakerBackend {
|
|
|
1017
1018
|
}
|
|
1018
1019
|
constructor(config) {
|
|
1019
1020
|
this.baseURL = config.baseURL ?? `https://runtime.sagemaker.${config.region}.amazonaws.com`;
|
|
1020
|
-
this.requestFormat = config.requestFormat ?? "
|
|
1021
|
+
this.requestFormat = config.requestFormat ?? "openai" /* OpenAI */;
|
|
1021
1022
|
this.signer = new SignatureV4({
|
|
1022
1023
|
service: "sagemaker",
|
|
1023
1024
|
region: config.region,
|
|
@@ -1068,22 +1069,25 @@ var SageMakerBackend = class _SageMakerBackend {
|
|
|
1068
1069
|
return this.parseResponse(response);
|
|
1069
1070
|
}
|
|
1070
1071
|
buildRequestBody(prompt2, maxTokens, systemPrompt, params, streaming) {
|
|
1071
|
-
const messages = [];
|
|
1072
|
-
if (systemPrompt) messages.push({ role: "system", content: systemPrompt });
|
|
1073
|
-
messages.push({ role: "user", content: prompt2 });
|
|
1074
1072
|
if (this.requestFormat === "openai" /* OpenAI */) {
|
|
1073
|
+
const messages = [];
|
|
1074
|
+
if (systemPrompt) messages.push({ role: "system", content: systemPrompt });
|
|
1075
|
+
messages.push({ role: "user", content: prompt2 });
|
|
1075
1076
|
return {
|
|
1076
1077
|
messages,
|
|
1077
|
-
max_tokens: maxTokens,
|
|
1078
1078
|
stream: streaming,
|
|
1079
|
-
...params
|
|
1079
|
+
...params,
|
|
1080
|
+
max_tokens: maxTokens
|
|
1080
1081
|
};
|
|
1081
1082
|
}
|
|
1083
|
+
const rawPrompt = systemPrompt ? `${systemPrompt}
|
|
1084
|
+
|
|
1085
|
+
${prompt2}` : prompt2;
|
|
1082
1086
|
return {
|
|
1083
|
-
inputs:
|
|
1087
|
+
inputs: rawPrompt,
|
|
1084
1088
|
parameters: {
|
|
1085
|
-
|
|
1086
|
-
|
|
1089
|
+
...params,
|
|
1090
|
+
max_new_tokens: maxTokens
|
|
1087
1091
|
}
|
|
1088
1092
|
};
|
|
1089
1093
|
}
|
|
@@ -1262,7 +1266,7 @@ function createBackend(config) {
|
|
|
1262
1266
|
case "openai":
|
|
1263
1267
|
return OpenAIBackend.create(baseURL);
|
|
1264
1268
|
case "sagemaker": {
|
|
1265
|
-
const requestFormat = config.provider.config?.["requestFormat"] ?? "
|
|
1269
|
+
const requestFormat = config.provider.config?.["requestFormat"] ?? "openai" /* OpenAI */;
|
|
1266
1270
|
return SageMakerBackend.create(baseURL, requestFormat);
|
|
1267
1271
|
}
|
|
1268
1272
|
default:
|