@perstack/runtime 0.0.85 → 0.0.86
package/dist/bin/cli.js
CHANGED
@@ -1,5 +1,5 @@
 #!/usr/bin/env node
-import { package_default, findLockfile, loadLockfile, run } from '../chunk-
+import { package_default, findLockfile, loadLockfile, run } from '../chunk-TYK2JDF3.js';
 import '../chunk-RG4QHAGG.js';
 import { parseWithFriendlyError, runCommandInputSchema, validateEventFilter, createFilteredEventListener, perstackConfigSchema } from '@perstack/core';
 import { Command } from 'commander';
@@ -21,7 +21,7 @@ import { APICallError, generateText, streamText } from 'ai';
 // package.json
 var package_default = {
   name: "@perstack/runtime",
-  version: "0.0.85",
+  version: "0.0.86",
   description: "Perstack Runtime",
   author: "Wintermute Technologies, Inc.",
   license: "Apache-2.0",
@@ -3363,6 +3363,33 @@ registerProviderAdapter(
   async () => DeepseekProviderAdapter
 );
 var shouldEnableReasoning = (budget) => budget !== void 0 && budget !== "none" && budget !== 0;
+var PROVIDERS_WITHOUT_REASONING_HISTORY = [
+  "openai",
+  "azure-openai",
+  "deepseek",
+  "ollama"
+];
+function filterReasoningPartsForProvider(messages, providerName) {
+  if (!PROVIDERS_WITHOUT_REASONING_HISTORY.includes(providerName)) {
+    return messages;
+  }
+  return messages.map((message) => {
+    if (message.role !== "assistant") {
+      return message;
+    }
+    const assistantMessage = message;
+    if (!Array.isArray(assistantMessage.content)) {
+      return message;
+    }
+    const filteredContent = assistantMessage.content.filter(
+      (part) => !("type" in part && part.type === "reasoning")
+    );
+    if (filteredContent.length === assistantMessage.content.length) {
+      return message;
+    }
+    return { ...assistantMessage, content: filteredContent };
+  });
+}
 var LLMExecutor = class {
   constructor(adapter, model) {
     this.adapter = adapter;
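Taken together, the lines added in this hunk strip reasoning content parts from assistant messages before the conversation history is replayed to a provider listed in PROVIDERS_WITHOUT_REASONING_HISTORY. A minimal sketch of the observable behavior, assuming the helper defined above is in scope; the message objects and the "anthropic" provider name below are illustrative, not taken from the package:

// Hypothetical conversation history; only assistant messages with array
// content are touched by the filter.
const history = [
  { role: "user", content: "What is 2 + 2?" },
  {
    role: "assistant",
    content: [
      { type: "reasoning", text: "The user wants a sum; 2 + 2 = 4." },
      { type: "text", text: "4" }
    ]
  }
];

// A provider in PROVIDERS_WITHOUT_REASONING_HISTORY gets the history with the
// reasoning part dropped; all other messages come back unchanged.
const forOpenAI = filterReasoningPartsForProvider(history, "openai");
// forOpenAI[1].content is now [{ type: "text", text: "4" }]

// Any provider name outside the list receives the original array untouched.
const forOther = filterReasoningPartsForProvider(history, "anthropic");
// forOther === history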
@@ -3377,10 +3404,11 @@ var LLMExecutor = class {
     const reasoningEnabled = shouldEnableReasoning(params.reasoningBudget);
     const reasoningOptions = reasoningEnabled && params.reasoningBudget ? this.adapter.getReasoningOptions(params.reasoningBudget) : void 0;
     const providerOptions = this.mergeProviderOptions(baseProviderOptions, reasoningOptions);
+    const messages = filterReasoningPartsForProvider(params.messages, this.adapter.providerName);
     try {
       const result = await generateText({
         model: this.model,
-        messages
+        messages,
         maxRetries: params.maxRetries,
         tools: { ...params.tools, ...providerTools },
         toolChoice: params.toolChoice,
@@ -3413,10 +3441,11 @@ var LLMExecutor = class {
     const reasoningEnabled = shouldEnableReasoning(params.reasoningBudget);
     const reasoningOptions = reasoningEnabled && params.reasoningBudget ? this.adapter.getReasoningOptions(params.reasoningBudget) : void 0;
     const providerOptions = this.mergeProviderOptions(baseProviderOptions, reasoningOptions);
+    const messages = filterReasoningPartsForProvider(params.messages, this.adapter.providerName);
     try {
       const result = await generateText({
         model: this.model,
-        messages
+        messages,
         maxRetries: params.maxRetries,
         abortSignal: params.abortSignal,
         providerOptions
@@ -3440,9 +3469,10 @@ var LLMExecutor = class {
     const reasoningEnabled = shouldEnableReasoning(params.reasoningBudget);
     const reasoningOptions = reasoningEnabled && params.reasoningBudget ? this.adapter.getReasoningOptions(params.reasoningBudget) : void 0;
     const providerOptions = this.mergeProviderOptions(baseProviderOptions, reasoningOptions);
+    const messages = filterReasoningPartsForProvider(params.messages, this.adapter.providerName);
     const streamResult = streamText({
       model: this.model,
-      messages
+      messages,
       maxRetries: params.maxRetries,
       tools: { ...params.tools, ...providerTools },
       toolChoice: params.toolChoice,
@@ -3691,5 +3721,5 @@ async function run(runInput, options) {
 }
 
 export { findLockfile, getLockfileExpertToolDefinitions, getModel, loadLockfile, package_default, run, runtimeStateMachine };
-//# sourceMappingURL=chunk-
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-TYK2JDF3.js.map
+//# sourceMappingURL=chunk-TYK2JDF3.js.map
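The three hunks inside LLMExecutor all make the same change: params.messages is passed through filterReasoningPartsForProvider, and the filtered array is what reaches the ai SDK's generateText and streamText calls. A condensed sketch of the pattern, with the other call options elided and the names taken from the hunks above:

// Fragment from inside an LLMExecutor method, not a complete function.
const messages = filterReasoningPartsForProvider(params.messages, this.adapter.providerName);
const result = await generateText({
  model: this.model,
  messages, // filtered history replaces the raw params.messages
  maxRetries: params.maxRetries
});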