@getpochi/cli 0.5.46 → 0.5.48
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli.js +31 -4
- package/package.json +1 -1
package/dist/cli.js
CHANGED
|
@@ -354502,6 +354502,10 @@ var GoogleVertexModel = v4_default.union([
|
|
|
354502
354502
|
accessToken: v4_default.string(),
|
|
354503
354503
|
projectId: v4_default.string(),
|
|
354504
354504
|
location: v4_default.string()
|
|
354505
|
+
}),
|
|
354506
|
+
v4_default.object({
|
|
354507
|
+
issueUrl: v4_default.string().default(process.env.POCHI_VERTEX_ISSUE_URL ?? ""),
|
|
354508
|
+
modelUrl: v4_default.string().default(process.env.POCHI_VERTEX_MODEL_URL ?? "")
|
|
354505
354509
|
})
|
|
354506
354510
|
]);
|
|
354507
354511
|
var GoogleVertexTuningModelSettings = BaseModelSettings.extend({
|
|
@@ -368445,7 +368449,7 @@ var {
|
|
|
368445
368449
|
// package.json
|
|
368446
368450
|
var package_default = {
|
|
368447
368451
|
name: "@getpochi/cli",
|
|
368448
|
-
version: "0.5.46",
|
|
368452
|
+
version: "0.5.48",
|
|
368449
368453
|
type: "module",
|
|
368450
368454
|
bin: {
|
|
368451
368455
|
pochi: "src/cli.ts"
|
|
@@ -409710,14 +409714,18 @@ function toTaskStatus(message, finishReason) {
|
|
|
409710
409714
|
}
|
|
409711
409715
|
if (!finishReason)
|
|
409712
409716
|
return "failed";
|
|
409717
|
+
let hasToolCall = false;
|
|
409713
409718
|
for (const part of message.parts.slice(lastStepStart + 1)) {
|
|
409714
409719
|
if (part.type === "tool-askFollowupQuestion" || part.type === "tool-attemptCompletion") {
|
|
409715
409720
|
return "completed";
|
|
409716
409721
|
}
|
|
409717
409722
|
if (isToolUIPart(part)) {
|
|
409718
|
-
|
|
409723
|
+
hasToolCall = true;
|
|
409719
409724
|
}
|
|
409720
409725
|
}
|
|
409726
|
+
if (hasToolCall) {
|
|
409727
|
+
return "pending-tool";
|
|
409728
|
+
}
|
|
409721
409729
|
if (finishReason !== "error") {
|
|
409722
409730
|
return "pending-input";
|
|
409723
409731
|
}
|
|
@@ -410770,6 +410778,24 @@ function createVertexModel(vertex2, modelId) {
|
|
|
410770
410778
|
fetch: createPatchedFetchForFinetune(accessToken)
|
|
410771
410779
|
})(modelId);
|
|
410772
410780
|
}
|
|
410781
|
+
if ("issueUrl" in vertex2) {
|
|
410782
|
+
const { issueUrl, modelUrl } = vertex2;
|
|
410783
|
+
return createVertex({
|
|
410784
|
+
project: "placeholder",
|
|
410785
|
+
location: "placeholder",
|
|
410786
|
+
baseURL: "placeholder",
|
|
410787
|
+
fetch: async (_input, requestInit) => {
|
|
410788
|
+
const resp = await fetch(issueUrl, {
|
|
410789
|
+
headers: {
|
|
410790
|
+
"Metdata-Flavor": "Google"
|
|
410791
|
+
}
|
|
410792
|
+
}).then((x11) => x11.json());
|
|
410793
|
+
const headers = new Headers(requestInit?.headers);
|
|
410794
|
+
headers.append("Authorization", `Bearer ${resp.access_token}`);
|
|
410795
|
+
return fetch(modelUrl, { ...requestInit, headers });
|
|
410796
|
+
}
|
|
410797
|
+
})("placeholder");
|
|
410798
|
+
}
|
|
410773
410799
|
return;
|
|
410774
410800
|
}
|
|
410775
410801
|
|
|
@@ -411005,7 +411031,7 @@ function prepareMessages(inputMessages, environment2) {
|
|
|
411005
411031
|
function isWellKnownReasoningModel(model2) {
|
|
411006
411032
|
if (!model2)
|
|
411007
411033
|
return false;
|
|
411008
|
-
const models = [/glm-4.*/];
|
|
411034
|
+
const models = [/glm-4.*/, /qwen3.*thinking/];
|
|
411009
411035
|
const x11 = model2.toLowerCase();
|
|
411010
411036
|
for (const m13 of models) {
|
|
411011
411037
|
if (x11.match(m13)?.length) {
|
|
@@ -412486,7 +412512,8 @@ async function createLLMConfigWithProviders(program6, model2) {
|
|
|
412486
412512
|
modelId,
|
|
412487
412513
|
vertex: modelProvider.vertex,
|
|
412488
412514
|
contextWindow: modelSetting.contextWindow ?? exports_constants.DefaultContextWindow,
|
|
412489
|
-
maxOutputTokens: modelSetting.maxTokens ?? exports_constants.DefaultMaxOutputTokens
|
|
412515
|
+
maxOutputTokens: modelSetting.maxTokens ?? exports_constants.DefaultMaxOutputTokens,
|
|
412516
|
+
useToolCallMiddleware: modelSetting.useToolCallMiddleware
|
|
412490
412517
|
};
|
|
412491
412518
|
}
|
|
412492
412519
|
if (modelProvider.kind === undefined || modelProvider.kind === "openai" || modelProvider.kind === "openai-responses" || modelProvider.kind === "anthropic") {
|