@fastino-ai/pioneer-cli 0.2.7 → 0.2.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.tsx CHANGED
@@ -4,7 +4,7 @@
4
4
  * Uses Ink (React) for terminal UI.
5
5
  */
6
6
 
7
- import React, { useState, useEffect } from "react";
7
+ import React, { useState, useEffect, useRef } from "react";
8
8
  import { render, Box, Text, useApp, useInput, useStdin, Static } from "ink";
9
9
  import Spinner from "ink-spinner";
10
10
  import TextInput from "ink-text-input";
@@ -14,6 +14,9 @@ import * as path from "path";
14
14
  import {
15
15
  getApiKey,
16
16
  getBaseUrl,
17
+ getMleModel,
18
+ getLastAgentConversationId,
19
+ setLastAgentConversationId,
17
20
  saveConfig,
18
21
  clearApiKey,
19
22
  getHfToken,
@@ -21,7 +24,7 @@ import {
21
24
  clearHfToken,
22
25
  } from "./config.js";
23
26
  import * as api from "./api.js";
24
- import { ChatApp } from "./chat/ChatApp.js";
27
+ import { WebSocketClient, type HistoryMessage } from "./client/WebSocketClient.js";
25
28
  import {
26
29
  isEnabled as isTelemetryEnabled,
27
30
  hasChosenTelemetry,
@@ -150,9 +153,30 @@ function parseDatasetRef(datasetStr: string): api.DatasetRef | null {
150
153
  };
151
154
  }
152
155
 
153
- function parseArgs(argv: string[]): { command: string[]; flags: Record<string, string> } {
156
+ const BOOLEAN_FLAGS = new Set([
157
+ "help",
158
+ "h",
159
+ "version",
160
+ "v",
161
+ "private",
162
+ "save",
163
+ "multi-label",
164
+ "include-spans",
165
+ "echo",
166
+ "include-confidence",
167
+ "format-results",
168
+ "reasoning-trace",
169
+ "use-meta-felix",
170
+ ]);
171
+
172
+ function parseArgs(argv: string[]): {
173
+ command: string[];
174
+ flags: Record<string, string>;
175
+ parseErrors: string[];
176
+ } {
154
177
  const command: string[] = [];
155
178
  const flags: Record<string, string> = {};
179
+ const parseErrors: string[] = [];
156
180
 
157
181
  for (let i = 0; i < argv.length; i++) {
158
182
  const arg = argv[i];
@@ -163,7 +187,12 @@ function parseArgs(argv: string[]): { command: string[]; flags: Record<string, s
163
187
  flags[key] = next;
164
188
  i++;
165
189
  } else {
166
- flags[key] = "true";
190
+ if (BOOLEAN_FLAGS.has(key)) {
191
+ flags[key] = "true";
192
+ } else {
193
+ parseErrors.push(`--${key}`);
194
+ flags[key] = "";
195
+ }
167
196
  }
168
197
  } else if (arg.startsWith("-") && arg.length === 2) {
169
198
  // Handle short flags like -v, -h
@@ -173,14 +202,37 @@ function parseArgs(argv: string[]): { command: string[]; flags: Record<string, s
173
202
  flags[key] = next;
174
203
  i++;
175
204
  } else {
176
- flags[key] = "true";
205
+ if (BOOLEAN_FLAGS.has(key)) {
206
+ flags[key] = "true";
207
+ } else {
208
+ parseErrors.push(`-${key}`);
209
+ flags[key] = "";
210
+ }
177
211
  }
178
212
  } else {
179
213
  command.push(arg);
180
214
  }
181
215
  }
182
216
 
183
- return { command, flags };
217
+ return { command, flags, parseErrors };
218
+ }
219
+
220
+ type CreateProjectExample = Record<string, unknown> | undefined;
221
+
222
+ function parseProjectExample(exampleStr: string | undefined): { value?: CreateProjectExample; error?: string } {
223
+ if (!exampleStr) {
224
+ return { value: undefined };
225
+ }
226
+
227
+ try {
228
+ const parsed = JSON.parse(exampleStr) as unknown;
229
+ if (!parsed || Array.isArray(parsed) || typeof parsed !== "object") {
230
+ return { error: "--example must be a JSON object" };
231
+ }
232
+ return { value: parsed as CreateProjectExample };
233
+ } catch {
234
+ return { error: "--example must be valid JSON" };
235
+ }
184
236
  }
185
237
 
186
238
  // ─────────────────────────────────────────────────────────────────────────────
@@ -224,24 +276,28 @@ interface TelemetryPromptProps {
224
276
 
225
277
  const TelemetryPrompt: React.FC<TelemetryPromptProps> = ({ onComplete }) => {
226
278
  const [selected, setSelected] = useState<"yes" | "no">("yes");
279
+ const { isRawModeSupported } = useStdin();
227
280
 
228
- useInput((input, key) => {
229
- if (key.leftArrow || key.rightArrow) {
230
- setSelected((s) => (s === "yes" ? "no" : "yes"));
231
- }
232
- if (key.return) {
233
- setTelemetryEnabled(selected === "yes");
234
- onComplete();
235
- }
236
- if (input === "y" || input === "Y") {
237
- setTelemetryEnabled(true);
238
- onComplete();
239
- }
240
- if (input === "n" || input === "N") {
241
- setTelemetryEnabled(false);
242
- onComplete();
243
- }
244
- });
281
+ useInput(
282
+ (input, key) => {
283
+ if (key.leftArrow || key.rightArrow) {
284
+ setSelected((s) => (s === "yes" ? "no" : "yes"));
285
+ }
286
+ if (key.return) {
287
+ setTelemetryEnabled(selected === "yes");
288
+ onComplete();
289
+ }
290
+ if (input === "y" || input === "Y") {
291
+ setTelemetryEnabled(true);
292
+ onComplete();
293
+ }
294
+ if (input === "n" || input === "N") {
295
+ setTelemetryEnabled(false);
296
+ onComplete();
297
+ }
298
+ },
299
+ { isActive: isRawModeSupported }
300
+ );
245
301
 
246
302
  return (
247
303
  <Box flexDirection="column" paddingX={1}>
@@ -663,138 +719,1530 @@ function ApiCommand<T>({ action, successMessage }: ApiCommandProps<T>) {
663
719
  );
664
720
  }
665
721
 
666
- // ─────────────────────────────────────────────────────────────────────────────
667
- // Job Logs Command (prettified output)
668
- // ─────────────────────────────────────────────────────────────────────────────
722
+ interface AgentInteractiveCommandProps {
723
+ message: string;
724
+ conversationId?: string;
725
+ history?: api.AgentChatHistoryItem[];
726
+ }
669
727
 
670
- function JobLogsCommand({ jobId }: { jobId: string }) {
671
- const { exit } = useApp();
672
- const [state, setState] = useState<"loading" | "done" | "error">("loading");
673
- const [logs, setLogs] = useState<api.TrainingLog[]>([]);
728
+ function toHistoryMessages(
729
+ items: Array<api.AgentChatHistoryItem | api.ChatSessionMessage>
730
+ ): HistoryMessage[] {
731
+ return items
732
+ .filter((item) => Boolean(item && typeof item.content === "string" && typeof item.role === "string"))
733
+ .map((item) => {
734
+ const role =
735
+ item.role === "user" || item.role === "assistant" || item.role === "tool"
736
+ ? item.role
737
+ : "assistant";
738
+ const message: HistoryMessage = {
739
+ role,
740
+ content: item.content,
741
+ };
742
+
743
+ const sessionMessage = item as api.ChatSessionMessage;
744
+ if (sessionMessage.tool_call_id) {
745
+ message.tool_call_id = sessionMessage.tool_call_id;
746
+ }
747
+ if (sessionMessage.tool_calls && Array.isArray(sessionMessage.tool_calls)) {
748
+ message.tool_calls = sessionMessage.tool_calls
749
+ .filter(
750
+ (call): call is { id: string; name: string; args: Record<string, unknown> } =>
751
+ Boolean(call) &&
752
+ typeof call.id === "string" &&
753
+ typeof call.name === "string" &&
754
+ typeof call.args === "object" &&
755
+ call.args !== null
756
+ )
757
+ .map((call) => ({
758
+ id: call.id,
759
+ name: call.name,
760
+ args: call.args,
761
+ }));
762
+ }
763
+
764
+ return message;
765
+ });
766
+ }
767
+
768
+ function AgentInteractiveCommand({ message, conversationId, history }: AgentInteractiveCommandProps) {
769
+ const [state, setState] = useState<"connecting" | "running" | "done" | "error">("connecting");
770
+ const [stream, setStream] = useState("");
674
771
  const [error, setError] = useState("");
772
+ const [statusHint, setStatusHint] = useState("");
773
+ const [client] = useState(() => new WebSocketClient());
774
+ const doneHistory = useRef<HistoryMessage[] | undefined>(undefined);
675
775
 
676
776
  useEffect(() => {
677
- (async () => {
678
- const result = await api.getJobLogs(jobId);
679
- if (result.ok && result.data) {
680
- setLogs(result.data.logs || []);
777
+ let isActive = true;
778
+ const model = getMleModel();
779
+ let wsSessionId: string | undefined;
780
+ const finish = (code: number) => {
781
+ setTimeout(() => process.exit(code), 300);
782
+ };
783
+ const persistConversation = (nextConversationId?: string) => {
784
+ if (nextConversationId) {
785
+ setLastAgentConversationId(nextConversationId);
786
+ }
787
+ };
788
+ const getLatestAssistantContent = (
789
+ messages: HistoryMessage[] | undefined
790
+ ): string => {
791
+ if (!messages?.length) {
792
+ return "";
793
+ }
794
+ const lastAssistant = [...messages]
795
+ .reverse()
796
+ .find(
797
+ (entry) =>
798
+ entry.role === "assistant" && typeof entry.content === "string" && entry.content.trim()
799
+ );
800
+ return lastAssistant?.content?.trim() ?? "";
801
+ };
802
+ let streamBuffer = "";
803
+ const appendStream = (content: string) => {
804
+ streamBuffer += content;
805
+ setStream(streamBuffer);
806
+ };
807
+ const runAgentFallback = async (
808
+ fallbackHistory: HistoryMessage[],
809
+ sessionIdHint?: string
810
+ ) => {
811
+ const plainHistory: api.AgentChatHistoryItem[] = fallbackHistory.map((entry) => ({
812
+ role: entry.role,
813
+ content: entry.content,
814
+ }));
815
+ const fallbackTimeout = (ms: number) =>
816
+ new Promise<never>((_, reject) => {
817
+ setTimeout(() => reject(new Error(`REST fallback timed out after ${ms}ms`)), ms);
818
+ });
819
+ setState("running");
820
+ setStatusHint("WebSocket unavailable, using REST fallback...");
821
+ try {
822
+ const runRequest = (sessionIdOverride?: string) => {
823
+ const requestConversationId = sessionIdOverride;
824
+ return Promise.race([
825
+ api.agentChat({
826
+ message,
827
+ ...(requestConversationId ? { conversation_id: requestConversationId } : {}),
828
+ ...(plainHistory.length ? { history: plainHistory } : {}),
829
+ }),
830
+ fallbackTimeout(8000),
831
+ ]);
832
+ };
833
+
834
+ const attempts = Array.from(
835
+ new Set([sessionIdHint, conversationId, undefined] as Array<string | undefined>)
836
+ );
837
+ let result: Awaited<ReturnType<typeof runRequest>> | undefined;
838
+ let lastError: string | undefined;
839
+
840
+ for (const attemptConversationId of attempts) {
841
+ const attemptResult = await runRequest(attemptConversationId);
842
+ if (attemptResult.ok && attemptResult.data?.answer) {
843
+ result = attemptResult;
844
+ break;
845
+ }
846
+ if (!attemptResult.ok && attemptResult.error) {
847
+ lastError = attemptResult.error;
848
+ }
849
+ }
850
+
851
+ if (!result) {
852
+ if (lastError) {
853
+ throw new Error(lastError || "Agent fallback request failed.");
854
+ }
855
+ result = {
856
+ ok: false,
857
+ status: 0,
858
+ error: "Agent fallback request failed.",
859
+ };
860
+ }
861
+
862
+ if (!isActive) {
863
+ return;
864
+ }
865
+ if (!result.ok) {
866
+ throw new Error(lastError || result.error || "Agent fallback request failed.");
867
+ }
868
+ setStream(result.data?.answer ?? "");
869
+ if (result.data?.conversation_id) {
870
+ persistConversation(result.data.conversation_id);
871
+ }
872
+ if (!result.data?.answer) {
873
+ setError("Agent returned no response content.");
874
+ setState("error");
875
+ setTimeout(() => process.exit(1), 300);
876
+ return;
877
+ }
878
+ setStatusHint("");
681
879
  setState("done");
682
- } else {
683
- setError(result.error ?? "Unknown error");
880
+ setTimeout(() => process.exit(0), 300);
881
+ } catch (fallbackError) {
882
+ if (!isActive) {
883
+ return;
884
+ }
885
+ const fallbackErrorMessage =
886
+ fallbackError instanceof Error
887
+ ? fallbackError.message
888
+ : String(fallbackError);
889
+ if (fallbackErrorMessage.includes("REST fallback timed out")) {
890
+ setError(
891
+ "Interactive mode timed out. Agent runtime is not reachable in this environment.\n" +
892
+ "Retry in standard mode (no mode flag needed), or set up a reachable WSS endpoint."
893
+ );
894
+ } else {
895
+ setError(fallbackErrorMessage);
896
+ }
684
897
  setState("error");
898
+ setTimeout(() => process.exit(1), 300);
685
899
  }
686
- setTimeout(() => exit(), 500);
687
- })();
688
- }, [jobId, exit]);
900
+ };
901
+ const start = async () => {
902
+ doneHistory.current = undefined;
903
+ const mergedHistory: HistoryMessage[] = [];
904
+ let commandCompleted = false;
905
+ let responseTimeoutId: ReturnType<typeof setTimeout> | undefined;
906
+ const fallbackOnTimeout = async () => {
907
+ if (!isActive || commandCompleted) {
908
+ return;
909
+ }
910
+ commandCompleted = true;
911
+ if (responseTimeoutId) {
912
+ clearTimeout(responseTimeoutId);
913
+ }
914
+ client.disconnect();
915
+ await runAgentFallback(mergedHistory, wsSessionId);
916
+ };
689
917
 
690
- if (state === "loading") {
691
- return <Loading />;
692
- }
918
+ const connectWithTimeout = async (ms: number): Promise<void> => {
919
+ return new Promise<void>((resolve, reject) => {
920
+ const timeoutId = setTimeout(() => {
921
+ reject(new Error(`WebSocket connect timed out after ${ms}ms`));
922
+ }, ms);
923
+
924
+ client.connect()
925
+ .then(() => {
926
+ clearTimeout(timeoutId);
927
+ resolve();
928
+ })
929
+ .catch((err) => {
930
+ clearTimeout(timeoutId);
931
+ reject(err);
932
+ });
933
+ });
934
+ };
935
+ const withTimeout = async <T,>(promise: Promise<T>, ms: number, label: string): Promise<T> => {
936
+ return new Promise<T>((resolve, reject) => {
937
+ const timeoutId = setTimeout(() => {
938
+ reject(new Error(`${label} timed out after ${ms}ms`));
939
+ }, ms);
940
+
941
+ promise.then(
942
+ (value) => {
943
+ clearTimeout(timeoutId);
944
+ resolve(value);
945
+ },
946
+ (error) => {
947
+ clearTimeout(timeoutId);
948
+ reject(error);
949
+ }
950
+ );
951
+ });
952
+ };
953
+
954
+ try {
955
+ if (conversationId) {
956
+ setStatusHint(`Loading conversation ${conversationId}...`);
957
+ const sessionResult = await api.getAgentSession(conversationId);
958
+ if (!sessionResult.ok) {
959
+ throw new Error(
960
+ sessionResult.error ??
961
+ `Could not load conversation ${conversationId}. Please check the conversation id and credentials.`
962
+ );
963
+ }
964
+
965
+ const session = sessionResult.data;
966
+ if (session?.messages?.length) {
967
+ mergedHistory.push(...toHistoryMessages(session.messages));
968
+ }
969
+ }
970
+
971
+ if (history?.length) {
972
+ mergedHistory.push(...toHistoryMessages(history));
973
+ }
974
+
975
+ const isWsHealthy = await withTimeout(client.health(), 3000, "WebSocket health check");
976
+ if (!isWsHealthy) {
977
+ throw new Error("WebSocket endpoint is not reachable.");
978
+ }
979
+
980
+ await connectWithTimeout(5000);
981
+ if (!isActive) {
982
+ return;
983
+ }
984
+
985
+ setStatusHint("");
986
+ setState("running");
987
+ responseTimeoutId = setTimeout(() => {
988
+ void fallbackOnTimeout();
989
+ }, 5000);
990
+
991
+ await client.chat(message, {
992
+ onStream: (content) => {
993
+ appendStream(content);
994
+ },
995
+ onAssistantMessage: (content) => {
996
+ appendStream(content);
997
+ },
998
+ onToolCall: async (call) => {
999
+ setStatusHint(`Tool requested: ${call.tool}`);
1000
+ return "CLI does not execute tool calls yet.";
1001
+ },
1002
+ onError: (err) => {
1003
+ if (!isActive) return;
1004
+ setError(err.message || "WebSocket agent error");
1005
+ setState("error");
1006
+ finish(1);
1007
+ },
1008
+ onDone: (messages, sessionId) => {
1009
+ if (!isActive) return;
1010
+ commandCompleted = true;
1011
+ if (responseTimeoutId) {
1012
+ clearTimeout(responseTimeoutId);
1013
+ }
1014
+ doneHistory.current = messages;
1015
+ wsSessionId = sessionId;
1016
+ persistConversation(sessionId);
1017
+ },
1018
+ }, {
1019
+ history: mergedHistory,
1020
+ ...(model ? { config: { model } } : {}),
1021
+ fileReferences: [],
1022
+ });
1023
+ if (!streamBuffer.trim()) {
1024
+ const doneAssistantContent = getLatestAssistantContent(doneHistory.current);
1025
+ if (doneAssistantContent) {
1026
+ appendStream(doneAssistantContent);
1027
+ setState("done");
1028
+ finish(0);
1029
+ return;
1030
+ }
1031
+
1032
+ setStatusHint("No assistant content in websocket response; retrying via REST fallback.");
1033
+ await runAgentFallback(mergedHistory, wsSessionId);
1034
+ return;
1035
+ }
1036
+
1037
+ setState("done");
1038
+ finish(0);
1039
+ } catch (err) {
1040
+ if (!isActive) return;
1041
+ commandCompleted = true;
1042
+ if (responseTimeoutId) {
1043
+ clearTimeout(responseTimeoutId);
1044
+ }
1045
+ const rawError = err instanceof Error ? err.message : String(err);
1046
+ const unavailable =
1047
+ rawError.includes("WebSocket") ||
1048
+ rawError.includes("websocket") ||
1049
+ rawError.includes("timed out") ||
1050
+ rawError.includes("not reachable");
1051
+
1052
+ if (unavailable) {
1053
+ const wsUrl = client.getWebSocketUrl();
1054
+ setStatusHint(`WebSocket timed out at ${wsUrl}. Falling back to REST.`);
1055
+ await runAgentFallback(mergedHistory);
1056
+ return;
1057
+ }
1058
+ setError(
1059
+ rawError || "Agent runtime unavailable. Retry by running `agent` in standard mode (omit --mode)."
1060
+ );
1061
+ setState("error");
1062
+ finish(1);
1063
+ }
1064
+ };
1065
+ start();
1066
+
1067
+ return () => {
1068
+ isActive = false;
1069
+ if (state !== "done" && state !== "error") {
1070
+ client.disconnect();
1071
+ }
1072
+ };
1073
+ }, [client, conversationId, history, message]);
1074
+
1075
+ useEffect(() => {
1076
+ if (state === "done") {
1077
+ setTimeout(() => process.exit(0), 300);
1078
+ } else if (state === "error") {
1079
+ setTimeout(() => process.exit(1), 300);
1080
+ }
1081
+ }, [state]);
693
1082
 
694
1083
  if (state === "error") {
695
- return <ErrorMessage error={error} />;
1084
+ return (
1085
+ <Box flexDirection="column">
1086
+ <ErrorMessage error={error || "WebSocket agent command failed."} />
1087
+ </Box>
1088
+ );
696
1089
  }
697
1090
 
698
- const formatLogEntry = (log: api.TrainingLog): string => {
699
- const ts = new Date(log.timestamp).toLocaleTimeString();
700
- return `[${ts}] [${log.level}] ${log.message}`;
701
- };
1091
+ if (state === "connecting") {
1092
+ return (
1093
+ <Box flexDirection="column">
1094
+ <Loading message={statusHint || "Connecting to agent runtime..."} />
1095
+ </Box>
1096
+ );
1097
+ }
702
1098
 
703
1099
  return (
704
1100
  <Box flexDirection="column">
705
- {logs.length === 0 ? (
706
- <Text dimColor>No logs available</Text>
1101
+ {state === "running" ? (
1102
+ <Loading message={stream ? "Streaming response..." : "Waiting for agent response..."} />
707
1103
  ) : (
708
- logs.map((log) => (
709
- <Text key={log.id} color={log.level === "ERROR" ? "red" : log.level === "WARNING" ? "yellow" : undefined}>
710
- {formatLogEntry(log)}
711
- </Text>
712
- ))
1104
+ <Success message="Agent response received" />
713
1105
  )}
1106
+ {stream ? <Text>{stream}</Text> : <Text dimColor>No response content yet.</Text>}
1107
+ {conversationId ? <Text dimColor>Conversation: {conversationId}</Text> : null}
714
1108
  </Box>
715
1109
  );
716
1110
  }
717
1111
 
718
- // ─────────────────────────────────────────────────────────────────────────────
719
- // Local Dataset Helpers
720
- // ─────────────────────────────────────────────────────────────────────────────
721
-
722
- const DATASETS_DIR = path.join(process.cwd(), "datasets");
1112
+ type AutoAgentTurnRole = "user" | "assistant";
723
1113
 
724
- function ensureDatasetsDir(): void {
725
- if (!fs.existsSync(DATASETS_DIR)) {
726
- fs.mkdirSync(DATASETS_DIR, { recursive: true });
727
- }
1114
+ interface AutoAgentTurn {
1115
+ role: AutoAgentTurnRole;
1116
+ content: string;
728
1117
  }
729
1118
 
730
- interface LocalDataset {
731
- id: string;
732
- name: string;
733
- path: string;
734
- type: string;
735
- size: number;
736
- sample_size: number;
737
- created_at: string;
738
- source: "local";
1119
+ function formatAutoAgentTurn(turn: AutoAgentTurn): string {
1120
+ const prefix = turn.role === "user" ? "You: " : "Agent: ";
1121
+ return `${prefix}${turn.content}`;
739
1122
  }
740
1123
 
741
- function listLocalDatasets(): LocalDataset[] {
742
- if (!fs.existsSync(DATASETS_DIR)) {
743
- return [];
744
- }
1124
+ export function AutoAgentInteractiveSession({
1125
+ conversationId: initialConversationId,
1126
+ history,
1127
+ mode,
1128
+ firstMessage,
1129
+ allowSessionCreation = true,
1130
+ }: {
1131
+ conversationId?: string;
1132
+ history?: api.AgentChatHistoryItem[];
1133
+ mode: "standard" | "research";
1134
+ firstMessage?: string;
1135
+ allowSessionCreation?: boolean;
1136
+ }) {
1137
+ const { isRawModeSupported } = useStdin();
1138
+ const [input, setInput] = useState("");
1139
+ const [isLoading, setIsLoading] = useState(false);
1140
+ const [error, setError] = useState("");
1141
+ const [statusHint, setStatusHint] = useState("Start typing...");
1142
+ const [conversationId, setConversationId] = useState(initialConversationId);
1143
+ const [historyState, setHistoryState] = useState<api.AgentChatHistoryItem[]>(history ?? []);
1144
+ const normalizeTurnRole = (role: string): AutoAgentTurnRole => {
1145
+ const normalized = role?.trim().toLowerCase();
1146
+ return normalized === "user" || normalized === "human" ? "user" : "assistant";
1147
+ };
1148
+ const [isHistoryLoading, setIsHistoryLoading] = useState(
1149
+ Boolean(initialConversationId) && history === undefined
1150
+ );
1151
+ const [isResumeSessionUsable, setIsResumeSessionUsable] = useState(true);
1152
+ const [turns, setTurns] = useState<AutoAgentTurn[]>(
1153
+ (history ?? []).map((entry) => ({
1154
+ role: normalizeTurnRole(entry.role),
1155
+ content: entry.content,
1156
+ }))
1157
+ );
1158
+ const [inputHistory, setInputHistory] = useState<string[]>([]);
1159
+ const [inputHistoryIndex, setInputHistoryIndex] = useState(-1);
1160
+ const didSeedFirstMessage = useRef(false);
1161
+ const setInputValue = (nextValue: string) => {
1162
+ setInput(nextValue);
1163
+ setInputHistoryIndex(-1);
1164
+ };
1165
+ const normalizeSessionUserHistory = (items: api.AgentChatHistoryItem[]) =>
1166
+ items
1167
+ .filter((message) => {
1168
+ const normalizedRole = message.role?.trim().toLowerCase();
1169
+ return (
1170
+ (normalizedRole === "user" || normalizedRole === "human") &&
1171
+ typeof message.content === "string"
1172
+ );
1173
+ })
1174
+ .map((message) => message.content.trim())
1175
+ .filter((content): content is string => content.length > 0);
1176
+ const mapHistoryToTurns = (items: api.AgentChatHistoryItem[]) =>
1177
+ items.map((entry) => ({
1178
+ role: normalizeTurnRole(entry.role),
1179
+ content: entry.content,
1180
+ }));
1181
+
1182
+ const commitInputToHistory = (message: string) => {
1183
+ const normalized = message.trim();
1184
+ if (!normalized) {
1185
+ return;
1186
+ }
745
1187
 
746
- const files = fs.readdirSync(DATASETS_DIR).filter((f) => f.endsWith(".json"));
747
- return files.map((filename) => {
748
- const filePath = path.join(DATASETS_DIR, filename);
749
- const stats = fs.statSync(filePath);
750
- let data: unknown[] = [];
751
- let type = "unknown";
1188
+ setInputHistory((previous) => {
1189
+ if (previous.length > 0 && previous[previous.length - 1] === normalized) {
1190
+ return previous;
1191
+ }
1192
+ return [...previous, normalized];
1193
+ });
1194
+ };
752
1195
 
753
- try {
754
- const content = JSON.parse(fs.readFileSync(filePath, "utf-8"));
755
- data = Array.isArray(content) ? content : content.data ?? [];
756
- const firstItem = data[0] as Record<string, unknown> | undefined;
757
- type = content.task_type ?? (firstItem?.spans ? "ner" : firstItem?.label ? "classification" : "custom");
758
- } catch {
759
- // Ignore parse errors
1196
+ useEffect(() => {
1197
+ const restoredHistory = history ?? [];
1198
+ setHistoryState(restoredHistory);
1199
+ setTurns(mapHistoryToTurns(restoredHistory));
1200
+ setInputHistory(normalizeSessionUserHistory(restoredHistory));
1201
+ if (history !== undefined || !initialConversationId) {
1202
+ setIsHistoryLoading(false);
760
1203
  }
1204
+ }, [history]);
761
1205
 
762
- return {
763
- id: filename.replace(".json", ""),
764
- name: filename.replace(".json", ""),
765
- path: filePath,
766
- type,
767
- size: stats.size,
768
- sample_size: data.length,
769
- created_at: stats.birthtime.toISOString(),
770
- source: "local" as const,
771
- };
772
- });
773
- }
1206
+ useEffect(() => {
1207
+ setTurns(mapHistoryToTurns(historyState));
1208
+ }, [historyState]);
774
1209
 
775
- function saveLocalDataset(name: string, data: unknown, type: string): string {
776
- ensureDatasetsDir();
777
- const filename = `${name.replace(/[^a-zA-Z0-9-_]/g, "_")}.json`;
778
- const filePath = path.join(DATASETS_DIR, filename);
779
- fs.writeFileSync(filePath, JSON.stringify({ task_type: type, data }, null, 2));
780
- return filePath;
781
- }
1210
+ useEffect(() => {
1211
+ if (!initialConversationId || history !== undefined) {
1212
+ setIsHistoryLoading(false);
1213
+ return;
1214
+ }
782
1215
 
783
- // ─────────────────────────────────────────────────────────────────────────────
784
- // Generate Command (saves to local file when --save is false)
785
- // ─────────────────────────────────────────────────────────────────────────────
1216
+ let isActive = true;
1217
+ const hydrateSessionHistory = async () => {
1218
+ const result = await api.getAgentSession(initialConversationId);
1219
+ if (!isActive) {
1220
+ return;
1221
+ }
786
1222
 
787
- interface GenerateCommandProps<T> {
788
- action: () => Promise<api.ApiResult<T>>;
789
- datasetName: string;
790
- datasetType: string;
791
- saveToRemote: boolean;
792
- }
1223
+ if (!result.ok) {
1224
+ const lowerError = (result.error || "").toLowerCase();
1225
+ const notFound =
1226
+ result.status === 404 ||
1227
+ lowerError.includes("session not found") ||
1228
+ lowerError.includes("not found") ||
1229
+ lowerError.includes("does not exist");
1230
+ if (notFound) {
1231
+ setHistoryState([]);
1232
+ setTurns([]);
1233
+ setError("Could not load session history. This session could not be found on the backend. Resume is not available.");
1234
+ setIsResumeSessionUsable(false);
1235
+ setIsHistoryLoading(false);
1236
+ return;
1237
+ }
1238
+ setError(
1239
+ `Could not load session history for ${initialConversationId}: ${result.error || "Unable to load conversation history."}` +
1240
+ " Proceeding without preloaded history."
1241
+ );
1242
+ setIsResumeSessionUsable(true);
1243
+ setIsHistoryLoading(false);
1244
+ return;
1245
+ }
793
1246
 
794
- interface GenerateResult {
795
- data?: unknown[];
796
- success?: boolean;
797
- dataset?: { id?: string; dataset_name?: string };
1247
+ const restoredMessages = (result.data?.messages ?? [])
1248
+ .map((message) => {
1249
+ if (
1250
+ typeof message.role !== "string" ||
1251
+ typeof message.content !== "string"
1252
+ ) {
1253
+ return undefined;
1254
+ }
1255
+
1256
+ return {
1257
+ role: message.role,
1258
+ content: message.content,
1259
+ } as api.AgentChatHistoryItem;
1260
+ })
1261
+ .filter((message): message is api.AgentChatHistoryItem => message !== undefined);
1262
+
1263
+ setHistoryState(restoredMessages);
1264
+ setTurns(mapHistoryToTurns(restoredMessages));
1265
+ setInputHistory(normalizeSessionUserHistory(restoredMessages));
1266
+
1267
+ setIsHistoryLoading(false);
1268
+ };
1269
+
1270
+ void hydrateSessionHistory();
1271
+
1272
+ return () => {
1273
+ isActive = false;
1274
+ };
1275
+ }, [initialConversationId, history]);
1276
+
1277
+ const runAutoTurn = async (rawMessage: string) => {
1278
+ if (isHistoryLoading) {
1279
+ return;
1280
+ }
1281
+ const trimmed = rawMessage.trim();
1282
+ if (!trimmed || isLoading) {
1283
+ return;
1284
+ }
1285
+ commitInputToHistory(trimmed);
1286
+ if (!isResumeSessionUsable && !allowSessionCreation) {
1287
+ setError("Unable to resume this session because the conversation record is unavailable.");
1288
+ setIsLoading(false);
1289
+ setStatusHint("Ready for next message.");
1290
+ return;
1291
+ }
1292
+
1293
+ const nextHistory: api.AgentChatHistoryItem[] = [
1294
+ ...historyState,
1295
+ { role: "user", content: trimmed },
1296
+ ];
1297
+ setHistoryState(nextHistory);
1298
+ setInputValue("");
1299
+ setError("");
1300
+ setIsLoading(true);
1301
+ setStatusHint("Thinking...");
1302
+
1303
+ let activeConversationId = conversationId;
1304
+
1305
+ if (!activeConversationId && !allowSessionCreation) {
1306
+ setError("Unable to resume this session because no valid conversation id is available.");
1307
+ setIsLoading(false);
1308
+ setStatusHint("Ready for next message.");
1309
+ return;
1310
+ }
1311
+
1312
+ if (!activeConversationId) {
1313
+ setStatusHint("Creating conversation...");
1314
+ const createdSession = await api.createAgentSession({
1315
+ first_message: trimmed,
1316
+ title: trimmed.slice(0, 80) || "New agent session",
1317
+ });
1318
+ if (!createdSession.ok) {
1319
+ setError(`Failed to create conversation: ${createdSession.error || "Unknown error."}`);
1320
+ setIsLoading(false);
1321
+ setStatusHint("Ready for next message.");
1322
+ return;
1323
+ }
1324
+ const createdSessionId = createdSession.data?.id;
1325
+ if (!createdSessionId) {
1326
+ setError("Failed to create conversation: Missing conversation id from server response.");
1327
+ setIsLoading(false);
1328
+ setStatusHint("Ready for next message.");
1329
+ return;
1330
+ }
1331
+ activeConversationId = createdSessionId;
1332
+ setConversationId(activeConversationId);
1333
+ persistConversation(activeConversationId);
1334
+ }
1335
+
1336
+ const sendTurn = async (requestConversationId: string | undefined) =>
1337
+ api.agentChat({
1338
+ message: trimmed,
1339
+ ...(requestConversationId ? { conversation_id: requestConversationId } : {}),
1340
+ ...(nextHistory.length ? { history: nextHistory } : {}),
1341
+ });
1342
+
1343
+ let result = await sendTurn(activeConversationId);
1344
+ const lowerError = (result.error || "").toLowerCase();
1345
+ const notFound =
1346
+ result.status === 404 ||
1347
+ lowerError.includes("session not found") ||
1348
+ lowerError.includes("not found") ||
1349
+ lowerError.includes("does not exist");
1350
+
1351
+ if (!result.ok) {
1352
+ if (lowerError.includes("failed to get session") && notFound) {
1353
+ if (!allowSessionCreation) {
1354
+ setError("Unable to resume this session. It is not available on the backend.");
1355
+ setIsResumeSessionUsable(false);
1356
+ } else {
1357
+ setError("Failed to resume session on the backend. Please start a new message to create a new session.");
1358
+ }
1359
+ } else if (
1360
+ result.status === 403 &&
1361
+ lowerError.includes("deep research mode")
1362
+ ) {
1363
+ setError(
1364
+ "Research mode requires a Pro subscription.\n" +
1365
+ "To run this command, upgrade your account to Pro and retry.\n" +
1366
+ "Go to Manage subscription under Billing at https://agent.pioneer.ai/account."
1367
+ );
1368
+ } else {
1369
+ setError(result.error || "Agent request failed.");
1370
+ }
1371
+
1372
+ setIsLoading(false);
1373
+ setStatusHint("Ready for next message.");
1374
+ return;
1375
+ }
1376
+
1377
+ const data = result.data;
1378
+ if (!data) {
1379
+ setError("Agent request did not return a usable response.");
1380
+ setIsLoading(false);
1381
+ setStatusHint("Ready for next message.");
1382
+ return;
1383
+ }
1384
+
1385
+ const answer = data.answer?.trim() || "No response content yet.";
1386
+ setError("");
1387
+ const persistedConversationId = activeConversationId ?? data.conversation_id;
1388
+ if (persistedConversationId) {
1389
+ setConversationId(persistedConversationId);
1390
+ persistConversation(persistedConversationId);
1391
+ }
1392
+
1393
+ const assistantTurn: api.AgentChatHistoryItem = { role: "assistant", content: answer };
1394
+ if (persistedConversationId) {
1395
+ void api.appendSessionMessages(persistedConversationId, {
1396
+ messages: [
1397
+ { role: "user", content: trimmed },
1398
+ { role: "assistant", content: answer },
1399
+ ],
1400
+ });
1401
+ }
1402
+ setHistoryState((current) => [...current, assistantTurn]);
1403
+ setStatusHint("Ready for next message.");
1404
+ setIsLoading(false);
1405
+ };
1406
+
1407
+ useInput(
1408
+ (character, key) => {
1409
+ if (key.return) {
1410
+ void runAutoTurn(input);
1411
+ return;
1412
+ }
1413
+
1414
+ if (key.backspace || key.delete) {
1415
+ setInputValue(input.slice(0, -1));
1416
+ return;
1417
+ }
1418
+
1419
+ if (key.upArrow) {
1420
+ if (!inputHistory.length) {
1421
+ return;
1422
+ }
1423
+ const nextIndex =
1424
+ inputHistoryIndex === -1 ? inputHistory.length - 1 : Math.max(0, inputHistoryIndex - 1);
1425
+ setInputHistoryIndex(nextIndex);
1426
+ setInput(inputHistory[nextIndex] ?? "");
1427
+ return;
1428
+ }
1429
+
1430
+ if (key.downArrow) {
1431
+ if (inputHistoryIndex === -1) {
1432
+ return;
1433
+ }
1434
+ const nextIndex = inputHistoryIndex + 1;
1435
+ if (nextIndex >= inputHistory.length) {
1436
+ setInputHistoryIndex(-1);
1437
+ setInput("");
1438
+ return;
1439
+ }
1440
+ setInputHistoryIndex(nextIndex);
1441
+ setInput(inputHistory[nextIndex] ?? "");
1442
+ return;
1443
+ }
1444
+
1445
+ if (
1446
+ key.tab ||
1447
+ key.leftArrow ||
1448
+ key.rightArrow ||
1449
+ key.escape ||
1450
+ key.ctrl ||
1451
+ key.meta
1452
+ ) {
1453
+ return;
1454
+ }
1455
+
1456
+ if (character) {
1457
+ setInputValue(input + character);
1458
+ }
1459
+ },
1460
+ { isActive: isRawModeSupported && !isLoading }
1461
+ );
1462
+
1463
+ const persistConversation = (nextConversationId?: string) => {
1464
+ if (nextConversationId) {
1465
+ setLastAgentConversationId(nextConversationId);
1466
+ }
1467
+ };
1468
+
1469
+ useEffect(() => {
1470
+ return () => {
1471
+ persistConversation(conversationId);
1472
+ };
1473
+ }, [conversationId]);
1474
+
1475
+ useEffect(() => {
1476
+ if (!firstMessage || didSeedFirstMessage.current) {
1477
+ return;
1478
+ }
1479
+ if (isHistoryLoading) {
1480
+ return;
1481
+ }
1482
+
1483
+ didSeedFirstMessage.current = true;
1484
+ void runAutoTurn(firstMessage);
1485
+ }, [firstMessage, isHistoryLoading]);
1486
+
1487
+ return (
1488
+ <Box flexDirection="column">
1489
+ <Text bold>Agent mode selected.</Text>
1490
+ <Text>
1491
+ Mode: {mode === "research" ? "research" : "standard"}{" "}
1492
+ {conversationId ? `(conversation ${conversationId})` : "(new conversation)"}
1493
+ </Text>
1494
+ {isHistoryLoading ? <Loading message="Loading conversation history..." /> : null}
1495
+ <Box flexDirection="column" marginTop={1}>
1496
+ {turns.map((entry, idx) => (
1497
+ <Text key={`agent-turn-${idx}`} dimColor={entry.role === "user"}>
1498
+ {formatAutoAgentTurn(entry)}
1499
+ </Text>
1500
+ ))}
1501
+ </Box>
1502
+ <Box marginTop={1}>
1503
+ {isLoading ? <Loading message={statusHint} /> : <Text color="cyan">&gt; </Text>}
1504
+ {isLoading ? null : (
1505
+ <TextInput
1506
+ value={input}
1507
+ focus={false}
1508
+ onChange={() => {}}
1509
+ onSubmit={() => {}}
1510
+ />
1511
+ )}
1512
+ </Box>
1513
+ {error && (
1514
+ <Box marginTop={1}>
1515
+ <ErrorMessage error={error} />
1516
+ </Box>
1517
+ )}
1518
+ <Text dimColor>Type Ctrl+C to exit.</Text>
1519
+ </Box>
1520
+ );
1521
+ }
1522
+
1523
/**
 * Interactive session picker for resuming a previous agent conversation.
 *
 * Loads the session list from the backend, lets the user filter by typing
 * and navigate with arrow keys, then hands the chosen session off to
 * AutoAgentInteractiveSession with session creation disabled.
 *
 * @param mode  Agent mode to resume in; defaults to "standard" when omitted.
 */
export function AgentResumeCommand({
  mode,
}: {
  mode?: "standard" | "research";
}) {
  const { isRawModeSupported } = useStdin();
  // True once the initial session-list fetch has settled (ok or error).
  const [isReady, setReady] = useState(false);
  const [selectedSession, setSelectedSession] = useState<
    {
      id: string;
    } | undefined
  >();
  // Last conversation id persisted by a previous run, used to pre-highlight.
  const [defaultSessionId] = useState(() => getLastAgentConversationId());
  const [searchQuery, setSearchQuery] = useState("");
  // Index into matchingSessions (the filtered list) for arrow-key selection.
  const [highlightIndex, setHighlightIndex] = useState(0);
  const [sessions, setSessions] = useState<
    Array<{
      id: string;
      title: string;
      updated_at: string;
      is_archived: boolean;
    }>
  >([]);
  const [error, setError] = useState("");
  const [statusHint, setStatusHint] = useState("Loading sessions...");
  // State for fetching the chosen session's message history.
  const [isSessionLoading, setSessionLoading] = useState(false);
  const [sessionError, setSessionError] = useState("");
  const [selectedSessionHistory, setSelectedSessionHistory] = useState<
    api.AgentChatHistoryItem[] | undefined
  >(undefined);

  // Fetch the full message history whenever a session is selected.
  // isActive guards against state updates after unmount / re-selection.
  useEffect(() => {
    if (!selectedSession?.id) {
      setSelectedSessionHistory(undefined);
      setSessionError("");
      return;
    }

    let isActive = true;
    const loadSessionHistory = async () => {
      setSessionLoading(true);
      setSessionError("");

      const result = await api.getAgentSession(selectedSession.id);
      if (!isActive) {
        return;
      }

      if (!result.ok) {
        setSessionError(
          `Could not load selected session ${selectedSession.id}: ${result.error || "Unknown error."}`
        );
        setSelectedSessionHistory(undefined);
        setSessionLoading(false);
        return;
      }

      // Keep only well-formed messages (string role AND string content);
      // anything else from the backend is silently dropped.
      const restoredMessages = (result.data?.messages ?? [])
        .map((message) => {
          if (typeof message?.role !== "string" || typeof message?.content !== "string") {
            return undefined;
          }
          return {
            role: message.role,
            content: message.content,
          } as api.AgentChatHistoryItem;
        })
        .filter((message): message is api.AgentChatHistoryItem => message !== undefined);

      setSelectedSessionHistory(restoredMessages);
      setSessionLoading(false);
    };

    void loadSessionHistory();

    return () => {
      isActive = false;
      setSessionLoading(false);
    };
  }, [selectedSession?.id]);

  // One-shot fetch of the session list on mount.
  useEffect(() => {
    let isActive = true;
    const loadSessions = async () => {
      try {
        const result = await api.listAgentSessions();
        if (!isActive) {
          return;
        }
        if (!result.ok) {
          setError(result.error || "Could not load agent sessions.");
          setStatusHint("");
          setReady(true);
          setSessions([]);
          return;
        }
        const remoteSessions = (result.data?.sessions ?? []).map((session) => ({
          id: session.id,
          title: session.title || "(untitled)",
          updated_at: session.updated_at || new Date().toISOString(),
          is_archived: session.is_archived,
        }));
        // Newest first; unparsable timestamps keep their relative order.
        remoteSessions.sort((a, b) => {
          const aTime = Date.parse(a.updated_at);
          const bTime = Date.parse(b.updated_at);
          if (Number.isNaN(aTime) || Number.isNaN(bTime)) {
            return 0;
          }
          return bTime - aTime;
        });
        // Pre-highlight the most recently used session, if still present.
        // NOTE(review): this index is computed against the FULL list, but
        // highlightIndex is consumed against matchingSessions (the filtered
        // list) and is never reset when searchQuery changes — verify the
        // highlight stays in range after the user types a filter.
        if (defaultSessionId) {
          const lastIndex = remoteSessions.findIndex((session) => session.id === defaultSessionId);
          if (lastIndex >= 0) {
            setHighlightIndex(lastIndex);
          }
        }
        setSessions(remoteSessions);
        setStatusHint("");
        setReady(true);
        return;
      } catch (err) {
        if (!isActive) {
          return;
        }
        const message = err instanceof Error ? err.message : "Could not load agent sessions.";
        setError(message);
        setStatusHint("");
        setSessions([]);
        setReady(true);
      }
    };
    void loadSessions();

    return () => {
      isActive = false;
    };
  }, []);

  // Human-readable one-line label for a session row.
  const normalizeSessionLabel = (session: {
    id: string;
    title: string;
    is_archived: boolean;
    updated_at: string;
  }) => `${session.title || "(untitled)"} ${session.is_archived ? "[archived] " : ""}(updated ${session.updated_at})`;

  // Case-insensitive substring filter over title + id.
  const matchingSessions = sessions.filter((session) => {
    const searchable = `${session.title} ${session.id}`.toLowerCase();
    const normalizedQuery = searchQuery.trim().toLowerCase();
    return normalizedQuery.length === 0 || searchable.includes(normalizedQuery);
  });

  // Arrow keys cycle (wrap-around) through ALL matches; Enter selects.
  // NOTE(review): the render below slices the list to 12 rows, so the
  // highlight can move onto sessions that are not visible — confirm intended.
  useInput((_, key) => {
    if (!matchingSessions.length) return;
    if (key.upArrow) {
      setHighlightIndex((idx) => (idx === 0 ? matchingSessions.length - 1 : idx - 1));
      return;
    }
    if (key.downArrow) {
      setHighlightIndex((idx) => (idx === matchingSessions.length - 1 ? 0 : idx + 1));
      return;
    }
    if (key.return) {
      const selected = matchingSessions[highlightIndex];
      if (!selected) {
        setError("No matching sessions found.");
        return;
      }
      setError("");
      setSelectedSession({ id: selected.id });
    }
  }, { isActive: isRawModeSupported });

  // Raw mode is required for key-by-key input; bail out with guidance.
  if (!isRawModeSupported) {
    return (
      <Box flexDirection="column">
        <ErrorMessage error="Interactive input is not supported in this terminal." />
        <Text>Use interactive mode for this environment:</Text>
        <Text dimColor> agent --help</Text>
      </Box>
    );
  }

  if (error && !sessions.length) {
    return <ErrorMessage error={error} />;
  }

  if (!isReady) {
    return <Loading message={statusHint || "Loading sessions..."} />;
  }

  if (!sessions.length) {
    return (
      <ErrorMessage
        error="No agent sessions found. Start a new conversation with `pioneer agent` and try again."
      />
    );
  }

  // A session was chosen: show load progress, then hand off to the
  // interactive session with creation disabled (resume-only).
  if (selectedSession?.id) {
    if (isSessionLoading) {
      return <Loading message={`Loading conversation ${selectedSession.id}...`} />;
    }
    if (sessionError) {
      return <ErrorMessage error={sessionError} />;
    }

    return (
      <AutoAgentInteractiveSession
        conversationId={selectedSession.id}
        mode={mode ?? "standard"}
        history={selectedSessionHistory}
        allowSessionCreation={false}
      />
    );
  }

  // Default view: filterable session list plus a search input.
  return (
    <Box flexDirection="column">
      <Text bold>Agent sessions:</Text>
      <Text>
        {defaultSessionId ? `Last session: ${defaultSessionId}. ` : ""}
        Type to filter. Use ↑/↓ to navigate and Enter to select.
      </Text>
      <Text dimColor>Search title or session ID.</Text>
      <Text> </Text>
      {matchingSessions.length === 0 ? (
        <Text color="yellow">No sessions match "{searchQuery}".</Text>
      ) : (
        matchingSessions.slice(0, 12).map((session, index) => {
          const isSelected = index === highlightIndex;
          return (
            <Text key={session.id} color={isSelected ? "cyan" : undefined}>
              {`${isSelected ? ">" : " "} ${normalizeSessionLabel(session)} (${session.id})`}
            </Text>
          );
        })
      )}
      <Box marginTop={1}>
        <Text color="cyan">&gt; </Text>
        <TextInput
          value={searchQuery}
          onChange={setSearchQuery}
          onSubmit={(rawValue) => {
            const trimmed = rawValue.trim().toLowerCase();
            if (!trimmed && !matchingSessions.length) {
              return;
            }
            // Prefer an exact id/title match; otherwise take the highlighted row.
            const exactMatch = matchingSessions.find(
              (session) =>
                session.id.toLowerCase() === trimmed ||
                session.title.toLowerCase() === trimmed
            );
            const selected = exactMatch ?? matchingSessions[highlightIndex];
            if (!selected?.id) {
              setError("No matching session found.");
              return;
            }
            setError("");
            setSelectedSession({ id: selected.id });
          }}
        />
      </Box>
      {error && <ErrorMessage error={error} />}
    </Box>
  );
}
1789
+
1790
/**
 * First-message prompt for agent mode.
 *
 * Collects one line of input via raw key handling (with an up/down input
 * history), then mounts AutoAgentInteractiveSession seeded with that line
 * as the first message. In non-raw terminals it prints guidance and exits
 * the process shortly after.
 *
 * @param conversationId       Existing conversation to continue, if any.
 * @param history              Prior turns to seed the session with.
 * @param mode                 "standard" (default) or "research".
 * @param allowSessionCreation Forwarded to the session; defaults to true.
 */
function AgentInteractivePrompt({
  conversationId,
  history,
  mode,
  allowSessionCreation = true,
}: {
  conversationId?: string;
  history?: api.AgentChatHistoryItem[];
  mode?: "standard" | "research";
  allowSessionCreation?: boolean;
}) {
  const [input, setInput] = useState("");
  // Previously submitted lines, navigated with ↑/↓.
  const [inputHistory, setInputHistory] = useState<string[]>([]);
  // -1 means "not browsing history" (live input).
  const [inputHistoryIndex, setInputHistoryIndex] = useState(-1);
  // Flips to true once the user submits their first message.
  const [isReady, setReady] = useState(false);
  const [message, setMessage] = useState("");
  const { isRawModeSupported } = useStdin();
  const shouldExitImmediately = !isRawModeSupported;

  // Append a submitted line to history, skipping blanks and immediate dupes.
  const commitInputToHistory = (value: string) => {
    const normalized = value.trim();
    if (!normalized) {
      return;
    }
    setInputHistory((previous) => {
      if (previous.length > 0 && previous[previous.length - 1] === normalized) {
        return previous;
      }
      return [...previous, normalized];
    });
  };

  // Any direct edit to the buffer drops out of history-browsing mode.
  const setInputValue = (nextValue: string) => {
    setInput(nextValue);
    setInputHistoryIndex(-1);
  };

  // Manual key handling: Enter submits, backspace edits, ↑/↓ browse
  // history, other control keys are ignored, printable chars append.
  useInput(
    (character, key) => {
      if (key.return) {
        const trimmed = input.trim();
        if (!trimmed) {
          return;
        }
        commitInputToHistory(trimmed);
        setMessage(trimmed);
        setReady(true);
        return;
      }

      if (key.backspace || key.delete) {
        setInputValue(input.slice(0, -1));
        return;
      }

      if (key.upArrow) {
        if (!inputHistory.length) {
          return;
        }
        // From live input jump to the newest entry; otherwise step back,
        // clamped at the oldest entry.
        const nextIndex =
          inputHistoryIndex === -1 ? inputHistory.length - 1 : Math.max(0, inputHistoryIndex - 1);
        setInputHistoryIndex(nextIndex);
        setInput(inputHistory[nextIndex] ?? "");
        return;
      }

      if (key.downArrow) {
        if (inputHistoryIndex === -1) {
          return;
        }
        const nextIndex = inputHistoryIndex + 1;
        // Stepping past the newest entry returns to an empty live buffer.
        if (nextIndex >= inputHistory.length) {
          setInputHistoryIndex(-1);
          setInput("");
          return;
        }
        setInputHistoryIndex(nextIndex);
        setInput(inputHistory[nextIndex] ?? "");
        return;
      }

      // Ignore navigation/modifier keys we do not handle.
      if (
        key.tab ||
        key.leftArrow ||
        key.rightArrow ||
        key.escape ||
        key.ctrl ||
        key.meta
      ) {
        return;
      }

      if (character) {
        setInputValue(input + character);
      }
    },
    // Stop capturing keys once the first message has been submitted.
    { isActive: isRawModeSupported && !isReady }
  );

  // Non-interactive terminal: exit the process after a short delay so the
  // guidance text below has a chance to render first.
  useEffect(() => {
    if (!shouldExitImmediately) {
      return;
    }

    const timeout = setTimeout(() => {
      process.exit(0);
    }, 75);

    return () => clearTimeout(timeout);
  }, [shouldExitImmediately]);

  if (!isRawModeSupported) {
    return (
      <Box flexDirection="column">
        <ErrorMessage error="Interactive input is not supported in this terminal." />
        <Text>Use interactive mode for this environment:</Text>
        <Text dimColor>{` agent${mode === "research" ? " --mode research" : ""}`}</Text>
        <Text dimColor> agent --help</Text>
      </Box>
    );
  }

  // Prompt view: TextInput here is display-only (focus={false}, no-op
  // handlers) — actual editing happens in the useInput handler above.
  if (!isReady) {
    return (
      <Box flexDirection="column">
        <Text bold>Agent mode selected.</Text>
        <Text>Type your message and press enter to start:</Text>
        <Box>
          <Text color="cyan">&gt; </Text>
          <TextInput
            value={input}
            focus={false}
            onChange={() => {}}
            onSubmit={() => {}}
          />
        </Box>
        <Text dimColor>Type Ctrl+C to cancel.</Text>
      </Box>
    );
  }

  // Hand off to the full interactive session, seeded with the first message.
  return (
    <AutoAgentInteractiveSession
      conversationId={conversationId}
      history={history}
      mode={mode === "research" ? "research" : "standard"}
      firstMessage={message}
      allowSessionCreation={allowSessionCreation}
    />
  );
}
1941
+
1942
+ // ─────────────────────────────────────────────────────────────────────────────
1943
+ // Interactive Model Create Selector
1944
+ // ─────────────────────────────────────────────────────────────────────────────
1945
+
1946
/**
 * Interactive base-model picker used when creating a model entry without an
 * explicit --model flag.
 *
 * Loads the base-model catalogue, offers type-to-filter plus arrow-key
 * selection, and on confirmation creates the project via ApiCommand with the
 * chosen model as both active and selected model.
 *
 * @param name        Project name; falls back to the selected model id.
 * @param icon        Optional project icon.
 * @param repo        Optional repository reference.
 * @param description Optional project description.
 * @param example     Optional example payload forwarded to createProject.
 */
function ModelCreateInteractive({
  name,
  icon,
  repo,
  description,
  example,
}: {
  name?: string;
  icon?: string;
  repo?: string;
  description?: string;
  example?: CreateProjectExample;
}) {
  const [query, setQuery] = useState("");
  const [models, setModels] = useState<api.BaseModelInfo[]>([]);
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState("");
  // Non-null once the user has confirmed a model; switches to create view.
  const [selectedModelId, setSelectedModelId] = useState<string | null>(null);
  const { isRawModeSupported } = useStdin();
  const [highlightIndex, setHighlightIndex] = useState(0);

  // One-shot fetch of the base-model catalogue on mount.
  useEffect(() => {
    (async () => {
      const result = await api.listBaseModels();
      if (!result.ok) {
        setError(result.error ?? "Unable to load base models.");
        setLoading(false);
        return;
      }
      // The endpoint may return either { models: [...] } or a bare array;
      // normalize both shapes.
      const modelsData = result.data
        ? Array.isArray((result.data as api.BaseModelsResponse).models)
          ? (result.data as api.BaseModelsResponse).models
          : (result.data as api.BaseModelInfo[])
        : [];
      // NOTE(review): .sort mutates modelsData in place — harmless here
      // since the array is local, but worth confirming nothing else aliases
      // result.data.
      setModels(modelsData.sort((a, b) => a.id.localeCompare(b.id)));
      setLoading(false);
    })();
  }, []);

  // Case-insensitive substring filter over id/name/label/description.
  const normalizedQuery = query.trim().toLowerCase();
  const matchingModels = models.filter((model) => {
    const text = `${model.id} ${model.name ?? ""} ${model.label ?? ""} ${model.description ?? ""}`.toLowerCase();
    return normalizedQuery.length === 0 || text.includes(normalizedQuery);
  });
  const topMatches = matchingModels;
  // Any edit to the query restarts the highlight at the top of the list.
  useEffect(() => {
    setHighlightIndex(0);
  }, [query]);

  // Map the typed query to a model id: exact id match wins, otherwise the
  // currently highlighted row (also used when the query is empty).
  const resolveModelId = (queryValue: string): string | undefined => {
    const trimmed = queryValue.trim();
    if (!trimmed) {
      return topMatches[highlightIndex]?.id;
    }
    const exact = models.find(
      (model) => model.id.toLowerCase() === trimmed.toLowerCase()
    );
    if (exact) return exact.id;
    return topMatches[highlightIndex]?.id;
  };

  // Keep the highlight in range when the filtered list shrinks.
  useEffect(() => {
    if (highlightIndex >= topMatches.length) {
      setHighlightIndex(topMatches.length > 0 ? topMatches.length - 1 : 0);
    }
  }, [highlightIndex, topMatches.length]);

  // Arrow keys cycle with wrap-around; Enter confirms the resolved model.
  useInput(
    (_, key) => {
      if (!topMatches.length) return;
      if (key.upArrow) {
        setHighlightIndex((idx) => (idx === 0 ? topMatches.length - 1 : idx - 1));
        return;
      }
      if (key.downArrow) {
        setHighlightIndex((idx) => (idx === topMatches.length - 1 ? 0 : idx + 1));
        return;
      }
      if (key.return) {
        const resolvedModel = resolveModelId(query);
        if (!resolvedModel) {
          setError("No matching models found. Refine your search and try again.");
          return;
        }
        setSelectedModelId(resolvedModel);
      }
    },
    { isActive: isRawModeSupported }
  );

  if (error) {
    return (
      <Box flexDirection="column">
        <ErrorMessage error={error} />
        <Text> </Text>
        <Text dimColor>Try again with --model explicitly, or check your API connectivity.</Text>
      </Box>
    );
  }

  // Selection made: delegate the actual createProject call to ApiCommand.
  if (selectedModelId) {
    return (
      <ApiCommand
        action={() =>
          api.createProject({
            name: name ?? selectedModelId,
            ...(icon ? { icon } : {}),
            ...(repo ? { repo } : {}),
            ...(description ? { description } : {}),
            active_model_id: selectedModelId,
            selected_model_id: selectedModelId,
            ...(example ? { example } : {}),
          })
        }
        successMessage="Model entry created"
      />
    );
  }

  if (loading) {
    return <Loading message="Loading supported models..." />;
  }

  // Picker view: query input plus the filtered, highlightable model list.
  return (
    <Box flexDirection="column">
      <Text bold>Choose a base model for this entry</Text>
      <Text>Type to filter. Use ↑/↓ to navigate and Enter to select.</Text>
      <Text dimColor>Type any part of model id, name, label, or description.</Text>
      <Text> </Text>
      <TextInput
        value={query}
        onChange={(value) => setQuery(value)}
        onSubmit={(value) => {
          const resolvedModel = resolveModelId(value);
          if (!resolvedModel) {
            setError("No matching models found. Refine your search and try again.");
            return;
          }
          setSelectedModelId(resolvedModel);
        }}
        placeholder="e.g. qwen/qwen3-8b"
      />
      <Text> </Text>
      {topMatches.length === 0 ? (
        <Text dimColor>No matching models found.</Text>
      ) : (
        <Box flexDirection="column">
          {topMatches.map((model, index) => {
            const suffix = [model.label, model.task_type, model.type].filter(Boolean).join(" · ");
            const modelLine = `${model.id}${suffix ? ` (${suffix})` : ""}`;
            const isHighlighted = index === highlightIndex;
            return (
              <Text key={model.id} color={isHighlighted ? "cyan" : undefined} bold={isHighlighted}>
                {isHighlighted ? "▶ " : "  "}
                {modelLine}
              </Text>
            );
          })}
        </Box>
      )}
      <Text> </Text>
      <Text dimColor>Press Enter to select the highlighted model.</Text>
      <Text dimColor> </Text>
      <Text dimColor>Tip: You can still run {"model endpoints create --model \"<base-model-id>\""} for exact model ids.</Text>
    </Box>
  );
}
2113
+
2114
+ // ─────────────────────────────────────────────────────────────────────────────
2115
+ // Job Logs Command (prettified output)
2116
+ // ─────────────────────────────────────────────────────────────────────────────
2117
+
2118
/**
 * One-shot command that fetches and pretty-prints the training logs for a
 * job, then exits the Ink app shortly after rendering.
 *
 * ERROR-level lines are shown in red, WARNING in yellow, everything else in
 * the default color.
 *
 * @param jobId  Identifier of the training job whose logs to display.
 */
function JobLogsCommand({ jobId }: { jobId: string }) {
  const { exit } = useApp();
  const [state, setState] = useState<"loading" | "done" | "error">("loading");
  const [logs, setLogs] = useState<api.TrainingLog[]>([]);
  const [error, setError] = useState("");

  useEffect(() => {
    (async () => {
      const result = await api.getJobLogs(jobId);
      if (result.ok && result.data) {
        setLogs(result.data.logs || []);
        setState("done");
      } else {
        setError(result.error ?? "Unknown error");
        setState("error");
      }
      // Exit after a short delay so the final frame renders either way.
      setTimeout(() => exit(), 500);
    })();
  }, [jobId, exit]);

  if (state === "loading") {
    return <Loading />;
  }

  if (state === "error") {
    return <ErrorMessage error={error} />;
  }

  // "[HH:MM:SS] [LEVEL] message" — timestamp in the local timezone.
  const formatLogEntry = (log: api.TrainingLog): string => {
    const ts = new Date(log.timestamp).toLocaleTimeString();
    return `[${ts}] [${log.level}] ${log.message}`;
  };

  return (
    <Box flexDirection="column">
      {logs.length === 0 ? (
        <Text dimColor>No logs available</Text>
      ) : (
        logs.map((log) => (
          <Text key={log.id} color={log.level === "ERROR" ? "red" : log.level === "WARNING" ? "yellow" : undefined}>
            {formatLogEntry(log)}
          </Text>
        ))
      )}
    </Box>
  );
}
2165
+
2166
+ // ─────────────────────────────────────────────────────────────────────────────
2167
+ // Local Dataset Helpers
2168
+ // ─────────────────────────────────────────────────────────────────────────────
2169
+
2170
+ const DATASETS_DIR = path.join(process.cwd(), "datasets");
2171
+
2172
+ function ensureDatasetsDir(): void {
2173
+ if (!fs.existsSync(DATASETS_DIR)) {
2174
+ fs.mkdirSync(DATASETS_DIR, { recursive: true });
2175
+ }
2176
+ }
2177
+
2178
/** Metadata describing one dataset JSON file stored under ./datasets. */
interface LocalDataset {
  // File name without its .json extension.
  id: string;
  // Display name; same value as id.
  name: string;
  // Absolute path to the JSON file on disk.
  path: string;
  // Inferred task type: "ner", "classification", "custom", or "unknown".
  type: string;
  // File size in bytes (from fs.statSync).
  size: number;
  // Number of examples parsed from the file; 0 when parsing failed.
  sample_size: number;
  // ISO-8601 timestamp taken from the file's birthtime.
  created_at: string;
  // Discriminator distinguishing local datasets from remote ones.
  source: "local";
}
2188
+
2189
+ function listLocalDatasets(): LocalDataset[] {
2190
+ if (!fs.existsSync(DATASETS_DIR)) {
2191
+ return [];
2192
+ }
2193
+
2194
+ const files = fs.readdirSync(DATASETS_DIR).filter((f) => f.endsWith(".json"));
2195
+ return files.map((filename) => {
2196
+ const filePath = path.join(DATASETS_DIR, filename);
2197
+ const stats = fs.statSync(filePath);
2198
+ let data: unknown[] = [];
2199
+ let type = "unknown";
2200
+
2201
+ try {
2202
+ const content = JSON.parse(fs.readFileSync(filePath, "utf-8"));
2203
+ data = Array.isArray(content) ? content : content.data ?? [];
2204
+ const firstItem = data[0] as Record<string, unknown> | undefined;
2205
+ type = content.task_type ?? (firstItem?.spans ? "ner" : firstItem?.label ? "classification" : "custom");
2206
+ } catch {
2207
+ // Ignore parse errors
2208
+ }
2209
+
2210
+ return {
2211
+ id: filename.replace(".json", ""),
2212
+ name: filename.replace(".json", ""),
2213
+ path: filePath,
2214
+ type,
2215
+ size: stats.size,
2216
+ sample_size: data.length,
2217
+ created_at: stats.birthtime.toISOString(),
2218
+ source: "local" as const,
2219
+ };
2220
+ });
2221
+ }
2222
+
2223
+ function saveLocalDataset(name: string, data: unknown, type: string): string {
2224
+ ensureDatasetsDir();
2225
+ const filename = `${name.replace(/[^a-zA-Z0-9-_]/g, "_")}.json`;
2226
+ const filePath = path.join(DATASETS_DIR, filename);
2227
+ fs.writeFileSync(filePath, JSON.stringify({ task_type: type, data }, null, 2));
2228
+ return filePath;
2229
+ }
2230
+
2231
+ // ─────────────────────────────────────────────────────────────────────────────
2232
+ // Generate Command (saves to local file when --save is false)
2233
+ // ─────────────────────────────────────────────────────────────────────────────
2234
+
2235
/** Props for GenerateCommand, which runs a dataset-generation API call. */
interface GenerateCommandProps<T> {
  // API call that performs the generation and yields the result payload.
  action: () => Promise<api.ApiResult<T>>;
  // Name used when saving the generated dataset locally.
  datasetName: string;
  // Task type recorded with the saved dataset.
  datasetType: string;
  // Whether to keep the dataset on the backend; when false the result is
  // saved to a local file (per the section header comment above).
  saveToRemote: boolean;
}
2241
+
2242
/** Minimal shape of a generation API response consumed by GenerateCommand. */
interface GenerateResult {
  // Generated examples returned inline, when present.
  data?: unknown[];
  // Backend success flag — presumably set by the generation endpoint;
  // TODO(review): confirm exact semantics against the API.
  success?: boolean;
  // Reference to the remote dataset when it was saved server-side.
  dataset?: { id?: string; dataset_name?: string };
}
799
2247
 
800
2248
  function GenerateCommand<T extends GenerateResult>({
@@ -867,271 +2315,6 @@ function GenerateCommand<T extends GenerateResult>({
867
2315
  );
868
2316
  }
869
2317
 
870
- // ─────────────────────────────────────────────────────────────────────────────
871
- // Notebook Run Command
872
- // ─────────────────────────────────────────────────────────────────────────────
873
-
874
- interface NotebookCell {
875
- cell_type: "code" | "markdown" | "raw";
876
- source: string[];
877
- metadata?: Record<string, unknown>;
878
- }
879
-
880
- interface NotebookFile {
881
- cells: NotebookCell[];
882
- metadata?: Record<string, unknown>;
883
- nbformat: number;
884
- nbformat_minor: number;
885
- }
886
-
887
- function readNotebookFile(filePath: string): NotebookFile {
888
- const absPath = path.resolve(filePath);
889
- if (!fs.existsSync(absPath)) {
890
- throw new Error(`File not found: ${absPath}`);
891
- }
892
- const content = fs.readFileSync(absPath, "utf-8");
893
- return JSON.parse(content) as NotebookFile;
894
- }
895
-
896
- interface NotebookRunCommandProps {
897
- filePath: string;
898
- gpu: string;
899
- loadFelixHelpers: boolean;
900
- }
901
-
902
- function NotebookRunCommand({ filePath, gpu, loadFelixHelpers }: NotebookRunCommandProps) {
903
- const { exit } = useApp();
904
- const [phase, setPhase] = useState<"reading" | "session" | "running" | "done" | "error">("reading");
905
- const [sessionId, setSessionId] = useState<string | null>(null);
906
- const [currentCell, setCurrentCell] = useState(0);
907
- const [totalCells, setTotalCells] = useState(0);
908
- const [outputs, setOutputs] = useState<Array<{ cell: number; source: string; result: api.ExecuteCodeResponse | null; error?: string }>>([]);
909
- const [error, setError] = useState("");
910
-
911
- useEffect(() => {
912
- (async () => {
913
- // 1. Read notebook
914
- let notebook: NotebookFile;
915
- try {
916
- notebook = readNotebookFile(filePath);
917
- } catch (e) {
918
- setError(e instanceof Error ? e.message : String(e));
919
- setPhase("error");
920
- setTimeout(() => exit(), 500);
921
- return;
922
- }
923
-
924
- const codeCells = notebook.cells.filter((c) => c.cell_type === "code");
925
- setTotalCells(codeCells.length);
926
-
927
- if (codeCells.length === 0) {
928
- setError("No code cells found in notebook");
929
- setPhase("error");
930
- setTimeout(() => exit(), 500);
931
- return;
932
- }
933
-
934
- // 2. Create session
935
- setPhase("session");
936
- const sessionResult = await api.createNotebookSession({
937
- gpu,
938
- load_felix_helpers: loadFelixHelpers,
939
- });
940
-
941
- if (!sessionResult.ok || !sessionResult.data) {
942
- setError(sessionResult.error ?? "Failed to create notebook session");
943
- setPhase("error");
944
- setTimeout(() => exit(), 500);
945
- return;
946
- }
947
-
948
- let sid = sessionResult.data.session_id;
949
- setSessionId(sid);
950
-
951
- // Poll until session is ready (if status is 'creating')
952
- if (sessionResult.data.status === "creating") {
953
- let ready = false;
954
- for (let attempt = 0; attempt < 60; attempt++) {
955
- await new Promise((r) => setTimeout(r, 2000));
956
- const status = await api.getNotebookSessionStatus(sid);
957
- if (status.ok && status.data?.status === "ready") {
958
- // Status response returns the real session_id when ready
959
- sid = status.data.session_id;
960
- setSessionId(sid);
961
- ready = true;
962
- break;
963
- }
964
- if (status.ok && status.data?.status === "failed") {
965
- setError(status.data.error ?? "Session creation failed");
966
- setPhase("error");
967
- setTimeout(() => exit(), 500);
968
- return;
969
- }
970
- }
971
- if (!ready) {
972
- setError("Session creation timed out after 2 minutes");
973
- setPhase("error");
974
- setTimeout(() => exit(), 500);
975
- return;
976
- }
977
- }
978
-
979
- // 3. Execute cells sequentially
980
- setPhase("running");
981
- const cellOutputs: typeof outputs = [];
982
-
983
- for (let i = 0; i < codeCells.length; i++) {
984
- setCurrentCell(i + 1);
985
- const cell = codeCells[i];
986
- const code = Array.isArray(cell.source) ? cell.source.join("") : String(cell.source);
987
-
988
- if (!code.trim()) {
989
- cellOutputs.push({ cell: i + 1, source: code, result: null });
990
- continue;
991
- }
992
-
993
- const execResult = await api.executeNotebookCode({
994
- code,
995
- session_id: sid,
996
- cell_id: `cell-${i}`,
997
- });
998
-
999
- if (!execResult.ok) {
1000
- cellOutputs.push({ cell: i + 1, source: code, result: null, error: execResult.error ?? "Execution failed" });
1001
- // Continue running remaining cells despite errors
1002
- } else {
1003
- cellOutputs.push({ cell: i + 1, source: code, result: execResult.data ?? null });
1004
- }
1005
-
1006
- setOutputs([...cellOutputs]);
1007
- }
1008
-
1009
- setPhase("done");
1010
- setTimeout(() => exit(), 1000);
1011
- })();
1012
- }, [filePath, gpu, loadFelixHelpers, exit]);
1013
-
1014
- if (phase === "error") {
1015
- return <ErrorMessage error={error} />;
1016
- }
1017
-
1018
- if (phase === "reading") {
1019
- return <Loading message="Reading notebook..." />;
1020
- }
1021
-
1022
- if (phase === "session") {
1023
- return <Loading message={`Creating ${gpu} session...`} />;
1024
- }
1025
-
1026
- // Render outputs
1027
- const renderCellOutput = (entry: (typeof outputs)[0]) => {
1028
- const lines: string[] = [];
1029
-
1030
- if (entry.error) {
1031
- lines.push(` Error: ${entry.error}`);
1032
- return lines.join("\n");
1033
- }
1034
-
1035
- if (!entry.result) {
1036
- lines.push(" (empty cell)");
1037
- return lines.join("\n");
1038
- }
1039
-
1040
- for (const out of entry.result.outputs) {
1041
- if (out.type === "stream" && out.text) {
1042
- // Truncate long stream output
1043
- const text = out.text.length > 2000 ? out.text.slice(0, 2000) + "\n ... (truncated)" : out.text;
1044
- lines.push(text);
1045
- } else if (out.type === "execute_result" || out.type === "display_data") {
1046
- if (out.data?.["text/plain"]) {
1047
- lines.push(out.data["text/plain"]);
1048
- } else if (out.text) {
1049
- lines.push(out.text);
1050
- }
1051
- if (out.data?.["image/png"]) {
1052
- lines.push(" [image output]");
1053
- }
1054
- } else if (out.type === "error") {
1055
- lines.push(` ${out.ename}: ${out.evalue}`);
1056
- if (out.traceback) {
1057
- // Strip ANSI codes for cleaner terminal output
1058
- lines.push(out.traceback.map((l) => l.replace(/\x1b\[[0-9;]*m/g, "")).join("\n"));
1059
- }
1060
- }
1061
- }
1062
-
1063
- if (entry.result.execution_time_ms) {
1064
- lines.push(` (${(entry.result.execution_time_ms / 1000).toFixed(1)}s)`);
1065
- }
1066
-
1067
- return lines.join("\n") || " (no output)";
1068
- };
1069
-
1070
- return (
1071
- <Box flexDirection="column">
1072
- {phase === "running" && (
1073
- <Box>
1074
- <Text color="yellow"><Spinner type="dots" /></Text>
1075
- <Text> Running cell {currentCell}/{totalCells}...</Text>
1076
- </Box>
1077
- )}
1078
-
1079
- <Static items={outputs}>
1080
- {(entry) => (
1081
- <Box key={entry.cell} flexDirection="column" marginBottom={1}>
1082
- <Text bold color="cyan">── Cell {entry.cell}/{totalCells} ──</Text>
1083
- <Text dimColor>{entry.source.split("\n").slice(0, 3).join("\n")}{entry.source.split("\n").length > 3 ? "\n ..." : ""}</Text>
1084
- <Text color={entry.error || (entry.result && !entry.result.success) ? "red" : undefined}>
1085
- {renderCellOutput(entry)}
1086
- </Text>
1087
- </Box>
1088
- )}
1089
- </Static>
1090
-
1091
- {phase === "done" && (
1092
- <Box flexDirection="column" marginTop={1}>
1093
- <Success message={`Notebook complete: ${outputs.length} cells executed`} />
1094
- {sessionId && <Text dimColor>Session: {sessionId}</Text>}
1095
- {outputs.some((o) => o.error || (o.result && !o.result.success)) && (
1096
- <Text color="yellow">Some cells had errors — check output above</Text>
1097
- )}
1098
- </Box>
1099
- )}
1100
- </Box>
1101
- );
1102
- }
1103
-
1104
- function createBlankNotebook(name: string): string {
1105
- const notebook: NotebookFile = {
1106
- cells: [
1107
- {
1108
- cell_type: "markdown",
1109
- source: [`# ${name}\n`],
1110
- metadata: {},
1111
- },
1112
- {
1113
- cell_type: "code",
1114
- source: [],
1115
- metadata: {},
1116
- },
1117
- ],
1118
- metadata: {
1119
- kernelspec: { display_name: "Python 3", language: "python", name: "python3" },
1120
- language_info: { name: "python", version: "3.12.0" },
1121
- },
1122
- nbformat: 4,
1123
- nbformat_minor: 4,
1124
- };
1125
-
1126
- const filename = name.endsWith(".ipynb") ? name : `${name}.ipynb`;
1127
- const filePath = path.resolve(filename);
1128
- if (fs.existsSync(filePath)) {
1129
- throw new Error(`File already exists: ${filePath}`);
1130
- }
1131
- fs.writeFileSync(filePath, JSON.stringify(notebook, null, 1));
1132
- return filePath;
1133
- }
1134
-
1135
2318
  // ─────────────────────────────────────────────────────────────────────────────
1136
2319
  // Helper: Infer format from file extension
1137
2320
  // ─────────────────────────────────────────────────────────────────────────────
@@ -1200,179 +2383,21 @@ function DatasetListCommand() {
1200
2383
  <Text>
1201
2384
  {" "}<Text color="yellow">{ds.dataset_name}:{ds.version_number || "v1"}</Text> <Text dimColor>({ds.dataset_type}, {ds.sample_size} examples)</Text>
1202
2385
  </Text>
1203
- <Text dimColor> {ds.id}</Text>
1204
- </Box>
1205
- ))
1206
- )}
1207
- <Text> </Text>
1208
- <Text bold color="cyan">Local Datasets ({localDatasets.length})</Text>
1209
- {localDatasets.length === 0 ? (
1210
- <Text dimColor> No local datasets in ./datasets/</Text>
1211
- ) : (
1212
- localDatasets.map((ds) => (
1213
- <Text key={ds.id}>
1214
- {" "}<Text color="green">{ds.name}</Text> <Text dimColor>({ds.type}, {ds.sample_size} examples)</Text>
1215
- </Text>
1216
- ))
1217
- )}
1218
- </Box>
1219
- );
1220
- }
1221
-
1222
- // ─────────────────────────────────────────────────────────────────────────────
1223
- // Competition List Command
1224
- // ─────────────────────────────────────────────────────────────────────────────
1225
-
1226
- function CompetitionListCommand() {
1227
- const { exit } = useApp();
1228
- const [state, setState] = useState<"loading" | "done" | "error">("loading");
1229
- const [competitions, setCompetitions] = useState<api.CompetitionInfo[]>([]);
1230
- const [error, setError] = useState("");
1231
-
1232
- useEffect(() => {
1233
- (async () => {
1234
- const result = await api.listCompetitions();
1235
- if (result.ok && result.data) {
1236
- setCompetitions(result.data.competitions || []);
1237
- setState("done");
1238
- } else {
1239
- setError(result.error ?? "Unknown error");
1240
- setState("error");
1241
- }
1242
- setTimeout(() => exit(), 500);
1243
- })();
1244
- }, [exit]);
1245
-
1246
- if (state === "loading") {
1247
- return <Loading message="Loading competitions..." />;
1248
- }
1249
-
1250
- if (state === "error") {
1251
- return <ErrorMessage error={error} />;
1252
- }
1253
-
1254
- if (competitions.length === 0) {
1255
- return <Text dimColor>No active competitions found.</Text>;
1256
- }
1257
-
1258
- return (
1259
- <Box flexDirection="column">
1260
- <Text bold color="cyan">Active Competitions ({competitions.length})</Text>
1261
- <Text> </Text>
1262
- {competitions.map((comp, idx) => (
1263
- <Box key={comp.dataset_id} flexDirection="column" marginBottom={1}>
1264
- <Text bold color="yellow">
1265
- {idx + 1}. {comp.dataset_name}
1266
- </Text>
1267
- <Text>
1268
- {" "}Type: <Text color="magenta">{comp.dataset_type}</Text>
1269
- {" "}Samples: <Text color="blue">{comp.sample_count ?? "N/A"}</Text>
1270
- {" "}Entries: <Text color="green">{comp.total_entries}</Text>
1271
- </Text>
1272
- {comp.description && (
1273
- <Text dimColor>{" "}{comp.description}</Text>
1274
- )}
1275
- {comp.labels && comp.labels.length > 0 && (
1276
- <Text>{" "}Labels: <Text color="cyan">{comp.labels.join(", ")}</Text></Text>
1277
- )}
1278
- {comp.winner && (
1279
- <Text>
1280
- {" "}Winner: <Text color="green" bold>{comp.winner.display_name}</Text>
1281
- {" "}(F1: <Text color="yellow">{comp.winner.f1_score.toFixed(4)}</Text>)
1282
- </Text>
1283
- )}
1284
- <Text dimColor>{" "}ID: {comp.dataset_id}</Text>
1285
- </Box>
1286
- ))}
1287
- </Box>
1288
- );
1289
- }
1290
-
1291
- // ─────────────────────────────────────────────────────────────────────────────
1292
- // Leaderboard Command
1293
- // ─────────────────────────────────────────────────────────────────────────────
1294
-
1295
- function LeaderboardCommand({ datasetId, limit }: { datasetId: string; limit?: number }) {
1296
- const { exit } = useApp();
1297
- const [state, setState] = useState<"loading" | "done" | "error">("loading");
1298
- const [data, setData] = useState<api.LeaderboardEntriesResponse | null>(null);
1299
- const [error, setError] = useState("");
1300
-
1301
- useEffect(() => {
1302
- (async () => {
1303
- const result = await api.getLeaderboardEntries(datasetId, limit);
1304
- if (result.ok && result.data) {
1305
- setData(result.data);
1306
- setState("done");
1307
- } else {
1308
- setError(result.error ?? "Unknown error");
1309
- setState("error");
1310
- }
1311
- setTimeout(() => exit(), 500);
1312
- })();
1313
- }, [datasetId, limit, exit]);
1314
-
1315
- if (state === "loading") {
1316
- return <Loading message="Loading leaderboard..." />;
1317
- }
1318
-
1319
- if (state === "error") {
1320
- return <ErrorMessage error={error} />;
1321
- }
1322
-
1323
- if (!data || data.entries.length === 0) {
1324
- return (
1325
- <Box flexDirection="column">
1326
- <Text bold color="cyan">Leaderboard: {data?.dataset_name ?? datasetId}</Text>
1327
- <Text dimColor> No entries yet. Be the first to submit!</Text>
1328
- </Box>
1329
- );
1330
- }
1331
-
1332
- return (
1333
- <Box flexDirection="column">
1334
- <Text bold color="cyan">Leaderboard: {data.dataset_name} ({data.total_entries} entries)</Text>
1335
- <Text> </Text>
1336
- {/* Header */}
1337
- <Box>
1338
- <Box width={6}><Text bold dimColor>Rank</Text></Box>
1339
- <Box width={22}><Text bold dimColor>Name</Text></Box>
1340
- <Box width={22}><Text bold dimColor>Model</Text></Box>
1341
- <Box width={10}><Text bold dimColor>F1</Text></Box>
1342
- <Box width={10}><Text bold dimColor>Precision</Text></Box>
1343
- <Box width={10}><Text bold dimColor>Recall</Text></Box>
1344
- </Box>
1345
- {/* Entries */}
1346
- {data.entries.map((entry, idx) => {
1347
- const rank = entry.rank ?? idx + 1;
1348
- const isFirst = rank === 1;
1349
- return (
1350
- <Box key={entry.id}>
1351
- <Box width={6}>
1352
- <Text color={isFirst ? "yellow" : undefined} bold={isFirst}>
1353
- {isFirst ? `#${rank}` : `#${rank}`}
1354
- </Text>
1355
- </Box>
1356
- <Box width={22}>
1357
- <Text color={isFirst ? "yellow" : undefined} bold={isFirst}>
1358
- {entry.display_name.substring(0, 20)}
1359
- </Text>
1360
- </Box>
1361
- <Box width={22}>
1362
- <Text color="cyan">{entry.model_name.substring(0, 20)}</Text>
1363
- </Box>
1364
- <Box width={10}>
1365
- <Text color="green" bold>{entry.f1_score.toFixed(4)}</Text>
1366
- </Box>
1367
- <Box width={10}>
1368
- <Text>{entry.precision_score?.toFixed(4) ?? "N/A"}</Text>
1369
- </Box>
1370
- <Box width={10}>
1371
- <Text>{entry.recall_score?.toFixed(4) ?? "N/A"}</Text>
1372
- </Box>
2386
+ <Text dimColor> {ds.id}</Text>
1373
2387
  </Box>
1374
- );
1375
- })}
2388
+ ))
2389
+ )}
2390
+ <Text> </Text>
2391
+ <Text bold color="cyan">Local Datasets ({localDatasets.length})</Text>
2392
+ {localDatasets.length === 0 ? (
2393
+ <Text dimColor> No local datasets in ./datasets/</Text>
2394
+ ) : (
2395
+ localDatasets.map((ds) => (
2396
+ <Text key={ds.id}>
2397
+ {" "}<Text color="green">{ds.name}</Text> <Text dimColor>({ds.type}, {ds.sample_size} examples)</Text>
2398
+ </Text>
2399
+ ))
2400
+ )}
1376
2401
  </Box>
1377
2402
  );
1378
2403
  }
@@ -2004,11 +3029,54 @@ function DeployedModelCard({ model, index }: DeployedModelCardProps) {
2004
3029
  );
2005
3030
  }
2006
3031
 
3032
+ // ─────────────────────────────────────────────────────────────────────────────
3033
+ // Registered Model (Project) Visualization Component
3034
+ // ─────────────────────────────────────────────────────────────────────────────
3035
+
3036
+ interface ProjectModelCardProps {
3037
+ model: api.ProjectResponse;
3038
+ index: number;
3039
+ }
3040
+
3041
+ function ProjectModelCard({ model, index }: ProjectModelCardProps) {
3042
+ const formatDateShort = (dateStr: string | null | undefined) => {
3043
+ if (!dateStr) return "N/A";
3044
+ const date = new Date(dateStr);
3045
+ return date.toLocaleDateString() + " " + date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" });
3046
+ };
3047
+
3048
+ return (
3049
+ <Box flexDirection="column" marginTop={index > 0 ? 0 : 0}>
3050
+ <Box>
3051
+ <Text bold color="green">●</Text>
3052
+ <Text> </Text>
3053
+ <Box width={28}>
3054
+ <Text bold color="cyan">{model.name?.substring(0, 26) || "Unnamed"}</Text>
3055
+ </Box>
3056
+ <Box width={38}>
3057
+ <Text dimColor>{model.id}</Text>
3058
+ </Box>
3059
+ <Box width={24}>
3060
+ <Text color="magenta">{model.selected_model_id || "N/A"}</Text>
3061
+ </Box>
3062
+ <Box width={18}>
3063
+ <Text dimColor>{formatDateShort(model.created_at)}</Text>
3064
+ </Box>
3065
+ </Box>
3066
+ {model.description && (
3067
+ <Box marginLeft={2}>
3068
+ <Text dimColor>{model.description}</Text>
3069
+ </Box>
3070
+ )}
3071
+ </Box>
3072
+ );
3073
+ }
3074
+
2007
3075
  // ─────────────────────────────────────────────────────────────────────────────
2008
3076
  // Model List Command
2009
3077
  // ─────────────────────────────────────────────────────────────────────────────
2010
3078
 
2011
- type ModelListFilter = "all" | "trained" | "deployed";
3079
+ type ModelListFilter = "registered" | "trained" | "deployed" | "artifacts";
2012
3080
 
2013
3081
  interface ModelListCommandProps {
2014
3082
  filter: ModelListFilter;
@@ -2042,11 +3110,42 @@ function ModelListCommand({ filter }: ModelListCommandProps) {
2042
3110
  return <ErrorMessage error={error} />;
2043
3111
  }
2044
3112
 
2045
- const showDeployed = filter === "all" || filter === "deployed";
2046
- const showTrained = filter === "all" || filter === "trained";
3113
+ const showProjects = filter === "registered";
3114
+ const showDeployed = filter === "deployed" || filter === "artifacts";
3115
+ const showTrained = filter === "trained" || filter === "artifacts";
2047
3116
 
2048
3117
  return (
2049
3118
  <Box flexDirection="column">
3119
+ {showProjects && (
3120
+ <>
3121
+ <Text bold color="cyan">Model Entries ({data?.projects.length ?? 0})</Text>
3122
+ {data?.projects.length === 0 ? (
3123
+ <Text dimColor> No model entries</Text>
3124
+ ) : (
3125
+ <Box flexDirection="column" marginTop={1}>
3126
+ <Box marginBottom={0}>
3127
+ <Text bold dimColor> </Text>
3128
+ <Box width={28}>
3129
+ <Text bold dimColor>Name</Text>
3130
+ </Box>
3131
+ <Box width={38}>
3132
+ <Text bold dimColor>Model ID</Text>
3133
+ </Box>
3134
+ <Box width={24}>
3135
+ <Text bold dimColor>Base Model</Text>
3136
+ </Box>
3137
+ <Box width={18}>
3138
+ <Text bold dimColor>Created</Text>
3139
+ </Box>
3140
+ </Box>
3141
+ {data?.projects.map((model, index) => (
3142
+ <ProjectModelCard key={model.id || index} model={model} index={index} />
3143
+ ))}
3144
+ </Box>
3145
+ )}
3146
+ </>
3147
+ )}
3148
+ {showProjects && (showDeployed || showTrained) && <Text> </Text>}
2050
3149
  {showDeployed && (
2051
3150
  <>
2052
3151
  <Text bold color="cyan">Deployed Models ({data?.deployed.length ?? 0})</Text>
@@ -2135,6 +3234,9 @@ interface ModelGenerateCommandProps {
2135
3234
  systemMsg?: string;
2136
3235
  maxTokens: number;
2137
3236
  temperature: number;
3237
+ topP?: number;
3238
+ includeReasoningTrace?: boolean;
3239
+ projectId?: string;
2138
3240
  }
2139
3241
 
2140
3242
  function normalizeBaseModels(
@@ -2157,18 +3259,74 @@ function formatDecoderSuggestions(decoderIds: string[]): string {
2157
3259
  return `\nTry one of these decoder models: ${decoderIds.slice(0, 4).join(", ")}`;
2158
3260
  }
2159
3261
 
3262
+ function normalizeModelId(modelId: string): string {
3263
+ return modelId.trim();
3264
+ }
3265
+
3266
+ function getDecoderTaskType(model: api.BaseModelInfo | null | undefined): string {
3267
+ if (!model) return "";
3268
+ return `${model.task_type ?? model.type ?? ""}`.trim().toLowerCase();
3269
+ }
3270
+
3271
+ function modelSupportsDecoderInference(model: api.BaseModelInfo | null | undefined): boolean {
3272
+ if (!model) return false;
3273
+ if (model.supports_inference !== undefined) {
3274
+ return model.supports_inference;
3275
+ }
3276
+ if (model.supports_on_demand_inference !== undefined) {
3277
+ return model.supports_on_demand_inference;
3278
+ }
3279
+ return true;
3280
+ }
3281
+
3282
+ function isDecoderModel(model: api.BaseModelInfo | null | undefined): boolean {
3283
+ const taskType = getDecoderTaskType(model);
3284
+ return (
3285
+ taskType === "decoder" ||
3286
+ taskType === "llm" ||
3287
+ taskType === "generative"
3288
+ );
3289
+ }
3290
+
3291
+ function shouldFallbackToTextCompletions(errorMessage: string): boolean {
3292
+ const normalized = errorMessage.toLowerCase();
3293
+ return (
3294
+ normalized.includes("/v1/completions") ||
3295
+ normalized.includes("without a chat template") ||
3296
+ normalized.includes("text completion")
3297
+ );
3298
+ }
3299
+
3300
+ function buildTextCompletionPrompt(prompt: string, systemMsg?: string): string {
3301
+ if (!systemMsg) return prompt;
3302
+ return `System instruction:\n${systemMsg}\n\nUser prompt:\n${prompt}\n\nAssistant response:`;
3303
+ }
3304
+
3305
+ function parseCommaSeparated(value?: string): string[] {
3306
+ if (!value) return [];
3307
+ return value
3308
+ .split(",")
3309
+ .map((item) => item.trim())
3310
+ .filter(Boolean);
3311
+ }
3312
+
2160
3313
  function ModelGenerateCommand({
2161
3314
  modelId,
2162
3315
  prompt,
2163
3316
  systemMsg,
2164
3317
  maxTokens,
2165
3318
  temperature,
3319
+ topP,
3320
+ includeReasoningTrace,
3321
+ projectId,
2166
3322
  }: ModelGenerateCommandProps) {
2167
3323
  const { exit } = useApp();
2168
3324
  const [state, setState] = useState<"checking" | "running" | "done" | "error">(
2169
3325
  "checking"
2170
3326
  );
2171
- const [data, setData] = useState<api.InferenceResponse | null>(null);
3327
+ const [data, setData] = useState<api.InferenceResponse | api.TextCompletionResponse | null>(
3328
+ null
3329
+ );
2172
3330
  const [error, setError] = useState("");
2173
3331
 
2174
3332
  useEffect(() => {
@@ -2181,8 +3339,9 @@ function ModelGenerateCommand({
2181
3339
  ? normalizeBaseModels(baseModelsResult.data)
2182
3340
  : [];
2183
3341
  const decoderModelIds = baseModels
2184
- .filter((model) => model.type === "decoder")
3342
+ .filter((model) => isDecoderModel(model) && modelSupportsDecoderInference(model))
2185
3343
  .map((model) => model.id);
3344
+ const normalizedModelId = normalizeModelId(modelId);
2186
3345
 
2187
3346
  const fail = (message: string) => {
2188
3347
  if (!active) return;
@@ -2191,38 +3350,35 @@ function ModelGenerateCommand({
2191
3350
  setTimeout(() => exit(), 500);
2192
3351
  };
2193
3352
 
2194
- if (modelId === "base") {
2195
- fail(
2196
- "Model 'base' is encoder-only and cannot run decoder generation. Use 'model predict' for encoder tasks." +
2197
- formatDecoderSuggestions(decoderModelIds)
2198
- );
2199
- return;
2200
- }
2201
-
2202
- const matchedBaseModel = baseModels.find((model) => model.id === modelId);
2203
- if (matchedBaseModel && matchedBaseModel.type !== "decoder") {
3353
+ const matchedBaseModel = baseModels.find((model) => model.id === normalizedModelId);
3354
+ if (matchedBaseModel && (!isDecoderModel(matchedBaseModel) || !modelSupportsDecoderInference(matchedBaseModel))) {
2204
3355
  const modelName = matchedBaseModel.label || matchedBaseModel.name || matchedBaseModel.id;
3356
+ const taskType = getDecoderTaskType(matchedBaseModel) || "unknown";
3357
+ const supportsInference = modelSupportsDecoderInference(matchedBaseModel);
2205
3358
  fail(
2206
- `Model '${modelName}' is type '${matchedBaseModel.type}' and is not decoder-compatible.` +
3359
+ `Model '${modelName}' is type '${taskType}' and is not decoder-compatible for inference` +
3360
+ `${supportsInference ? "" : " (supports_inference = false)"} ` +
3361
+ `(supports_inference: ${String(matchedBaseModel.supports_inference ?? "unknown")}, task_type: ${taskType}).` +
2207
3362
  formatDecoderSuggestions(decoderModelIds)
2208
3363
  );
2209
3364
  return;
2210
3365
  }
2211
3366
 
2212
- if (modelId.startsWith("base:") && baseModels.length > 0 && !matchedBaseModel) {
3367
+ if (baseModels.length > 0 && !isUuid(normalizedModelId) && !matchedBaseModel) {
2213
3368
  fail(
2214
- `Unknown base model '${modelId}'.` + formatDecoderSuggestions(decoderModelIds)
3369
+ `Model '${modelId}' is not a recognized decoder base model and is not a valid job UUID.` +
3370
+ ` Use 'model list' to copy a full training job UUID or provide a known decoder catalog ID.`
2215
3371
  );
2216
3372
  return;
2217
3373
  }
2218
3374
 
2219
- if (isUuid(modelId)) {
2220
- const jobResult = await api.getJob(modelId);
3375
+ if (isUuid(normalizedModelId)) {
3376
+ const jobResult = await api.getJob(normalizedModelId);
2221
3377
  if (jobResult.ok && jobResult.data) {
2222
3378
  const taskType = jobResult.data.task_type?.toLowerCase();
2223
- if (taskType && taskType !== "decoder") {
3379
+ if (taskType && !isDecoderModel({ task_type: taskType } as api.BaseModelInfo)) {
2224
3380
  fail(
2225
- `Training job '${modelId}' has task_type '${taskType}', so it is not decoder-compatible.` +
3381
+ `Training job '${normalizedModelId}' has task_type '${taskType}', so it is not decoder-compatible.` +
2226
3382
  formatDecoderSuggestions(decoderModelIds)
2227
3383
  );
2228
3384
  return;
@@ -2233,18 +3389,21 @@ function ModelGenerateCommand({
2233
3389
  if (!active) return;
2234
3390
  setState("running");
2235
3391
 
2236
- const messages: Array<{ role: "system" | "user" | "assistant"; content: string }> = [];
3392
+ const messages: api.InferenceMessage[] = [];
2237
3393
  if (systemMsg) {
2238
3394
  messages.push({ role: "system", content: systemMsg });
2239
3395
  }
2240
3396
  messages.push({ role: "user", content: prompt });
2241
3397
 
2242
3398
  const result = await api.runInference({
2243
- model_id: modelId,
3399
+ model_id: normalizedModelId,
2244
3400
  task: "generate",
2245
3401
  messages,
2246
3402
  max_tokens: maxTokens,
2247
3403
  temperature,
3404
+ ...(topP !== undefined ? { top_p: topP } : {}),
3405
+ ...(includeReasoningTrace ? { include_reasoning_trace: true } : {}),
3406
+ ...(projectId ? { project_id: projectId } : {}),
2248
3407
  });
2249
3408
 
2250
3409
  if (!active) return;
@@ -2253,9 +3412,32 @@ function ModelGenerateCommand({
2253
3412
  setData(result.data);
2254
3413
  setState("done");
2255
3414
  } else {
2256
- let message = result.error ?? "Unknown error";
2257
- const normalizedError = formatApiError(message);
2258
- if (normalizedError.toLowerCase().includes("inference failed")) {
3415
+ const primaryError = result.error ?? "Unknown error";
3416
+ const normalizedError = formatApiError(primaryError);
3417
+ let message = normalizedError;
3418
+
3419
+ if (shouldFallbackToTextCompletions(normalizedError)) {
3420
+ const completionResult = await api.runTextCompletion({
3421
+ model: normalizedModelId,
3422
+ prompt: buildTextCompletionPrompt(prompt, systemMsg),
3423
+ max_tokens: maxTokens,
3424
+ temperature,
3425
+ ...(topP !== undefined ? { extra_body: { top_p: topP } } : {}),
3426
+ });
3427
+
3428
+ if (!active) return;
3429
+
3430
+ if (completionResult.ok && completionResult.data) {
3431
+ setData(completionResult.data);
3432
+ setState("done");
3433
+ setTimeout(() => exit(), 500);
3434
+ return;
3435
+ }
3436
+
3437
+ message =
3438
+ "Decoder chat inference failed, and raw text fallback via /v1/completions also failed.\n" +
3439
+ formatApiError(completionResult.error ?? "Unknown error");
3440
+ } else if (normalizedError.toLowerCase().includes("inference failed")) {
2259
3441
  message =
2260
3442
  `${normalizedError}\nDecoder inference request reached the backend but failed to execute.` +
2261
3443
  formatDecoderSuggestions(decoderModelIds);
@@ -2270,7 +3452,17 @@ function ModelGenerateCommand({
2270
3452
  return () => {
2271
3453
  active = false;
2272
3454
  };
2273
- }, [modelId, prompt, systemMsg, maxTokens, temperature, exit]);
3455
+ }, [
3456
+ modelId,
3457
+ prompt,
3458
+ systemMsg,
3459
+ maxTokens,
3460
+ temperature,
3461
+ topP,
3462
+ includeReasoningTrace,
3463
+ projectId,
3464
+ exit,
3465
+ ]);
2274
3466
 
2275
3467
  if (state === "checking") {
2276
3468
  return <Loading message="Checking decoder model compatibility..." />;
@@ -2301,16 +3493,14 @@ type HelpContext =
2301
3493
  | "dataset"
2302
3494
  | "dataset-analyze"
2303
3495
  | "dataset-edit"
2304
- | "project"
2305
3496
  | "job"
2306
3497
  | "model"
2307
- | "chat"
2308
3498
  | "eval"
2309
3499
  | "benchmark"
2310
- | "competition"
2311
- | "notebook"
2312
3500
  | "inference"
2313
- | "adaptive-finetuning";
3501
+ | "agent"
3502
+ | "model-endpoints"
3503
+ | "model-artifacts";
2314
3504
 
2315
3505
  interface HelpProps {
2316
3506
  context?: HelpContext;
@@ -2356,6 +3546,7 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2356
3546
  <Text> dataset get {"<name[:version]>"} Get dataset details</Text>
2357
3547
  <Text> dataset delete {"<name[:version]>"} Delete a dataset</Text>
2358
3548
  <Text> dataset analyze {"<name[:version]>"} Analyze a dataset</Text>
3549
+ <Text> dataset analyze-llm {"<name[:version]>"} LLM-only dataset analysis</Text>
2359
3550
  <Text> </Text>
2360
3551
  <Text bold> Generate:</Text>
2361
3552
  <Text> dataset generate ner</Text>
@@ -2386,12 +3577,53 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2386
3577
  <Text> --num {"<n>"} Number of examples (default: 10)</Text>
2387
3578
  <Text> --save true Save to database</Text>
2388
3579
  <Text> --name {"<name>"} Dataset name (required if --save)</Text>
3580
+ <Text> Advanced generation flags:</Text>
3581
+ <Text> --quality {"<light|medium|heavy>"} Generation quality profile</Text>
3582
+ <Text> --generation-profile {"<auto|fast|balanced|quality>"} Runtime profile</Text>
3583
+ <Text> --reasoning-trace {"true|false"} Include reasoning traces (decoder only)</Text>
3584
+ <Text> --reasoning-effort {"<low|medium|high>"} Reasoning effort</Text>
3585
+ <Text> --multiplicator {"<json>"} Multiplicator settings</Text>
3586
+ <Text> --use-meta-felix {"true|false"} Use MetaFelix metadata</Text>
3587
+ <Text> --min-criteria {"<n>"} Minimum diversity criteria</Text>
3588
+ <Text> --target-choices {"<n>"} Diversity target choices</Text>
3589
+ <Text> --project-id {"<id>"} Project ID</Text>
3590
+ <Text> --type {"training|evaluation|split"} Dataset type</Text>
3591
+ <Text> --visibility {"private|public"} Dataset visibility</Text>
3592
+ <Text> --split-ratio {"<train:eval>|{json>}"} Split dataset ratio</Text>
3593
+ <Text> --negative-ratio {"<n>"} Percent negative samples</Text>
3594
+ <Text> --classified-examples {"<json>"} Classified examples with feedback</Text>
2389
3595
  <Text> </Text>
2390
3596
  <Text bold> Infer Labels:</Text>
2391
3597
  <Text> dataset infer ner Infer NER labels from description</Text>
2392
3598
  <Text> dataset infer classification Infer classification labels</Text>
2393
3599
  <Text> dataset infer fields Infer input/output fields</Text>
2394
3600
  <Text> --domain {"<desc>"} Domain description (required)</Text>
3601
+ <Text> dataset infer infer-advanced Infer constraints and multiplicator from a prompt</Text>
3602
+ <Text> --prompt {"<prompt>"} Prompt for inference</Text>
3603
+ <Text> --labels {"<l1,l2,...>"} Optional labels to guide suggestions</Text>
3604
+ <Text> --data-type {"<type>"} entity_extraction|classification|json_extraction</Text>
3605
+ <Text> dataset infer improve-prompt Improve a generation prompt</Text>
3606
+ <Text> --prompt {"<prompt>"} Prompt to improve</Text>
3607
+ <Text> --data-type {"<type>"} Optional prompt domain hint</Text>
3608
+ <Text> dataset label-existing ner Label existing NER texts</Text>
3609
+ <Text> --labels {"<l1,l2,...>"} Labels for entities</Text>
3610
+ <Text> --inputs {"[{\"text\":\"...\"},...]"} Input texts JSON array (required)</Text>
3611
+ <Text> --name {"<name>"} Output dataset name (optional if --save false)</Text>
3612
+ <Text> --project-id {"<project_id>"} Assign output dataset to project</Text>
3613
+ <Text> --save {"<true|false>"} Save dataset (default: false)</Text>
3614
+ <Text> dataset label-existing classification Label existing classification texts</Text>
3615
+ <Text> --labels {"<l1,l2,...>"} Labels for classes</Text>
3616
+ <Text> --inputs {"[{\"text\":\"...\"},...]"} Input texts JSON array (required)</Text>
3617
+ <Text> --name {"<name>"} Output dataset name (optional if --save false)</Text>
3618
+ <Text> --project-id {"<project_id>"} Assign output dataset to project</Text>
3619
+ <Text> --save {"<true|false>"} Save dataset (default: false)</Text>
3620
+ <Text> dataset label-existing fields Label existing structured records</Text>
3621
+ <Text> --input-fields {"[{\"name\":\"...\"},...]"} Input schema fields (required)</Text>
3622
+ <Text> --output-fields {"[{\"name\":\"...\"},...]"} Output schema fields (required)</Text>
3623
+ <Text> --inputs {"[{\"f1\":\"v\"},...]"} Input records JSON array (required)</Text>
3624
+ <Text> --name {"<name>"} Output dataset name (optional if --save false)</Text>
3625
+ <Text> --project-id {"<project_id>"} Assign output dataset to project</Text>
3626
+ <Text> --save {"<true|false>"} Save dataset (default: false)</Text>
2395
3627
  <Text> </Text>
2396
3628
  <Text bold> Upload/Download:</Text>
2397
3629
  <Text> dataset upload {"<file>"} Upload local file to Pioneer</Text>
@@ -2413,6 +3645,8 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2413
3645
  <Text bold> Data Editing:</Text>
2414
3646
  <Text> dataset edit --help Show data editing commands</Text>
2415
3647
  <Text> dataset edit scan-pii {"<name[:version]>"} Scan for PII</Text>
3648
+ <Text> dataset edit dismiss-outlier {"<name[:version]>"} Dismiss an outlier fingerprint</Text>
3649
+ <Text> --fingerprint {"<hash>"} Outlier fingerprint from dataset analysis</Text>
2416
3650
  <Text> dataset edit subsample {"<name[:version]>"} Create a subsample</Text>
2417
3651
  </Box>
2418
3652
  );
@@ -2431,6 +3665,8 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2431
3665
  <Text> dataset edit scan-phd {"<name[:version]>"} Scan for prompt injection</Text>
2432
3666
  <Text> --columns {"<col1,col2>"} Columns to scan (optional, scans all if omitted)</Text>
2433
3667
  <Text> --threshold {"<n>"} Detection threshold (default: 0.5)</Text>
3668
+ <Text> dataset edit dismiss-outlier {"<name[:version]>"} Dismiss an outlier fingerprint</Text>
3669
+ <Text> --fingerprint {"<hash>"} Outlier fingerprint from dataset analysis</Text>
2434
3670
  <Text> dataset edit subsample {"<name[:version]>"} Create a subsample</Text>
2435
3671
  <Text> --n {"<count>"} Target sample count (required)</Text>
2436
3672
  <Text> --method {"<type>"} Method: random, balanced, stratified</Text>
@@ -2458,32 +3694,13 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2458
3694
  <Text> Options: ner, classification, generative</Text>
2459
3695
  <Text> --analyses {"<a1,a2,...>"} Analyses to run (required, comma-separated)</Text>
2460
3696
  <Text> Options: distribution, duplicates, outliers, splits, diversity</Text>
3697
+ <Text> </Text>
3698
+ <Text> LLM-only analysis:</Text>
3699
+ <Text> dataset analyze-llm {"<id>"} --task-type {"<type>"} --description {"<text>"} --labels {"<l1,l2>"} </Text>
2461
3700
  <Text> </Text>
2462
3701
  <Text bold> Example:</Text>
2463
3702
  <Text> dataset analyze abc123 --task-type ner --analyses distribution,duplicates</Text>
2464
- </Box>
2465
- );
2466
- }
2467
-
2468
- // Project help
2469
- if (context === "project") {
2470
- return (
2471
- <Box flexDirection="column">
2472
- <Text bold>Project Commands:</Text>
2473
- <Text> project list List all projects</Text>
2474
- <Text> project get {"<project-id>"} Get project details</Text>
2475
- <Text> project create Create a project</Text>
2476
- <Text> --name {"<name>"} Project name (required)</Text>
2477
- <Text> --icon {"<icon>"} Icon name (optional, default: folder)</Text>
2478
- <Text> --repo {"<repo-url>"} Repository URL/reference (optional)</Text>
2479
- <Text> --description {"<text>"} Description (optional)</Text>
2480
- <Text> --model-id {"<id>"} Selected model ID (optional)</Text>
2481
- <Text> --example {"<json>"} JSON example payload (optional)</Text>
2482
- <Text> project update {"<project-id>"} Update project fields</Text>
2483
- <Text> --name {"<name>"} --icon {"<icon>"} --repo {"<repo-url>"} --description {"<text>"} --model-id {"<id>"}</Text>
2484
- <Text> project delete {"<project-id>"} Delete a project</Text>
2485
- <Text> project dataset-count {"<project-id>"} Show attached dataset count</Text>
2486
- <Text> project quality-metrics {"<project-id>"} Show LLMAJ pass/fail metrics</Text>
3703
+ <Text> dataset analyze-llm abc123 --task-type ner --description "NER quality analysis"</Text>
2487
3704
  </Box>
2488
3705
  );
2489
3706
  }
@@ -2509,156 +3726,143 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2509
3726
  );
2510
3727
  }
2511
3728
 
2512
- // Model help
2513
- if (context === "model") {
3729
+ // Model endpoints help
3730
+ if (context === "model-endpoints") {
2514
3731
  return (
2515
3732
  <Box flexDirection="column">
2516
- <Text bold>Model Commands:</Text>
2517
- <Text> model list List all models (trained + deployed)</Text>
2518
- <Text> model list trained List trained models only</Text>
2519
- <Text> model list deployed List deployed models only</Text>
2520
- <Text> model delete {"<job-id>"} Undeploy a model by training job ID</Text>
2521
- <Text> model download {"<job-id>"} Get model download URL by job ID</Text>
2522
- <Text> </Text>
2523
- <Text bold> Inference:</Text>
2524
- <Text> model predict {"<job-id>"} --text {"<text>"} --labels {"<labels>"} Run NER inference</Text>
2525
- <Text> --task {"<task>"} Task: extract_entities, classify_text, extract_json (default: extract_entities)</Text>
2526
- <Text> --threshold {"<n>"} Confidence threshold 0-1 (default: 0.5)</Text>
2527
- <Text> model generate {"<model-id>"} --prompt {"<text>"} Run decoder generation</Text>
2528
- <Text> --system {"<text>"} System message (optional)</Text>
2529
- <Text> --max-tokens {"<n>"} Max tokens to generate (default: 256)</Text>
2530
- <Text> --temperature {"<n>"} Sampling temperature (default: 0.7)</Text>
2531
- <Text dimColor> model-id can be a decoder training job UUID or base:{"<provider/model>"}</Text>
2532
- <Text> </Text>
2533
- <Text bold> Upload:</Text>
2534
- <Text> model upload {"<job-id>"} --to hf Upload trained model to Hugging Face</Text>
2535
- <Text> --repo {"<repo>"} HF repo (required, e.g., username/model)</Text>
2536
- <Text> --private Make repo private</Text>
3733
+ <Text bold>Model Endpoint Commands:</Text>
3734
+ <Text> Aliases: model_endpoints ...</Text>
3735
+ <Text> model endpoints list</Text>
3736
+ <Text> model endpoints create</Text>
3737
+ <Text> --name {"<name>"} Optional (defaults to model id)</Text>
3738
+ <Text> --icon {"<icon>"} Optional</Text>
3739
+ <Text> --repo {"<repo-url>"} Optional</Text>
3740
+ <Text> --description {"<text>"} Optional</Text>
3741
+ <Text> --model {"<base-model-id>"} Optional (starts interactive picker when omitted)</Text>
3742
+ <Text> --example {"<json>"} Optional</Text>
3743
+ <Text> model endpoints get {"<model-id>"} Get endpoint/model entry details</Text>
3744
+ <Text> model endpoints update {"<model-id>"} Update endpoint metadata</Text>
3745
+ <Text> --name {"<name>"} --icon {"<icon>"} --repo {"<repo-url>"} --description {"<text>"} --model-id {"<id>"}</Text>
3746
+ <Text> model endpoints delete {"<model-id>"} Delete an endpoint/model entry</Text>
3747
+ <Text> model endpoints dataset-count {"<model-id>"} Get attached dataset count</Text>
3748
+ <Text> model endpoints quality-metrics {"<model-id>"} Show LLMAJ pass/fail metrics</Text>
3749
+ <Text> model endpoints deploy {"<model-id>"} --job {"<training-job-id>"} [--reason {"<text>"}] Deploy a trained job to the endpoint</Text>
3750
+ <Text> model endpoints rollback {"<model-id>"} {"<deployment-id>"} Rollback endpoint to previous deployment</Text>
3751
+ </Box>
3752
+ );
3753
+ }
3754
+
3755
+ // Model artifacts help
3756
+ if (context === "model-artifacts") {
3757
+ return (
3758
+ <Box flexDirection="column">
3759
+ <Text bold>Model Artifact Commands:</Text>
3760
+ <Text> Aliases: model_artifacts ...</Text>
3761
+ <Text> model artifacts list Show both trained and deployed artifacts</Text>
3762
+ <Text> model artifacts trained List trained artifacts</Text>
3763
+ <Text> model artifacts deployed List deployed artifacts</Text>
3764
+ <Text> model artifacts download {"<job-id>"} Download model artifact</Text>
3765
+ <Text> model artifacts delete {"<job-id>"} Delete deployed artifact record</Text>
3766
+ <Text> model artifacts upload {"<job-id>"} --to hf Upload trained model artifact to Hugging Face</Text>
3767
+ <Text> --repo {"<repo>"} HF repo (required, e.g., username/model)</Text>
3768
+ <Text> --private Make repo private</Text>
2537
3769
  <Text dimColor> Note: Set HF token with 'pioneer auth hf' first</Text>
2538
3770
  <Text dimColor> Note: Use full job ID (not partial ID shown in list)</Text>
2539
3771
  </Box>
2540
3772
  );
2541
3773
  }
2542
3774
 
2543
- // Inference help
2544
- if (context === "inference") {
3775
+ // Model help
3776
+ if (context === "model") {
2545
3777
  return (
2546
3778
  <Box flexDirection="column">
2547
- <Text bold>Inference Commands:</Text>
2548
- <Text> model predict {"<model-id>"} --text {"<text>"} --labels {"<labels>"}</Text>
2549
- <Text> Run encoder inference (NER, classification, JSON extraction)</Text>
2550
- <Text> Use "base" as model-id for the base GLiNER2 model</Text>
3779
+ <Text bold>Model Commands:</Text>
3780
+ <Text> model endpoints ... (alias: model_endpoints) Manage model catalog entries (from /projects)</Text>
3781
+ <Text> model artifacts ... (alias: model_artifacts) Manage trained/deployed artifacts (from /felix)</Text>
2551
3782
  <Text> </Text>
2552
- <Text> model generate {"<model-id>"} --prompt {"<text>"}</Text>
2553
- <Text> Run decoder text generation on a decoder model</Text>
2554
- <Text> Example base IDs: base:Qwen/Qwen3-8B, base:meta-llama/Llama-3.1-8B-Instruct</Text>
3783
+ <Text> model endpoints list</Text>
3784
+ <Text> model endpoints create</Text>
3785
+ <Text> model endpoints get {"<model-id>"}</Text>
3786
+ <Text> model endpoints deploy {"<model-id>"} --job {"<training-job-id>"} [--reason {"<text>"}]</Text>
3787
+ <Text> model endpoints rollback {"<model-id>"} {"<deployment-id>"}</Text>
3788
+ <Text> model artifacts list</Text>
3789
+ <Text> model artifacts trained</Text>
3790
+ <Text> model artifacts deployed</Text>
3791
+ <Text> model artifacts download {"<job-id>"}</Text>
2555
3792
  <Text> </Text>
2556
- <Text bold> Options:</Text>
2557
- <Text> --task {"<task>"} extract_entities | classify_text | extract_json | schema</Text>
2558
- <Text> --labels {"<labels>"} Comma-separated labels for extraction/classification</Text>
2559
- <Text> --threshold {"<n>"} Confidence threshold (0-1, default: 0.5)</Text>
2560
- <Text> --system {"<text>"} System message for decoder generation</Text>
2561
- <Text> --max-tokens {"<n>"} Max tokens for generation (default: 256)</Text>
2562
- <Text> --temperature {"<n>"} Sampling temperature (default: 0.7)</Text>
2563
3793
  </Box>
2564
3794
  );
2565
3795
  }
2566
3796
 
2567
- // Chat help
2568
- if (context === "chat") {
3797
+ // Inference help
3798
+ if (context === "inference") {
2569
3799
  return (
2570
3800
  <Box flexDirection="column">
2571
- <Text bold>Chat Commands:</Text>
2572
- <Text> chat Start interactive chat agent</Text>
2573
- <Text> --message {"<msg>"} Initial message to process</Text>
3801
+ <Text bold>Inference Commands:</Text>
3802
+ <Text> inference base-models</Text>
3803
+ <Text> List base models from /base-models</Text>
3804
+ <Text> </Text>
3805
+ <Text> inference encoder {"<model-id>"} --text {"<text>"} --labels {"<labels>"}</Text>
3806
+ <Text> Run encoder inference via /inference</Text>
3807
+ <Text> --task {"<task>"} extract_entities | classify_text | extract_json | schema</Text>
3808
+ <Text> --labels {"<labels>"} Comma-separated labels (or use --schema JSON)</Text>
3809
+ <Text> --schema {"<json>"} JSON schema object for advanced tasks</Text>
3810
+ <Text> --threshold {"<n>"} Confidence threshold (0-1, default: 0.5)</Text>
3811
+ <Text> --project-id {"<id>"} Associate inference with a project</Text>
3812
+ <Text> </Text>
3813
+ <Text> inference decoder {"<model-id>"} --prompt {"<text>"}</Text>
3814
+ <Text> Run decoder generation via /inference</Text>
3815
+ <Text> --system {"<text>"} System message (optional)</Text>
3816
+ <Text> --max-tokens {"<n>"} Max tokens (default: 256)</Text>
3817
+ <Text> --temperature {"<n>"} Sampling temperature (default: 0.7)</Text>
3818
+ <Text> --top-p {"<n>"} Top-p sampling (0-1)</Text>
3819
+ <Text> --reasoning-trace Include reasoning trace when supported</Text>
3820
+ <Text> --project-id {"<id>"} Associate inference with a project</Text>
3821
+ <Text> Example model IDs: Qwen/Qwen3-8B, meta-llama/Llama-3.1-8B-Instruct</Text>
2574
3822
  <Text> </Text>
2575
- <Text dimColor> Note: Model selection available in chat via /model command</Text>
3823
+ <Text> inference completions {"<model-id>"} --prompt {"<text>"}</Text>
3824
+ <Text> Run raw text completion via /v1/completions</Text>
3825
+ <Text> --max-tokens {"<n>"} Max tokens (default: 256)</Text>
3826
+ <Text> --temperature {"<n>"} Sampling temperature (default: 0.7)</Text>
3827
+ <Text> --top-p {"<n>"} Top-p (sent via extra_body)</Text>
3828
+ <Text> --stop {"<a,b,c>"} Stop sequences (comma-separated)</Text>
3829
+ <Text> --echo true Echo prompt in output</Text>
3830
+ <Text> --provider {"<name>"} Provider override (optional)</Text>
2576
3831
  </Box>
2577
3832
  );
2578
3833
  }
2579
3834
 
2580
3835
  // Eval help
2581
3836
  if (context === "eval") {
2582
- return (
2583
- <Box flexDirection="column">
2584
- <Text bold>Evaluation Commands:</Text>
2585
- <Text dimColor> Dataset format: name[:version] (version defaults to "latest")</Text>
2586
- <Text> </Text>
2587
- <Text> eval list {"<name[:version]>"} List evaluations for a dataset</Text>
2588
- <Text> eval get {"<id>"} Get evaluation details</Text>
2589
- <Text> eval create Create a new evaluation</Text>
2590
- <Text> --model-id {"<id>"} Model to evaluate (required)</Text>
2591
- <Text> --dataset {"<name[:version]>"} Dataset to evaluate on (required)</Text>
2592
- <Text> --task-type {"<type>"} Task type: ner, classification</Text>
2593
- <Text> --text-column {"<col>"} Text column name</Text>
2594
- <Text> --label-column {"<col>"} Label column name</Text>
2595
- </Box>
2596
- );
3837
+ return <ErrorMessage error="The 'eval' command group is temporarily hidden for this version." />;
2597
3838
  }
2598
3839
 
2599
3840
  // Benchmark help
2600
3841
  if (context === "benchmark") {
2601
- return (
2602
- <Box flexDirection="column">
2603
- <Text bold>Benchmark Commands:</Text>
2604
- <Text> benchmark list List available benchmarks</Text>
2605
- <Text> benchmark run Start a benchmark evaluation</Text>
2606
- <Text> --model-id {"<id>"} Model to evaluate (required)</Text>
2607
- <Text> --task {"<type>"} Task: ner, text_classification (required)</Text>
2608
- <Text> --benchmark {"<name>"} Benchmark name (required)</Text>
2609
- <Text> --max-samples {"<n>"} Max samples (default: 100)</Text>
2610
- <Text> --split {"<name>"} Dataset split (default: test)</Text>
2611
- <Text> benchmark get {"<id>"} Get evaluation status/results</Text>
2612
- <Text> benchmark cancel {"<id>"} Cancel running evaluation</Text>
2613
- </Box>
2614
- );
3842
+ return <ErrorMessage error="The 'benchmark' command group is temporarily hidden for this version." />;
2615
3843
  }
2616
3844
 
2617
- // Adaptive fine-tuning help
2618
- if (context === "adaptive-finetuning") {
3845
+ // Agent help
3846
+ if (context === "agent") {
2619
3847
  return (
2620
3848
  <Box flexDirection="column">
2621
- <Text bold>Adaptive Fine-tuning Commands:</Text>
2622
- <Text> adaptive-finetuning chat Send a request to the adaptive FT agent</Text>
2623
- <Text> --message {"<text>"} Instruction (required)</Text>
3849
+ <Text bold>Agent Commands:</Text>
3850
+ <Text> agent Start interactive agent chat</Text>
3851
+ <Text> --mode {"<research>"} --mode research uses Pro workflow</Text>
3852
+ <Text> </Text>
3853
+ <Text> Omit --mode to use the default standard interactive mode.</Text>
3854
+ <Text> agent sessions List and resume previous sessions</Text>
3855
+ <Text> agent resume {"[conversation-id]"} List sessions, then resume a selected conversation</Text>
2624
3856
  <Text> --conversation-id {"<id>"} Continue an existing conversation</Text>
2625
- <Text> --filters {"<json>"} Optional query filters JSON</Text>
3857
+ <Text> --filters {"<json>"} Reserved for future query filters</Text>
2626
3858
  <Text> --history {"<json>"} Optional message history JSON</Text>
2627
3859
  <Text> </Text>
2628
- <Text dimColor> Alias: aft</Text>
2629
- <Text dimColor> Example: pioneer adaptive-finetuning chat --message "Analyze failures from last 24h and propose retraining plan"</Text>
2630
- </Box>
2631
- );
2632
- }
2633
-
2634
- // Competition help
2635
- if (context === "competition") {
2636
- return (
2637
- <Box flexDirection="column">
2638
- <Text bold>Competition Commands:</Text>
2639
- <Text> competition list List active competitions</Text>
2640
- <Text> competition show {"<dataset-id>"} View sample data for a competition</Text>
2641
- <Text> competition leaderboard {"<dataset-id>"} View leaderboard rankings</Text>
2642
- <Text> --limit {"<n>"} Number of entries (default: 10)</Text>
2643
- <Text> competition submit {"<dataset-id>"} Submit evaluation to leaderboard</Text>
2644
- <Text> --eval-id {"<id>"} Evaluation ID (required)</Text>
2645
- <Text> --name {"<name>"} Display name (required)</Text>
2646
- </Box>
2647
- );
2648
- }
2649
-
2650
- // Notebook help
2651
- if (context === "notebook") {
2652
- return (
2653
- <Box flexDirection="column">
2654
- <Text bold>Notebook Commands:</Text>
2655
- <Text> </Text>
2656
- <Text> notebook run {"<file>"} Run all cells in a .ipynb notebook</Text>
2657
- <Text> --gpu {"<type>"} GPU type: cpu, t4, a10g, a100, h100 (default: cpu)</Text>
2658
- <Text> --no-felix Don't inject Felix helper functions</Text>
2659
- <Text> notebook create {"<name>"} Create a blank notebook</Text>
2660
- <Text> notebook sessions List active notebook sessions</Text>
2661
- <Text> notebook stop {"<session-id>"} Terminate a notebook session</Text>
3860
+ <Text dimColor>Example: pioneer agent</Text>
3861
+ <Text dimColor>Example: pioneer agent --mode research</Text>
3862
+ <Text dimColor>Then type: Analyze failures and propose retraining plan</Text>
3863
+ <Text dimColor>Then type: Draft a short status summary</Text>
3864
+ <Text dimColor>Example: pioneer agent resume</Text>
3865
+ <Text dimColor>Example: pioneer agent resume b042f7a1-0e7e-4f78-96df-a1cc2d4afcdf</Text>
2662
3866
  </Box>
2663
3867
  );
2664
3868
  }
@@ -2672,17 +3876,10 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2672
3876
  <Text> pioneer {"<command>"} {"[options]"}</Text>
2673
3877
  <Text> </Text>
2674
3878
  <Text bold>Commands:</Text>
2675
- <Text> chat Start interactive chat agent</Text>
2676
3879
  <Text> auth Authentication (login, logout, status)</Text>
2677
- <Text> dataset Manage datasets (list, generate, edit, analyze)</Text>
2678
- <Text> project Manage projects</Text>
3880
+ <Text> model Manage model endpoints and artifacts</Text>
2679
3881
  <Text> job Manage training jobs</Text>
2680
- <Text> model Manage models</Text>
2681
- <Text> eval Model evaluations on datasets</Text>
2682
- <Text> benchmark Run benchmark evaluations</Text>
2683
- <Text> notebook Run and manage Jupyter notebooks</Text>
2684
- <Text> competition Competitions and leaderboards</Text>
2685
- <Text> adaptive-finetuning Adaptive fine-tuning agent</Text>
3882
+ <Text> agent Run agent chat (research is the only explicit alternate mode)</Text>
2686
3883
  <Text> telemetry Manage anonymous usage analytics</Text>
2687
3884
  <Text> </Text>
2688
3885
  <Text dimColor>Run 'pioneer {"<command>"} --help' for details on a specific command.</Text>
@@ -2690,24 +3887,12 @@ const Help: React.FC<HelpProps> = ({ context = "root" }) => {
2690
3887
  <Text dimColor>Get started:</Text>
2691
3888
  <Text dimColor> 1. Sign up at https://app.pioneer.ai</Text>
2692
3889
  <Text dimColor> 2. Run: pioneer auth login</Text>
2693
- <Text dimColor> 3. Start building with: pioneer chat</Text>
3890
+ <Text dimColor> 3. Start building with: pioneer agent</Text>
2694
3891
  </Box>
2695
3892
  );
2696
3893
  };
2697
3894
 
2698
3895
  // ─────────────────────────────────────────────────────────────────────────────
2699
- // Chat Wrapper Component
2700
- // ─────────────────────────────────────────────────────────────────────────────
2701
-
2702
- interface ChatWrapperProps {
2703
- flags: Record<string, string>;
2704
- }
2705
-
2706
- const ChatWrapper: React.FC<ChatWrapperProps> = ({ flags }) => {
2707
- const initialMessage = flags.message;
2708
- return <ChatApp initialMessage={initialMessage} />;
2709
- };
2710
-
2711
3896
  // ─────────────────────────────────────────────────────────────────────────────
2712
3897
  // Main Router
2713
3898
  // ─────────────────────────────────────────────────────────────────────────────
@@ -2715,13 +3900,114 @@ const ChatWrapper: React.FC<ChatWrapperProps> = ({ flags }) => {
2715
3900
  interface AppProps {
2716
3901
  command: string[];
2717
3902
  flags: Record<string, string>;
3903
+ parseErrors: string[];
2718
3904
  }
2719
3905
 
2720
- const App: React.FC<AppProps> = ({ command, flags }) => {
3906
+ const App: React.FC<AppProps> = ({ command, flags, parseErrors }) => {
2721
3907
  // Check if raw mode is supported for interactive prompts
2722
3908
  const { isRawModeSupported } = useStdin();
2723
3909
  const [showTelemetryPrompt, setShowTelemetryPrompt] = useState(!hasChosenTelemetry());
2724
3910
  const [group, action, ...rest] = command;
3911
+ const normalizedAction =
3912
+ action === "model_endpoints"
3913
+ ? "endpoints"
3914
+ : action === "model_artifacts"
3915
+ ? "artifacts"
3916
+ : action;
3917
+ const hasParseErrors = parseErrors.length > 0;
3918
+ const isModelCreateMissingModel =
3919
+ group === "model" &&
3920
+ normalizedAction === "endpoints" &&
3921
+ rest[0] === "create" &&
3922
+ parseErrors.length === 1 &&
3923
+ parseErrors[0] === "--model";
3924
+ const isModelEndpointsDeployMissingJob =
3925
+ group === "model" &&
3926
+ normalizedAction === "endpoints" &&
3927
+ rest[0] === "deploy" &&
3928
+ !flags["job"];
3929
+
3930
+ if (group === "dataset" || group === "inference" || group === "eval" || group === "benchmark") {
3931
+ return (
3932
+ <ErrorMessage
3933
+ error={`The '${group}' command group is temporarily hidden for this version. Use 'pioneer --help' to see available commands.`}
3934
+ />
3935
+ );
3936
+ }
3937
+
3938
+ if (hasParseErrors && !isModelCreateMissingModel) {
3939
+ const missingValueHints: Record<string, string> = {
3940
+ "--model": "<base-model-id>",
3941
+ "--mode":
3942
+ group === "agent" || group === "agents"
3943
+ ? "<research>"
3944
+ : "<value>",
3945
+ "--conversation-id": "<session-id>",
3946
+ "--conversation": "<session-id>",
3947
+ "--history": "<json>",
3948
+ "--filters": "<json>",
3949
+ "--format": "<format>",
3950
+ "--text": "<text>",
3951
+ "--prompt": "<text>",
3952
+ "--name": "<name>",
3953
+ "--repo": "<url>",
3954
+ "--icon": "<icon>",
3955
+ "--description": "<text>",
3956
+ "--api-key": "<key>",
3957
+ "--api-url": "<url>",
3958
+ "--message": "<text>",
3959
+ "--inputs": "<json>",
3960
+ "--labels": "<json-array>",
3961
+ "--label-column": "<column>",
3962
+ "--text-column": "<column>",
3963
+ "--dataset-ids": "<comma-separated-ids>",
3964
+ "--output": "<path>",
3965
+ "--format-results": "<true|false>",
3966
+ "--include-confidence": "<true|false>",
3967
+ "--include-spans": "<true|false>",
3968
+ "--reasoning-trace": "<true|false>",
3969
+ "--reasoning-effort": "<low|medium|high>",
3970
+ };
3971
+ const getValueHint = (flag: string) => {
3972
+ if (flag === "--mode" && group === "agent") {
3973
+ return "<research> (default is standard when omitted)";
3974
+ }
3975
+ return missingValueHints[flag] ?? "<value>";
3976
+ };
3977
+
3978
+ if (isModelEndpointsDeployMissingJob) {
3979
+ const errorMessage = rest[1]
3980
+ ? `Training job ID required: model endpoints deploy ${rest[1]} --job <training-job-id>`
3981
+ : "Training job ID required: model endpoints deploy <model-id> --job <training-job-id>";
3982
+
3983
+ return (
3984
+ <ApiCommand
3985
+ action={() =>
3986
+ Promise.resolve<api.ApiResult<{ message: string }>>({
3987
+ ok: false,
3988
+ status: 400,
3989
+ error: errorMessage,
3990
+ data: {
3991
+ message: errorMessage,
3992
+ },
3993
+ })
3994
+ }
3995
+ successMessage="Validation failed"
3996
+ />
3997
+ );
3998
+ }
3999
+
4000
+ return (
4001
+ <Box flexDirection="column">
4002
+ <ErrorMessage error="One or more flags are missing values. Please provide values for: " />
4003
+ {parseErrors.map((flag) => (
4004
+ <Text dimColor key={flag}>
4005
+ - {flag} {getValueHint(flag)}
4006
+ </Text>
4007
+ ))}
4008
+ </Box>
4009
+ );
4010
+ }
2725
4011
 
2726
4012
  // In non-interactive mode, skip telemetry prompt and default to disabled
2727
4013
  useEffect(() => {
@@ -2734,9 +4020,11 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
2734
4020
  // Track command usage (must be before any conditional returns)
2735
4021
  useEffect(() => {
2736
4022
  if (group && !showTelemetryPrompt) {
2737
- trackCommand(group, action);
4023
+ const actionForTracking =
4024
+ group === "model" && normalizedAction !== action ? action || normalizedAction : normalizedAction;
4025
+ trackCommand(group, actionForTracking);
2738
4026
  }
2739
- }, [group, action, showTelemetryPrompt]);
4027
+ }, [group, action, normalizedAction, showTelemetryPrompt]);
2740
4028
 
2741
4029
  // Show telemetry consent prompt on first run (but not for --help or --version, and only if interactive)
2742
4030
  if (showTelemetryPrompt && isRawModeSupported && !flags.help && flags.version !== "true" && flags.v !== "true") {
@@ -2748,14 +4036,6 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
2748
4036
  return <Help />;
2749
4037
  }
2750
4038
 
2751
- // Chat command - Interactive agent
2752
- if (group === "chat") {
2753
- if (flags.help === "true" || action === "help") {
2754
- return <Help context="chat" />;
2755
- }
2756
- return <ChatWrapper flags={flags} />;
2757
- }
2758
-
2759
4039
  // Auth commands
2760
4040
  if (group === "auth") {
2761
4041
  if (flags.help === "true" || !action || action === "help") {
@@ -2895,6 +4175,31 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
2895
4175
  );
2896
4176
  }
2897
4177
 
4178
+ if (subAction === "dismiss-outlier" && rest[1]) {
4179
+ const dataset = parseDatasetRef(rest[1]);
4180
+ const fingerprint = flags["fingerprint"];
4181
+ if (!dataset) {
4182
+ return <ErrorMessage error={`Invalid dataset format: ${rest[1]}. Use name[:version] format (e.g., my-dataset or my-dataset:v1).`} />;
4183
+ }
4184
+ if (!fingerprint) {
4185
+ return (
4186
+ <ErrorMessage error="--fingerprint is required (from dataset analysis output)" />
4187
+ );
4188
+ }
4189
+
4190
+ return (
4191
+ <ApiCommand
4192
+ action={() =>
4193
+ api.dismissOutlier({
4194
+ dataset,
4195
+ fingerprint,
4196
+ })
4197
+ }
4198
+ successMessage={`Outlier dismissed for dataset ${rest[1]}`}
4199
+ />
4200
+ );
4201
+ }
4202
+
2898
4203
  return <Help context="dataset-edit" />;
2899
4204
  }
2900
4205
 
@@ -3076,6 +4381,41 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3076
4381
  />
3077
4382
  );
3078
4383
  }
4384
+ if (action === "analyze-llm") {
4385
+ const datasetStr = rest[0];
4386
+ const taskType = flags["task-type"];
4387
+ const taskDescription = flags["description"] || flags["task-description"];
4388
+ const labels = parseCommaSeparated(flags["labels"]);
4389
+
4390
+ if (!datasetStr || !taskType) {
4391
+ return <Help context="dataset-analyze" />;
4392
+ }
4393
+
4394
+ if (!["ner", "classification", "generative"].includes(taskType)) {
4395
+ return (
4396
+ <ErrorMessage error="--task-type must be one of: ner, classification, generative" />
4397
+ );
4398
+ }
4399
+
4400
+ const dataset = parseDatasetRef(datasetStr);
4401
+ if (!dataset) {
4402
+ return <ErrorMessage error={`Invalid dataset format: ${datasetStr}. Use name:version format.`} />;
4403
+ }
4404
+
4405
+ return (
4406
+ <ApiCommand
4407
+ action={() =>
4408
+ api.analyzeDatasetLLM({
4409
+ task_type: taskType as "ner" | "classification" | "generative",
4410
+ dataset_name: dataset.name,
4411
+ dataset_version: dataset.version,
4412
+ ...(taskDescription ? { task_description: taskDescription } : {}),
4413
+ ...(labels.length ? { labels } : {}),
4414
+ })
4415
+ }
4416
+ />
4417
+ );
4418
+ }
3079
4419
  if (action === "analyze") {
3080
4420
  const datasetStr = rest[0];
3081
4421
  const taskType = flags["task-type"];
@@ -3105,6 +4445,131 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3105
4445
  const domainDescription = flags["domain"];
3106
4446
  const saveDataset = flags["save"]?.toLowerCase() === "true";
3107
4447
  const datasetName = flags["name"];
4448
+ const quality = flags["quality"] as "light" | "medium" | "heavy" | undefined;
4449
+ const generationProfile = flags["generation-profile"] as
4450
+ | "auto"
4451
+ | "fast"
4452
+ | "balanced"
4453
+ | "quality"
4454
+ | undefined;
4455
+ const includeReasoningTrace =
4456
+ flags["reasoning-trace"] === undefined
4457
+ ? undefined
4458
+ : flags["reasoning-trace"].toLowerCase() !== "false";
4459
+ const reasoningEffort = flags["reasoning-effort"] as
4460
+ | "low"
4461
+ | "medium"
4462
+ | "high"
4463
+ | undefined;
4464
+ const validGenerationProfiles = ["auto", "fast", "balanced", "quality"];
4465
+ if (generationProfile && !validGenerationProfiles.includes(generationProfile)) {
4466
+ return <ErrorMessage error="--generation-profile must be one of: auto, fast, balanced, quality" />;
4467
+ }
4468
+ const validReasoningEfforts = ["low", "medium", "high"];
4469
+ if (reasoningEffort && !validReasoningEfforts.includes(reasoningEffort)) {
4470
+ return <ErrorMessage error="--reasoning-effort must be one of: low, medium, high" />;
4471
+ }
4472
+ const multiplicatorArg = flags["multiplicator"];
4473
+ const useMetaFelix = flags["use-meta-felix"]
4474
+ ? flags["use-meta-felix"].toLowerCase() !== "false"
4475
+ : undefined;
4476
+ const minCriteria = flags["min-criteria"] ? parseInt(flags["min-criteria"], 10) : undefined;
4477
+ if (flags["min-criteria"] && Number.isNaN(minCriteria)) {
4478
+ return <ErrorMessage error="--min-criteria must be a number" />;
4479
+ }
4480
+ const targetChoices = flags["target-choices"]
4481
+ ? parseInt(flags["target-choices"], 10)
4482
+ : undefined;
4483
+ if (flags["target-choices"] && Number.isNaN(targetChoices)) {
4484
+ return <ErrorMessage error="--target-choices must be a number" />;
4485
+ }
4486
+ const projectId = flags["project-id"];
4487
+ const generationType = flags["type"] as "training" | "evaluation" | "split" | undefined;
4488
+ if (generationType && !["training", "evaluation", "split"].includes(generationType)) {
4489
+ return <ErrorMessage error="--type must be one of: training, evaluation, split" />;
4490
+ }
4491
+ const visibility = flags["visibility"] as "private" | "public" | undefined;
4492
+ if (visibility && !["private", "public"].includes(visibility)) {
4493
+ return <ErrorMessage error="--visibility must be private or public" />;
4494
+ }
4495
+ const splitRatioArg = flags["split-ratio"];
4496
+ const splitRatio =
4497
+ splitRatioArg && splitRatioArg.includes(":")
4498
+ ? (() => {
4499
+ const [training, evaluation] = splitRatioArg.split(":").map((v) => parseFloat(v));
4500
+ if (Number.isNaN(training) || Number.isNaN(evaluation)) {
4501
+ return undefined;
4502
+ }
4503
+ return { training, evaluation };
4504
+ })()
4505
+ : splitRatioArg
4506
+ ? (() => {
4507
+ try {
4508
+ const parsed = JSON.parse(splitRatioArg);
4509
+ return parsed;
4510
+ } catch {
4511
+ return undefined;
4512
+ }
4513
+ })()
4514
+ : undefined;
4515
+ const negativeRatio = flags["negative-ratio"]
4516
+ ? parseFloat(flags["negative-ratio"])
4517
+ : undefined;
4518
+ if (flags["negative-ratio"] && Number.isNaN(negativeRatio)) {
4519
+ return <ErrorMessage error="--negative-ratio must be a number" />;
4520
+ }
4521
+ const classifiedExamplesArg = flags["classified-examples"];
4522
+ const qualityArg = flags["quality"];
4523
+ if (qualityArg && !["light", "medium", "heavy"].includes(qualityArg)) {
4524
+ return <ErrorMessage error="--quality must be light, medium, or heavy" />;
4525
+ }
4526
+ if (splitRatioArg && splitRatio === undefined) {
4527
+ return <ErrorMessage error="--split-ratio must be training:evaluation or a JSON object" />;
4528
+ }
4529
+ let parsedMultiplicator: Record<string, unknown> | undefined;
4530
+ if (multiplicatorArg) {
4531
+ try {
4532
+ const parsed = JSON.parse(multiplicatorArg);
4533
+ if (typeof parsed !== "object" || parsed === null || Array.isArray(parsed)) {
4534
+ return <ErrorMessage error="--multiplicator must be a valid JSON object" />;
4535
+ }
4536
+ parsedMultiplicator = parsed;
4537
+ } catch {
4538
+ return <ErrorMessage error="--multiplicator must be valid JSON" />;
4539
+ }
4540
+ }
4541
+ let classifiedExamples: Record<string, unknown>[] | undefined;
4542
+ if (classifiedExamplesArg) {
4543
+ try {
4544
+ const parsed = JSON.parse(classifiedExamplesArg);
4545
+ if (!Array.isArray(parsed)) {
4546
+ return <ErrorMessage error="--classified-examples must be a JSON array" />;
4547
+ }
4548
+ classifiedExamples = parsed as Record<string, unknown>[];
4549
+ } catch {
4550
+ return <ErrorMessage error="--classified-examples must be valid JSON" />;
4551
+ }
4552
+ }
4553
+ if (multiplicatorArg && parsedMultiplicator === undefined) {
4554
+ return <ErrorMessage error="--multiplicator must be a valid JSON object" />;
4555
+ }
4556
+
4557
+ const commonGenerationOptions = {
4558
+ quality,
4559
+ generation_profile: generationProfile,
4560
+ ...(includeReasoningTrace !== undefined ? { include_reasoning_trace: includeReasoningTrace } : {}),
4561
+ ...(reasoningEffort ? { reasoning_effort: reasoningEffort } : {}),
4562
+ ...(multiplicatorArg ? { multiplicator: parsedMultiplicator } : {}),
4563
+ ...(useMetaFelix !== undefined ? { use_meta_felix: useMetaFelix } : {}),
4564
+ ...(minCriteria !== undefined ? { min_criteria: minCriteria } : {}),
4565
+ ...(targetChoices !== undefined ? { target_choices: targetChoices } : {}),
4566
+ ...(projectId ? { project_id: projectId } : {}),
4567
+ ...(generationType ? { type: generationType } : {}),
4568
+ ...(visibility ? { visibility } : {}),
4569
+ ...(splitRatio ? { split_ratio: splitRatio } : {}),
4570
+ ...(negativeRatio !== undefined ? { negative_ratio: negativeRatio } : {}),
4571
+ ...(classifiedExamplesArg ? { classified_examples: classifiedExamples } : {}),
4572
+ };
3108
4573
 
3109
4574
  if (subAction === "ner") {
3110
4575
  const labels = flags["labels"]?.split(",");
@@ -3120,6 +4585,7 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3120
4585
  domain_description: domainDescription,
3121
4586
  save_dataset: saveDataset,
3122
4587
  dataset_name: datasetName,
4588
+ ...commonGenerationOptions,
3123
4589
  })
3124
4590
  }
3125
4591
  datasetName={datasetName || "ner-dataset"}
@@ -3145,6 +4611,7 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3145
4611
  multi_label: multiLabel,
3146
4612
  save_dataset: saveDataset,
3147
4613
  dataset_name: datasetName,
4614
+ ...commonGenerationOptions,
3148
4615
  })
3149
4616
  }
3150
4617
  datasetName={datasetName || "classification-dataset"}
@@ -3175,6 +4642,7 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3175
4642
  num_examples: numExamples,
3176
4643
  save_dataset: saveDataset,
3177
4644
  dataset_name: datasetName,
4645
+ ...commonGenerationOptions,
3178
4646
  })
3179
4647
  }
3180
4648
  datasetName={datasetName || "custom-dataset"}
@@ -3198,6 +4666,7 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3198
4666
  num_examples: numExamples,
3199
4667
  save_dataset: saveDataset,
3200
4668
  dataset_name: datasetName,
4669
+ ...commonGenerationOptions,
3201
4670
  })
3202
4671
  }
3203
4672
  datasetName={datasetName || "decoder-dataset"}
@@ -3213,6 +4682,44 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3213
4682
  // Infer labels commands
3214
4683
  if (action === "infer") {
3215
4684
  const subAction = rest[0];
4685
+ if (subAction === "improve-prompt") {
4686
+ const prompt = flags["prompt"] || flags["domain"];
4687
+ if (!prompt) {
4688
+ return <ErrorMessage error="--prompt is required for infer improve-prompt" />;
4689
+ }
4690
+ const dataType = flags["data-type"];
4691
+ return (
4692
+ <ApiCommand
4693
+ action={() =>
4694
+ api.improvePrompt({
4695
+ prompt,
4696
+ ...(dataType ? { data_type: dataType } : {}),
4697
+ })
4698
+ }
4699
+ />
4700
+ );
4701
+ }
4702
+
4703
+ if (subAction === "infer-advanced" || subAction === "advanced") {
4704
+ const prompt = flags["prompt"] || flags["domain"];
4705
+ const dataType = flags["data-type"];
4706
+ const labels = parseCommaSeparated(flags["labels"]);
4707
+ if (!prompt) {
4708
+ return <ErrorMessage error="--prompt is required for dataset infer infer-advanced" />;
4709
+ }
4710
+ return (
4711
+ <ApiCommand
4712
+ action={() =>
4713
+ api.inferAdvanced({
4714
+ prompt,
4715
+ ...(dataType ? { data_type: dataType } : {}),
4716
+ ...(labels.length > 0 ? { labels } : {}),
4717
+ })
4718
+ }
4719
+ />
4720
+ );
4721
+ }
4722
+
3216
4723
  const domainDescription = flags["domain"];
3217
4724
 
3218
4725
  if (!domainDescription) {
@@ -3246,137 +4753,129 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3246
4753
  return <Help context="dataset" />;
3247
4754
  }
3248
4755
 
3249
- return <Help context="dataset" />;
3250
- }
3251
-
3252
- // Project commands
3253
- if (group === "project") {
3254
- if (flags.help === "true" || !action || action === "help") {
3255
- return <Help context="project" />;
3256
- }
3257
-
3258
- if (action === "list") {
3259
- return <ApiCommand action={api.listProjects} />;
3260
- }
3261
-
3262
- if (action === "get") {
3263
- const projectId = rest[0];
3264
- if (!projectId) {
3265
- return <ErrorMessage error="Project ID required: project get <project-id>" />;
3266
- }
3267
- return <ApiCommand action={() => api.getProject(projectId)} />;
3268
- }
3269
-
3270
- if (action === "create") {
3271
- const name = flags["name"];
3272
- const icon = flags["icon"];
3273
- const repo = flags["repo"];
3274
- const description = flags["description"];
3275
- const modelId = flags["model-id"];
3276
- const exampleStr = flags["example"];
3277
-
3278
- if (!name) {
3279
- return <ErrorMessage error="--name is required for project creation" />;
4756
+ if (action === "label-existing") {
4757
+ const subAction = rest[0];
4758
+ const labels = parseCommaSeparated(flags["labels"]);
4759
+ const inputsArg = flags["inputs"];
4760
+ const datasetName = flags["name"];
4761
+ const saveDataset = flags["save"]?.toLowerCase() === "true";
4762
+ const domainDescription = flags["domain"];
4763
+ const projectId = flags["project-id"];
4764
+ if (!inputsArg) {
4765
+ return <ErrorMessage error="--inputs is required for label-existing commands" />;
3280
4766
  }
3281
-
3282
- let example: Record<string, unknown> | undefined;
3283
- if (exampleStr) {
3284
- try {
3285
- const parsed = JSON.parse(exampleStr) as unknown;
3286
- if (!parsed || Array.isArray(parsed) || typeof parsed !== "object") {
3287
- return <ErrorMessage error="--example must be a JSON object" />;
3288
- }
3289
- example = parsed as Record<string, unknown>;
3290
- } catch {
3291
- return <ErrorMessage error="--example must be valid JSON" />;
3292
- }
4767
+ if (saveDataset && !datasetName) {
4768
+ return <ErrorMessage error="--name is required when --save=true for label-existing" />;
3293
4769
  }
3294
4770
 
3295
- return (
3296
- <ApiCommand
3297
- action={() =>
3298
- api.createProject({
3299
- name,
3300
- ...(icon ? { icon } : {}),
3301
- ...(repo ? { repo } : {}),
3302
- ...(description ? { description } : {}),
3303
- ...(modelId ? { selected_model_id: modelId } : {}),
3304
- ...(example ? { example } : {}),
3305
- })
3306
- }
3307
- successMessage="Project created"
3308
- />
3309
- );
3310
- }
3311
-
3312
- if (action === "update") {
3313
- const projectId = rest[0];
3314
- if (!projectId) {
3315
- return <ErrorMessage error="Project ID required: project update <project-id>" />;
4771
+ let inputs: unknown;
4772
+ try {
4773
+ inputs = JSON.parse(inputsArg);
4774
+ } catch {
4775
+ return <ErrorMessage error="--inputs must be valid JSON" />;
3316
4776
  }
3317
4777
 
3318
- const name = flags["name"];
3319
- const icon = flags["icon"];
3320
- const repo = flags["repo"];
3321
- const description = flags["description"];
3322
- const modelId = flags["model-id"];
3323
-
3324
- if (!name && !icon && !repo && !description && !modelId) {
4778
+ if (subAction === "ner" || subAction === "classification") {
4779
+ if (!labels || labels.length === 0) {
4780
+ return <ErrorMessage error="--labels is required for label-existing ner|classification" />;
4781
+ }
4782
+ if (
4783
+ !Array.isArray(inputs) ||
4784
+ inputs.length === 0 ||
4785
+ !inputs.every((item) => typeof item === "string")
4786
+ ) {
4787
+ return <ErrorMessage error="--inputs must be a JSON array of strings for ner/classification" />;
4788
+ }
4789
+ const common = { labels, inputs: inputs as string[], dataset_name: datasetName };
4790
+ if (subAction === "ner") {
4791
+ return (
4792
+ <ApiCommand
4793
+ action={() =>
4794
+ api.labelExistingNER({
4795
+ ...common,
4796
+ domain_description: domainDescription,
4797
+ save_dataset: saveDataset,
4798
+ project_id: projectId,
4799
+ })
4800
+ }
4801
+ />
4802
+ );
4803
+ }
3325
4804
  return (
3326
- <ErrorMessage error="Provide at least one field to update: --name, --icon, --repo, --description, or --model-id" />
4805
+ <ApiCommand
4806
+ action={() =>
4807
+ api.labelExistingClassification({
4808
+ ...common,
4809
+ domain_description: domainDescription,
4810
+ save_dataset: saveDataset,
4811
+ project_id: projectId,
4812
+ })
4813
+ }
4814
+ />
3327
4815
  );
3328
4816
  }
3329
4817
 
3330
- return (
3331
- <ApiCommand
3332
- action={() =>
3333
- api.updateProject(projectId, {
3334
- ...(name ? { name } : {}),
3335
- ...(icon ? { icon } : {}),
3336
- ...(repo ? { repo } : {}),
3337
- ...(description ? { description } : {}),
3338
- ...(modelId ? { selected_model_id: modelId } : {}),
3339
- })
3340
- }
3341
- successMessage="Project updated"
3342
- />
3343
- );
3344
- }
3345
-
3346
- if (action === "delete") {
3347
- const projectId = rest[0];
3348
- if (!projectId) {
3349
- return <ErrorMessage error="Project ID required: project delete <project-id>" />;
3350
- }
3351
- return (
3352
- <ApiCommand
3353
- action={() => api.deleteProject(projectId)}
3354
- successMessage={`Project ${projectId} deleted`}
3355
- />
3356
- );
3357
- }
3358
-
3359
- if (action === "dataset-count" || action === "count") {
3360
- const projectId = rest[0];
3361
- if (!projectId) {
3362
- return (
3363
- <ErrorMessage error="Project ID required: project dataset-count <project-id>" />
4818
+ if (subAction === "fields") {
4819
+ const inputFieldsArg = flags["input-fields"];
4820
+ const outputFieldsArg = flags["output-fields"];
4821
+ if (!inputFieldsArg || !outputFieldsArg) {
4822
+ return <ErrorMessage error="--input-fields and --output-fields are required for fields labeling" />;
4823
+ }
4824
+ let inputFields: unknown;
4825
+ let outputFields: unknown;
4826
+ try {
4827
+ inputFields = JSON.parse(inputFieldsArg);
4828
+ outputFields = JSON.parse(outputFieldsArg);
4829
+ } catch {
4830
+ return <ErrorMessage error="--input-fields and --output-fields must be valid JSON" />;
4831
+ }
4832
+ if (
4833
+ !Array.isArray(inputFields) ||
4834
+ !Array.isArray(outputFields) ||
4835
+ inputFields.length === 0 ||
4836
+ outputFields.length === 0
4837
+ ) {
4838
+ return (
4839
+ <ErrorMessage error="--input-fields and --output-fields must be non-empty arrays" />
4840
+ );
4841
+ }
4842
+ const validInputFields = inputFields.every(
4843
+ (field) => field && typeof field === "object" && !Array.isArray(field)
3364
4844
  );
3365
- }
3366
- return <ApiCommand action={() => api.getProjectDatasetCount(projectId)} />;
3367
- }
3368
-
3369
- if (action === "quality-metrics" || action === "quality") {
3370
- const projectId = rest[0];
3371
- if (!projectId) {
4845
+ const validOutputFields = outputFields.every(
4846
+ (field) => field && typeof field === "object" && !Array.isArray(field)
4847
+ );
4848
+ if (!validInputFields || !validOutputFields) {
4849
+ return <ErrorMessage error="--input-fields and --output-fields must be arrays of objects" />;
4850
+ }
4851
+ if (
4852
+ !Array.isArray(inputs) ||
4853
+ inputs.length === 0 ||
4854
+ !inputs.every((item) => item && typeof item === "object" && !Array.isArray(item))
4855
+ ) {
4856
+ return <ErrorMessage error="--inputs must be a JSON array of objects for fields" />;
4857
+ }
3372
4858
  return (
3373
- <ErrorMessage error="Project ID required: project quality-metrics <project-id>" />
4859
+ <ApiCommand
4860
+ action={() =>
4861
+ api.labelExistingFields({
4862
+ input_fields: inputFields as api.RecordField[],
4863
+ output_fields: outputFields as api.RecordField[],
4864
+ inputs: inputs as Record<string, unknown>[],
4865
+ dataset_name: datasetName,
4866
+ save_dataset: saveDataset,
4867
+ domain_description: domainDescription,
4868
+ project_id: projectId,
4869
+ })
4870
+ }
4871
+ />
3374
4872
  );
3375
4873
  }
3376
- return <ApiCommand action={() => api.getProjectQualityMetrics(projectId)} />;
4874
+
4875
+ return <Help context="dataset" />;
3377
4876
  }
3378
4877
 
3379
- return <Help context="project" />;
4878
+ return <Help context="dataset" />;
3380
4879
  }
3381
4880
 
3382
4881
  // Job commands
@@ -3433,181 +4932,558 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3433
4932
  return <Help context="job" />;
3434
4933
  }
3435
4934
 
3436
- // Model commands
3437
- if (group === "model") {
4935
+ // Inference commands
4936
+ if (group === "inference") {
3438
4937
  if (flags.help === "true" || !action || action === "help") {
3439
- return <Help context="model" />;
4938
+ return <Help context="inference" />;
3440
4939
  }
3441
- if (action === "list") {
3442
- const subAction = rest[0];
3443
- if (subAction === "help") {
3444
- return <Help context="model" />;
4940
+
4941
+ if (action === "base-models" || action === "models" || action === "list") {
4942
+ return <ApiCommand action={api.listBaseModels} />;
4943
+ }
4944
+
4945
+ if (action === "encoder") {
4946
+ const rawModelId = rest[0];
4947
+ if (!rawModelId) {
4948
+ return (
4949
+ <ErrorMessage error="Model ID required. Usage: inference encoder <model-id> --text <text> --labels <labels>" />
4950
+ );
3445
4951
  }
3446
- if (subAction === "trained") {
3447
- return <ModelListCommand filter="trained" />;
4952
+ const modelId = normalizeModelId(rawModelId);
4953
+ const text = flags["text"];
4954
+ if (!text) {
4955
+ return <ErrorMessage error="--text is required for encoder inference" />;
3448
4956
  }
3449
- if (subAction === "deployed") {
3450
- return <ModelListCommand filter="deployed" />;
4957
+
4958
+ const task = (flags["task"] || "extract_entities") as
4959
+ | "extract_entities"
4960
+ | "classify_text"
4961
+ | "extract_json"
4962
+ | "schema";
4963
+ if (!["extract_entities", "classify_text", "extract_json", "schema"].includes(task)) {
4964
+ return (
4965
+ <ErrorMessage error="--task must be one of: extract_entities, classify_text, extract_json, schema" />
4966
+ );
4967
+ }
4968
+
4969
+ const labels = parseCommaSeparated(flags["labels"]);
4970
+ const schemaStr = flags["schema"];
4971
+ let schema: string[] | Record<string, unknown> | null = null;
4972
+
4973
+ if (schemaStr) {
4974
+ try {
4975
+ const parsed = JSON.parse(schemaStr) as unknown;
4976
+ if (Array.isArray(parsed)) {
4977
+ if (!parsed.every((item) => typeof item === "string")) {
4978
+ return (
4979
+ <ErrorMessage error="--schema JSON array must contain only strings" />
4980
+ );
4981
+ }
4982
+ schema = parsed;
4983
+ } else if (parsed && typeof parsed === "object") {
4984
+ schema = parsed as Record<string, unknown>;
4985
+ } else {
4986
+ return (
4987
+ <ErrorMessage error="--schema must be a JSON object or array of strings" />
4988
+ );
4989
+ }
4990
+ } catch {
4991
+ return <ErrorMessage error="--schema must be valid JSON" />;
4992
+ }
4993
+ } else if (labels.length > 0) {
4994
+ schema = labels;
4995
+ }
4996
+
4997
+ if (!schema) {
4998
+ return <ErrorMessage error="Provide --labels or --schema for encoder inference" />;
4999
+ }
5000
+
5001
+ const threshold = flags["threshold"] ? parseFloat(flags["threshold"]) : 0.5;
5002
+ if (Number.isNaN(threshold) || threshold < 0 || threshold > 1) {
5003
+ return <ErrorMessage error="--threshold must be a number between 0 and 1" />;
5004
+ }
5005
+
5006
+ const includeConfidence =
5007
+ flags["include-confidence"] === undefined ||
5008
+ flags["include-confidence"].toLowerCase() !== "false";
5009
+ const includeSpans = flags["include-spans"]?.toLowerCase() === "true";
5010
+ const formatResults =
5011
+ flags["format-results"] === undefined ||
5012
+ flags["format-results"].toLowerCase() !== "false";
5013
+ const projectId = flags["project-id"];
5014
+
5015
+ return (
5016
+ <ApiCommand
5017
+ action={() =>
5018
+ api.runInference({
5019
+ model_id: modelId,
5020
+ task,
5021
+ text,
5022
+ schema,
5023
+ threshold,
5024
+ include_confidence: includeConfidence,
5025
+ include_spans: includeSpans,
5026
+ format_results: formatResults,
5027
+ ...(projectId ? { project_id: projectId } : {}),
5028
+ })
5029
+ }
5030
+ />
5031
+ );
5032
+ }
5033
+
5034
+ if (action === "decoder") {
5035
+ const modelId = rest[0];
5036
+ if (!modelId) {
5037
+ return (
5038
+ <ErrorMessage error="Model ID required. Usage: inference decoder <model-id> --prompt <text>" />
5039
+ );
5040
+ }
5041
+ const normalizedModelId = normalizeModelId(modelId);
5042
+ const prompt = flags["prompt"];
5043
+ if (!prompt) {
5044
+ return <ErrorMessage error="--prompt is required for decoder inference" />;
5045
+ }
5046
+ const systemMsg = flags["system"];
5047
+ const maxTokens = flags["max-tokens"] ? parseInt(flags["max-tokens"], 10) : 256;
5048
+ if (Number.isNaN(maxTokens) || maxTokens < 1) {
5049
+ return <ErrorMessage error="--max-tokens must be a positive integer" />;
5050
+ }
5051
+ const temperature = flags["temperature"] ? parseFloat(flags["temperature"]) : 0.7;
5052
+ if (Number.isNaN(temperature) || temperature < 0 || temperature > 2) {
5053
+ return <ErrorMessage error="--temperature must be a number between 0 and 2" />;
5054
+ }
5055
+ const topP = flags["top-p"] ? parseFloat(flags["top-p"]) : undefined;
5056
+ if (topP !== undefined && (Number.isNaN(topP) || topP < 0 || topP > 1)) {
5057
+ return <ErrorMessage error="--top-p must be a number between 0 and 1" />;
5058
+ }
5059
+ const includeReasoningTrace =
5060
+ flags["reasoning-trace"] !== undefined &&
5061
+ flags["reasoning-trace"].toLowerCase() !== "false";
5062
+ const projectId = flags["project-id"];
5063
+
5064
+ return (
5065
+ <ModelGenerateCommand
5066
+ modelId={normalizedModelId}
5067
+ prompt={prompt}
5068
+ systemMsg={systemMsg}
5069
+ maxTokens={maxTokens}
5070
+ temperature={temperature}
5071
+ topP={topP}
5072
+ includeReasoningTrace={includeReasoningTrace}
5073
+ projectId={projectId}
5074
+ />
5075
+ );
5076
+ }
5077
+
5078
+ if (action === "completions") {
5079
+ const rawModelId = rest[0];
5080
+ if (!rawModelId) {
5081
+ return (
5082
+ <ErrorMessage error="Model ID required. Usage: inference completions <model-id> --prompt <text>" />
5083
+ );
5084
+ }
5085
+ const modelId = normalizeModelId(rawModelId);
5086
+ const prompt = flags["prompt"];
5087
+ if (!prompt) {
5088
+ return <ErrorMessage error="--prompt is required for text completions" />;
5089
+ }
5090
+
5091
+ const systemMsg = flags["system"];
5092
+ const maxTokens = flags["max-tokens"] ? parseInt(flags["max-tokens"], 10) : 256;
5093
+ if (Number.isNaN(maxTokens) || maxTokens < 1) {
5094
+ return <ErrorMessage error="--max-tokens must be a positive integer" />;
5095
+ }
5096
+ const temperature = flags["temperature"] ? parseFloat(flags["temperature"]) : 0.7;
5097
+ if (Number.isNaN(temperature) || temperature < 0 || temperature > 2) {
5098
+ return <ErrorMessage error="--temperature must be a number between 0 and 2" />;
5099
+ }
5100
+ const topP = flags["top-p"] ? parseFloat(flags["top-p"]) : undefined;
5101
+ if (topP !== undefined && (Number.isNaN(topP) || topP < 0 || topP > 1)) {
5102
+ return <ErrorMessage error="--top-p must be a number between 0 and 1" />;
3451
5103
  }
3452
- // Default: show all models
3453
- return <ModelListCommand filter="all" />;
5104
+ const stopValues = parseCommaSeparated(flags["stop"]);
5105
+ const stop =
5106
+ stopValues.length === 0 ? undefined : stopValues.length === 1 ? stopValues[0] : stopValues;
5107
+ const echo = flags["echo"]?.toLowerCase() === "true";
5108
+ const provider = flags["provider"];
5109
+
5110
+ const extraBody: Record<string, unknown> = {};
5111
+ if (topP !== undefined) extraBody.top_p = topP;
5112
+ if (provider) extraBody.provider = provider;
5113
+
5114
+ return (
5115
+ <ApiCommand
5116
+ action={() =>
5117
+ api.runTextCompletion({
5118
+ model: modelId,
5119
+ prompt: buildTextCompletionPrompt(prompt, systemMsg),
5120
+ max_tokens: maxTokens,
5121
+ temperature,
5122
+ ...(stop !== undefined ? { stop } : {}),
5123
+ ...(echo ? { echo: true } : {}),
5124
+ ...(Object.keys(extraBody).length > 0 ? { extra_body: extraBody } : {}),
5125
+ })
5126
+ }
5127
+ />
5128
+ );
5129
+ }
5130
+
5131
+ return <Help context="inference" />;
5132
+ }
5133
+
5134
+ // Model commands
5135
+ if (group === "model") {
5136
+ if (flags.help === "true" || !action || action === "help") {
5137
+ return <Help context="model" />;
3454
5138
  }
3455
- if (action === "delete") {
3456
- if (!rest[0]) {
3457
- return <ErrorMessage error="Job ID required. Usage: model delete <job-id>" />;
5139
+
5140
+ if (normalizedAction === "endpoints") {
5141
+ const endpointAction = rest[0];
5142
+ const endpointArgs = rest.slice(1);
5143
+
5144
+ if (flags.help === "true" || !endpointAction || endpointAction === "help") {
5145
+ return <Help context="model-endpoints" />;
3458
5146
  }
3459
- const jobId = rest[0];
3460
- if (jobId.length !== 36) {
5147
+
5148
+ if (endpointAction === "create") {
5149
+ const name = flags["name"];
5150
+ const icon = flags["icon"];
5151
+ const repo = flags["repo"];
5152
+ const description = flags["description"];
5153
+ const modelId = flags["model"] || "";
5154
+ const exampleStr = flags["example"];
5155
+
5156
+ if (flags["model-id"]) {
5157
+ return (
5158
+ <ErrorMessage error="Use --model to specify the base model reference. --model-id is deprecated." />
5159
+ );
5160
+ }
5161
+
5162
+ if (flags["base-model"] || flags["active-model-id"]) {
5163
+ return (
5164
+ <ErrorMessage error="Use --model to specify the model reference. --base-model and --active-model-id are no longer supported." />
5165
+ );
5166
+ }
5167
+
5168
+ if (!modelId) {
5169
+ const parsedExample = parseProjectExample(exampleStr);
5170
+ if (parsedExample.error) {
5171
+ return <ErrorMessage error={parsedExample.error} />;
5172
+ }
5173
+ return (
5174
+ <ModelCreateInteractive
5175
+ name={name}
5176
+ icon={icon}
5177
+ repo={repo}
5178
+ description={description}
5179
+ example={parsedExample.value}
5180
+ />
5181
+ );
5182
+ }
5183
+
5184
+ const parsedExample = parseProjectExample(exampleStr);
5185
+ if (parsedExample.error) {
5186
+ return <ErrorMessage error={parsedExample.error} />;
5187
+ }
5188
+
3461
5189
  return (
3462
- <Box flexDirection="column">
3463
- <ErrorMessage error="Invalid job ID: must be full UUID (36 characters)" />
3464
- <Text dimColor> Provided: {jobId} ({jobId.length} characters)</Text>
3465
- <Text dimColor> Tip: Use 'pioneer model list' to see full job IDs</Text>
3466
- </Box>
5190
+ <ApiCommand
5191
+ action={() =>
5192
+ api.createProject({
5193
+ name: name ?? modelId,
5194
+ ...(icon ? { icon } : {}),
5195
+ ...(repo ? { repo } : {}),
5196
+ ...(description ? { description } : {}),
5197
+ ...(modelId ? { active_model_id: modelId } : {}),
5198
+ ...(modelId ? { selected_model_id: modelId } : {}),
5199
+ ...(parsedExample.value ? { example: parsedExample.value } : {}),
5200
+ })
5201
+ }
5202
+ successMessage="Model entry created"
5203
+ />
3467
5204
  );
3468
5205
  }
3469
- return (
3470
- <ApiCommand
3471
- action={() => api.deleteModel(jobId)}
3472
- successMessage={`Model ${jobId} deleted`}
3473
- />
3474
- );
3475
- }
3476
- if (action === "download") {
3477
- if (!rest[0]) {
3478
- return <ErrorMessage error="Job ID required. Usage: model download <job-id>" />;
5206
+
5207
+ if (endpointAction === "list") {
5208
+ return <ModelListCommand filter="registered" />;
5209
+ }
5210
+
5211
+ if (endpointAction === "get") {
5212
+ const modelId = endpointArgs[0];
5213
+ if (!modelId) {
5214
+ return <ErrorMessage error="Model ID required: model endpoints get <model-id>" />;
5215
+ }
5216
+ return <ApiCommand action={() => api.getProject(modelId)} />;
3479
5217
  }
3480
- const jobId = rest[0];
3481
- if (jobId.length !== 36) {
5218
+
5219
+ if (endpointAction === "update") {
5220
+ const modelId = endpointArgs[0];
5221
+ if (!modelId) {
5222
+ return <ErrorMessage error="Model ID required: model endpoints update <model-id>" />;
5223
+ }
5224
+
5225
+ const name = flags["name"];
5226
+ const icon = flags["icon"];
5227
+ const repo = flags["repo"];
5228
+ const description = flags["description"];
5229
+ const selectedModelId = flags["model-id"];
5230
+
5231
+ if (!name && !icon && !repo && !description && !selectedModelId) {
5232
+ return (
5233
+ <ErrorMessage error="Provide at least one field to update: --name, --icon, --repo, --description, or --model-id" />
5234
+ );
5235
+ }
5236
+
3482
5237
  return (
3483
- <Box flexDirection="column">
3484
- <ErrorMessage error="Invalid job ID: must be full UUID (36 characters)" />
3485
- <Text dimColor> Provided: {jobId} ({jobId.length} characters)</Text>
3486
- <Text dimColor> Tip: Use 'pioneer model list' to see full job IDs</Text>
3487
- </Box>
5238
+ <ApiCommand
5239
+ action={() =>
5240
+ api.updateProject(modelId, {
5241
+ ...(name ? { name } : {}),
5242
+ ...(icon ? { icon } : {}),
5243
+ ...(repo ? { repo } : {}),
5244
+ ...(description ? { description } : {}),
5245
+ ...(selectedModelId ? { selected_model_id: selectedModelId } : {}),
5246
+ })
5247
+ }
5248
+ successMessage="Model updated"
5249
+ />
3488
5250
  );
3489
5251
  }
3490
- return <ApiCommand action={() => api.downloadModel(jobId)} />;
3491
- }
3492
- // Model upload command
3493
- if (action === "upload") {
3494
- const destination = flags["to"];
3495
5252
 
3496
- // Show help if no arguments provided
3497
- if (!rest[0] && !destination) {
5253
+ if (endpointAction === "delete") {
5254
+ const modelId = endpointArgs[0];
5255
+ if (!modelId) {
5256
+ return <ErrorMessage error="Model ID required: model endpoints delete <model-id>" />;
5257
+ }
3498
5258
  return (
3499
- <Box flexDirection="column">
3500
- <Text bold>Model Upload:</Text>
3501
- <Text> </Text>
3502
- <Text> Upload to Hugging Face:</Text>
3503
- <Text> model upload {"<job-id>"} --to hf --repo {"<repo>"} [--hf-token {"<token>"}] [--private]</Text>
3504
- <Text> </Text>
3505
- <Text dimColor> Supported destinations: hf (more coming soon)</Text>
3506
- </Box>
5259
+ <ApiCommand
5260
+ action={() => api.deleteProject(modelId)}
5261
+ successMessage={`Model ${modelId} deleted`}
5262
+ />
3507
5263
  );
3508
5264
  }
3509
5265
 
3510
- // Upload to Hugging Face
3511
- if (destination === "hf") {
3512
- if (!rest[0]) {
3513
- return <ErrorMessage error="Job ID required: model upload <job-id> --to hf --repo <repo>" />;
5266
+ if (endpointAction === "dataset-count" || endpointAction === "count") {
5267
+ const modelId = endpointArgs[0];
5268
+ if (!modelId) {
5269
+ return <ErrorMessage error="Model ID required: model endpoints dataset-count <model-id>" />;
3514
5270
  }
3515
- const jobId = rest[0];
3516
- const repo = flags["repo"];
3517
- const hfTokenFlag = flags["hf-token"];
3518
- const isPrivate = flags["private"]?.toLowerCase() === "true";
5271
+ return <ApiCommand action={() => api.getProjectDatasetCount(modelId)} />;
5272
+ }
3519
5273
 
3520
- if (!repo) {
3521
- return <ErrorMessage error="--repo is required (e.g., username/model-name)" />;
5274
+ if (endpointAction === "quality-metrics" || endpointAction === "quality") {
5275
+ const modelId = endpointArgs[0];
5276
+ if (!modelId) {
5277
+ return <ErrorMessage error="Model ID required: model endpoints quality-metrics <model-id>" />;
3522
5278
  }
5279
+ return <ApiCommand action={() => api.getProjectQualityMetrics(modelId)} />;
5280
+ }
3523
5281
 
3524
- const hfToken = getHfToken(hfTokenFlag);
3525
- if (!hfToken) {
5282
+ if (endpointAction === "deploy") {
5283
+ const modelId = endpointArgs[0];
5284
+ const jobId = flags["job"];
5285
+
5286
+ if (!modelId) {
5287
+ return <ErrorMessage error="Model ID required: model endpoints deploy <model-id> --job <training-job-id>" />;
5288
+ }
5289
+ if (!jobId) {
5290
+ return <ErrorMessage error="Training job ID required: model endpoints deploy <model-id> --job <training-job-id>" />;
5291
+ }
5292
+ if (jobId.length !== 36) {
3526
5293
  return (
3527
5294
  <Box flexDirection="column">
3528
- <ErrorMessage error="Hugging Face token required." />
3529
- <Text> </Text>
3530
- <Text>Set your token with:</Text>
3531
- <Text color="cyan"> pioneer auth hf</Text>
3532
- <Text> </Text>
3533
- <Text dimColor>Get a token at: https://huggingface.co/settings/tokens</Text>
5295
+ <ErrorMessage error="Invalid job ID: must be full UUID (36 characters)" />
5296
+ <Text dimColor> Provided: {jobId} ({jobId.length} characters)</Text>
5297
+ <Text dimColor> Tip: Use 'pioneer model artifacts list' and 'pioneer model artifacts trained' to see full job IDs</Text>
3534
5298
  </Box>
3535
5299
  );
3536
5300
  }
3537
5301
 
5302
+ const reason = flags["reason"];
5303
+
3538
5304
  return (
3539
5305
  <ApiCommand
3540
5306
  action={() =>
3541
- api.pushModelToHub(jobId, {
3542
- hf_token: hfToken,
3543
- repo_id: repo,
3544
- private: isPrivate,
5307
+ api.deployTrainingJobToProject(modelId, {
5308
+ training_job_id: jobId,
5309
+ ...(reason ? { reason } : {}),
3545
5310
  })
3546
5311
  }
3547
- successMessage={`Model uploaded to Hugging Face: ${repo}`}
5312
+ successMessage={`Deployment initiated for project ${modelId} from job ${jobId}`}
5313
+ />
5314
+ );
5315
+ }
5316
+
5317
+ if (endpointAction === "rollback") {
5318
+ const modelId = endpointArgs[0];
5319
+ const deploymentId = endpointArgs[1];
5320
+
5321
+ if (!modelId) {
5322
+ return <ErrorMessage error="Model ID required: model endpoints rollback <model-id> <deployment-id>" />;
5323
+ }
5324
+ if (!deploymentId) {
5325
+ return (
5326
+ <ErrorMessage error="Deployment ID required: model endpoints rollback <model-id> <deployment-id>" />
5327
+ );
5328
+ }
5329
+ if (deploymentId.length !== 36) {
5330
+ return (
5331
+ <Box flexDirection="column">
5332
+ <ErrorMessage error="Invalid deployment ID: must be full UUID (36 characters)" />
5333
+ <Text dimColor> Provided: {deploymentId} ({deploymentId.length} characters)</Text>
5334
+ </Box>
5335
+ );
5336
+ }
5337
+
5338
+ return (
5339
+ <ApiCommand
5340
+ action={() => api.rollbackProjectDeployment(modelId, deploymentId)}
5341
+ successMessage={`Rollback initiated for endpoint ${modelId} using deployment ${deploymentId}`}
3548
5342
  />
3549
5343
  );
3550
5344
  }
3551
5345
 
3552
- return <ErrorMessage error="--to is required. Supported destinations: hf" />;
5346
+ return <Help context="model-endpoints" />;
3553
5347
  }
3554
- // Model predict command (encoder inference)
3555
- if (action === "predict") {
3556
- const modelId = rest[0];
3557
- if (!modelId) {
3558
- return <ErrorMessage error="Model ID required. Usage: model predict <model-id> --text <text> --labels <labels>" />;
5348
+
5349
+ if (normalizedAction === "artifacts") {
5350
+ const artifactsAction = rest[0];
5351
+ const artifactArgs = rest.slice(1);
5352
+
5353
+ if (flags.help === "true" || !artifactsAction || artifactsAction === "help") {
5354
+ return <Help context="model-artifacts" />;
3559
5355
  }
3560
- const text = flags["text"];
3561
- const labelsStr = flags["labels"];
3562
- if (!text) {
3563
- return <ErrorMessage error="--text is required for inference" />;
5356
+
5357
+ if (artifactsAction === "list") {
5358
+ return <ModelListCommand filter="artifacts" />;
3564
5359
  }
3565
- if (!labelsStr) {
3566
- return <ErrorMessage error="--labels is required (comma-separated, e.g., PERSON,ORG,LOCATION)" />;
5360
+
5361
+ if (artifactsAction === "trained") {
5362
+ return <ModelListCommand filter="trained" />;
3567
5363
  }
3568
- const labels = labelsStr.split(",").map((l: string) => l.trim());
3569
- const task = (flags["task"] || "extract_entities") as "extract_entities" | "classify_text" | "extract_json" | "schema";
3570
- const threshold = flags["threshold"] ? parseFloat(flags["threshold"]) : 0.5;
3571
5364
 
3572
- return (
3573
- <ApiCommand
3574
- action={() =>
3575
- api.runInference({
3576
- model_id: modelId,
3577
- task,
3578
- text,
3579
- schema: labels,
3580
- threshold,
3581
- include_confidence: true,
3582
- })
3583
- }
3584
- />
3585
- );
3586
- }
3587
- // Model generate command (decoder inference)
3588
- if (action === "generate") {
3589
- const modelId = rest[0];
3590
- if (!modelId) {
3591
- return <ErrorMessage error="Model ID required. Usage: model generate <model-id> --prompt <text>" />;
5365
+ if (artifactsAction === "deployed") {
5366
+ return <ModelListCommand filter="deployed" />;
3592
5367
  }
3593
- const prompt = flags["prompt"];
3594
- if (!prompt) {
3595
- return <ErrorMessage error="--prompt is required for text generation" />;
5368
+
5369
+ if (artifactsAction === "download") {
5370
+ if (!artifactArgs[0]) {
5371
+ return <ErrorMessage error="Job ID required: model artifacts download <job-id>" />;
5372
+ }
5373
+ const jobId = artifactArgs[0];
5374
+ if (jobId.length !== 36) {
5375
+ return (
5376
+ <Box flexDirection="column">
5377
+ <ErrorMessage error="Invalid job ID: must be full UUID (36 characters)" />
5378
+ <Text dimColor> Provided: {jobId} ({jobId.length} characters)</Text>
5379
+ <Text dimColor> Tip: Use 'pioneer model artifacts trained' or 'pioneer model artifacts deployed' to see full job IDs</Text>
5380
+ </Box>
5381
+ );
5382
+ }
5383
+ return <ApiCommand action={() => api.downloadModel(jobId)} />;
3596
5384
  }
3597
- const systemMsg = flags["system"];
3598
- const maxTokens = flags["max-tokens"] ? parseInt(flags["max-tokens"]) : 256;
3599
- const temperature = flags["temperature"] ? parseFloat(flags["temperature"]) : 0.7;
5385
+
5386
+ if (artifactsAction === "delete") {
5387
+ if (!artifactArgs[0]) {
5388
+ return <ErrorMessage error="Model ID required: model artifacts delete <job-id>" />;
5389
+ }
5390
+ const jobId = artifactArgs[0];
5391
+ if (jobId.length !== 36) {
5392
+ return (
5393
+ <Box flexDirection="column">
5394
+ <ErrorMessage error="Invalid job ID: must be full UUID (36 characters)" />
5395
+ <Text dimColor> Provided: {jobId} ({jobId.length} characters)</Text>
5396
+ <Text dimColor> Tip: Use 'pioneer model artifacts list' to see full job IDs</Text>
5397
+ </Box>
5398
+ );
5399
+ }
5400
+ return (
5401
+ <ApiCommand
5402
+ action={() => api.deleteModel(jobId)}
5403
+ successMessage={`Model ${jobId} deleted`}
5404
+ />
5405
+ );
5406
+ }
5407
+
5408
+ // Model upload command
5409
+ if (artifactsAction === "upload") {
5410
+ const destination = flags["to"];
5411
+
5412
+ if (!artifactArgs[0] && !destination) {
5413
+ return (
5414
+ <Box flexDirection="column">
5415
+ <Text bold>Model Upload:</Text>
5416
+ <Text> </Text>
5417
+ <Text> Upload to Hugging Face:</Text>
5418
+ <Text> model artifacts upload {"<job-id>"} --to hf --repo {"<repo>"} [--hf-token {"<token>"}] [--private]</Text>
5419
+ <Text> </Text>
5420
+ <Text dimColor> Supported destinations: hf (more coming soon)</Text>
5421
+ </Box>
5422
+ );
5423
+ }
5424
+
5425
+ if (destination === "hf") {
5426
+ if (!artifactArgs[0]) {
5427
+ return <ErrorMessage error="Job ID required: model artifacts upload <job-id> --to hf --repo <repo>" />;
5428
+ }
5429
+ const jobId = artifactArgs[0];
5430
+ const repo = flags["repo"];
5431
+ const hfTokenFlag = flags["hf-token"];
5432
+ const isPrivate = flags["private"]?.toLowerCase() === "true";
5433
+
5434
+ if (!repo) {
5435
+ return <ErrorMessage error="--repo is required (e.g., username/model-name)" />;
5436
+ }
5437
+
5438
+ const hfToken = getHfToken(hfTokenFlag);
5439
+ if (!hfToken) {
5440
+ return (
5441
+ <Box flexDirection="column">
5442
+ <ErrorMessage error="Hugging Face token required." />
5443
+ <Text> </Text>
5444
+ <Text>Set your token with:</Text>
5445
+ <Text color="cyan"> pioneer auth hf</Text>
5446
+ <Text> </Text>
5447
+ <Text dimColor>Get a token at: https://huggingface.co/settings/tokens</Text>
5448
+ </Box>
5449
+ );
5450
+ }
5451
+
5452
+ return (
5453
+ <ApiCommand
5454
+ action={() =>
5455
+ api.pushModelToHub(jobId, {
5456
+ hf_token: hfToken,
5457
+ repo_id: repo,
5458
+ private: isPrivate,
5459
+ })
5460
+ }
5461
+ successMessage={`Model uploaded to Hugging Face: ${repo}`}
5462
+ />
5463
+ );
5464
+ }
5465
+
5466
+ return <ErrorMessage error="--to is required. Supported destinations: hf" />;
5467
+ }
5468
+
5469
+ return <Help context="model-artifacts" />;
5470
+ }
5471
+
5472
+ if (action === "predict" || action === "generate") {
3600
5473
  return (
3601
- <ModelGenerateCommand
3602
- modelId={modelId}
3603
- prompt={prompt}
3604
- systemMsg={systemMsg}
3605
- maxTokens={maxTokens}
3606
- temperature={temperature}
5474
+ <ErrorMessage
5475
+ error={
5476
+ "model predict and model generate are no longer supported. Use model endpoints/... and model artifacts/... instead."
5477
+ }
3607
5478
  />
3608
5479
  );
3609
5480
  }
3610
- return <Help context="model" />;
5481
+
5482
+ return (
5483
+ <ErrorMessage
5484
+ error={`Unknown model command: model ${action}. Use 'pioneer model_endpoints ...', 'pioneer model endpoints ...', 'pioneer model_artifacts ...', or 'pioneer model artifacts ...'.`}
5485
+ />
5486
+ );
3611
5487
  }
3612
5488
 
3613
5489
  // Eval commands
@@ -3625,9 +5501,60 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3625
5501
  if (action === "list" && !rest[0]) {
3626
5502
  return <ErrorMessage error="Dataset required: eval list <name[:version]>" />;
3627
5503
  }
5504
+ if (action === "baseline-models") {
5505
+ return <ApiCommand action={api.listBaselineModels} />;
5506
+ }
3628
5507
  if (action === "get" && rest[0]) {
3629
5508
  return <ApiCommand action={() => api.getEvaluation(rest[0])} />;
3630
5509
  }
5510
+ if (action === "delete" && rest[0]) {
5511
+ const evaluationId = rest[0];
5512
+ return (
5513
+ <ApiCommand
5514
+ action={async () => {
5515
+ const result = await api.deleteEvaluation(evaluationId);
5516
+ if (!result.ok && (result.status === 401 || result.status === 403) && result.error) {
5517
+ const lower = result.error.toLowerCase();
5518
+ const indicatesJwt = lower.includes("jwt") ||
5519
+ lower.includes("api key access") ||
5520
+ lower.includes("table") ||
5521
+ lower.includes("requires authentication");
5522
+ if (indicatesJwt) {
5523
+ return {
5524
+ ok: false,
5525
+ status: result.status,
5526
+ error:
5527
+ "Evaluation deletion requires a JWT-authenticated session. Run 'pioneer auth login' to sign in with your account credentials.",
5528
+ };
5529
+ }
5530
+ }
5531
+ return result;
5532
+ }}
5533
+ />
5534
+ );
5535
+ }
5536
+ if (action === "update") {
5537
+ const mode = rest[0];
5538
+ const evaluationId = mode === "project" ? rest[1] : rest[0];
5539
+ if (!evaluationId) {
5540
+ return <Help context="eval" />;
5541
+ }
5542
+ const projectId = flags["project-id"];
5543
+ if (!projectId) {
5544
+ return <ErrorMessage error="--project-id is required" />;
5545
+ }
5546
+ return (
5547
+ <ApiCommand
5548
+ action={() =>
5549
+ api.updateEvaluationProject({
5550
+ evaluation_id: evaluationId,
5551
+ project_id: projectId,
5552
+ })
5553
+ }
5554
+ successMessage={`Evaluation ${evaluationId} reassigned`}
5555
+ />
5556
+ );
5557
+ }
3631
5558
  if (action === "create") {
3632
5559
  const modelId = flags["model-id"];
3633
5560
  const datasetStr = flags["dataset"];
@@ -3671,21 +5598,35 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3671
5598
  return <ApiCommand action={api.listBenchmarks} />;
3672
5599
  }
3673
5600
  if (action === "run") {
3674
- const modelId = flags["model-id"];
5601
+ const rawModelId = flags["model-id"];
3675
5602
  const task = flags["task"] as "ner" | "text_classification";
3676
5603
  const benchmark = flags["benchmark"];
3677
5604
  const maxSamples = flags["max-samples"] ? parseInt(flags["max-samples"], 10) : undefined;
3678
5605
  const split = flags["split"];
3679
5606
 
3680
- if (!modelId || !task || !benchmark) {
5607
+ if (!rawModelId || !task || !benchmark) {
3681
5608
  return <ErrorMessage error="--model-id, --task, and --benchmark are required" />;
3682
5609
  }
5610
+ const normalizedModelId = normalizeModelId(rawModelId);
5611
+ if (!isUuid(normalizedModelId)) {
5612
+ return (
5613
+ <ErrorMessage
5614
+ error="Benchmark model-id must be a training job UUID (example: 72c1ac92-3a89-439d-afe3-687d8a935c06)."
5615
+ />
5616
+ );
5617
+ }
5618
+ if (task !== "ner" && task !== "text_classification") {
5619
+ return <ErrorMessage error="--task must be either ner or text_classification" />;
5620
+ }
5621
+ if (maxSamples !== undefined && (Number.isNaN(maxSamples) || maxSamples < 1)) {
5622
+ return <ErrorMessage error="--max-samples must be a positive integer" />;
5623
+ }
3683
5624
 
3684
5625
  return (
3685
5626
  <ApiCommand
3686
5627
  action={() =>
3687
5628
  api.startBenchmarkEvaluation({
3688
- model_id: modelId,
5629
+ model_id: normalizedModelId,
3689
5630
  task,
3690
5631
  benchmark,
3691
5632
  max_samples: maxSamples,
@@ -3710,152 +5651,89 @@ const App: React.FC<AppProps> = ({ command, flags }) => {
3710
5651
  return <Help context="benchmark" />;
3711
5652
  }
3712
5653
 
3713
- // Notebook commands
3714
- if (group === "notebook") {
3715
- if (flags.help === "true" || !action || action === "help") {
3716
- return <Help context="notebook" />;
3717
- }
3718
-
3719
- if (action === "run") {
3720
- const file = rest[0];
3721
- if (!file) {
3722
- return <ErrorMessage error="File path required: notebook run <file.ipynb>" />;
3723
- }
3724
- const gpu = flags.gpu || "cpu";
3725
- const loadFelixHelpers = flags["no-felix"] !== "true";
3726
- return <NotebookRunCommand filePath={file} gpu={gpu} loadFelixHelpers={loadFelixHelpers} />;
3727
- }
3728
-
3729
- if (action === "create") {
3730
- const name = rest[0] || flags.name;
3731
- if (!name) {
3732
- return <ErrorMessage error="Name required: notebook create <name>" />;
3733
- }
3734
- try {
3735
- const filePath = createBlankNotebook(name);
3736
- return <Success message={`Created notebook: ${filePath}`} />;
3737
- } catch (e) {
3738
- return <ErrorMessage error={e instanceof Error ? e.message : String(e)} />;
3739
- }
3740
- }
3741
-
3742
- if (action === "sessions") {
3743
- return <ApiCommand action={api.listNotebookSessions} />;
5654
+ // Adaptive agent commands (new short command)
5655
+ if (group === "agent") {
5656
+ if (flags.help === "true" || action === "help") {
5657
+ return <Help context="agent" />;
3744
5658
  }
3745
5659
 
3746
- if (action === "stop") {
3747
- const sessionId = rest[0];
3748
- if (!sessionId) {
3749
- return <ErrorMessage error="Session ID required: notebook stop <session-id>" />;
3750
- }
5660
+ if (action && action !== "resume" && action !== "sessions" && !action.startsWith("-")) {
3751
5661
  return (
3752
- <ApiCommand
3753
- action={() => api.terminateNotebookSession(sessionId)}
3754
- successMessage={`Session ${sessionId} terminated`}
5662
+ <ErrorMessage
5663
+ error={
5664
+ 'Invalid agent command syntax. Use one of:\n' +
5665
+ "pioneer agent\n" +
5666
+ "pioneer agent --mode research\n" +
5667
+ "pioneer agent sessions\n" +
5668
+ "pioneer agent resume [conversation-id]"
5669
+ }
3755
5670
  />
3756
5671
  );
3757
5672
  }
3758
5673
 
3759
- return <Help context="notebook" />;
3760
- }
3761
-
3762
- // Adaptive fine-tuning commands
3763
- if (group === "adaptive-finetuning" || group === "aft") {
3764
- if (flags.help === "true" || action === "help") {
3765
- return <Help context="adaptive-finetuning" />;
3766
- }
3767
-
3768
- let message = flags.message;
3769
- if (!message) {
3770
- const rawParts = action === "chat" ? rest : [action, ...rest];
3771
- message = rawParts.filter(Boolean).join(" ").trim();
5674
+ if (flags.message) {
5675
+ return <ErrorMessage error="The --message flag has been removed for agent. Run `pioneer agent` or `pioneer agent --mode research` and provide input interactively." />;
3772
5676
  }
3773
5677
 
3774
- if (!message) {
3775
- return (
3776
- <ErrorMessage
3777
- error={'Message required. Usage: adaptive-finetuning chat --message "Analyze failures and propose retraining plan"'}
3778
- />
3779
- );
5678
+ const rawMode = flags.mode;
5679
+ const validModes = ["research"];
5680
+ const normalizedMode = rawMode ? rawMode.toLowerCase() : undefined;
5681
+ if (normalizedMode && !validModes.includes(normalizedMode)) {
5682
+ return <ErrorMessage error="--mode must be one of: research" />;
3780
5683
  }
5684
+ const mode = (normalizedMode === "research" ? "research" : "standard") as "standard" | "research";
3781
5685
 
3782
- let history: api.AdaptiveFinetuningHistoryItem[] | undefined;
5686
+ let history: api.AgentChatHistoryItem[] | undefined;
3783
5687
  if (flags.history) {
3784
5688
  try {
3785
5689
  const parsed = JSON.parse(flags.history) as unknown;
3786
5690
  if (!Array.isArray(parsed)) {
3787
5691
  return <ErrorMessage error="--history must be a JSON array" />;
3788
5692
  }
3789
- history = parsed as api.AdaptiveFinetuningHistoryItem[];
5693
+ history = parsed as api.AgentChatHistoryItem[];
3790
5694
  } catch {
3791
5695
  return <ErrorMessage error="--history must be valid JSON" />;
3792
5696
  }
3793
5697
  }
3794
5698
 
3795
- let filters: api.AdaptiveFinetuningQueryFilters | undefined;
3796
5699
  if (flags.filters) {
3797
- try {
3798
- const parsed = JSON.parse(flags.filters) as unknown;
3799
- if (!parsed || Array.isArray(parsed) || typeof parsed !== "object") {
3800
- return <ErrorMessage error="--filters must be a JSON object" />;
3801
- }
3802
- filters = parsed as api.AdaptiveFinetuningQueryFilters;
3803
- } catch {
3804
- return <ErrorMessage error="--filters must be valid JSON" />;
5700
+ return <ErrorMessage error='--filters is not supported for /auto-agent/clarify. Omit this flag for now.' />;
5701
+ }
5702
+
5703
+ if (action === "resume" || action === "sessions") {
5704
+ if (!isRawModeSupported) {
5705
+ return (
5706
+ <ErrorMessage
5707
+ error="Interactive input is not supported in this terminal.\nUse interactive mode for this environment: agent --help"
5708
+ />
5709
+ );
5710
+ }
5711
+ if (rest[0] || flags["conversation-id"]) {
5712
+ const resumeId = rest[0] ?? flags["conversation-id"];
5713
+ return (
5714
+ <AutoAgentInteractiveSession
5715
+ conversationId={resumeId}
5716
+ history={history}
5717
+ mode={mode}
5718
+ allowSessionCreation={false}
5719
+ />
5720
+ );
3805
5721
  }
5722
+ return <AgentResumeCommand mode={mode} />;
3806
5723
  }
3807
5724
 
3808
5725
  return (
3809
- <ApiCommand
3810
- action={() =>
3811
- api.adaptiveFinetuningChat({
3812
- message,
3813
- ...(flags["conversation-id"] ? { conversation_id: flags["conversation-id"] } : {}),
3814
- ...(history ? { history } : {}),
3815
- ...(filters ? { filters } : {}),
3816
- })
3817
- }
3818
- successMessage="Adaptive fine-tuning response received"
5726
+ <AgentInteractivePrompt
5727
+ conversationId={flags["conversation-id"]}
5728
+ history={history}
5729
+ mode={mode}
5730
+ allowSessionCreation={true}
3819
5731
  />
3820
5732
  );
3821
5733
  }
3822
5734
 
3823
- // Competition commands
3824
- if (group === "competition") {
3825
- if (flags.help === "true" || !action || action === "help") {
3826
- return <Help context="competition" />;
3827
- }
3828
- if (action === "list") {
3829
- return <CompetitionListCommand />;
3830
- }
3831
- if (action === "show" && rest[0]) {
3832
- return <ApiCommand action={() => api.getCompetitionSamples(rest[0])} />;
3833
- }
3834
- if (action === "leaderboard" && rest[0]) {
3835
- const limit = flags["limit"] ? parseInt(flags["limit"], 10) : undefined;
3836
- return <LeaderboardCommand datasetId={rest[0]} limit={limit} />;
3837
- }
3838
- if (action === "submit" && rest[0]) {
3839
- const evalId = flags["eval-id"];
3840
- const displayName = flags["name"];
3841
-
3842
- if (!evalId || !displayName) {
3843
- return <ErrorMessage error="--eval-id and --name are required" />;
3844
- }
3845
-
3846
- return (
3847
- <ApiCommand
3848
- action={() =>
3849
- api.submitToLeaderboard(rest[0], {
3850
- evaluation_id: evalId,
3851
- display_name: displayName,
3852
- })
3853
- }
3854
- successMessage="Submitted to leaderboard"
3855
- />
3856
- );
3857
- }
3858
- return <Help context="competition" />;
5735
+ if (group === "notebook") {
5736
+ return <ErrorMessage error="The notebook command is deprecated and has been removed from this CLI." />;
3859
5737
  }
3860
5738
 
3861
5739
  return <Help />;
@@ -3869,7 +5747,7 @@ import packageJson from "../package.json";
3869
5747
 
3870
5748
  async function main() {
3871
5749
  const argv = process.argv.slice(2);
3872
- const { command, flags } = parseArgs(argv);
5750
+ const { command, flags, parseErrors } = parseArgs(argv);
3873
5751
 
3874
5752
  // Handle version flag early (before React render)
3875
5753
  if (flags.version === "true" || flags.v === "true") {
@@ -3877,7 +5755,9 @@ async function main() {
3877
5755
  process.exit(0);
3878
5756
  }
3879
5757
 
3880
- await render(<App command={command} flags={flags} />).waitUntilExit();
5758
+ await render(<App command={command} flags={flags} parseErrors={parseErrors} />).waitUntilExit();
3881
5759
  }
3882
5760
 
3883
- main();
5761
+ if (process.env.PIONEER_SKIP_AUTORUN !== "true") {
5762
+ main();
5763
+ }