@fastino-ai/pioneer-cli 0.2.7 → 0.2.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/api.ts CHANGED
@@ -248,7 +248,8 @@ export interface UploadUrlRequest {
248
248
  }
249
249
 
250
250
  export interface UploadUrlResponse {
251
- upload_url: string;
251
+ upload_url?: string;
252
+ presigned_url?: string;
252
253
  dataset_id: string;
253
254
  key: string;
254
255
  expires_in: number;
@@ -351,7 +352,7 @@ export async function uploadDataset(
351
352
  dataset_name: options.dataset_name,
352
353
  dataset_type: options.dataset_type,
353
354
  format,
354
- content_type: format === "parquet" ? "application/octet-stream" : "text/plain",
355
+ content_type: "application/octet-stream",
355
356
  });
356
357
 
357
358
  if (!urlResult.ok || !urlResult.data) {
@@ -362,13 +363,21 @@ export async function uploadDataset(
362
363
  };
363
364
  }
364
365
 
365
- const { upload_url, dataset_id } = urlResult.data;
366
+ const { dataset_id } = urlResult.data;
367
+ const uploadUrl = urlResult.data.upload_url || urlResult.data.presigned_url;
368
+ if (!uploadUrl) {
369
+ return {
370
+ ok: false,
371
+ status: 500,
372
+ error: "Upload URL not provided by backend",
373
+ };
374
+ }
366
375
 
367
376
  // Step 2: Upload file directly to S3 via presigned URL
368
- const uploadRes = await fetch(upload_url, {
377
+ const uploadRes = await fetch(uploadUrl, {
369
378
  method: "PUT",
370
379
  headers: {
371
- "Content-Type": format === "parquet" ? "application/octet-stream" : "text/plain",
380
+ "Content-Type": "application/octet-stream",
372
381
  },
373
382
  body: fileContent,
374
383
  });
@@ -587,6 +596,21 @@ export async function analyzeDataset(
587
596
  });
588
597
  }
589
598
 
599
+ export interface DatasetLLMAnalysisRequest {
600
+ task_type: "ner" | "classification" | "generative";
601
+ task_description?: string;
602
+ labels?: string[];
603
+ dataset?: Record<string, unknown>[];
604
+ dataset_name?: string;
605
+ dataset_version?: string;
606
+ }
607
+
608
+ export async function analyzeDatasetLLM(
609
+ req: DatasetLLMAnalysisRequest
610
+ ): Promise<ApiResult<DiversityLLMAnalysis>> {
611
+ return request<DiversityLLMAnalysis>("POST", "/felix/dataset/analyze_llm", req);
612
+ }
613
+
590
614
  // ─────────────────────────────────────────────────────────────────────────────
591
615
  // Dataset Generation
592
616
  // ─────────────────────────────────────────────────────────────────────────────
@@ -650,6 +674,7 @@ export interface GenerateRequest {
650
674
  domain_description?: string;
651
675
  temperature?: number;
652
676
  quality?: "light" | "medium" | "heavy";
677
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
653
678
  session_id?: string;
654
679
  config_num_examples?: number;
655
680
  seed?: number;
@@ -668,6 +693,8 @@ export interface GenerateRequest {
668
693
  instruction?: string;
669
694
  constraints?: ConstraintRequest[];
670
695
  multiplicator?: Record<string, unknown>;
696
+ include_reasoning_trace?: boolean;
697
+ reasoning_effort?: "low" | "medium" | "high";
671
698
  use_meta_felix?: boolean;
672
699
  min_criteria?: number;
673
700
  target_choices?: number;
@@ -791,10 +818,19 @@ export interface GenerateNERRequest {
791
818
  save_dataset?: boolean;
792
819
  dataset_name?: string;
793
820
  project_id?: string;
821
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
822
+ include_reasoning_trace?: boolean;
823
+ reasoning_effort?: "low" | "medium" | "high";
794
824
  quality?: "light" | "medium" | "heavy";
795
825
  type?: "training" | "evaluation" | "split";
796
826
  visibility?: "private" | "public";
797
827
  negative_ratio?: number;
828
+ split_ratio?: Record<string, number>;
829
+ multiplicator?: Record<string, unknown>;
830
+ use_meta_felix?: boolean;
831
+ min_criteria?: number;
832
+ target_choices?: number;
833
+ classified_examples?: Record<string, unknown>[];
798
834
  }
799
835
 
800
836
  export async function generateNER(
@@ -812,10 +848,19 @@ export async function generateNER(
812
848
  temperature: req.temperature,
813
849
  constraints: req.constraints,
814
850
  project_id: req.project_id,
851
+ generation_profile: req.generation_profile,
852
+ include_reasoning_trace: req.include_reasoning_trace,
853
+ reasoning_effort: req.reasoning_effort,
815
854
  quality: req.quality,
855
+ min_criteria: req.min_criteria,
856
+ target_choices: req.target_choices,
816
857
  type: req.type,
817
858
  visibility: req.visibility,
818
859
  negative_ratio: req.negative_ratio,
860
+ split_ratio: req.split_ratio,
861
+ multiplicator: req.multiplicator,
862
+ use_meta_felix: req.use_meta_felix,
863
+ classified_examples: req.classified_examples,
819
864
  });
820
865
  }
821
866
 
@@ -828,6 +873,7 @@ export interface LabelExistingNERRequest {
828
873
  constraints?: ConstraintRequest[];
829
874
  save_dataset?: boolean;
830
875
  dataset_name?: string;
876
+ project_id?: string;
831
877
  }
832
878
 
833
879
  export async function labelExistingNER(
@@ -862,10 +908,18 @@ export interface GenerateClassificationRequest {
862
908
  save_dataset?: boolean;
863
909
  dataset_name?: string;
864
910
  project_id?: string;
911
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
912
+ include_reasoning_trace?: boolean;
913
+ reasoning_effort?: "low" | "medium" | "high";
865
914
  quality?: "light" | "medium" | "heavy";
915
+ min_criteria?: number;
916
+ target_choices?: number;
917
+ split_ratio?: Record<string, number>;
918
+ negative_ratio?: number;
919
+ multiplicator?: Record<string, unknown>;
920
+ use_meta_felix?: boolean;
866
921
  type?: "training" | "evaluation" | "split";
867
922
  visibility?: "private" | "public";
868
- negative_ratio?: number;
869
923
  }
870
924
 
871
925
  export async function generateClassification(
@@ -886,7 +940,15 @@ export async function generateClassification(
886
940
  constraints: req.constraints,
887
941
  multi_label: req.multi_label,
888
942
  project_id: req.project_id,
943
+ generation_profile: req.generation_profile,
944
+ include_reasoning_trace: req.include_reasoning_trace,
945
+ reasoning_effort: req.reasoning_effort,
889
946
  quality: req.quality,
947
+ min_criteria: req.min_criteria,
948
+ target_choices: req.target_choices,
949
+ split_ratio: req.split_ratio,
950
+ multiplicator: req.multiplicator,
951
+ use_meta_felix: req.use_meta_felix,
890
952
  type: req.type,
891
953
  visibility: req.visibility,
892
954
  negative_ratio: req.negative_ratio,
@@ -905,6 +967,7 @@ export interface LabelExistingClassificationRequest {
905
967
  multi_label?: boolean;
906
968
  save_dataset?: boolean;
907
969
  dataset_name?: string;
970
+ project_id?: string;
908
971
  }
909
972
 
910
973
  export async function labelExistingClassification(
@@ -931,13 +994,21 @@ export interface GenerateCustomRequest {
931
994
  num_examples?: number;
932
995
  seed?: number;
933
996
  session_id?: string;
934
- min_criteria?: number;
935
997
  temperature?: number;
936
998
  constraints?: ConstraintRequest[];
937
999
  save_dataset?: boolean;
938
1000
  dataset_name?: string;
939
1001
  project_id?: string;
1002
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
1003
+ include_reasoning_trace?: boolean;
1004
+ reasoning_effort?: "low" | "medium" | "high";
940
1005
  quality?: "light" | "medium" | "heavy";
1006
+ min_criteria?: number;
1007
+ target_choices?: number;
1008
+ split_ratio?: Record<string, number>;
1009
+ negative_ratio?: number;
1010
+ multiplicator?: Record<string, unknown>;
1011
+ use_meta_felix?: boolean;
941
1012
  type?: "training" | "evaluation" | "split";
942
1013
  visibility?: "private" | "public";
943
1014
  }
@@ -958,7 +1029,15 @@ export async function generateCustom(
958
1029
  temperature: req.temperature,
959
1030
  constraints: req.constraints,
960
1031
  project_id: req.project_id,
1032
+ generation_profile: req.generation_profile,
1033
+ include_reasoning_trace: req.include_reasoning_trace,
1034
+ reasoning_effort: req.reasoning_effort,
1035
+ target_choices: req.target_choices,
961
1036
  quality: req.quality,
1037
+ split_ratio: req.split_ratio,
1038
+ negative_ratio: req.negative_ratio,
1039
+ multiplicator: req.multiplicator,
1040
+ use_meta_felix: req.use_meta_felix,
962
1041
  type: req.type,
963
1042
  visibility: req.visibility,
964
1043
  });
@@ -973,6 +1052,20 @@ export interface GenerateRecordsRequest {
973
1052
  seed?: number;
974
1053
  temperature?: number;
975
1054
  constraints?: ConstraintRequest[];
1055
+ quality?: "light" | "medium" | "heavy";
1056
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
1057
+ include_reasoning_trace?: boolean;
1058
+ reasoning_effort?: "low" | "medium" | "high";
1059
+ min_criteria?: number;
1060
+ target_choices?: number;
1061
+ split_ratio?: Record<string, number>;
1062
+ negative_ratio?: number;
1063
+ multiplicator?: Record<string, unknown>;
1064
+ project_id?: string;
1065
+ use_meta_felix?: boolean;
1066
+ type?: "training" | "evaluation" | "split";
1067
+ visibility?: "private" | "public";
1068
+ classified_examples?: Record<string, unknown>[];
976
1069
  }
977
1070
 
978
1071
  export async function generateRecords(
@@ -987,6 +1080,20 @@ export async function generateRecords(
987
1080
  seed: req.seed,
988
1081
  temperature: req.temperature,
989
1082
  constraints: req.constraints,
1083
+ project_id: req.project_id,
1084
+ generation_profile: req.generation_profile,
1085
+ include_reasoning_trace: req.include_reasoning_trace,
1086
+ reasoning_effort: req.reasoning_effort,
1087
+ quality: req.quality,
1088
+ min_criteria: req.min_criteria,
1089
+ target_choices: req.target_choices,
1090
+ split_ratio: req.split_ratio,
1091
+ negative_ratio: req.negative_ratio,
1092
+ multiplicator: req.multiplicator,
1093
+ use_meta_felix: req.use_meta_felix,
1094
+ type: req.type,
1095
+ visibility: req.visibility,
1096
+ classified_examples: req.classified_examples,
990
1097
  });
991
1098
  }
992
1099
 
@@ -1001,6 +1108,20 @@ export interface GenerateFieldsRequest {
1001
1108
  temperature?: number;
1002
1109
  constraints?: ConstraintRequest[];
1003
1110
  save_dataset?: boolean;
1111
+ quality?: "light" | "medium" | "heavy";
1112
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
1113
+ include_reasoning_trace?: boolean;
1114
+ reasoning_effort?: "low" | "medium" | "high";
1115
+ min_criteria?: number;
1116
+ target_choices?: number;
1117
+ split_ratio?: Record<string, number>;
1118
+ negative_ratio?: number;
1119
+ multiplicator?: Record<string, unknown>;
1120
+ project_id?: string;
1121
+ use_meta_felix?: boolean;
1122
+ type?: "training" | "evaluation" | "split";
1123
+ visibility?: "private" | "public";
1124
+ classified_examples?: Record<string, unknown>[];
1004
1125
  }
1005
1126
 
1006
1127
  export async function generateFields(
@@ -1016,6 +1137,20 @@ export async function generateFields(
1016
1137
  seed: req.seed,
1017
1138
  temperature: req.temperature,
1018
1139
  constraints: req.constraints,
1140
+ project_id: req.project_id,
1141
+ generation_profile: req.generation_profile,
1142
+ include_reasoning_trace: req.include_reasoning_trace,
1143
+ reasoning_effort: req.reasoning_effort,
1144
+ quality: req.quality,
1145
+ min_criteria: req.min_criteria,
1146
+ target_choices: req.target_choices,
1147
+ split_ratio: req.split_ratio,
1148
+ negative_ratio: req.negative_ratio,
1149
+ multiplicator: req.multiplicator,
1150
+ use_meta_felix: req.use_meta_felix,
1151
+ type: req.type,
1152
+ visibility: req.visibility,
1153
+ classified_examples: req.classified_examples,
1019
1154
  });
1020
1155
  }
1021
1156
 
@@ -1029,6 +1164,7 @@ export interface LabelExistingFieldsRequest {
1029
1164
  constraints?: ConstraintRequest[];
1030
1165
  save_dataset?: boolean;
1031
1166
  dataset_name?: string;
1167
+ project_id?: string;
1032
1168
  }
1033
1169
 
1034
1170
  export async function labelExistingFields(
@@ -1047,6 +1183,52 @@ export async function inferFields(
1047
1183
  return request<InferFieldsResponse>("POST", "/generate/fields/infer-fields", req);
1048
1184
  }
1049
1185
 
1186
+ export interface ImprovePromptRequest {
1187
+ prompt: string;
1188
+ data_type?: string;
1189
+ }
1190
+
1191
+ export interface ImprovePromptResponse {
1192
+ success: boolean;
1193
+ improved_prompt: string;
1194
+ summary: string;
1195
+ }
1196
+
1197
+ export async function improvePrompt(
1198
+ req: ImprovePromptRequest
1199
+ ): Promise<ApiResult<ImprovePromptResponse>> {
1200
+ return request<ImprovePromptResponse>("POST", "/generate/improve-prompt", req);
1201
+ }
1202
+
1203
+ export interface InferAdvancedRequest {
1204
+ prompt: string;
1205
+ data_type?: string;
1206
+ labels?: string[];
1207
+ }
1208
+
1209
+ export interface InferredConstraint {
1210
+ description: string;
1211
+ choices?: string[];
1212
+ probability?: number;
1213
+ }
1214
+
1215
+ export interface InferredMultiplicator {
1216
+ prompt: string;
1217
+ choices: string[];
1218
+ }
1219
+
1220
+ export interface InferAdvancedResponse {
1221
+ success: boolean;
1222
+ constraints: InferredConstraint[];
1223
+ multiplicator?: InferredMultiplicator;
1224
+ }
1225
+
1226
+ export async function inferAdvanced(
1227
+ req: InferAdvancedRequest
1228
+ ): Promise<ApiResult<InferAdvancedResponse>> {
1229
+ return request<InferAdvancedResponse>("POST", "/generate/infer-advanced", req);
1230
+ }
1231
+
1050
1232
  // ─────────────────────────────────────────────────────────────────────────────
1051
1233
  // Training Jobs
1052
1234
  // ─────────────────────────────────────────────────────────────────────────────
@@ -1341,12 +1523,14 @@ export async function listTrainedModels(): Promise<
1341
1523
  export interface AllModelsResponse {
1342
1524
  deployed: DeployedModel[];
1343
1525
  trained: TrainedModel[];
1526
+ projects: ProjectResponse[];
1344
1527
  }
1345
1528
 
1346
1529
  export async function listAllModels(): Promise<ApiResult<AllModelsResponse>> {
1347
- const [deployedResult, trainedResult] = await Promise.all([
1530
+ const [deployedResult, trainedResult, projectsResult] = await Promise.all([
1348
1531
  listModels(),
1349
1532
  listTrainedModels(),
1533
+ listProjects(),
1350
1534
  ]);
1351
1535
 
1352
1536
  if (!deployedResult.ok) {
@@ -1355,6 +1539,9 @@ export async function listAllModels(): Promise<ApiResult<AllModelsResponse>> {
1355
1539
  if (!trainedResult.ok) {
1356
1540
  return { ok: false, status: trainedResult.status, error: trainedResult.error };
1357
1541
  }
1542
+ if (!projectsResult.ok) {
1543
+ return { ok: false, status: projectsResult.status, error: projectsResult.error };
1544
+ }
1358
1545
 
1359
1546
  return {
1360
1547
  ok: true,
@@ -1362,6 +1549,7 @@ export async function listAllModels(): Promise<ApiResult<AllModelsResponse>> {
1362
1549
  data: {
1363
1550
  deployed: deployedResult.data?.models ?? [],
1364
1551
  trained: trainedResult.data?.training_jobs ?? [],
1552
+ projects: projectsResult.data?.projects ?? [],
1365
1553
  },
1366
1554
  };
1367
1555
  }
@@ -1397,21 +1585,34 @@ export interface EncoderInferenceRequest {
1397
1585
  include_confidence?: boolean;
1398
1586
  include_spans?: boolean;
1399
1587
  format_results?: boolean;
1588
+ is_warmup?: boolean;
1589
+ store?: boolean;
1590
+ project_id?: string;
1591
+ }
1592
+
1593
+ export interface InferenceMessage {
1594
+ role: "system" | "user" | "assistant";
1595
+ content: string;
1400
1596
  }
1401
1597
 
1402
1598
  export interface GenerateInferenceRequest {
1403
1599
  model_id: string;
1404
1600
  task: "generate";
1405
- messages: Array<{ role: "system" | "user" | "assistant"; content: string }>;
1601
+ messages: InferenceMessage[];
1406
1602
  max_tokens?: number;
1407
1603
  temperature?: number;
1408
1604
  top_p?: number;
1605
+ include_reasoning_trace?: boolean;
1606
+ is_warmup?: boolean;
1607
+ store?: boolean;
1608
+ project_id?: string;
1409
1609
  }
1410
1610
 
1411
1611
  export type InferenceRequest = EncoderInferenceRequest | GenerateInferenceRequest;
1412
1612
 
1413
1613
  export interface EncoderInferenceResponse {
1414
1614
  type: "encoder";
1615
+ inference_id: string;
1415
1616
  result: Record<string, unknown> | unknown[];
1416
1617
  model_id: string;
1417
1618
  latency_ms: number;
@@ -1421,7 +1622,9 @@ export interface EncoderInferenceResponse {
1421
1622
 
1422
1623
  export interface GenerateInferenceResponse {
1423
1624
  type: "decoder";
1625
+ inference_id: string;
1424
1626
  completion: string;
1627
+ reasoning_trace?: string | null;
1425
1628
  model_id: string;
1426
1629
  latency_ms: number;
1427
1630
  }
@@ -1434,11 +1637,52 @@ export async function runInference(
1434
1637
  return request<InferenceResponse>("POST", "/inference", req);
1435
1638
  }
1436
1639
 
1640
+ export interface TextCompletionRequest {
1641
+ model: string;
1642
+ prompt: string;
1643
+ temperature?: number;
1644
+ max_tokens?: number;
1645
+ stop?: string[] | string;
1646
+ echo?: boolean;
1647
+ extra_body?: Record<string, unknown>;
1648
+ }
1649
+
1650
+ export interface TextCompletionUsage {
1651
+ prompt_tokens: number;
1652
+ completion_tokens: number;
1653
+ total_tokens: number;
1654
+ }
1655
+
1656
+ export interface TextCompletionChoice {
1657
+ index?: number;
1658
+ text: string;
1659
+ finish_reason?: string | null;
1660
+ }
1661
+
1662
+ export interface TextCompletionResponse {
1663
+ id?: string;
1664
+ object?: string;
1665
+ created?: number;
1666
+ model: string;
1667
+ choices: TextCompletionChoice[];
1668
+ usage: TextCompletionUsage;
1669
+ }
1670
+
1671
+ export async function runTextCompletion(
1672
+ req: TextCompletionRequest
1673
+ ): Promise<ApiResult<TextCompletionResponse>> {
1674
+ return request<TextCompletionResponse>("POST", "/v1/completions", req);
1675
+ }
1676
+
1437
1677
  export interface BaseModelInfo {
1438
1678
  id: string;
1439
1679
  name?: string;
1440
1680
  label?: string;
1441
- type: "encoder" | "decoder" | string;
1681
+ type?: "encoder" | "decoder" | string;
1682
+ task_type?: string;
1683
+ supports_inference?: boolean;
1684
+ supports_on_demand_inference?: boolean;
1685
+ supports_training?: boolean;
1442
1686
  description?: string;
1443
1687
  }
1444
1688
 
@@ -1449,42 +1693,325 @@ export interface BaseModelsResponse {
1449
1693
  export async function listBaseModels(): Promise<
1450
1694
  ApiResult<BaseModelsResponse | BaseModelInfo[]>
1451
1695
  > {
1452
- return request<BaseModelsResponse | BaseModelInfo[]>("GET", "/inference/base-models");
1696
+ return request<BaseModelsResponse | BaseModelInfo[]>("GET", "/base-models");
1453
1697
  }
1454
1698
 
1455
1699
  // ─────────────────────────────────────────────────────────────────────────────
1456
- // Adaptive Fine-tuning
1700
+ // Agent chat
1457
1701
  // ─────────────────────────────────────────────────────────────────────────────
1458
1702
 
1459
- export interface AdaptiveFinetuningHistoryItem {
1703
+ export interface AgentChatHistoryItem {
1460
1704
  role: "user" | "assistant" | "system" | string;
1461
1705
  content: string;
1462
1706
  }
1463
1707
 
1464
- export interface AdaptiveFinetuningQueryFilters {
1465
- start_time?: string;
1466
- end_time?: string;
1708
+ export interface ChatSessionMessage {
1709
+ id?: string;
1710
+ session_id?: string;
1711
+ role: string;
1712
+ content: string;
1713
+ message_index?: number;
1714
+ created_at?: string;
1715
+ tool_call_id?: string | null;
1716
+ tool_calls?: Array<{
1717
+ id?: string;
1718
+ name?: string;
1719
+ args?: Record<string, unknown>;
1720
+ }> | null;
1721
+ images?: Array<Record<string, unknown>> | null;
1722
+ }
1723
+
1724
+ export interface ChatSessionResponse {
1725
+ id: string;
1726
+ user_id: string;
1727
+ title: string;
1728
+ created_at: string;
1729
+ updated_at: string;
1730
+ is_archived: boolean;
1731
+ project_id: string | null;
1732
+ modal_sandbox_id: string | null;
1733
+ raw_message_tree_present?: boolean;
1734
+ messages: ChatSessionMessage[];
1735
+ }
1736
+
1737
+ export interface CreateChatSessionRequest {
1738
+ first_message?: string;
1739
+ title?: string;
1467
1740
  project_id?: string;
1468
- model_id?: string;
1469
1741
  }
1470
1742
 
1471
- export interface AdaptiveFinetuningChatRequest {
1743
+ export interface UpdateChatSessionRequest {
1744
+ title?: string;
1745
+ is_archived?: boolean;
1746
+ }
1747
+
1748
+ export interface AppendSessionMessagesRequest {
1749
+ messages: Array<{
1750
+ role: string;
1751
+ content: string;
1752
+ tool_call_id?: string;
1753
+ tool_calls?: Array<{
1754
+ id?: string;
1755
+ name?: string;
1756
+ args?: Record<string, unknown>;
1757
+ [key: string]: unknown;
1758
+ }>;
1759
+ }>;
1760
+ }
1761
+
1762
+ export interface ChatSessionListResponse {
1763
+ success: boolean;
1764
+ sessions: Array<Pick<ChatSessionResponse, "id" | "user_id" | "title" | "created_at" | "updated_at" | "is_archived">>;
1765
+ }
1766
+
1767
+ export interface AgentChatRequest {
1472
1768
  message: string;
1473
1769
  conversation_id?: string;
1474
- history?: AdaptiveFinetuningHistoryItem[];
1475
- filters?: AdaptiveFinetuningQueryFilters;
1770
+ history?: AgentChatHistoryItem[];
1771
+ resume_value?: string;
1476
1772
  }
1477
1773
 
1478
- export interface AdaptiveFinetuningChatResponse {
1774
+ export interface AgentChatResponse {
1479
1775
  answer: string;
1480
1776
  conversation_id: string;
1481
1777
  tool_calls_made?: number;
1482
1778
  }
1483
1779
 
1484
- export async function adaptiveFinetuningChat(
1485
- req: AdaptiveFinetuningChatRequest
1486
- ): Promise<ApiResult<AdaptiveFinetuningChatResponse>> {
1487
- return request<AdaptiveFinetuningChatResponse>("POST", "/adaptive-finetuning/chat", req);
1780
+ export interface AutoAgentRun {
1781
+ agent_run_id: string;
1782
+ status: string;
1783
+ created_at?: string;
1784
+ updated_at?: string;
1785
+ project_id?: string;
1786
+ model_id?: string;
1787
+ result_url?: string;
1788
+ run_type?: string;
1789
+ [key: string]: unknown;
1790
+ }
1791
+
1792
+ export async function agentChat(
1793
+ req: AgentChatRequest
1794
+ ): Promise<ApiResult<AgentChatResponse>> {
1795
+ const baseUrl = getBaseUrl().replace(/\/$/, "");
1796
+ const apiKey = getApiKey();
1797
+ const url = `${baseUrl}/auto-agent/clarify`;
1798
+ const headers: Record<string, string> = {
1799
+ "Content-Type": "application/json",
1800
+ "User-Agent": "pioneer-cli/0.1.0",
1801
+ };
1802
+
1803
+ if (apiKey) {
1804
+ headers["X-API-Key"] = apiKey;
1805
+ }
1806
+
1807
+ try {
1808
+ const res = await fetch(url, {
1809
+ method: "POST",
1810
+ headers,
1811
+ body: JSON.stringify(req),
1812
+ });
1813
+
1814
+ const text = await res.text();
1815
+
1816
+ let parsedJson: unknown;
1817
+ try {
1818
+ parsedJson = JSON.parse(text);
1819
+ } catch {
1820
+ // Ignore; some responses may be SSE/event text or empty.
1821
+ }
1822
+
1823
+ const parseAgentStream = (raw: string): AgentChatResponse | undefined => {
1824
+ const direct = parseAgentPayload(raw);
1825
+ if (direct) return direct;
1826
+
1827
+ const events = raw
1828
+ .split("\n")
1829
+ .map((line) => line.trim())
1830
+ .filter((line) => line.startsWith("data:"));
1831
+ const fallback: AgentChatResponse = {
1832
+ answer: "",
1833
+ conversation_id: "",
1834
+ };
1835
+
1836
+ for (const eventLine of events) {
1837
+ const payload = eventLine.slice(5).trim();
1838
+ if (!payload) continue;
1839
+
1840
+ let event: Record<string, unknown> | undefined;
1841
+ try {
1842
+ event = JSON.parse(payload);
1843
+ } catch {
1844
+ continue;
1845
+ }
1846
+ if (typeof event === "object" && event) {
1847
+ const type = typeof event.type === "string" ? event.type : undefined;
1848
+
1849
+ if (type === "delta" && typeof event.content === "string") {
1850
+ fallback.answer += event.content;
1851
+ }
1852
+
1853
+ if (typeof event.content === "string" && !fallback.answer) {
1854
+ fallback.answer = event.content;
1855
+ }
1856
+
1857
+ if (typeof event.answer === "string") {
1858
+ fallback.answer = event.answer;
1859
+ }
1860
+
1861
+ if (typeof event.conversation_id === "string") {
1862
+ fallback.conversation_id = event.conversation_id;
1863
+ }
1864
+
1865
+ if (typeof event.tool_calls_made === "number") {
1866
+ fallback.tool_calls_made = event.tool_calls_made;
1867
+ }
1868
+
1869
+ if (type === "complete") {
1870
+ if (typeof event.conversation_id === "string") {
1871
+ fallback.conversation_id = event.conversation_id;
1872
+ }
1873
+ if (typeof event.answer === "string") {
1874
+ fallback.answer = event.answer;
1875
+ }
1876
+ }
1877
+ }
1878
+ }
1879
+
1880
+ if (fallback.answer || fallback.conversation_id) {
1881
+ return fallback;
1882
+ }
1883
+ return undefined;
1884
+ };
1885
+
1886
+ const parseAgentPayload = (raw: string): AgentChatResponse | undefined => {
1887
+ if (!raw.trim()) {
1888
+ return undefined;
1889
+ }
1890
+ if (typeof parsedJson !== "object" || parsedJson === null) {
1891
+ return undefined;
1892
+ }
1893
+
1894
+ const payload = parsedJson as Record<string, unknown>;
1895
+ const answer = typeof payload.answer === "string" ? payload.answer : "";
1896
+ const conversationId =
1897
+ typeof payload.answer === "string" && typeof payload.conversation_id === "string"
1898
+ ? payload.conversation_id
1899
+ : typeof payload.conversation_id === "string"
1900
+ ? payload.conversation_id
1901
+ : "";
1902
+
1903
+ if (!answer && !conversationId && typeof payload.detail !== "string") {
1904
+ return undefined;
1905
+ }
1906
+
1907
+ return {
1908
+ answer,
1909
+ conversation_id: conversationId,
1910
+ ...(typeof payload.tool_calls_made === "number"
1911
+ ? { tool_calls_made: payload.tool_calls_made }
1912
+ : {}),
1913
+ };
1914
+ };
1915
+
1916
+ const parsedResponse = parseAgentStream(text);
1917
+
1918
+ if (!res.ok) {
1919
+ const rawError = parsedJson ? JSON.stringify(parsedJson) : text || `HTTP ${res.status}`;
1920
+ const extractedError = extractErrorMessage(parsedJson) || text || `HTTP ${res.status}`;
1921
+ if (isAuthError(res.status, rawError)) {
1922
+ return {
1923
+ ok: false,
1924
+ status: res.status,
1925
+ error: formatAuthError(extractedError),
1926
+ };
1927
+ }
1928
+ return {
1929
+ ok: false,
1930
+ status: res.status,
1931
+ error: extractedError,
1932
+ };
1933
+ }
1934
+
1935
+ if (parsedResponse) {
1936
+ return { ok: true, status: res.status, data: parsedResponse };
1937
+ }
1938
+
1939
+ return {
1940
+ ok: true,
1941
+ status: res.status,
1942
+ data: {
1943
+ answer: text || "",
1944
+ conversation_id: "",
1945
+ },
1946
+ };
1947
+ } catch (err) {
1948
+ return {
1949
+ ok: false,
1950
+ status: 0,
1951
+ error: err instanceof Error ? err.message : String(err),
1952
+ };
1953
+ }
1954
+ }
1955
+
1956
+ // Agent session persistence APIs (Pioneer MLE agent)
1957
+ export async function createAgentSession(
1958
+ req: CreateChatSessionRequest
1959
+ ): Promise<ApiResult<ChatSessionResponse>> {
1960
+ return request<ChatSessionResponse>("POST", "/mle-agent/sessions", req);
1961
+ }
1962
+
1963
+ export async function getAgentSession(
1964
+ sessionId: string
1965
+ ): Promise<ApiResult<ChatSessionResponse>> {
1966
+ return request<ChatSessionResponse>("GET", `/mle-agent/sessions/${sessionId}`);
1967
+ }
1968
+
1969
+ export async function listAgentSessions(): Promise<ApiResult<ChatSessionListResponse>> {
1970
+ return request<ChatSessionListResponse>("GET", "/mle-agent/sessions");
1971
+ }
1972
+
1973
+ export async function appendSessionMessages(
1974
+ sessionId: string,
1975
+ req: AppendSessionMessagesRequest
1976
+ ): Promise<ApiResult<{ success: boolean; session_id: string; messages_added: number }>> {
1977
+ return request<{ success: boolean; session_id: string; messages_added: number }>(
1978
+ "POST",
1979
+ `/mle-agent/sessions/${sessionId}/messages`,
1980
+ req
1981
+ );
1982
+ }
1983
+
1984
+ export async function updateAgentSession(
1985
+ sessionId: string,
1986
+ req: UpdateChatSessionRequest
1987
+ ): Promise<ApiResult<ChatSessionResponse>> {
1988
+ return request<ChatSessionResponse>("PATCH", `/mle-agent/sessions/${sessionId}`, req);
1989
+ }
1990
+
1991
+ export async function deleteAgentSession(sessionId: string): Promise<ApiResult<{ success: boolean }>> {
1992
+ return request<{ success: boolean }>("DELETE", `/mle-agent/sessions/${sessionId}`);
1993
+ }
1994
+
1995
+ export async function listAutoAgentRuns(
1996
+ limit = 20,
1997
+ offset = 0
1998
+ ): Promise<ApiResult<{ runs: AutoAgentRun[] }>> {
1999
+ return request<{ runs: AutoAgentRun[] }>(
2000
+ "GET",
2001
+ `/auto-agent/runs?limit=${limit}&offset=${offset}`
2002
+ );
2003
+ }
2004
+
2005
+ export async function getAutoAgentRun(
2006
+ runId: string
2007
+ ): Promise<ApiResult<AutoAgentRun>> {
2008
+ return request<AutoAgentRun>("GET", `/auto-agent/run/${runId}`);
2009
+ }
2010
+
2011
+ export async function stopAutoAgentRun(
2012
+ runId: string
2013
+ ): Promise<ApiResult<{ success: boolean }>> {
2014
+ return request<{ success: boolean }>("POST", `/auto-agent/run/${runId}/stop`);
1488
2015
  }
1489
2016
 
1490
2017
  // ─────────────────────────────────────────────────────────────────────────────
@@ -1496,15 +2023,22 @@ export interface GenerateDecoderRequest {
1496
2023
  instruction?: string;
1497
2024
  num_examples?: number;
1498
2025
  temperature?: number;
2026
+ generation_profile?: "auto" | "fast" | "balanced" | "quality";
1499
2027
  quality?: "light" | "medium" | "heavy";
2028
+ include_reasoning_trace?: boolean;
2029
+ reasoning_effort?: "low" | "medium" | "high";
1500
2030
  constraints?: ConstraintRequest[];
1501
2031
  save_dataset?: boolean;
1502
2032
  dataset_name?: string;
1503
2033
  project_id?: string;
2034
+ use_meta_felix?: boolean;
1504
2035
  session_id?: string;
1505
2036
  config_num_examples?: number;
1506
2037
  min_criteria?: number;
1507
2038
  target_choices?: number;
2039
+ split_ratio?: Record<string, number>;
2040
+ negative_ratio?: number;
2041
+ multiplicator?: Record<string, unknown>;
1508
2042
  type?: "training" | "evaluation" | "split";
1509
2043
  visibility?: "private" | "public";
1510
2044
  }
@@ -1519,13 +2053,20 @@ export async function generateDecoder(
1519
2053
  instruction: req.instruction,
1520
2054
  num_examples: req.num_examples,
1521
2055
  temperature: req.temperature,
2056
+ generation_profile: req.generation_profile,
1522
2057
  quality: req.quality,
1523
2058
  constraints: req.constraints,
1524
2059
  project_id: req.project_id,
2060
+ include_reasoning_trace: req.include_reasoning_trace,
2061
+ reasoning_effort: req.reasoning_effort,
2062
+ use_meta_felix: req.use_meta_felix,
1525
2063
  session_id: req.session_id,
1526
2064
  config_num_examples: req.config_num_examples,
1527
2065
  min_criteria: req.min_criteria,
1528
2066
  target_choices: req.target_choices,
2067
+ split_ratio: req.split_ratio,
2068
+ negative_ratio: req.negative_ratio,
2069
+ multiplicator: req.multiplicator,
1529
2070
  type: req.type,
1530
2071
  visibility: req.visibility,
1531
2072
  });
@@ -1535,6 +2076,23 @@ export async function generateDecoder(
1535
2076
  // Evaluations
1536
2077
  // ─────────────────────────────────────────────────────────────────────────────
1537
2078
 
2079
+ export interface BaselineModel {
2080
+ id: string;
2081
+ name: string;
2082
+ provider: string;
2083
+ description: string;
2084
+ }
2085
+
2086
+ export interface BaselineModelsResponse {
2087
+ success: boolean;
2088
+ models: BaselineModel[];
2089
+ count: number;
2090
+ }
2091
+
2092
+ export async function listBaselineModels(): Promise<ApiResult<BaselineModelsResponse>> {
2093
+ return request<BaselineModelsResponse>("GET", "/felix/baseline-models");
2094
+ }
2095
+
1538
2096
  export interface Evaluation {
1539
2097
  id: string;
1540
2098
  user_id: string;
@@ -1640,6 +2198,37 @@ export async function getDatasetEvaluations(
1640
2198
  );
1641
2199
  }
1642
2200
 
2201
/** Result of deleting an evaluation: success flag plus a human-readable message. */
export interface DeleteEvaluationResponse {
  success: boolean;
  message: string;
}
2205
+
2206
+ export async function deleteEvaluation(
2207
+ evaluationId: string
2208
+ ): Promise<ApiResult<DeleteEvaluationResponse>> {
2209
+ return request<DeleteEvaluationResponse>("DELETE", `/felix/evaluations/${evaluationId}`);
2210
+ }
2211
+
2212
/** Request to (re)assign an evaluation to a project. */
export interface UpdateEvaluationProjectRequest {
  evaluation_id: string;
  /** Target project; NOTE(review): omitting it presumably clears the association — confirm. */
  project_id?: string;
}

/** Result of the project-assignment PATCH. */
export interface UpdateEvaluationProjectResponse {
  success: boolean;
  message: string;
}
2221
+
2222
+ export async function updateEvaluationProject(
2223
+ req: UpdateEvaluationProjectRequest
2224
+ ): Promise<ApiResult<UpdateEvaluationProjectResponse>> {
2225
+ return request<UpdateEvaluationProjectResponse>(
2226
+ "PATCH",
2227
+ `/felix/evaluations/${req.evaluation_id}/project`,
2228
+ { project_id: req.project_id }
2229
+ );
2230
+ }
2231
+
1643
2232
  // ─────────────────────────────────────────────────────────────────────────────
1644
2233
  // Benchmarks
1645
2234
  // ─────────────────────────────────────────────────────────────────────────────
@@ -1759,6 +2348,26 @@ export async function scanForPHD(
1759
2348
  });
1760
2349
  }
1761
2350
 
2351
/** Request to dismiss one flagged outlier row, identified by its fingerprint. */
export interface DataEditingDismissOutlierRequest {
  /** Dataset the outlier belongs to (flattened to name/version on the wire). */
  dataset: DatasetRef;
  /** Stable identifier of the flagged row — presumably a content hash; confirm. */
  fingerprint: string;
}

/** Result of the outlier-dismiss call. */
export interface DataEditingDismissOutlierResponse {
  success: boolean;
}
2359
+
2360
+ export async function dismissOutlier(
2361
+ req: DataEditingDismissOutlierRequest
2362
+ ): Promise<ApiResult<DataEditingDismissOutlierResponse>> {
2363
+ const { dataset, ...rest } = req;
2364
+ return request<DataEditingDismissOutlierResponse>("POST", "/felix/dataset/outliers/dismiss", {
2365
+ ...rest,
2366
+ dataset_name: dataset.name,
2367
+ dataset_version: dataset.version,
2368
+ });
2369
+ }
2370
+
1762
2371
  export interface DataEditingRemoveRequest {
1763
2372
  dataset: DatasetRef;
1764
2373
  findings: PIIFinding[];
@@ -2163,10 +2772,11 @@ export async function listActivity(
2163
2772
  // ─────────────────────────────────────────────────────────────────────────────
2164
2773
 
2165
2774
/**
 * Payload for creating a project. Every field is optional; when name is
 * omitted the backend presumably generates one — TODO confirm.
 */
export interface ProjectCreateRequest {
  name?: string;
  icon?: string;
  repo?: string;
  description?: string;
  active_model_id?: string;
  selected_model_id?: string;
  /** Free-form example payload — NOTE(review): schema is backend-defined; verify. */
  example?: Record<string, unknown>;
}
@@ -2204,6 +2814,19 @@ export interface ProjectDeleteResponse {
2204
2814
  project_id: string;
2205
2815
  }
2206
2816
 
2817
/** Request to deploy a finished training job into a project. */
export interface ProjectDeploymentCreate {
  training_job_id: string;
  /** Optional free-text reason recorded with the deployment. */
  reason?: string;
}

/** Result of a deployment (or rollback) operation on a project. */
export interface ProjectDeploymentResponse {
  success: boolean;
  message?: string;
  project_id?: string;
  training_job_id?: string;
  deployment_id?: string;
}
2829
+
2207
2830
  export interface ProjectDatasetCountResponse {
2208
2831
  project_id: string;
2209
2832
  dataset_count: number;
@@ -2265,6 +2888,27 @@ export async function getProjectQualityMetrics(
2265
2888
  );
2266
2889
  }
2267
2890
 
2891
+ export async function deployTrainingJobToProject(
2892
+ projectId: string,
2893
+ req: ProjectDeploymentCreate
2894
+ ): Promise<ApiResult<ProjectDeploymentResponse>> {
2895
+ return request<ProjectDeploymentResponse>(
2896
+ "POST",
2897
+ `/projects/${projectId}/deployments`,
2898
+ req
2899
+ );
2900
+ }
2901
+
2902
+ export async function rollbackProjectDeployment(
2903
+ projectId: string,
2904
+ deploymentId: string
2905
+ ): Promise<ApiResult<ProjectDeploymentResponse>> {
2906
+ return request<ProjectDeploymentResponse>(
2907
+ "POST",
2908
+ `/projects/${projectId}/deployments/${deploymentId}/rollback`
2909
+ );
2910
+ }
2911
+
2268
2912
  // ─────────────────────────────────────────────────────────────────────────────
2269
2913
  // Presets
2270
2914
  // ─────────────────────────────────────────────────────────────────────────────
@@ -2440,205 +3084,3 @@ export async function endAnnotationSession(
2440
3084
  );
2441
3085
  }
2442
3086
 
2443
- // ─────────────────────────────────────────────────────────────────────────────
2444
- // Leaderboard / Competitions
2445
- // ─────────────────────────────────────────────────────────────────────────────
2446
-
2447
/** One scored submission on a competition leaderboard. */
export interface LeaderboardEntry {
  id: string;
  dataset_id: string;
  evaluation_id: string;
  user_id: string;
  display_name: string;
  model_name: string;
  model_id?: string;
  f1_score: number;
  precision_score?: number;
  recall_score?: number;
  accuracy?: number;
  created_at: string;
  updated_at: string;
  /** Position on the board; presumably 1-based — confirm with backend. */
  rank?: number;
}

/** Summary of a competition built around one dataset. */
export interface CompetitionInfo {
  dataset_id: string;
  dataset_name: string;
  dataset_type: string;
  description?: string;
  sample_count?: number;
  labels?: string[];
  sample_rows?: Record<string, unknown>[];
  /** Current top entry, if any submissions exist. */
  winner?: LeaderboardEntry;
  total_entries: number;
}

/** Envelope for the competitions listing endpoint. */
export interface CompetitionsResponse {
  success: boolean;
  competitions: CompetitionInfo[];
}

/** Sample rows for one competition dataset. */
export interface CompetitionSamplesResponse {
  dataset_id: string;
  sample_rows: Record<string, unknown>[];
}

/** Envelope for a dataset's leaderboard entries. */
export interface LeaderboardEntriesResponse {
  success: boolean;
  dataset_id: string;
  dataset_name: string;
  entries: LeaderboardEntry[];
  total_entries: number;
}

/** Payload submitted to enter an evaluation onto a leaderboard. */
export interface LeaderboardSubmission {
  evaluation_id: string;
  display_name: string;
}

/** Result of a leaderboard submission. */
export interface LeaderboardSubmitResponse {
  success: boolean;
  entry: LeaderboardEntry;
  rank: number;
  /** True when this submission beat the user's previous best — presumably; confirm. */
  is_new_best: boolean;
}
2505
-
2506
- export async function listCompetitions(): Promise<ApiResult<CompetitionsResponse>> {
2507
- return request<CompetitionsResponse>("GET", "/leaderboard/competitions");
2508
- }
2509
-
2510
- export async function getCompetitionSamples(
2511
- datasetId: string
2512
- ): Promise<ApiResult<CompetitionSamplesResponse>> {
2513
- return request<CompetitionSamplesResponse>(
2514
- "GET",
2515
- `/leaderboard/competitions/${datasetId}/samples`
2516
- );
2517
- }
2518
-
2519
- export async function getLeaderboardEntries(
2520
- datasetId: string,
2521
- limit?: number
2522
- ): Promise<ApiResult<LeaderboardEntriesResponse>> {
2523
- const params = new URLSearchParams();
2524
- if (limit) params.set("limit", String(limit));
2525
- const query = params.toString();
2526
- const url = `/leaderboard/datasets/${datasetId}/entries${query ? `?${query}` : ""}`;
2527
- return request<LeaderboardEntriesResponse>("GET", url);
2528
- }
2529
-
2530
- export async function submitToLeaderboard(
2531
- datasetId: string,
2532
- submission: LeaderboardSubmission
2533
- ): Promise<ApiResult<LeaderboardSubmitResponse>> {
2534
- return request<LeaderboardSubmitResponse>(
2535
- "POST",
2536
- `/leaderboard/datasets/${datasetId}/submit`,
2537
- submission
2538
- );
2539
- }
2540
-
2541
- // ─────────────────────────────────────────────────────────────────────────────
2542
- // Notebook Execution
2543
- // ─────────────────────────────────────────────────────────────────────────────
2544
-
2545
/** Options for spinning up a remote notebook execution session. */
export interface CreateNotebookSessionRequest {
  /** GPU type requested — NOTE(review): accepted values are backend-defined; confirm. */
  gpu?: string;
  load_felix_helpers?: boolean;
  /** Idle timeout in seconds after which the session auto-terminates. */
  auto_terminate_seconds?: number;
  tags?: string[];
}

/** Result of a session-creation request. */
export interface CreateNotebookSessionResponse {
  session_id: string;
  gpu: string;
  message: string;
  status: "creating" | "ready";
}

/** Polling result for an in-flight session. */
export interface SessionStatusResponse {
  session_id: string;
  status: "creating" | "ready" | "failed";
  error?: string;
}

/** One output item from executed code (stream text, rich data, or an error). */
export interface ExecuteOutput {
  type: string;
  name?: string;
  text?: string;
  /** MIME-type keyed payloads — presumably Jupyter-style display data; confirm. */
  data?: Record<string, string>;
  ename?: string;
  evalue?: string;
  traceback?: string[];
}

/** Request to execute a code cell, optionally inside an existing session. */
export interface ExecuteCodeRequest {
  code: string;
  session_id?: string;
  cell_id?: string;
  include_variables?: boolean;
  gpu?: string;
  load_felix_helpers?: boolean;
}

/** Full result of a code execution. */
export interface ExecuteCodeResponse {
  success: boolean;
  outputs: ExecuteOutput[];
  variables: Record<string, unknown>;
  execution_time_ms: number;
  gpu_used?: string;
  cell_id?: string;
  session_id: string;
  error?: string;
  /** True when a new session was created implicitly for this call. */
  session_created?: boolean;
}

/** Metadata for one live notebook session. */
export interface NotebookSessionInfo {
  session_id: string;
  gpu: string;
  created_at: string;
  last_activity: string;
  status: string;
}

/** Envelope for the session-listing endpoint. */
export interface ListSessionsResponse {
  sessions: NotebookSessionInfo[];
}
2607
-
2608
- export async function createNotebookSession(
2609
- req?: CreateNotebookSessionRequest
2610
- ): Promise<ApiResult<CreateNotebookSessionResponse>> {
2611
- return request<CreateNotebookSessionResponse>(
2612
- "POST",
2613
- "/felix/notebook/sessions",
2614
- req ?? {}
2615
- );
2616
- }
2617
-
2618
- export async function getNotebookSessionStatus(
2619
- sessionId: string
2620
- ): Promise<ApiResult<SessionStatusResponse>> {
2621
- return request<SessionStatusResponse>(
2622
- "GET",
2623
- `/felix/notebook/sessions/${sessionId}/status`
2624
- );
2625
- }
2626
-
2627
- export async function listNotebookSessions(): Promise<ApiResult<ListSessionsResponse>> {
2628
- return request<ListSessionsResponse>("GET", "/felix/notebook/sessions");
2629
- }
2630
-
2631
- export async function executeNotebookCode(
2632
- req: ExecuteCodeRequest
2633
- ): Promise<ApiResult<ExecuteCodeResponse>> {
2634
- return request<ExecuteCodeResponse>("POST", "/felix/notebook/execute", req);
2635
- }
2636
-
2637
- export async function terminateNotebookSession(
2638
- sessionId: string
2639
- ): Promise<ApiResult<{ message: string }>> {
2640
- return request<{ message: string }>(
2641
- "DELETE",
2642
- `/felix/notebook/sessions/${sessionId}`
2643
- );
2644
- }