@eve-horizon/cli 0.2.31 → 0.2.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +50 -1
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -56819,6 +56819,9 @@ var EnvironmentListResponseSchema = external_exports.object({
56819
56819
  data: external_exports.array(EnvironmentResponseSchema),
56820
56820
  pagination: PaginationSchema
56821
56821
  });
56822
+ var DeleteEnvironmentRequestSchema = external_exports.object({
56823
+ force: external_exports.boolean().optional()
56824
+ }).optional().default({});
56822
56825
  var EnvLogEntrySchema = external_exports.object({
56823
56826
  timestamp: external_exports.string(),
56824
56827
  line: external_exports.string(),
@@ -59680,6 +59683,38 @@ var InferenceRoutePolicySchema = external_exports.object({
59680
59683
  created_at: external_exports.string(),
59681
59684
  updated_at: external_exports.string()
59682
59685
  });
59686
+ var InferenceManagedModelSchema = external_exports.object({
59687
+ canonical_model_id: external_exports.string(),
59688
+ provider: external_exports.string(),
59689
+ provider_model_slug: external_exports.string(),
59690
+ target_id: external_exports.string(),
59691
+ requires_warm_start: external_exports.boolean().default(false),
59692
+ enabled: external_exports.boolean().default(true)
59693
+ });
59694
+ var InferenceManagedModelsSchema = external_exports.record(InferenceManagedModelSchema);
59695
+ var UpsertInferenceManagedModelRequestSchema = external_exports.object({
59696
+ canonical_model_id: external_exports.string(),
59697
+ provider: external_exports.string(),
59698
+ provider_model_slug: external_exports.string(),
59699
+ target_id: external_exports.string(),
59700
+ requires_warm_start: external_exports.boolean().optional().default(false),
59701
+ enabled: external_exports.boolean().optional().default(true)
59702
+ });
59703
+ var InferenceManagedModelResponseSchema = external_exports.object({
59704
+ canonical_model_id: external_exports.string(),
59705
+ provider: external_exports.string(),
59706
+ provider_model_slug: external_exports.string(),
59707
+ target_id: external_exports.string(),
59708
+ requires_warm_start: external_exports.boolean(),
59709
+ enabled: external_exports.boolean()
59710
+ });
59711
+ var InferenceManagedModelListResponseSchema = external_exports.object({
59712
+ data: external_exports.array(InferenceManagedModelResponseSchema)
59713
+ });
59714
+ var InferenceManagedModelUpsertResponseSchema = external_exports.object({
59715
+ data: InferenceManagedModelResponseSchema,
59716
+ created_install: external_exports.boolean()
59717
+ });
59683
59718
  var CreateInferenceTargetRequestSchema = external_exports.object({
59684
59719
  scope_kind: InferenceScopeKindSchema,
59685
59720
  scope_id: external_exports.string().optional(),
@@ -78070,6 +78105,15 @@ function asBoolean(flags, key) {
78070
78105
  function normalizeManagedModelRows(payload) {
78071
78106
  return Array.isArray(payload) ? payload : payload.data;
78072
78107
  }
78108
+ function looksLikeGatewayPath(baseUrl) {
78109
+ try {
78110
+ const parsed = new URL(baseUrl);
78111
+ const path6 = parsed.pathname.toLowerCase();
78112
+ return path6.includes("/inference/v1") || path6.endsWith("/v1");
78113
+ } catch {
78114
+ return false;
78115
+ }
78116
+ }
78073
78117
  function printTable(headers, rows) {
78074
78118
  const widths = headers.map((header, idx) => Math.max(header.length, ...rows.map((row) => row[idx]?.length ?? 0)));
78075
78119
  const line = `+${widths.map((w) => "-".repeat(w + 2)).join("+")}+`;
@@ -78482,6 +78526,11 @@ async function handleOllama(subcommand, positionals, flags, context2) {
78482
78526
  };
78483
78527
  const requiresWarmStart = asBoolean(flags, "requires-warm-start");
78484
78528
  const enabled = asBoolean(flags, "enabled");
78529
+ const targetId = body.target_id;
78530
+ const target = await requestJson(context2, `/inference/targets/${targetId}`);
78531
+ if (target.transport_profile === "openai_compat" && looksLikeGatewayPath(target.base_url)) {
78532
+ console.log("warning: openai_compat target base_url appears to include /v1-like gateway path.");
78533
+ }
78485
78534
  if (requiresWarmStart !== void 0) {
78486
78535
  body.requires_warm_start = requiresWarmStart;
78487
78536
  }
@@ -78495,7 +78544,7 @@ async function handleOllama(subcommand, positionals, flags, context2) {
78495
78544
  outputJson(result, json);
78496
78545
  if (!json) {
78497
78546
  console.log(
78498
- `Published managed model ${result.data.canonical_model_id} (install ${result.created_install ? "created" : "already present"}).`
78547
+ `Published managed model ${result.data.canonical_model_id} (install ${result.created_install ? "created" : "already present"}), target ${result.data.target_id}`
78499
78548
  );
78500
78549
  }
78501
78550
  return;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@eve-horizon/cli",
3
- "version": "0.2.31",
3
+ "version": "0.2.33",
4
4
  "description": "Eve Horizon CLI",
5
5
  "license": "MIT",
6
6
  "repository": {