@graphql-hive/gateway 2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e → 2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,6 +1,6 @@
  # @graphql-hive/gateway

- ## 2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
+ ## 2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790

  ### Major Changes

@@ -18,17 +18,17 @@
  - [#1141](https://github.com/graphql-hive/gateway/pull/1141) [`d8892f2`](https://github.com/graphql-hive/gateway/commit/d8892f2713388fcea37dfa74a8ae42294f07d362) Thanks [@EmrysMyrddin](https://github.com/EmrysMyrddin)! - **Security Update:** The Docker image have been updated to fix a CVE affecting `passwd` command. This CVE was not directly affecting Hive Gateway software, since it's not using impacted components.

  - Updated dependencies [[`ce6e879`](https://github.com/graphql-hive/gateway/commit/ce6e8793bed7390cfbf4a2cd0a66bd3e7eb30a1e), [`11cff4f`](https://github.com/graphql-hive/gateway/commit/11cff4f8ff28ca7d709b5b962029e17d5843110e), [`54beb7a`](https://github.com/graphql-hive/gateway/commit/54beb7acde7558eee81ec0e20c123717865b8e18), [`2640dbf`](https://github.com/graphql-hive/gateway/commit/2640dbf8c81819efe326bf3efead85be3b6cbea6), [`2adc7f1`](https://github.com/graphql-hive/gateway/commit/2adc7f14f282349831af926864c0ecb3f3e65d73), [`c84b528`](https://github.com/graphql-hive/gateway/commit/c84b52850a290c2741d5bd72464a06a1ab99d000), [`ce6e879`](https://github.com/graphql-hive/gateway/commit/ce6e8793bed7390cfbf4a2cd0a66bd3e7eb30a1e), [`54beb7a`](https://github.com/graphql-hive/gateway/commit/54beb7acde7558eee81ec0e20c123717865b8e18), [`a165a2f`](https://github.com/graphql-hive/gateway/commit/a165a2f73f5f04b3b7179792d54be448e17dca19), [`f4a5e1c`](https://github.com/graphql-hive/gateway/commit/f4a5e1c71c47666b44511cea1f9a19fa04680deb), [`14b38b2`](https://github.com/graphql-hive/gateway/commit/14b38b2b841b52de57207bcbc0e0b3748c24a3fa), [`3a4f65c`](https://github.com/graphql-hive/gateway/commit/3a4f65c861156a1e1ca6075d8796697cc3eb15db), [`ab6c98a`](https://github.com/graphql-hive/gateway/commit/ab6c98a3e386a023cb1735c0b58da429a9319078), [`11cff4f`](https://github.com/graphql-hive/gateway/commit/11cff4f8ff28ca7d709b5b962029e17d5843110e), [`c84b528`](https://github.com/graphql-hive/gateway/commit/c84b52850a290c2741d5bd72464a06a1ab99d000), [`e2b3a4d`](https://github.com/graphql-hive/gateway/commit/e2b3a4d4c9edee01152074f5f8a6224125ace83b)]:
- - @graphql-hive/gateway-runtime@2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-mesh/plugin-opentelemetry@2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-mesh/plugin-prometheus@2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-mesh/transport-http-callback@1.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-mesh/hmac-upstream-signature@2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-mesh/plugin-jwt-auth@2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-mesh/transport-ws@2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-hive/logger@1.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
- - @graphql-hive/plugin-aws-sigv4@1.0.11-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
+ - @graphql-hive/gateway-runtime@2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-mesh/plugin-opentelemetry@2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-mesh/plugin-prometheus@2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-mesh/transport-http-callback@1.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-mesh/hmac-upstream-signature@2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-mesh/plugin-jwt-auth@2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-mesh/transport-ws@2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-hive/logger@1.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
+ - @graphql-hive/plugin-aws-sigv4@1.0.11-alpha-dc75093c5467d8467068ec27a9e61fdab5001790
  - @graphql-hive/plugin-deduplicate-request@1.0.0
- - @graphql-mesh/transport-http@0.6.42-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e
+ - @graphql-mesh/transport-http@0.6.42-alpha-dc75093c5467d8467068ec27a9e61fdab5001790

  ## 1.14.2

package/dist/bin.cjs CHANGED
@@ -3,7 +3,7 @@
  require('dotenv/config');
  var module$1 = require('node:module');
  var logger = require('@graphql-hive/logger');
- var cli = require('./cli-_WmSliwb.cjs');
+ var cli = require('./cli-bwdDvr0W.cjs');
  require('node:cluster');
  require('node:os');
  require('node:path');
package/dist/bin.js CHANGED
@@ -2,7 +2,7 @@
  import 'dotenv/config';
  import module from 'node:module';
  import { Logger } from '@graphql-hive/logger';
- import { e as enableModuleCachingIfPossible, h as handleNodeWarnings, r as run } from './cli-BBlJHQNF.js';
+ import { e as enableModuleCachingIfPossible, h as handleNodeWarnings, r as run } from './cli-HmIj4heR.js';
  import 'node:cluster';
  import 'node:os';
  import 'node:path';
@@ -291,7 +291,7 @@ async function startBunServer(gwRuntime, opts) {
  };
  }
  const server = Bun.serve(serverOptions);
- opts.log.info("Listening on %s", server.url);
+ opts.log.info(`Listening on ${server.url}`);
  gwRuntime.disposableStack.use(server);
  }

@@ -358,7 +358,7 @@ async function startNodeHttpServer(gwRuntime, opts) {
  );
  }
  const url = `${protocol}://${host}:${port}`.replace("0.0.0.0", "localhost");
- log.debug("Starting server on %s", url);
+ log.debug(`Starting server on ${url}`);
  if (!disableWebsockets) {
  log.debug("Setting up WebSocket server");
  const { WebSocketServer } = await import('ws');
@@ -390,7 +390,7 @@ async function startNodeHttpServer(gwRuntime, opts) {
  return new Promise((resolve, reject) => {
  server.once("error", reject);
  server.listen(port, host, () => {
- log.info("Listening on %s", url);
+ log.info(`Listening on ${url}`);
  gwRuntime.disposableStack.defer(
  () => new Promise((resolve2) => {
  process.stderr.write("\n");
@@ -438,7 +438,7 @@ function handleFork(log, config) {
  if (cluster.isPrimary && config.fork && config.fork > 1) {
  const workers = /* @__PURE__ */ new Set();
  let expectedToExit = false;
- log.debug("Forking %d workers", config.fork);
+ log.debug(`Forking ${config.fork} workers`);
  for (let i = 0; i < config.fork; i++) {
  const worker = cluster.fork();
  const workerLogger = log.child({ worker: worker.id });
@@ -466,7 +466,7 @@ function handleFork(log, config) {
  workers.add(worker);
  }
  registerTerminateHandler((signal) => {
- log.info("Killing workers on %s", signal);
+ log.info(`Killing workers on ${signal}`);
  expectedToExit = true;
  workers.forEach((w) => {
  w.kill(signal);
@@ -570,6 +570,7 @@ const addCommand$2 = (ctx, cli) => cli.command("proxy").description(
  hivePersistedDocumentsToken,
  ...opts
  } = this.optsWithGlobals();
+ ctx.log.info(`Starting ${ctx.productName} ${ctx.version} in proxy mode`);
  const loadedConfig = await loadConfig({
  log: ctx.log,
  configPath: opts.configPath,
@@ -726,6 +727,7 @@ const addCommand$1 = (ctx, cli) => cli.command("subgraph").description(
  hivePersistedDocumentsToken,
  ...opts
  } = this.optsWithGlobals();
+ ctx.log.info(`Starting ${ctx.productName} ${ctx.version} as subgraph`);
  const loadedConfig = await loadConfig({
  log: ctx.log,
  configPath: opts.configPath,
@@ -869,21 +871,23 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  ...opts
  } = this.optsWithGlobals();
  const { apolloUplink } = this.opts();
+ ctx.log.info(
+ `Starting ${ctx.productName} ${ctx.version} with supergraph`
+ );
  const loadedConfig = await loadConfig({
  log: ctx.log,
  configPath: opts.configPath,
  quiet: !cluster.isPrimary,
  configFileName: ctx.configFileName
  });
- let supergraph2 = "supergraph.graphql";
+ let supergraph2 = "./supergraph.graphql";
  if (schemaPathOrUrl) {
- ctx.log.info("Supergraph will be loaded from %s", schemaPathOrUrl);
+ ctx.log.info(`Supergraph will be loaded from ${schemaPathOrUrl}`);
  if (hiveCdnKey) {
  ctx.log.info("Using Hive CDN key");
  if (!isUrl(schemaPathOrUrl)) {
  ctx.log.error(
- "Hive CDN endpoint must be a URL when providing --hive-cdn-key but got %s",
- schemaPathOrUrl
+ `Hive CDN endpoint must be a URL when providing --hive-cdn-key but got ${schemaPathOrUrl}`
  );
  process.exit(1);
  }
@@ -896,8 +900,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  ctx.log.info("Using GraphOS API key");
  if (!schemaPathOrUrl.includes("@")) {
  ctx.log.error(
- `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant> when providing --apollo-key. Please provide a valid graph ref not %s.`,
- schemaPathOrUrl
+ `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant> when providing --apollo-key. Please provide a valid graph ref not ${schemaPathOrUrl}.`
  );
  process.exit(1);
  }
@@ -923,7 +926,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  );
  process.exit(1);
  }
- ctx.log.info("Using Hive CDN endpoint %s", hiveCdnEndpoint);
+ ctx.log.info(`Using Hive CDN endpoint ${hiveCdnEndpoint}`);
  supergraph2 = {
  type: "hive",
  endpoint: hiveCdnEndpoint,
@@ -932,8 +935,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  } else if (apolloGraphRef) {
  if (!apolloGraphRef.includes("@")) {
  ctx.log.error(
- "Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref not %s.",
- apolloGraphRef
+ `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref not ${apolloGraphRef}.`
  );
  process.exit(1);
  }
@@ -943,7 +945,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  );
  process.exit(1);
  }
- ctx.log.info("Using Apollo Graph Ref %s", apolloGraphRef);
+ ctx.log.info(`Using Apollo Graph Ref ${apolloGraphRef}`);
  supergraph2 = {
  type: "graphos",
  apiKey: apolloKey,
@@ -953,7 +955,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  } else if ("supergraph" in loadedConfig) {
  supergraph2 = loadedConfig.supergraph;
  } else {
- ctx.log.info("Using default supergraph location %s", supergraph2);
+ ctx.log.info(`Using default supergraph location "${supergraph2}"`);
  }
  const registryConfig = {};
  const reporting = handleReportingConfig(ctx, loadedConfig, {
@@ -1037,13 +1039,13 @@ async function runSupergraph({ log }, config) {
  if (typeof config.supergraph === "string" && isValidPath(config.supergraph) && !isUrl(config.supergraph)) {
  const supergraphPath = config.supergraph;
  absSchemaPath = isAbsolute(supergraphPath) ? String(supergraphPath) : resolve(process.cwd(), supergraphPath);
- log.info("Reading supergraph from %s", absSchemaPath);
+ log.info({ path: absSchemaPath }, "Reading supergraph");
  try {
  await lstat(absSchemaPath);
- } catch {
+ } catch (err) {
  log.error(
- "Could not read supergraph from %s. Make sure the file exists.",
- absSchemaPath
+ { path: absSchemaPath, err },
+ "Could not read supergraph. Make sure the file exists."
  );
  process.exit(1);
  }
@@ -1051,10 +1053,13 @@ async function runSupergraph({ log }, config) {
  if (absSchemaPath) {
  delete config.pollingInterval;
  if (cluster.isPrimary) {
- log.info("Watching %s for changes", absSchemaPath);
+ log.info({ path: absSchemaPath }, "Watching supergraph for changes");
  const ctrl = new AbortController();
  registerTerminateHandler((signal) => {
- log.info("Closing watcher for %s on %s", absSchemaPath, signal);
+ log.info(
+ { path: absSchemaPath },
+ `Closing watcher for supergraph on ${signal}`
+ );
  return ctrl.abort(`Process terminated on ${signal}`);
  });
  (async function watcher() {
@@ -1064,7 +1069,10 @@ async function runSupergraph({ log }, config) {
  if (f.eventType === "rename") {
  throw new Error(`Supergraph file was renamed to "${f.filename}"`);
  }
- log.info("%s changed. Invalidating supergraph...", absSchemaPath);
+ log.info(
+ { path: absSchemaPath },
+ "Supergraph changed. Invalidating..."
+ );
  if (config.fork && config.fork > 1) {
  for (const workerId in cluster.workers) {
  cluster.workers[workerId].send("invalidateUnifiedGraph");
@@ -1075,9 +1083,15 @@ async function runSupergraph({ log }, config) {
  }
  })().catch((e) => {
  if (e.name === "AbortError") return;
- log.error(e, "Watcher for %s closed with an error", absSchemaPath);
+ log.error(
+ { path: absSchemaPath, err: e },
+ "Supergraph watcher closed with an error"
+ );
  }).then(() => {
- log.info("Watcher for %s successfuly closed", absSchemaPath);
+ log.info(
+ { path: absSchemaPath },
+ "Supergraph watcher successfuly closed"
+ );
  });
  }
  }
@@ -1107,13 +1121,13 @@ async function runSupergraph({ log }, config) {
  }
  const runtime = createGatewayRuntime(config);
  if (absSchemaPath) {
- log.info("Serving local supergraph from %s", absSchemaPath);
+ log.info({ path: absSchemaPath }, "Serving local supergraph");
  } else if (isUrl(String(config.supergraph))) {
- log.info("Serving remote supergraph from %s", config.supergraph);
+ log.info({ url: config.supergraph }, "Serving remote supergraph");
  } else if (typeof config.supergraph === "object" && "type" in config.supergraph && config.supergraph.type === "hive") {
  log.info(
- "Serving supergraph from Hive CDN at %s",
- config.supergraph.endpoint
+ { endpoint: config.supergraph.endpoint },
+ "Serving supergraph from Hive CDN"
  );
  } else {
  log.info("Serving supergraph from config");
@@ -298,7 +298,7 @@ async function startBunServer(gwRuntime, opts) {
  };
  }
  const server = Bun.serve(serverOptions);
- opts.log.info("Listening on %s", server.url);
+ opts.log.info(`Listening on ${server.url}`);
  gwRuntime.disposableStack.use(server);
  }

@@ -365,7 +365,7 @@ async function startNodeHttpServer(gwRuntime, opts) {
  );
  }
  const url = `${protocol}://${host}:${port}`.replace("0.0.0.0", "localhost");
- log.debug("Starting server on %s", url);
+ log.debug(`Starting server on ${url}`);
  if (!disableWebsockets) {
  log.debug("Setting up WebSocket server");
  const { WebSocketServer } = await import('ws');
@@ -397,7 +397,7 @@ async function startNodeHttpServer(gwRuntime, opts) {
  return new Promise((resolve, reject) => {
  server.once("error", reject);
  server.listen(port, host, () => {
- log.info("Listening on %s", url);
+ log.info(`Listening on ${url}`);
  gwRuntime.disposableStack.defer(
  () => new Promise((resolve2) => {
  process.stderr.write("\n");
@@ -445,7 +445,7 @@ function handleFork(log, config) {
  if (cluster__default.default.isPrimary && config.fork && config.fork > 1) {
  const workers = /* @__PURE__ */ new Set();
  let expectedToExit = false;
- log.debug("Forking %d workers", config.fork);
+ log.debug(`Forking ${config.fork} workers`);
  for (let i = 0; i < config.fork; i++) {
  const worker = cluster__default.default.fork();
  const workerLogger = log.child({ worker: worker.id });
@@ -473,7 +473,7 @@ function handleFork(log, config) {
  workers.add(worker);
  }
  utils.registerTerminateHandler((signal) => {
- log.info("Killing workers on %s", signal);
+ log.info(`Killing workers on ${signal}`);
  expectedToExit = true;
  workers.forEach((w) => {
  w.kill(signal);
@@ -577,6 +577,7 @@ const addCommand$2 = (ctx, cli) => cli.command("proxy").description(
  hivePersistedDocumentsToken,
  ...opts
  } = this.optsWithGlobals();
+ ctx.log.info(`Starting ${ctx.productName} ${ctx.version} in proxy mode`);
  const loadedConfig = await loadConfig({
  log: ctx.log,
  configPath: opts.configPath,
@@ -733,6 +734,7 @@ const addCommand$1 = (ctx, cli) => cli.command("subgraph").description(
  hivePersistedDocumentsToken,
  ...opts
  } = this.optsWithGlobals();
+ ctx.log.info(`Starting ${ctx.productName} ${ctx.version} as subgraph`);
  const loadedConfig = await loadConfig({
  log: ctx.log,
  configPath: opts.configPath,
@@ -876,21 +878,23 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  ...opts
  } = this.optsWithGlobals();
  const { apolloUplink } = this.opts();
+ ctx.log.info(
+ `Starting ${ctx.productName} ${ctx.version} with supergraph`
+ );
  const loadedConfig = await loadConfig({
  log: ctx.log,
  configPath: opts.configPath,
  quiet: !cluster__default.default.isPrimary,
  configFileName: ctx.configFileName
  });
- let supergraph2 = "supergraph.graphql";
+ let supergraph2 = "./supergraph.graphql";
  if (schemaPathOrUrl) {
- ctx.log.info("Supergraph will be loaded from %s", schemaPathOrUrl);
+ ctx.log.info(`Supergraph will be loaded from ${schemaPathOrUrl}`);
  if (hiveCdnKey) {
  ctx.log.info("Using Hive CDN key");
  if (!utils.isUrl(schemaPathOrUrl)) {
  ctx.log.error(
- "Hive CDN endpoint must be a URL when providing --hive-cdn-key but got %s",
- schemaPathOrUrl
+ `Hive CDN endpoint must be a URL when providing --hive-cdn-key but got ${schemaPathOrUrl}`
  );
  process.exit(1);
  }
@@ -903,8 +907,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  ctx.log.info("Using GraphOS API key");
  if (!schemaPathOrUrl.includes("@")) {
  ctx.log.error(
- `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant> when providing --apollo-key. Please provide a valid graph ref not %s.`,
- schemaPathOrUrl
+ `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant> when providing --apollo-key. Please provide a valid graph ref not ${schemaPathOrUrl}.`
  );
  process.exit(1);
  }
@@ -930,7 +933,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  );
  process.exit(1);
  }
- ctx.log.info("Using Hive CDN endpoint %s", hiveCdnEndpoint);
+ ctx.log.info(`Using Hive CDN endpoint ${hiveCdnEndpoint}`);
  supergraph2 = {
  type: "hive",
  endpoint: hiveCdnEndpoint,
@@ -939,8 +942,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  } else if (apolloGraphRef) {
  if (!apolloGraphRef.includes("@")) {
  ctx.log.error(
- "Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref not %s.",
- apolloGraphRef
+ `Apollo GraphOS requires a graph ref in the format <graph-id>@<graph-variant>. Please provide a valid graph ref not ${apolloGraphRef}.`
  );
  process.exit(1);
  }
@@ -950,7 +952,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  );
  process.exit(1);
  }
- ctx.log.info("Using Apollo Graph Ref %s", apolloGraphRef);
+ ctx.log.info(`Using Apollo Graph Ref ${apolloGraphRef}`);
  supergraph2 = {
  type: "graphos",
  apiKey: apolloKey,
@@ -960,7 +962,7 @@ const addCommand = (ctx, cli) => cli.command("supergraph").description(
  } else if ("supergraph" in loadedConfig) {
  supergraph2 = loadedConfig.supergraph;
  } else {
- ctx.log.info("Using default supergraph location %s", supergraph2);
+ ctx.log.info(`Using default supergraph location "${supergraph2}"`);
  }
  const registryConfig = {};
  const reporting = handleReportingConfig(ctx, loadedConfig, {
@@ -1044,13 +1046,13 @@ async function runSupergraph({ log }, config) {
  if (typeof config.supergraph === "string" && utils$1.isValidPath(config.supergraph) && !utils.isUrl(config.supergraph)) {
  const supergraphPath = config.supergraph;
  absSchemaPath = node_path.isAbsolute(supergraphPath) ? String(supergraphPath) : node_path.resolve(process.cwd(), supergraphPath);
- log.info("Reading supergraph from %s", absSchemaPath);
+ log.info({ path: absSchemaPath }, "Reading supergraph");
  try {
  await promises.lstat(absSchemaPath);
- } catch {
+ } catch (err) {
  log.error(
- "Could not read supergraph from %s. Make sure the file exists.",
- absSchemaPath
+ { path: absSchemaPath, err },
+ "Could not read supergraph. Make sure the file exists."
  );
  process.exit(1);
  }
@@ -1058,10 +1060,13 @@ async function runSupergraph({ log }, config) {
  if (absSchemaPath) {
  delete config.pollingInterval;
  if (cluster__default.default.isPrimary) {
- log.info("Watching %s for changes", absSchemaPath);
+ log.info({ path: absSchemaPath }, "Watching supergraph for changes");
  const ctrl = new AbortController();
  utils.registerTerminateHandler((signal) => {
- log.info("Closing watcher for %s on %s", absSchemaPath, signal);
+ log.info(
+ { path: absSchemaPath },
+ `Closing watcher for supergraph on ${signal}`
+ );
  return ctrl.abort(`Process terminated on ${signal}`);
  });
  (async function watcher() {
@@ -1071,7 +1076,10 @@ async function runSupergraph({ log }, config) {
  if (f.eventType === "rename") {
  throw new Error(`Supergraph file was renamed to "${f.filename}"`);
  }
- log.info("%s changed. Invalidating supergraph...", absSchemaPath);
+ log.info(
+ { path: absSchemaPath },
+ "Supergraph changed. Invalidating..."
+ );
  if (config.fork && config.fork > 1) {
  for (const workerId in cluster__default.default.workers) {
  cluster__default.default.workers[workerId].send("invalidateUnifiedGraph");
@@ -1082,9 +1090,15 @@ async function runSupergraph({ log }, config) {
  }
  })().catch((e) => {
  if (e.name === "AbortError") return;
- log.error(e, "Watcher for %s closed with an error", absSchemaPath);
+ log.error(
+ { path: absSchemaPath, err: e },
+ "Supergraph watcher closed with an error"
+ );
  }).then(() => {
- log.info("Watcher for %s successfuly closed", absSchemaPath);
+ log.info(
+ { path: absSchemaPath },
+ "Supergraph watcher successfuly closed"
+ );
  });
  }
  }
@@ -1114,13 +1128,13 @@ async function runSupergraph({ log }, config) {
  }
  const runtime = gatewayRuntime.createGatewayRuntime(config);
  if (absSchemaPath) {
- log.info("Serving local supergraph from %s", absSchemaPath);
+ log.info({ path: absSchemaPath }, "Serving local supergraph");
  } else if (utils.isUrl(String(config.supergraph))) {
- log.info("Serving remote supergraph from %s", config.supergraph);
+ log.info({ url: config.supergraph }, "Serving remote supergraph");
  } else if (typeof config.supergraph === "object" && "type" in config.supergraph && config.supergraph.type === "hive") {
  log.info(
- "Serving supergraph from Hive CDN at %s",
- config.supergraph.endpoint
+ { endpoint: config.supergraph.endpoint },
+ "Serving supergraph from Hive CDN"
  );
  } else {
  log.info("Serving supergraph from config");
package/dist/index.cjs CHANGED
@@ -1,6 +1,6 @@
  'use strict';

- var cli = require('./cli-_WmSliwb.cjs');
+ var cli = require('./cli-bwdDvr0W.cjs');
  var logger = require('@graphql-hive/logger');
  var gatewayRuntime = require('@graphql-hive/gateway-runtime');
  var pubsub = require('@graphql-hive/pubsub');
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
- export { b as defaultOptions, d as defineConfig, e as enableModuleCachingIfPossible, a as getBuiltinPluginsFromConfig, g as getCacheInstanceFromConfig, h as handleNodeWarnings, r as run } from './cli-BBlJHQNF.js';
+ export { b as defaultOptions, d as defineConfig, e as enableModuleCachingIfPossible, a as getBuiltinPluginsFromConfig, g as getCacheInstanceFromConfig, h as handleNodeWarnings, r as run } from './cli-HmIj4heR.js';
  export * from '@graphql-hive/logger';
  export * from '@graphql-hive/gateway-runtime';
  export { PubSub } from '@graphql-hive/pubsub';
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@graphql-hive/gateway",
- "version": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "version": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "type": "module",
  "repository": {
  "type": "git",
@@ -53,10 +53,10 @@
  "@escape.tech/graphql-armor-block-field-suggestions": "^3.0.0",
  "@escape.tech/graphql-armor-max-depth": "^2.4.0",
  "@escape.tech/graphql-armor-max-tokens": "^2.5.0",
- "@graphql-hive/gateway-runtime": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-hive/gateway-runtime": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-hive/importer": "^1.1.0",
- "@graphql-hive/logger": "1.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
- "@graphql-hive/plugin-aws-sigv4": "1.0.11-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-hive/logger": "1.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
+ "@graphql-hive/plugin-aws-sigv4": "1.0.11-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-hive/plugin-deduplicate-request": "^1.0.0",
  "@graphql-hive/pubsub": "^1.0.0",
  "@graphql-mesh/cache-cfw-kv": "^0.105.0",
@@ -64,18 +64,18 @@
  "@graphql-mesh/cache-redis": "^0.104.0",
  "@graphql-mesh/cache-upstash-redis": "^0.1.0",
  "@graphql-mesh/cross-helpers": "^0.4.10",
- "@graphql-mesh/hmac-upstream-signature": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-mesh/hmac-upstream-signature": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-mesh/plugin-http-cache": "^0.105.2",
  "@graphql-mesh/plugin-jit": "^0.2.0",
- "@graphql-mesh/plugin-jwt-auth": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-mesh/plugin-jwt-auth": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-mesh/plugin-mock": "^0.105.0",
- "@graphql-mesh/plugin-opentelemetry": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
- "@graphql-mesh/plugin-prometheus": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-mesh/plugin-opentelemetry": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
+ "@graphql-mesh/plugin-prometheus": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-mesh/plugin-rate-limit": "^0.104.0",
  "@graphql-mesh/plugin-snapshot": "^0.104.0",
- "@graphql-mesh/transport-http": "0.6.42-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
- "@graphql-mesh/transport-http-callback": "1.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
- "@graphql-mesh/transport-ws": "2.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-mesh/transport-http": "0.6.42-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
+ "@graphql-mesh/transport-http-callback": "1.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
+ "@graphql-mesh/transport-ws": "2.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-mesh/types": "^0.104.0",
  "@graphql-mesh/utils": "^0.104.2",
  "@graphql-tools/code-file-loader": "^8.1.15",
@@ -90,7 +90,7 @@
  "ws": "^8.18.0"
  },
  "devDependencies": {
- "@graphql-mesh/transport-common": "1.0.0-alpha-2c512a9004f7e2ac471bde15cf9dd9b064d6f87e",
+ "@graphql-mesh/transport-common": "1.0.0-alpha-dc75093c5467d8467068ec27a9e61fdab5001790",
  "@graphql-mesh/transport-soap": "^0.10.0",
  "@graphql-tools/executor": "^1.4.7",
  "@rollup/plugin-commonjs": "^28.0.0",