@metaplay/metaplay-auth 1.6.1 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.ts CHANGED
@@ -2,7 +2,6 @@
2
2
  import { Command } from 'commander'
3
3
  import { randomBytes } from 'crypto'
4
4
  import Docker from 'dockerode'
5
- import { existsSync } from 'fs'
6
5
  import { writeFile, unlink } from 'fs/promises'
7
6
  import { tmpdir } from 'os'
8
7
  import { exit } from 'process'
@@ -19,11 +18,8 @@ import { TargetEnvironment } from './src/targetenvironment.js'
19
18
  import { pathJoin, isValidFQDN, executeCommand, removeTrailingSlash, fetchHelmChartVersions, resolveBestMatchingVersion } from './src/utils.js'
20
19
  import { PACKAGE_VERSION } from './src/version.js'
21
20
 
22
- /**
23
- * Base URL of StackAPI infra to use -- defaults to p1.metaplay.io. Override with the global --stack-api flag.
24
- * Note: The dynamic `kubeconfig`s generated by `metaplay-auth` override this with '--stack-api <url>' flag.
25
- */
26
- let defaultStackApiBaseUrl = 'https://infra.p1.metaplay.io/stackapi'
21
+ /** Stack API base url override, specified with the '--stack-api' global flag. */
22
+ let stackApiBaseUrlOverride: string | undefined
27
23
 
28
24
  /**
29
25
  * Resolve a TargetEnvironment from a fully-qualified domain name (eg, 'idler-develop.p1.metaplay.io').
@@ -32,9 +28,13 @@ let defaultStackApiBaseUrl = 'https://infra.p1.metaplay.io/stackapi'
32
28
  * @returns The TargetEnvironment instance needed to operate with the environment.
33
29
  */
34
30
  function resolveTargetEnvironmentFromFQDN(tokens: TokenSet, environmentDomain: string): TargetEnvironment {
31
+ // Default p1.metaplay.io StackAPI base URL. This code path is deprecated so not a problem to do this here.
32
+ const defaultStackApiBaseUrl = 'https://infra.p1.metaplay.io/stackapi'
33
+
35
34
  // Extract the humanId from the domain, eg: 'idler-develop.p1.metaplay.io' -> 'idler-develop'
35
+ // When using FQDNs, we assume the target is on p1.metaplay.io -- the only stack supported with FQDNs.
36
36
  const humanId = environmentDomain.split('.')[0]
37
- return new TargetEnvironment(tokens.access_token, humanId, defaultStackApiBaseUrl) // \todo We could probably infer the stack API URL from the domain?
37
+ return new TargetEnvironment(tokens.access_token, humanId, defaultStackApiBaseUrl)
38
38
  }
39
39
 
40
40
  /**
@@ -67,7 +67,18 @@ interface PortalEnvironmentInfo {
67
67
  // \todo Make field mandatory when portal returns valid values
68
68
  human_id?: string
69
69
 
70
- // \todo Add stackapi url that the portal should return?
70
+ /**
71
+ * Domain that the environment uses, eg, 'metaplay.games'. Old environments use 'p1.metaplay.io'.
72
+ * This domain is where the clients connect to, the one that JWKS signatures use, and so on.
73
+ */
74
+ env_domain?: string
75
+
76
+ /**
77
+ * Domain of the infra stack where the environment is provisioned, eg, 'p1.metaplay.io'.
78
+ * Note that the environment does not exist on the infra stack if it hasn't been created yet,
79
+ * but the decision can still have been made where to put it.
80
+ */
81
+ stack_domain?: string
71
82
  }
72
83
 
73
84
  /**
@@ -79,7 +90,7 @@ interface PortalEnvironmentInfo {
79
90
  * @returns The portal's information about the environment.
80
91
  */
81
92
  // eslint-disable-next-line @typescript-eslint/max-params
82
- async function fetchManagedEnvironmentInfo(
93
+ async function fetchManagedEnvironmentInfoWithSlugs(
83
94
  tokens: TokenSet,
84
95
  organization: string,
85
96
  project: string,
@@ -102,7 +113,35 @@ async function fetchManagedEnvironmentInfo(
102
113
  }
103
114
 
104
115
  // \todo Validate response?
105
- return (await response.json()) as PortalEnvironmentInfo
116
+ const portalEnvInfo = (await response.json()) as PortalEnvironmentInfo
117
+ logger.debug(`Portal returned environment info: ${JSON.stringify(portalEnvInfo, undefined, 2)}`)
118
+ return portalEnvInfo
119
+ }
120
+
121
+ async function fetchManageEnvironmentInfoWithHumanId(tokens: TokenSet, humanId: string): Promise<PortalEnvironmentInfo> {
122
+ const url = `${portalBaseUrl}/api/v1/environments?human_id=${humanId}`
123
+ logger.debug(`Getting environment information from portal: ${url}...`)
124
+ const response = await fetch(url, {
125
+ method: 'GET',
126
+ headers: {
127
+ Authorization: `Bearer ${tokens.access_token}`,
128
+ 'Content-Type': 'application/json',
129
+ },
130
+ })
131
+
132
+ // Throw on server errors (eg, forbidden)
133
+ if (!response.ok) {
134
+ const errorData = await response.json()
135
+ throw new Error(`Failed to fetch environment details with error ${response.status}: ${JSON.stringify(errorData)}`)
136
+ }
137
+
138
+ // Return the result
139
+ const portalEnvInfos = (await response.json()) as PortalEnvironmentInfo[]
140
+ logger.debug(`Portal returned environment infos: ${JSON.stringify(portalEnvInfos, undefined, 2)}`)
141
+ if (portalEnvInfos.length === 0) {
142
+ throw new Error(`Failed to fetch details from portal for environment ${humanId}: no matching environment found`)
143
+ }
144
+ return portalEnvInfos[0]
106
145
  }
107
146
 
108
147
  /**
@@ -123,19 +162,33 @@ async function resolveTargetEnvironmentFromSlugs(
123
162
  environment: string
124
163
  ): Promise<TargetEnvironment> {
125
164
  // Fetch the deployment information from the portal
126
- const portalEnvInfo = await fetchManagedEnvironmentInfo(tokens, organization, project, environment)
165
+ const portalEnvInfo = await fetchManagedEnvironmentInfoWithSlugs(tokens, organization, project, environment)
127
166
  const humanId = portalEnvInfo.human_id
128
167
  if (!humanId) {
129
168
  throw new Error(`Portal returned missing human_id for environment '${organization}-${project}-${environment}'`)
130
169
  }
131
170
 
132
- return new TargetEnvironment(tokens.access_token, humanId, defaultStackApiBaseUrl)
171
+ const stackDomain = portalEnvInfo.stack_domain
172
+ if (!stackDomain) {
173
+ throw new Error(`The environment ${portalEnvInfo.human_id} has not been provisioned to any infra stack (environment.stack_domain is empty).`)
174
+ }
175
+
176
+ return new TargetEnvironment(tokens.access_token, humanId, `https://infra.${stackDomain}/stackapi`)
133
177
  }
134
178
 
135
179
  async function resolveTargetEnvironmentHumanId(tokens: TokenSet, humanId: string): Promise<TargetEnvironment> {
136
- // \todo Validate that the target environment exists?
180
+ // Resolve the StackAPI base URL to use: prefer the --stack-api override, or if not defined, fetch it from the portal
181
+ let stackApiBaseUrl = stackApiBaseUrlOverride
182
+ if (!stackApiBaseUrl) {
183
+ const portalEnvInfo = await fetchManageEnvironmentInfoWithHumanId(tokens, humanId)
184
+ if (!portalEnvInfo.stack_domain) {
185
+ throw new Error(`The environment ${portalEnvInfo.human_id} has not been provisioned to any infra stack (environment.stack_domain is empty).`)
186
+ }
187
+ stackApiBaseUrl = `https://infra.${portalEnvInfo.stack_domain}/stackapi`
188
+ }
137
189
 
138
- return new TargetEnvironment(tokens.access_token, humanId, defaultStackApiBaseUrl)
190
+
191
+ return new TargetEnvironment(tokens.access_token, humanId, stackApiBaseUrl)
139
192
  }
140
193
 
141
194
  /**
@@ -156,6 +209,7 @@ async function resolveTargetEnvironment(
156
209
  // - Tuple of '<organization>-<project>-<environment>' slugs (eg, 'metaplay-idler-develop')
157
210
  // - Stable humanId, eg, 'delicious-elephant'
158
211
  if (isValidFQDN(address)) {
212
+ console.warn('Using the FQDN to specify target environment is deprecated. Use the "<organization>-<project>-<environment>" slugs instead.')
159
213
  return resolveTargetEnvironmentFromFQDN(tokens, address)
160
214
  } else {
161
215
  const parts = address.split('-')
@@ -171,9 +225,7 @@ async function resolveTargetEnvironment(
171
225
  }
172
226
  } else if (options.organization && options.project && options.environment) {
173
227
  // Parse tuple from command-line options (output to stderr to avoid messing up '$(eval metaplay-auth ... --format env)' invocations)
174
- console.warn(
175
- `Warning: Specifying the target environment with -o (--organization), -p (--project), and -e (--environment) is deprecated! Use the '${options.organization}-${options.project}-${options.environment}' syntax instead.`
176
- )
228
+ console.warn(`Warning: Specifying the target environment with -o (--organization), -p (--project), and -e (--environment) is deprecated! Use the '${options.organization}-${options.project}-${options.environment}' syntax instead.`)
177
229
  return await resolveTargetEnvironmentFromSlugs(tokens, options.organization, options.project, options.environment)
178
230
  } else {
179
231
  throw new Error('Could not determine target environment from arguments: You need to specify either an environment FQDN or an organization, project, and environment. Run this command with --help flag for more information.')
@@ -205,7 +257,7 @@ program
205
257
 
206
258
  // Store the stack API base URL for accessing globally
207
259
  if (opts.stackApi) {
208
- defaultStackApiBaseUrl = opts.stackApi as string
260
+ stackApiBaseUrlOverride = opts.stackApi as string
209
261
  }
210
262
  })
211
263
 
@@ -744,7 +796,7 @@ program
744
796
  // - Unknown, error out!
745
797
  const helmChartVersionSpec = options.helmChartVersion ?? imageLabels['io.metaplay.default_server_chart_version']
746
798
  if (!options.helmChartVersion) {
747
- console.warn('You should specify the Helm chart version with --helm-chart-version=<version>!')
799
+ console.warn('Warning: You should specify the Helm chart version with --helm-chart-version=<version>!')
748
800
  }
749
801
  if (!helmChartVersionSpec) {
750
802
  throw new Error('No Helm chart version defined. With pre-R28 SDK versions, you must specify the Helm chart version explicitly with --helm-chart-version=<version>.')
@@ -817,7 +869,7 @@ program
817
869
  ? ` from repo ${helmChartRepo}`
818
870
  : ''
819
871
  console.log(
820
- `Game server deployed to ${gameserver} with tag ${imageTag} using chart version ${resolvedHelmChartVersion}${testingRepoSuffix}!`
872
+ `Game server deployed to ${gameserver} with tag ${imageTag} using chart version ${resolvedHelmChartVersion}${testingRepoSuffix}`
821
873
  )
822
874
  } finally {
823
875
  // Remove temporary kubeconfig file
@@ -830,12 +882,7 @@ program
830
882
  kubeconfig.loadFromString(kubeconfigPayload)
831
883
 
832
884
  console.log('Validating game server deployment...')
833
- const exitCode = await checkGameServerDeployment(
834
- envInfo.deployment.kubernetes_namespace,
835
- kubeconfig,
836
- imageTag
837
- )
838
- exit(exitCode)
885
+ await checkGameServerDeployment(envInfo, kubeconfig, imageTag)
839
886
  } catch (error) {
840
887
  const errMessage = error instanceof Error ? error.message : String(error)
841
888
  console.error(`Failed to resolve game server deployment status: ${errMessage}`)
@@ -867,7 +914,6 @@ program
867
914
  try {
868
915
  logger.debug('Get environment info')
869
916
  const envInfo = await targetEnv.getEnvironmentDetails()
870
- const kubernetesNamespace = envInfo.deployment.kubernetes_namespace
871
917
 
872
918
  // Load kubeconfig from file and throw error if validation fails.
873
919
  logger.debug('Get kubeconfig')
@@ -883,10 +929,9 @@ program
883
929
  }
884
930
 
885
931
  // Run the checks and exit with success/failure exitCode depending on result
886
- console.log(`Validating game server deployment in namespace ${kubernetesNamespace}`)
932
+ console.log(`Validating game server deployment`)
887
933
  // \todo Get requiredImageTag from the Helm chart
888
- const exitCode = await checkGameServerDeployment(kubernetesNamespace, kubeconfig, /* requiredImageTag: */ null)
889
- exit(exitCode)
934
+ await checkGameServerDeployment(envInfo, kubeconfig, /* requiredImageTag: */ null)
890
935
  } catch (error: any) {
891
936
  console.error(`Failed to check deployment status: ${error.message}`)
892
937
  exit(1)
@@ -894,52 +939,6 @@ program
894
939
  }
895
940
  )
896
941
 
897
- program
898
- .command('check-deployment')
899
- .description('[deprecated] check that a game server was successfully deployed, or print out useful error messages in case of failure')
900
- .argument('[namespace]', 'kubernetes namespace of the deployment')
901
- .action(async (namespace: string) => {
902
- console.error(
903
- 'DEPRECATED! Use the "metaplay-auth check-server-status [gameserver]" command instead! This command will be removed soon.'
904
- )
905
-
906
- try {
907
- if (!namespace) {
908
- throw new Error('Must specify value for argument "namespace"')
909
- }
910
-
911
- // Check that the KUBECONFIG environment variable exists
912
- const kubeconfigPath = process.env.KUBECONFIG
913
- if (!kubeconfigPath) {
914
- throw new Error('The KUBECONFIG environment variable must be specified')
915
- }
916
-
917
- // Check that the kubeconfig file exists
918
- if (!existsSync(kubeconfigPath)) {
919
- throw new Error(`The environment variable KUBECONFIG points to a file '${kubeconfigPath}' that doesn't exist`)
920
- }
921
-
922
- // Create Kubernetes API instance (with default kubeconfig)
923
- const kubeconfig = new KubeConfig()
924
- // Load kubeconfig from file and throw error if validation fails.
925
- try {
926
- kubeconfig.loadFromFile(kubeconfigPath)
927
- } catch (error) {
928
- const errMessage = error instanceof Error ? error.message : String(error)
929
- throw new Error(`Failed to load or validate kubeconfig: ${errMessage}`)
930
- }
931
-
932
- // Run the checks and exit with success/failure exitCode depending on result
933
- console.log(`Validating game server deployment in namespace ${namespace}`)
934
- // \todo Get requiredImageTag from the Helm chart
935
- const exitCode = await checkGameServerDeployment(namespace, kubeconfig, /* requiredImageTag: */ null)
936
- exit(exitCode)
937
- } catch (error: any) {
938
- console.error(`Failed to check deployment status: ${error.message}`)
939
- exit(1)
940
- }
941
- })
942
-
943
942
  program
944
943
  .command('debug-server')
945
944
  .description('run an ephemeral debug container against a game server pod running in the cloud')
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@metaplay/metaplay-auth",
3
3
  "description": "Utility CLI for authenticating with the Metaplay Auth and making authenticated calls to infrastructure endpoints.",
4
- "version": "1.6.1",
4
+ "version": "1.7.0",
5
5
  "type": "module",
6
6
  "license": "SEE LICENSE IN LICENSE",
7
7
  "homepage": "https://metaplay.io",
@@ -17,28 +17,28 @@
17
17
  },
18
18
  "devDependencies": {
19
19
  "@metaplay/eslint-config": "workspace:*",
20
- "@types/dockerode": "^3.3.31",
21
- "@types/express": "^4.17.21",
22
- "@types/js-yaml": "^4.0.9",
23
- "@types/jsonwebtoken": "^9.0.6",
24
- "@types/jwk-to-pem": "^2.0.3",
25
- "@types/node": "^20.16.1",
26
- "@types/semver": "^7.5.8",
27
- "esbuild": "^0.23.1",
28
- "tsx": "^4.19.0",
29
- "typescript": "5.5.4",
30
- "vitest": "^2.0.5",
31
- "@aws-sdk/client-ecr": "^3.645.0",
32
- "@kubernetes/client-node": "^1.0.0-rc6",
33
- "@ory/client": "^1.14.5",
34
- "commander": "^12.1.0",
35
- "dockerode": "^4.0.2",
36
- "h3": "^1.12.0",
37
- "js-yaml": "^4.1.0",
38
- "jsonwebtoken": "^9.0.2",
39
- "jwk-to-pem": "^2.0.6",
40
- "open": "^8.4.2",
41
- "semver": "^7.6.3",
42
- "tslog": "^4.9.3"
20
+ "@types/dockerode": "3.3.31",
21
+ "@types/express": "4.17.21",
22
+ "@types/js-yaml": "4.0.9",
23
+ "@types/jsonwebtoken": "9.0.7",
24
+ "@types/jwk-to-pem": "2.0.3",
25
+ "@types/node": "20.16.1",
26
+ "@types/semver": "7.5.8",
27
+ "esbuild": "0.24.0",
28
+ "tsx": "4.19.1",
29
+ "typescript": "5.6.2",
30
+ "vitest": "2.1.1",
31
+ "@aws-sdk/client-ecr": "3.654.0",
32
+ "@kubernetes/client-node": "1.0.0-rc6",
33
+ "@ory/client": "1.15.4",
34
+ "commander": "12.1.0",
35
+ "dockerode": "4.0.2",
36
+ "h3": "1.12.0",
37
+ "js-yaml": "4.1.0",
38
+ "jsonwebtoken": "9.0.2",
39
+ "jwk-to-pem": "2.0.6",
40
+ "open": "8.4.2",
41
+ "semver": "7.6.3",
42
+ "tslog": "4.9.3"
43
43
  }
44
44
  }
@@ -226,8 +226,8 @@ export function registerBuildCommand(program: Command): void {
226
226
  .concat(['-t', imageTag])
227
227
  .concat(['-f', dockerFilePath])
228
228
  .concat(['--platform', platform]) // target platform(s)
229
- .concat(options.buildNumber ? ['--build-arg', `BUILD_NUMBER=${options.buildNumber}`] : [])
230
- .concat(options.commitId ? ['--build-arg', `COMMIT_ID=${options.commitId}`] : [])
229
+ .concat(buildNumber ? ['--build-arg', `BUILD_NUMBER=${buildNumber}`] : [])
230
+ .concat(commitId ? ['--build-arg', `COMMIT_ID=${commitId}`] : [])
231
231
  .concat(['--build-arg', `SDK_ROOT=${sdkRootPath}`])
232
232
  .concat(['--build-arg', `PROJECT_ROOT=${projectRootPath}`])
233
233
  .concat(['--build-arg', `SHARED_CODE_DIR=${options.sharedCodeDir.replaceAll('\\', '/')}`]) // pass relative-to-project-root dir
package/src/deployment.ts CHANGED
@@ -2,7 +2,8 @@ import { unlink, writeFile } from 'fs/promises'
2
2
  import os from 'os'
3
3
  import path from 'path'
4
4
  import { exit } from 'process'
5
- import { TargetEnvironment } from 'targetenvironment.js'
5
+ import { EnvironmentDetails, TargetEnvironment } from 'targetenvironment.js'
6
+ import * as net from 'net'
6
7
 
7
8
  import {
8
9
  KubeConfig,
@@ -273,8 +274,8 @@ function resolvePodStatus(pod: V1Pod, requiredImageTag: string | null): GameServ
273
274
  const podImageTag = resolvePodGameServerImageTag(pod)
274
275
  if (podImageTag !== requiredImageTag) {
275
276
  return {
276
- phase: GameServerPodPhase.Unknown,
277
- message: `Image tag is not (yet?) updated. Pod image is ${podImageTag ?? 'unknown'}, expecting ${requiredImageTag}.`,
277
+ phase: GameServerPodPhase.Pending,
278
+ message: `Pod has old image tag ${podImageTag ?? 'unknown'}, expecting ${requiredImageTag}.`,
278
279
  }
279
280
  }
280
281
  }
@@ -372,17 +373,24 @@ function allPodsInPhase(podStatuses: GameServerPodStatus[], phase: GameServerPod
372
373
  return podStatuses.every((status) => status.phase === phase)
373
374
  }
374
375
 
375
- export async function checkGameServerDeployment(
376
+ /**
377
+ * Wait for the game server Kubernetes pods to reach their ready state,
378
+ * or throw an error on timeout.
379
+ * @param namespace Kubernetes namespace where the pods are deployed.
380
+ * @param kubeconfig Kubeconfig to use to connect to the cluster.
381
+ * @param requiredImageTag Image tag that the pods should be using (or null if any is accepted).
382
+ */
383
+ export async function waitForGameServerPodsToBeReady(
376
384
  namespace: string,
377
385
  kubeconfig: KubeConfig,
378
386
  requiredImageTag: string | null
379
- ): Promise<number> {
380
- const k8sApi = kubeconfig.makeApiClient(CoreV1Api)
387
+ ): Promise<void> {
388
+ // Try for 3 min before giving up
389
+ const timeoutAt = Date.now() + 3 * 60 * 1000
381
390
 
382
- // Figure out when to stop
383
- const startTime = Date.now()
384
- const timeoutAt = startTime + 1 * 60 * 1000 // 5min
391
+ const k8sApi = kubeconfig.makeApiClient(CoreV1Api)
385
392
 
393
+ // Wait for the Kubernetes pods to be ready (with the desired image).
386
394
  while (true) {
387
395
  // Check pod states
388
396
  const pods = await fetchGameServerPods(k8sApi, namespace)
@@ -403,7 +411,7 @@ export async function checkGameServerDeployment(
403
411
  const suffix = status.phase !== GameServerPodPhase.Ready ? ` -- ${status.message}` : ''
404
412
  console.log(` ${pods[ndx].metadata?.name}: ${status.phase}${suffix}`)
405
413
  }
406
- return 1
414
+ return
407
415
  } else if (
408
416
  anyPodsInPhase(podStatuses, GameServerPodPhase.Unknown) ||
409
417
  anyPodsInPhase(podStatuses, GameServerPodPhase.Pending) ||
@@ -417,8 +425,7 @@ export async function checkGameServerDeployment(
417
425
  }
418
426
  } else if (allPodsInPhase(podStatuses, GameServerPodPhase.Ready)) {
419
427
  console.log('Gameserver is up and ready to serve!')
420
- // \todo add further readiness checks -- ping endpoint, ping dashboard, other checks?
421
- return 0
428
+ return
422
429
  } else {
423
430
  console.log('Deployment in inconsistent state, waiting...')
424
431
  for (let ndx = 0; ndx < pods.length; ndx += 1) {
@@ -430,8 +437,7 @@ export async function checkGameServerDeployment(
430
437
  }
431
438
 
432
439
  if (Date.now() >= timeoutAt) {
433
- console.log('Deployment failed! Timeout while waiting for gameserver to initialize.')
434
- return 124 // timeout
440
+ throw new Error('Timeout while waiting for the server pods to get ready')
435
441
  }
436
442
 
437
443
  // Sleep a bit to avoid spamming the log
@@ -439,6 +445,86 @@ export async function checkGameServerDeployment(
439
445
  }
440
446
  }
441
447
 
448
+ /**
449
+ * Wait until we can establish a client-simulating connection to the target
450
+ * game server, or a timeout happens.
451
+ * @param hostname Hostname of the target server to connect to.
452
+ * @param port Port to use for the connections (usually 9339).
453
+ */
454
+ async function waitForGameServerClientEndpointToBeReady(
455
+ hostname: string,
456
+ port: number
457
+ ): Promise<void> {
458
+ const checkConnection = async (): Promise<boolean> => {
459
+ return await new Promise((resolve) => {
460
+ const socket = new net.Socket()
461
+ // Set a short timeout for each attempt
462
+ // Note: This does not include the DNS resolve time which can take a minute or so.
463
+ // \todo Consider adding a DNS resolve step before trying to connect.
464
+ socket.setTimeout(2000)
465
+
466
+ socket.on('connect', () => {
467
+ socket.destroy() // Clean up after success
468
+ resolve(true)
469
+ })
470
+
471
+ socket.on('error', () => {
472
+ socket.destroy() // Clean up after error
473
+ resolve(false)
474
+ })
475
+
476
+ socket.on('timeout', () => {
477
+ socket.destroy() // Clean up after timeout
478
+ resolve(false)
479
+ })
480
+
481
+ socket.connect(port, hostname)
482
+ })
483
+ }
484
+
485
+ // Try for 2 min before giving up
486
+ const timeoutAt = Date.now() + 2 * 60 * 1000
487
+
488
+ while (Date.now() < timeoutAt) {
489
+ const connected = await checkConnection()
490
+ if (connected) {
491
+ console.log(`Successfully connected to ${hostname}:${port}`)
492
+ return
493
+ }
494
+ console.log(`Retrying connection to ${hostname}:${port}...`)
495
+ await new Promise((resolve) => setTimeout(resolve, 1000))
496
+ }
497
+
498
+ throw new Error(`Timeout while trying to connect to ${hostname}:${port}.`)
499
+ }
500
+
501
+ /**
502
+ * Check for the status of a game server deployment:
503
+ * - Check that the Kubernetes resources (pods) are up and running.
504
+ * - Check that the server accepts connections on its client-facing port.
505
+ * @param envInfo Environment info of the target.
506
+ * @param kubeconfig Kubeconfig to use for checking the Kubernetes resources.
507
+ * @param requiredImageTag Expected image tag that is being deployed (or null if any is acceptable).
508
+ */
509
+ export async function checkGameServerDeployment(
510
+ envInfo: EnvironmentDetails,
511
+ kubeconfig: KubeConfig,
512
+ requiredImageTag: string | null
513
+ ): Promise<void> {
514
+ // Wait for Kubernetes pods to be in ready state
515
+ console.log('Waiting for game server pods to be ready...')
516
+ await waitForGameServerPodsToBeReady(envInfo.deployment.kubernetes_namespace, kubeconfig, requiredImageTag)
517
+
518
+ // \todo Add a separate step for a DNS check on the game server
519
+
520
+ // Check that the game server accepts traffic on its client-facing endpoint
521
+ const clientTrafficPort = 9339 // \todo check for other ports, too?
522
+ console.log(`Connecting to the server at ${envInfo.deployment.server_hostname}:${clientTrafficPort}...`)
523
+ await waitForGameServerClientEndpointToBeReady(envInfo.deployment.server_hostname, clientTrafficPort)
524
+
525
+ // \todo add more readiness checks: dashboard, others?
526
+ }
527
+
442
528
  export async function debugGameServer(targetEnv: TargetEnvironment, targetPodName?: string): Promise<void> {
443
529
  // Initialize kubeconfig for target environment
444
530
  logger.debug('Get kubeconfig')
package/src/version.ts CHANGED
@@ -1 +1 @@
1
- export const PACKAGE_VERSION = "1.6.1"
1
+ export const PACKAGE_VERSION = "1.7.0"