cdk8s-plus-32 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +90710 -0
- package/CODE_OF_CONDUCT.md +3 -0
- package/CONTRIBUTING.md +185 -0
- package/DCO +34 -0
- package/LICENSE +202 -0
- package/NOTICE +1 -0
- package/OWNERS.md +5 -0
- package/README.md +32 -0
- package/SECURITY.md +5 -0
- package/cdk8s.yaml +9 -0
- package/docs/java.md +24089 -0
- package/docs/plus/config-map.md +98 -0
- package/docs/plus/container.md +133 -0
- package/docs/plus/cronjob.md +67 -0
- package/docs/plus/deployment.md +232 -0
- package/docs/plus/horizontal-pod-autoscaler.md +226 -0
- package/docs/plus/ingress.md +68 -0
- package/docs/plus/job.md +48 -0
- package/docs/plus/namespace.md +58 -0
- package/docs/plus/network-policy.md +341 -0
- package/docs/plus/pod.md +455 -0
- package/docs/plus/pv.md +82 -0
- package/docs/plus/pvc.md +77 -0
- package/docs/plus/rbac.md +104 -0
- package/docs/plus/secret.md +32 -0
- package/docs/plus/service-account.md +35 -0
- package/docs/plus/service.md +41 -0
- package/docs/plus/volume.md +38 -0
- package/docs/python.md +26313 -0
- package/docs/typescript.md +19695 -0
- package/git-hooks/README.md +9 -0
- package/git-hooks/prepare-commit-msg +18 -0
- package/git-hooks/setup.sh +10 -0
- package/lib/_action.d.ts +21 -0
- package/lib/_action.js +32 -0
- package/lib/api-resource.d.ts +298 -0
- package/lib/api-resource.js +430 -0
- package/lib/base.d.ts +79 -0
- package/lib/base.js +92 -0
- package/lib/config-map.d.ts +126 -0
- package/lib/config-map.js +159 -0
- package/lib/container.d.ts +1057 -0
- package/lib/container.js +845 -0
- package/lib/cron-job.d.ts +138 -0
- package/lib/cron-job.js +103 -0
- package/lib/daemon-set.d.ts +45 -0
- package/lib/daemon-set.js +55 -0
- package/lib/deployment.d.ts +223 -0
- package/lib/deployment.js +214 -0
- package/lib/handler.d.ts +62 -0
- package/lib/handler.js +54 -0
- package/lib/horizontal-pod-autoscaler.d.ts +500 -0
- package/lib/horizontal-pod-autoscaler.js +569 -0
- package/lib/imports/k8s.d.ts +21811 -0
- package/lib/imports/k8s.js +16678 -0
- package/lib/index.d.ts +26 -0
- package/lib/index.js +44 -0
- package/lib/ingress.d.ts +230 -0
- package/lib/ingress.js +246 -0
- package/lib/job.d.ts +64 -0
- package/lib/job.js +54 -0
- package/lib/namespace.d.ts +128 -0
- package/lib/namespace.js +109 -0
- package/lib/network-policy.d.ts +311 -0
- package/lib/network-policy.js +344 -0
- package/lib/pod.d.ts +1088 -0
- package/lib/pod.js +1141 -0
- package/lib/probe.d.ts +141 -0
- package/lib/probe.js +77 -0
- package/lib/pv.d.ts +375 -0
- package/lib/pv.js +273 -0
- package/lib/pvc.d.ts +163 -0
- package/lib/pvc.js +152 -0
- package/lib/role-binding.d.ts +138 -0
- package/lib/role-binding.js +165 -0
- package/lib/role.d.ts +268 -0
- package/lib/role.js +401 -0
- package/lib/secret.d.ts +195 -0
- package/lib/secret.js +185 -0
- package/lib/service-account.d.ts +83 -0
- package/lib/service-account.js +105 -0
- package/lib/service.d.ts +289 -0
- package/lib/service.js +182 -0
- package/lib/stateful-set.d.ts +169 -0
- package/lib/stateful-set.js +174 -0
- package/lib/utils.d.ts +4 -0
- package/lib/utils.js +33 -0
- package/lib/volume.d.ts +573 -0
- package/lib/volume.js +371 -0
- package/lib/workload.d.ts +121 -0
- package/lib/workload.js +122 -0
- package/node_modules/balanced-match/.github/FUNDING.yml +2 -0
- package/node_modules/balanced-match/LICENSE.md +21 -0
- package/node_modules/balanced-match/README.md +97 -0
- package/node_modules/balanced-match/index.js +62 -0
- package/node_modules/balanced-match/package.json +48 -0
- package/node_modules/concat-map/.travis.yml +4 -0
- package/node_modules/concat-map/LICENSE +18 -0
- package/node_modules/concat-map/README.markdown +62 -0
- package/node_modules/concat-map/example/map.js +6 -0
- package/node_modules/concat-map/index.js +13 -0
- package/node_modules/concat-map/package.json +43 -0
- package/node_modules/concat-map/test/map.js +39 -0
- package/node_modules/minimatch/LICENSE +15 -0
- package/node_modules/minimatch/README.md +230 -0
- package/node_modules/minimatch/minimatch.js +947 -0
- package/node_modules/minimatch/node_modules/brace-expansion/LICENSE +21 -0
- package/node_modules/minimatch/node_modules/brace-expansion/README.md +129 -0
- package/node_modules/minimatch/node_modules/brace-expansion/index.js +201 -0
- package/node_modules/minimatch/node_modules/brace-expansion/package.json +47 -0
- package/node_modules/minimatch/package.json +33 -0
- package/package.json +181 -0
- package/rotate.md +84 -0
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
# ConfigMap
|
|
2
|
+
|
|
3
|
+
ConfigMaps are used to store configuration data. They provide a dictionary-based
|
|
4
|
+
data structure that can be consumed in various shapes and forms.
|
|
5
|
+
|
|
6
|
+
!!! tip ""
|
|
7
|
+
[API Reference](../../reference/cdk8s-plus-32/typescript.md#configmap)
|
|
8
|
+
|
|
9
|
+
## Use an existing `ConfigMap`
|
|
10
|
+
|
|
11
|
+
You can reference an existing `ConfigMap` like so. Note that this does not create a new object,
|
|
12
|
+
and will therefore not be included in the resulting manifest.
|
|
13
|
+
|
|
14
|
+
```typescript
|
|
15
|
+
import * as kplus from 'cdk8s-plus-32';
|
|
16
|
+
import { Construct } from 'constructs';
|
|
17
|
+
import { App, Chart, ChartProps } from 'cdk8s';
|
|
18
|
+
|
|
19
|
+
export class MyChart extends Chart {
|
|
20
|
+
constructor(scope: Construct, id: string, props?: ChartProps) {
|
|
21
|
+
super(scope, id, props);
|
|
22
|
+
|
|
23
|
+
const config: kplus.IConfigMap = kplus.ConfigMap.fromConfigMapName(this, 'ConfigMap', 'config');
|
|
24
|
+
|
|
25
|
+
// the 'config' constant can later be used by API's that require an IConfigMap.
|
|
26
|
+
// for example when creating a volume.
|
|
27
|
+
kplus.Volume.fromConfigMap(this, 'Volume', config);
|
|
28
|
+
}
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
const app = new App();
|
|
32
|
+
new MyChart(app, 'VolumeFromConfigMap');
|
|
33
|
+
app.synth();
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
## Adding data
|
|
37
|
+
|
|
38
|
+
You can create config maps and add some data to them like so:
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
import * as kplus from 'cdk8s-plus-32';
|
|
42
|
+
import { Construct } from 'constructs';
|
|
43
|
+
import { App, Chart, ChartProps } from 'cdk8s';
|
|
44
|
+
|
|
45
|
+
export class MyChart extends Chart {
|
|
46
|
+
constructor(scope: Construct, id: string, props?: ChartProps) {
|
|
47
|
+
super(scope, id, props);
|
|
48
|
+
|
|
49
|
+
const config = new kplus.ConfigMap(this, 'Config');
|
|
50
|
+
config.addData('url', 'https://my-endpoint:8080');
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
const app = new App();
|
|
55
|
+
new MyChart(app, 'ConfigMap');
|
|
56
|
+
app.synth();
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
## Creating a volume from a directory
|
|
60
|
+
|
|
61
|
+
Here is a nifty little trick you can use to create a volume that contains a directory on the client machine (machine that runs `cdk8s synth`):
|
|
62
|
+
|
|
63
|
+
```typescript
|
|
64
|
+
import * as kplus from 'cdk8s-plus-32';
|
|
65
|
+
import * as path from 'path';
|
|
66
|
+
import { Construct } from 'constructs';
|
|
67
|
+
import { App, Chart, ChartProps } from 'cdk8s';
|
|
68
|
+
|
|
69
|
+
export class MyChart extends Chart {
|
|
70
|
+
constructor(scope: Construct, id: string, props?: ChartProps) {
|
|
71
|
+
super(scope, id, props);
|
|
72
|
+
const appMap = new kplus.ConfigMap(this, 'Config');
|
|
73
|
+
|
|
74
|
+
// add the files in the directory to the config map.
|
|
75
|
+
// this will create a key for each file.
|
|
76
|
+
// note: this directory needs to exist
|
|
77
|
+
// note: that only top level files will be included, sub-directories are not yet supported.
|
|
78
|
+
appMap.addDirectory(path.join(__dirname, 'app'));
|
|
79
|
+
|
|
80
|
+
const appVolume = kplus.Volume.fromConfigMap(this, 'ConfigMap', appMap);
|
|
81
|
+
|
|
82
|
+
const mountPath = '/var/app';
|
|
83
|
+
const pod = new kplus.Pod(this, 'Pod');
|
|
84
|
+
const container = pod.addContainer({
|
|
85
|
+
image: 'node',
|
|
86
|
+
command: [ 'node', 'app.js' ],
|
|
87
|
+
workingDir: mountPath,
|
|
88
|
+
});
|
|
89
|
+
|
|
90
|
+
// from here, just mount the volume to a container, and run your app!
|
|
91
|
+
container.mount(mountPath, appVolume);
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
const app = new App();
|
|
96
|
+
new MyChart(app, 'AppWithDir');
|
|
97
|
+
app.synth();
|
|
98
|
+
```
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
# Container
|
|
2
|
+
|
|
3
|
+
Define containers that run in a pod using the `Container` class.
|
|
4
|
+
|
|
5
|
+
!!! tip ""
|
|
6
|
+
[API Reference](../../reference/cdk8s-plus-32/typescript.md#container)
|
|
7
|
+
|
|
8
|
+
## Environment
|
|
9
|
+
|
|
10
|
+
A container's environment can be populated by various methods.
|
|
11
|
+
|
|
12
|
+
### Variables
|
|
13
|
+
|
|
14
|
+
Environment variables can be added to containers by specifying the
|
|
15
|
+
variable name and value. The value can come from different sources, either dynamic or static.
|
|
16
|
+
|
|
17
|
+
```typescript
|
|
18
|
+
import * as kplus from 'cdk8s-plus-32';
|
|
19
|
+
import { Construct } from 'constructs';
|
|
20
|
+
import { App, Chart, ChartProps } from 'cdk8s';
|
|
21
|
+
|
|
22
|
+
export class MyChart extends Chart {
|
|
23
|
+
constructor(scope: Construct, id: string, props: ChartProps = { }) {
|
|
24
|
+
super(scope, id, props);
|
|
25
|
+
|
|
26
|
+
const pod = new kplus.Pod(this, 'Pod');
|
|
27
|
+
const container = pod.addContainer({
|
|
28
|
+
image: 'my-app'
|
|
29
|
+
});
|
|
30
|
+
|
|
31
|
+
// use a static value.
|
|
32
|
+
container.env.addVariable('endpoint', kplus.EnvValue.fromValue('value'));
|
|
33
|
+
|
|
34
|
+
// use a specific key from a config map.
|
|
35
|
+
const backendsConfig = kplus.ConfigMap.fromConfigMapName(this, 'BackendConfig', 'backends');
|
|
36
|
+
container.env.addVariable('endpoint', kplus.EnvValue.fromConfigMap(backendsConfig, 'endpoint'));
|
|
37
|
+
|
|
38
|
+
// use a specific key from a secret.
|
|
39
|
+
const credentials = kplus.Secret.fromSecretName(this, 'Credentials', 'credentials');
|
|
40
|
+
container.env.addVariable('password', kplus.EnvValue.fromSecretValue({ secret: credentials, key: 'password' }));
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const app = new App();
|
|
45
|
+
new MyChart(app, 'container');
|
|
46
|
+
app.synth();
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
### Sources
|
|
50
|
+
|
|
51
|
+
Environment variables can also be populated by referencing other objects as an environment source.
|
|
52
|
+
With this method, all the key-value data of the source is added as environment variables,
|
|
53
|
+
where the key is the env name and the value is the env value.
|
|
54
|
+
|
|
55
|
+
```typescript
|
|
56
|
+
const pod = new kplus.Pod(this, 'Pod');
|
|
57
|
+
const cm = new kplus.ConfigMap(this, 'ConfigMap', {
|
|
58
|
+
data: {
|
|
59
|
+
key: 'value',
|
|
60
|
+
}
|
|
61
|
+
});
|
|
62
|
+
const container = pod.addContainer({
|
|
63
|
+
image: 'my-app'
|
|
64
|
+
});
|
|
65
|
+
|
|
66
|
+
// this will add 'key=value' env variable at runtime.
|
|
67
|
+
container.env.copyFrom(kplus.Env.fromConfigMap(cm));
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## Volume Mounts
|
|
71
|
+
|
|
72
|
+
A very common capability is to mount a volume with some data onto a container. Using pure kubernetes API, this would require writing something like:
|
|
73
|
+
|
|
74
|
+
```yaml
|
|
75
|
+
kind: Pod
|
|
76
|
+
apiVersion: v1
|
|
77
|
+
spec:
|
|
78
|
+
containers:
|
|
79
|
+
- name: main
|
|
80
|
+
volumeMounts:
|
|
81
|
+
- mountPath: /path/to/mount
|
|
82
|
+
name: 'config-volume'
|
|
83
|
+
volumes:
|
|
84
|
+
- name: 'config-volume'
|
|
85
|
+
configMap:
|
|
86
|
+
name: 'config'
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
Notice the apparent redundancy of having to specify the volume name twice. Also, if you happen to need the same mount in other pods,
|
|
90
|
+
you would need to duplicate this configuration. This can get complex and cluttered very fast.
|
|
91
|
+
|
|
92
|
+
In contrast, here is how to do this with `cdk8s+`:
|
|
93
|
+
|
|
94
|
+
```typescript
|
|
95
|
+
const config = kplus.ConfigMap.fromConfigMapName(this, 'Config', 'config');
|
|
96
|
+
const volume = kplus.Volume.fromConfigMap(this, 'Volume', config);
|
|
97
|
+
|
|
98
|
+
const pod = new kplus.Pod(this, 'Pod');
|
|
99
|
+
const container = pod.addContainer({
|
|
100
|
+
image: 'my-app'
|
|
101
|
+
})
|
|
102
|
+
|
|
103
|
+
// Cool alert: every pod that will later be configured with this container,
|
|
104
|
+
// will automatically have access to this volume, so you don't need to explicitly add it to the pod spec!.
|
|
105
|
+
container.mount('/path/to/mount', volume);
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
## Probes
|
|
109
|
+
|
|
110
|
+
A [Probe] is a diagnostic performed periodically by the kubelet on a Container. To
|
|
111
|
+
perform a diagnostic, the kubelet calls a Handler implemented by the container.
|
|
112
|
+
|
|
113
|
+
[Probe]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#probe-v1-core
|
|
114
|
+
|
|
115
|
+
A `Probe` instance can be created through one of the `fromXxx` static methods:
|
|
116
|
+
|
|
117
|
+
- `Probe.fromHttpGet()`
|
|
118
|
+
- `Probe.fromCommand()`
|
|
119
|
+
|
|
120
|
+
Readiness, liveness, and startup probes can be configured at the container-level through the `readiness`, `liveness`, and `startup` options:
|
|
121
|
+
|
|
122
|
+
```typescript
|
|
123
|
+
new kplus.Pod(this, 'Pod', {
|
|
124
|
+
containers: [
|
|
125
|
+
{
|
|
126
|
+
image: 'my-app',
|
|
127
|
+
readiness: kplus.Probe.fromHttpGet('/ping'),
|
|
128
|
+
}
|
|
129
|
+
]
|
|
130
|
+
});
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
See the API reference for details.
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
# CronJob
|
|
2
|
+
|
|
3
|
+
CronJob resource is responsible for creating recurring [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/). The job recurrence is determined by a [Cron](https://github.com/cdk8s-team/cdk8s-core/blob/2.x/src/cron.ts) expression.
|
|
4
|
+
|
|
5
|
+
CronJob is similar to a [job](https://cdk8s.io/docs/latest/plus/job/) but it is suitable when there is a need to run a job indefinitely following a schedule. These repetitive jobs can be utilized for recurring tasks such as backing up a database, pinging a server for health checks, creating snapshots of systems and much more.
|
|
6
|
+
|
|
7
|
+
!!! tip ""
|
|
8
|
+
[API Reference](../../reference/cdk8s-plus-32/typescript.md#cronjob)
|
|
9
|
+
|
|
10
|
+
## Creating a `CronJob`
|
|
11
|
+
|
|
12
|
+
```typescript
|
|
13
|
+
import * as kplus from 'cdk8s-plus-32';
|
|
14
|
+
import { Construct } from 'constructs';
|
|
15
|
+
import { App, Chart, ChartProps, Cron } from 'cdk8s';
|
|
16
|
+
|
|
17
|
+
export class MyChart extends Chart {
|
|
18
|
+
constructor(scope: Construct, id: string, props: ChartProps = { }) {
|
|
19
|
+
super(scope, id, props);
|
|
20
|
+
|
|
21
|
+
new kplus.CronJob(this, 'CronJob', {
|
|
22
|
+
containers: [{
|
|
23
|
+
image: 'databack/mysql-backup',
|
|
24
|
+
}],
|
|
25
|
+
// You can pass a custom cron schedule using our Cron class
|
|
26
|
+
schedule: Cron.schedule({
|
|
27
|
+
minute: '*',
|
|
28
|
+
hour: '*',
|
|
29
|
+
day: '*',
|
|
30
|
+
month: '*',
|
|
31
|
+
weekDay: '*',
|
|
32
|
+
}),
|
|
33
|
+
});
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
const app = new App();
|
|
38
|
+
new MyChart(app, 'cronjob-readme');
|
|
39
|
+
app.synth();
|
|
40
|
+
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
### Defaults
|
|
44
|
+
|
|
45
|
+
The above would create a cronjob resource which would schedule `databack/mysql-backup` to run every minute. With this resource, we also set some meaningful defaults. For instance, this cronjob would not run jobs concurrently by default. It will also retain 3 instances of successful job runs and 1 instance of a failed run for debugging later if needed. To customize the properties, see [API Reference](../../reference/cdk8s-plus-32/typescript.md#cronjob).
|
|
46
|
+
|
|
47
|
+
### Helper Functions
|
|
48
|
+
|
|
49
|
+
As we see in the previous example, we can pass a custom cron expression for scheduling our jobs. But we have also added helper functions that make it easy to express some of the commonly used schedules. These include scheduling jobs to run every minute, hour, day, week, month or year. For instance, the same example mentioned before could be written as:
|
|
50
|
+
|
|
51
|
+
```typescript
|
|
52
|
+
new kplus.CronJob(this, 'CronJob', {
|
|
53
|
+
containers: [{
|
|
54
|
+
image: 'databack/mysql-backup',
|
|
55
|
+
}],
|
|
56
|
+
// This would schedule jobs to be scheduled to run every minute
|
|
57
|
+
schedule: Cron.everyMinute(),
|
|
58
|
+
});
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### Validations
|
|
62
|
+
|
|
63
|
+
The cronjob construct also validates some of the properties so that the manifest created works as expected.
|
|
64
|
+
|
|
65
|
+
* You cannot pass a `startingDeadline` property value less than 10 seconds. This is because the Kubernetes CronJobController checks things every 10 seconds, and if the value passed is less than that, the jobs would not be scheduled.
|
|
66
|
+
|
|
67
|
+
* `ttlAfterFinished` job property limits the lifetime of a job that has finished execution. You cannot pass the `ttlAfterFinished` property with any/both of the `successfulJobsRetained` and `failedJobsRetained` property since this would not let retention of jobs work in an expected manner.
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
# Deployment
|
|
2
|
+
|
|
3
|
+
Create a deployment to govern the lifecycle and orchestration of a set of identical pods.
|
|
4
|
+
|
|
5
|
+
!!! tip ""
|
|
6
|
+
[API Reference](../../reference/cdk8s-plus-32/typescript.md#deployment)
|
|
7
|
+
|
|
8
|
+
## Automatic pod selection
|
|
9
|
+
|
|
10
|
+
When you specify pods in a deployment, you normally have to configure the appropriate labels and selectors to
|
|
11
|
+
make the deployment control the relevant pods. This construct does this automatically.
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
import * as kplus from 'cdk8s-plus-32';
|
|
15
|
+
import { Construct } from 'constructs';
|
|
16
|
+
import { App, Chart, ChartProps } from 'cdk8s';
|
|
17
|
+
|
|
18
|
+
export class MyChart extends Chart {
|
|
19
|
+
constructor(scope: Construct, id: string, props: ChartProps = { }) {
|
|
20
|
+
super(scope, id, props);
|
|
21
|
+
|
|
22
|
+
new kplus.Deployment(this, 'FrontEnds', {
|
|
23
|
+
containers: [ { image: 'node' } ],
|
|
24
|
+
});
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
const app = new App();
|
|
29
|
+
new MyChart(app, 'deployment');
|
|
30
|
+
app.synth();
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
Note the resulting manifest contains a special `cdk8s.io/metadata.addr` label that is applied to the pods, and is used as
|
|
34
|
+
the selector for the deployment.
|
|
35
|
+
|
|
36
|
+
```yaml
|
|
37
|
+
apiVersion: apps/v1
|
|
38
|
+
kind: Deployment
|
|
39
|
+
metadata:
|
|
40
|
+
name: deployment-frontends-c8e48310
|
|
41
|
+
spec:
|
|
42
|
+
minReadySeconds: 0
|
|
43
|
+
progressDeadlineSeconds: 600
|
|
44
|
+
replicas: 2
|
|
45
|
+
selector:
|
|
46
|
+
matchLabels:
|
|
47
|
+
cdk8s.io/metadata.addr: deployment-FrontEnds-c89e9e97
|
|
48
|
+
strategy:
|
|
49
|
+
rollingUpdate:
|
|
50
|
+
maxSurge: 25%
|
|
51
|
+
maxUnavailable: 25%
|
|
52
|
+
type: RollingUpdate
|
|
53
|
+
template:
|
|
54
|
+
metadata:
|
|
55
|
+
labels:
|
|
56
|
+
cdk8s.io/metadata.addr: deployment-FrontEnds-c89e9e97
|
|
57
|
+
spec:
|
|
58
|
+
automountServiceAccountToken: false
|
|
59
|
+
containers:
|
|
60
|
+
- image: node
|
|
61
|
+
imagePullPolicy: Always
|
|
62
|
+
name: main
|
|
63
|
+
resources:
|
|
64
|
+
limits:
|
|
65
|
+
cpu: 1500m
|
|
66
|
+
memory: 2048Mi
|
|
67
|
+
requests:
|
|
68
|
+
cpu: 1000m
|
|
69
|
+
memory: 512Mi
|
|
70
|
+
securityContext:
|
|
71
|
+
allowPrivilegeEscalation: false
|
|
72
|
+
privileged: false
|
|
73
|
+
readOnlyRootFilesystem: true
|
|
74
|
+
runAsGroup: 26000
|
|
75
|
+
runAsNonRoot: true
|
|
76
|
+
runAsUser: 25000
|
|
77
|
+
dnsPolicy: ClusterFirst
|
|
78
|
+
restartPolicy: Always
|
|
79
|
+
securityContext:
|
|
80
|
+
fsGroupChangePolicy: Always
|
|
81
|
+
runAsNonRoot: true
|
|
82
|
+
setHostnameAsFQDN: false
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
## Exposing via a service
|
|
86
|
+
|
|
87
|
+
Following up on pod selection, you can also easily create a service that will select the pods relevant to the deployment.
|
|
88
|
+
|
|
89
|
+
```typescript
|
|
90
|
+
// store the deployment to created in a constant
|
|
91
|
+
const frontends = new kplus.Deployment(this, 'FrontEnds', {
|
|
92
|
+
containers: [ {
|
|
93
|
+
image: 'node',
|
|
94
|
+
portNumber: 9000,
|
|
95
|
+
} ],
|
|
96
|
+
});
|
|
97
|
+
|
|
98
|
+
// create a ClusterIP service that listens on port 9000 and redirects to port 9000 on the containers.
|
|
99
|
+
frontends.exposeViaService({ ports: [{
|
|
100
|
+
port: 9000,
|
|
101
|
+
}]
|
|
102
|
+
});
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
Notice that the resulting manifest will have the same `cdk8s.io/metadata.addr` magic label as the selector.
|
|
106
|
+
This will cause the service to attach to the pods that were configured as part of the said deployment.
|
|
107
|
+
|
|
108
|
+
```yaml
|
|
109
|
+
apiVersion: apps/v1
|
|
110
|
+
kind: Deployment
|
|
111
|
+
metadata:
|
|
112
|
+
name: deployment-frontends-c8e48310
|
|
113
|
+
spec:
|
|
114
|
+
minReadySeconds: 0
|
|
115
|
+
progressDeadlineSeconds: 600
|
|
116
|
+
replicas: 2
|
|
117
|
+
selector:
|
|
118
|
+
matchLabels:
|
|
119
|
+
cdk8s.io/metadata.addr: deployment-FrontEnds-c89e9e97
|
|
120
|
+
strategy:
|
|
121
|
+
rollingUpdate:
|
|
122
|
+
maxSurge: 25%
|
|
123
|
+
maxUnavailable: 25%
|
|
124
|
+
type: RollingUpdate
|
|
125
|
+
template:
|
|
126
|
+
metadata:
|
|
127
|
+
labels:
|
|
128
|
+
cdk8s.io/metadata.addr: deployment-FrontEnds-c89e9e97
|
|
129
|
+
spec:
|
|
130
|
+
automountServiceAccountToken: false
|
|
131
|
+
containers:
|
|
132
|
+
- image: node
|
|
133
|
+
imagePullPolicy: Always
|
|
134
|
+
name: main
|
|
135
|
+
ports:
|
|
136
|
+
- containerPort: 9000
|
|
137
|
+
resources:
|
|
138
|
+
limits:
|
|
139
|
+
cpu: 1500m
|
|
140
|
+
memory: 2048Mi
|
|
141
|
+
requests:
|
|
142
|
+
cpu: 1000m
|
|
143
|
+
memory: 512Mi
|
|
144
|
+
securityContext:
|
|
145
|
+
allowPrivilegeEscalation: false
|
|
146
|
+
privileged: false
|
|
147
|
+
readOnlyRootFilesystem: true
|
|
148
|
+
runAsGroup: 26000
|
|
149
|
+
runAsNonRoot: true
|
|
150
|
+
runAsUser: 25000
|
|
151
|
+
startupProbe:
|
|
152
|
+
failureThreshold: 3
|
|
153
|
+
tcpSocket:
|
|
154
|
+
port: 9000
|
|
155
|
+
dnsPolicy: ClusterFirst
|
|
156
|
+
restartPolicy: Always
|
|
157
|
+
securityContext:
|
|
158
|
+
fsGroupChangePolicy: Always
|
|
159
|
+
runAsNonRoot: true
|
|
160
|
+
setHostnameAsFQDN: false
|
|
161
|
+
---
|
|
162
|
+
apiVersion: v1
|
|
163
|
+
kind: Service
|
|
164
|
+
metadata:
|
|
165
|
+
name: deployment-frontends-service-c8206158
|
|
166
|
+
spec:
|
|
167
|
+
externalIPs: []
|
|
168
|
+
ports:
|
|
169
|
+
- port: 9000
|
|
170
|
+
selector:
|
|
171
|
+
cdk8s.io/metadata.addr: deployment-FrontEnds-c89e9e97
|
|
172
|
+
type: ClusterIP
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
## Scheduling
|
|
176
|
+
|
|
177
|
+
In addition to the scheduling capabilities provided by [pod scheduling](./pod.md#scheduling),
|
|
178
|
+
a Deployment offers the following:
|
|
179
|
+
|
|
180
|
+
### Spreading
|
|
181
|
+
|
|
182
|
+
A spread is a [separation](./pod.md#pod-separation) of pods from themselves.
|
|
183
|
+
It can be used to ensure replicas of the same workload are scheduled on different topologies.
|
|
184
|
+
|
|
185
|
+
> The same API is also available on all workload resources (i.e `Deployment`, `StatefulSet`, `Job`, `DaemonSet`).
|
|
186
|
+
|
|
187
|
+
```typescript
|
|
188
|
+
const redis = new kplus.Deployment(this, 'Redis', {
|
|
189
|
+
containers: [{ image: 'redis' }],
|
|
190
|
+
replicas: 3,
|
|
191
|
+
});
|
|
192
|
+
|
|
193
|
+
redis.scheduling.spread({
|
|
194
|
+
topology: kplus.Topology.HOSTNAME
|
|
195
|
+
});
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
This example ensures that each replica of the `Redis` deployment
|
|
199
|
+
will be scheduled on a different node.
|
|
200
|
+
|
|
201
|
+
Take, for [example](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#more-practical-use-cases), a three-node cluster running a web application with an in-memory cache like redis. You'd like to co-locate the web servers with the cache as much as possible, while still maintaining node failure resistance (i.e. not all pods are on the same node).
|
|
202
|
+
|
|
203
|
+
Here is how you can accomplish that:
|
|
204
|
+
|
|
205
|
+
```typescript
|
|
206
|
+
const redis = new kplus.Deployment(this, 'Redis', {
|
|
207
|
+
containers: [{ image: 'redis' }],
|
|
208
|
+
replicas: 3,
|
|
209
|
+
});
|
|
210
|
+
|
|
211
|
+
const web = new kplus.Deployment(this, 'Web', {
|
|
212
|
+
containers: [{ image: 'web' }],
|
|
213
|
+
replicas: 3,
|
|
214
|
+
});
|
|
215
|
+
|
|
216
|
+
// ensure redis is spread across all nodes
|
|
217
|
+
redis.scheduling.spread({
|
|
218
|
+
topology: kplus.Topology.HOSTNAME
|
|
219
|
+
});
|
|
220
|
+
|
|
221
|
+
// ensure web app is spread across all nodes
|
|
222
|
+
web.scheduling.spread({
|
|
223
|
+
topology: kplus.Topology.HOSTNAME
|
|
224
|
+
});
|
|
225
|
+
|
|
226
|
+
// ensure a web app pod always runs along side a cache instance
|
|
227
|
+
web.scheduling.colocate(redis);
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
## Connections
|
|
231
|
+
|
|
232
|
+
See [Pod connections](./pod.md#connections).
|