cdk8s-plus-33 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +98351 -0
- package/CODE_OF_CONDUCT.md +3 -0
- package/CONTRIBUTING.md +185 -0
- package/DCO +34 -0
- package/LICENSE +202 -0
- package/NOTICE +1 -0
- package/OWNERS.md +5 -0
- package/README.md +32 -0
- package/SECURITY.md +5 -0
- package/cdk8s.yaml +10 -0
- package/docs/java.md +24323 -0
- package/docs/plus/config-map.md +98 -0
- package/docs/plus/container.md +133 -0
- package/docs/plus/cronjob.md +67 -0
- package/docs/plus/deployment.md +232 -0
- package/docs/plus/horizontal-pod-autoscaler.md +226 -0
- package/docs/plus/ingress.md +68 -0
- package/docs/plus/job.md +48 -0
- package/docs/plus/namespace.md +58 -0
- package/docs/plus/network-policy.md +341 -0
- package/docs/plus/pod.md +455 -0
- package/docs/plus/pv.md +82 -0
- package/docs/plus/pvc.md +77 -0
- package/docs/plus/rbac.md +104 -0
- package/docs/plus/secret.md +32 -0
- package/docs/plus/service-account.md +35 -0
- package/docs/plus/service.md +41 -0
- package/docs/plus/volume.md +38 -0
- package/docs/python.md +26547 -0
- package/docs/typescript.md +19825 -0
- package/git-hooks/README.md +9 -0
- package/git-hooks/prepare-commit-msg +18 -0
- package/git-hooks/setup.sh +10 -0
- package/lib/_action.d.ts +21 -0
- package/lib/_action.js +32 -0
- package/lib/api-resource.d.ts +298 -0
- package/lib/api-resource.js +430 -0
- package/lib/base.d.ts +79 -0
- package/lib/base.js +92 -0
- package/lib/config-map.d.ts +126 -0
- package/lib/config-map.js +159 -0
- package/lib/container.d.ts +1057 -0
- package/lib/container.js +845 -0
- package/lib/cron-job.d.ts +138 -0
- package/lib/cron-job.js +103 -0
- package/lib/daemon-set.d.ts +45 -0
- package/lib/daemon-set.js +55 -0
- package/lib/deployment.d.ts +223 -0
- package/lib/deployment.js +214 -0
- package/lib/handler.d.ts +62 -0
- package/lib/handler.js +54 -0
- package/lib/horizontal-pod-autoscaler.d.ts +500 -0
- package/lib/horizontal-pod-autoscaler.js +569 -0
- package/lib/imports/k8s.d.ts +24537 -0
- package/lib/imports/k8s.js +18615 -0
- package/lib/index.d.ts +26 -0
- package/lib/index.js +44 -0
- package/lib/ingress.d.ts +230 -0
- package/lib/ingress.js +246 -0
- package/lib/job.d.ts +64 -0
- package/lib/job.js +54 -0
- package/lib/namespace.d.ts +128 -0
- package/lib/namespace.js +109 -0
- package/lib/network-policy.d.ts +311 -0
- package/lib/network-policy.js +344 -0
- package/lib/pod.d.ts +1097 -0
- package/lib/pod.js +1143 -0
- package/lib/probe.d.ts +141 -0
- package/lib/probe.js +77 -0
- package/lib/pv.d.ts +375 -0
- package/lib/pv.js +273 -0
- package/lib/pvc.d.ts +163 -0
- package/lib/pvc.js +154 -0
- package/lib/role-binding.d.ts +138 -0
- package/lib/role-binding.js +165 -0
- package/lib/role.d.ts +268 -0
- package/lib/role.js +401 -0
- package/lib/secret.d.ts +195 -0
- package/lib/secret.js +185 -0
- package/lib/service-account.d.ts +83 -0
- package/lib/service-account.js +105 -0
- package/lib/service.d.ts +289 -0
- package/lib/service.js +182 -0
- package/lib/stateful-set.d.ts +169 -0
- package/lib/stateful-set.js +174 -0
- package/lib/utils.d.ts +4 -0
- package/lib/utils.js +33 -0
- package/lib/volume.d.ts +573 -0
- package/lib/volume.js +371 -0
- package/lib/workload.d.ts +121 -0
- package/lib/workload.js +122 -0
- package/node_modules/balanced-match/.github/FUNDING.yml +2 -0
- package/node_modules/balanced-match/LICENSE.md +21 -0
- package/node_modules/balanced-match/README.md +97 -0
- package/node_modules/balanced-match/index.js +62 -0
- package/node_modules/balanced-match/package.json +48 -0
- package/node_modules/brace-expansion/.github/FUNDING.yml +2 -0
- package/node_modules/brace-expansion/LICENSE +21 -0
- package/node_modules/brace-expansion/README.md +135 -0
- package/node_modules/brace-expansion/index.js +203 -0
- package/node_modules/brace-expansion/package.json +49 -0
- package/node_modules/minimatch/LICENSE +15 -0
- package/node_modules/minimatch/README.md +454 -0
- package/node_modules/minimatch/dist/commonjs/assert-valid-pattern.d.ts +2 -0
- package/node_modules/minimatch/dist/commonjs/assert-valid-pattern.d.ts.map +1 -0
- package/node_modules/minimatch/dist/commonjs/assert-valid-pattern.js +14 -0
- package/node_modules/minimatch/dist/commonjs/assert-valid-pattern.js.map +1 -0
- package/node_modules/minimatch/dist/commonjs/ast.d.ts +20 -0
- package/node_modules/minimatch/dist/commonjs/ast.d.ts.map +1 -0
- package/node_modules/minimatch/dist/commonjs/ast.js +592 -0
- package/node_modules/minimatch/dist/commonjs/ast.js.map +1 -0
- package/node_modules/minimatch/dist/commonjs/brace-expressions.d.ts +8 -0
- package/node_modules/minimatch/dist/commonjs/brace-expressions.d.ts.map +1 -0
- package/node_modules/minimatch/dist/commonjs/brace-expressions.js +152 -0
- package/node_modules/minimatch/dist/commonjs/brace-expressions.js.map +1 -0
- package/node_modules/minimatch/dist/commonjs/escape.d.ts +12 -0
- package/node_modules/minimatch/dist/commonjs/escape.d.ts.map +1 -0
- package/node_modules/minimatch/dist/commonjs/escape.js +22 -0
- package/node_modules/minimatch/dist/commonjs/escape.js.map +1 -0
- package/node_modules/minimatch/dist/commonjs/index.d.ts +94 -0
- package/node_modules/minimatch/dist/commonjs/index.d.ts.map +1 -0
- package/node_modules/minimatch/dist/commonjs/index.js +1017 -0
- package/node_modules/minimatch/dist/commonjs/index.js.map +1 -0
- package/node_modules/minimatch/dist/commonjs/package.json +3 -0
- package/node_modules/minimatch/dist/commonjs/unescape.d.ts +17 -0
- package/node_modules/minimatch/dist/commonjs/unescape.d.ts.map +1 -0
- package/node_modules/minimatch/dist/commonjs/unescape.js +24 -0
- package/node_modules/minimatch/dist/commonjs/unescape.js.map +1 -0
- package/node_modules/minimatch/dist/esm/assert-valid-pattern.d.ts +2 -0
- package/node_modules/minimatch/dist/esm/assert-valid-pattern.d.ts.map +1 -0
- package/node_modules/minimatch/dist/esm/assert-valid-pattern.js +10 -0
- package/node_modules/minimatch/dist/esm/assert-valid-pattern.js.map +1 -0
- package/node_modules/minimatch/dist/esm/ast.d.ts +20 -0
- package/node_modules/minimatch/dist/esm/ast.d.ts.map +1 -0
- package/node_modules/minimatch/dist/esm/ast.js +588 -0
- package/node_modules/minimatch/dist/esm/ast.js.map +1 -0
- package/node_modules/minimatch/dist/esm/brace-expressions.d.ts +8 -0
- package/node_modules/minimatch/dist/esm/brace-expressions.d.ts.map +1 -0
- package/node_modules/minimatch/dist/esm/brace-expressions.js +148 -0
- package/node_modules/minimatch/dist/esm/brace-expressions.js.map +1 -0
- package/node_modules/minimatch/dist/esm/escape.d.ts +12 -0
- package/node_modules/minimatch/dist/esm/escape.d.ts.map +1 -0
- package/node_modules/minimatch/dist/esm/escape.js +18 -0
- package/node_modules/minimatch/dist/esm/escape.js.map +1 -0
- package/node_modules/minimatch/dist/esm/index.d.ts +94 -0
- package/node_modules/minimatch/dist/esm/index.d.ts.map +1 -0
- package/node_modules/minimatch/dist/esm/index.js +1001 -0
- package/node_modules/minimatch/dist/esm/index.js.map +1 -0
- package/node_modules/minimatch/dist/esm/package.json +3 -0
- package/node_modules/minimatch/dist/esm/unescape.d.ts +17 -0
- package/node_modules/minimatch/dist/esm/unescape.d.ts.map +1 -0
- package/node_modules/minimatch/dist/esm/unescape.js +20 -0
- package/node_modules/minimatch/dist/esm/unescape.js.map +1 -0
- package/node_modules/minimatch/package.json +82 -0
- package/package.json +181 -0
- package/rotate.md +84 -0
package/docs/plus/pod.md
ADDED
|
@@ -0,0 +1,455 @@
|
|
|
1
|
+
# Pod
|
|
2
|
+
|
|
3
|
+
A pod is essentially a collection of containers. It is the most fundamental computation unit that can be provisioned.
|
|
4
|
+
|
|
5
|
+
!!! tip ""
|
|
6
|
+
[API Reference](../../reference/cdk8s-plus-33/typescript.md#pod)
|
|
7
|
+
|
|
8
|
+
## Create a `Pod`
|
|
9
|
+
|
|
10
|
+
To create a new pod in the cluster:
|
|
11
|
+
|
|
12
|
+
```ts
|
|
13
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
14
|
+
import * as k from 'cdk8s';
|
|
15
|
+
|
|
16
|
+
const app = new k.App();
|
|
17
|
+
const chart = new k.Chart(app, 'Chart');
|
|
18
|
+
|
|
19
|
+
const pod = new kplus.Pod(chart, 'Pod');
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
### Adding Containers
|
|
23
|
+
|
|
24
|
+
Every `Pod` must have at least one container before you synthesize the application.
|
|
25
|
+
You can add containers either during, or post instantiation:
|
|
26
|
+
|
|
27
|
+
```ts
|
|
28
|
+
const pod = new kplus.Pod(chart, 'Pod', {
|
|
29
|
+
containers: [{ image: 'image' }],
|
|
30
|
+
});
|
|
31
|
+
|
|
32
|
+
pod.addContainer({ image: 'another-image' });
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
### Adding Volumes
|
|
36
|
+
|
|
37
|
+
Volumes can be added to pod definition either during, or post instantiation:
|
|
38
|
+
|
|
39
|
+
```typescript
|
|
40
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
41
|
+
|
|
42
|
+
const data1 = kplus.Volume.fromEmptyDir('data1');
|
|
43
|
+
const data2 = kplus.Volume.fromEmptyDir('data2');
|
|
44
|
+
|
|
45
|
+
const pod = new kplus.Pod(chart, 'Pod', {
|
|
46
|
+
volumes: [data1],
|
|
47
|
+
});
|
|
48
|
+
|
|
49
|
+
pod.addVolume(data2);
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
Note that adding a volume to a pod doesn't actually make the volume available
|
|
53
|
+
to its containers. For that, you also need to mount the volume onto a container.
|
|
54
|
+
|
|
55
|
+
```ts
|
|
56
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
57
|
+
|
|
58
|
+
const data = kplus.Volume.fromEmptyDir('data');
|
|
59
|
+
|
|
60
|
+
const pod = new kplus.Pod(chart, 'Pod');
|
|
61
|
+
const container = pod.addContainer({ image: 'image' });
|
|
62
|
+
|
|
63
|
+
// mount the volume onto the container. this is actually enough, and you
|
|
64
|
+
// don't need to explicitly add the volume to the pod -- cdk8s+ will do that for you.
|
|
65
|
+
container.mount('/data', data);
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Applying a restart policy
|
|
69
|
+
|
|
70
|
+
A restart policy can only be specified at instantiation time:
|
|
71
|
+
|
|
72
|
+
```typescript
|
|
73
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
74
|
+
|
|
75
|
+
const app = new k.App();
|
|
76
|
+
const chart = new k.Chart(app, 'Chart');
|
|
77
|
+
|
|
78
|
+
const pod = new kplus.Pod(chart, 'Pod', {
|
|
79
|
+
restartPolicy: kplus.RestartPolicy.NEVER,
|
|
80
|
+
});
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
### Assigning a ServiceAccount
|
|
84
|
+
|
|
85
|
+
A service account can only be specified at instantiation time:
|
|
86
|
+
|
|
87
|
+
```typescript
|
|
88
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
89
|
+
|
|
90
|
+
const app = new k.App();
|
|
91
|
+
const chart = new k.Chart(app, 'Chart');
|
|
92
|
+
|
|
93
|
+
const pod = new kplus.Pod(chart, 'Pod', {
|
|
94
|
+
serviceAccount: kplus.ServiceAccount.fromServiceAccountName('aws'),
|
|
95
|
+
});
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
## Select pods
|
|
99
|
+
|
|
100
|
+
Pods can also be selected by various mechanisms. These selections are often used in other
|
|
101
|
+
cdk8s+ API's, such as [pod selection](./pod.md#pod-selection) during scheduling.
|
|
102
|
+
|
|
103
|
+
### Select pods with labels
|
|
104
|
+
|
|
105
|
+
Selects all pods that have the `app=store` label.
|
|
106
|
+
|
|
107
|
+
```ts
|
|
108
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
109
|
+
|
|
110
|
+
const pods = kplus.Pods.select(this, 'Store', { labels: { app: 'store' }});
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
### Select pods with expressions
|
|
114
|
+
|
|
115
|
+
Selects all pods that have the `app` label, regardless of the value.
|
|
116
|
+
|
|
117
|
+
```ts
|
|
118
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
119
|
+
|
|
120
|
+
const pods = kplus.Pods.select(this, 'App', {
|
|
121
|
+
expressions: [kplus.LabelExpression.exists('app')]
|
|
122
|
+
});
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
### Select pods with labels in a particular namespace
|
|
126
|
+
|
|
127
|
+
Pod selection can also be scoped to specific namespaces.
|
|
128
|
+
This is done using the `namespaces` property, which can accept any [namespace selector](./namespace.md#select-namespaces).
|
|
129
|
+
|
|
130
|
+
For example, select all pods that have the `app=store` label in the `backoffice` namespace:
|
|
131
|
+
|
|
132
|
+
```ts
|
|
133
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
134
|
+
|
|
135
|
+
const pods = kplus.Pods.select(this, 'Pods', {
|
|
136
|
+
labels: { app: 'store' },
|
|
137
|
+
namespaces: kplus.Namespaces.select(this, 'Backoffice', { names: ['backoffice'] }),
|
|
138
|
+
});
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
## Scheduling
|
|
142
|
+
|
|
143
|
+
Kubernetes offers a few properties for controlling how pods are scheduled onto nodes.
|
|
144
|
+
|
|
145
|
+
- [`nodeName`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename)
|
|
146
|
+
- [`nodeSelector`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector)
|
|
147
|
+
- [`nodeAffinity`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
|
|
148
|
+
- [`podAffinity`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)
|
|
149
|
+
- [`podAntiAffinity`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)
|
|
150
|
+
- [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
|
|
151
|
+
|
|
152
|
+
CDK8s+ collapses all these different features and exposes them under one unified API we refer to as `Scheduling`. This API is available on a `Pod` via the `scheduling` property.
|
|
153
|
+
|
|
154
|
+
> The same API is also available on all workload resources (i.e `Deployment`, `StatefulSet`, `Job`, `DaemonSet`).
|
|
155
|
+
|
|
156
|
+
Scheduling is comprised of two different types:
|
|
157
|
+
|
|
158
|
+
- [Node Selection](#node-selection)
|
|
159
|
+
- [Pod Selection](#pod-selection)
|
|
160
|
+
|
|
161
|
+
### Node Selection
|
|
162
|
+
|
|
163
|
+
Node selection is the process of directly selecting which
|
|
164
|
+
nodes should pods be scheduled on, by selecting node attributes.
|
|
165
|
+
|
|
166
|
+
#### Node Assignment
|
|
167
|
+
|
|
168
|
+
You can statically assign a pod to a specific node, by using the node's name.
|
|
169
|
+
|
|
170
|
+
```ts
|
|
171
|
+
import * as k from 'cdk8s';
|
|
172
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
173
|
+
|
|
174
|
+
const app = new k.App();
|
|
175
|
+
const chart = new k.Chart(app, 'Chart');
|
|
176
|
+
|
|
177
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
178
|
+
containers: [{ image: 'redis' }]
|
|
179
|
+
});
|
|
180
|
+
redis.scheduling.assign(kplus.Node.named('node1'));
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
This example will cause the `Redis` pod to be scheduled on a node with name `node1`.
|
|
184
|
+
|
|
185
|
+
#### Node Attraction
|
|
186
|
+
|
|
187
|
+
Pods can attract themselves to nodes. As opposed to an assignment,
|
|
188
|
+
an attraction can be made to a **set** of nodes, specified by node labels.
|
|
189
|
+
An attraction can be either required, or preferred.
|
|
190
|
+
|
|
191
|
+
```ts
|
|
192
|
+
import * as k from 'cdk8s';
|
|
193
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
194
|
+
|
|
195
|
+
const app = new k.App();
|
|
196
|
+
const chart = new k.Chart(app, 'Chart');
|
|
197
|
+
|
|
198
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
199
|
+
containers: [{ image: 'redis' }]
|
|
200
|
+
});
|
|
201
|
+
const highMemoryNodes = kplus.Node.labeled(kplus.NodeLabelQuery.is('memory', 'high'));
|
|
202
|
+
redis.scheduling.attract(highMemoryNodes);
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
This example will **require** the `Redis` pod be scheduled on a
|
|
206
|
+
node that has the `memory=high` label. To request a **preference**, specify the [`weight`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity-weight) property:
|
|
207
|
+
|
|
208
|
+
```ts
|
|
209
|
+
redis.scheduling.attract(highMemoryNodes, { weight: 50 });
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
#### Node Toleration
|
|
213
|
+
|
|
214
|
+
While attraction is a property of Pods that attracts them to a set of nodes,
|
|
215
|
+
taints are the opposite -- they allow a node to repel a set of pods.
|
|
216
|
+
|
|
217
|
+
Tolerations are applied to pods, and allow (but do not require) the pods to
|
|
218
|
+
schedule onto nodes with matching taints.
|
|
219
|
+
|
|
220
|
+
Taints and tolerations work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node; this marks that the node should not accept any pods that do not tolerate the taints.
|
|
221
|
+
|
|
222
|
+
A toleration can be made to a **set** of nodes, specified by node taints.
|
|
223
|
+
|
|
224
|
+
```ts
|
|
225
|
+
import * as k from 'cdk8s';
|
|
226
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
227
|
+
|
|
228
|
+
const app = new k.App();
|
|
229
|
+
const chart = new k.Chart(app, 'Chart');
|
|
230
|
+
|
|
231
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
232
|
+
containers: [{ image: 'redis' }]
|
|
233
|
+
});
|
|
234
|
+
|
|
235
|
+
const node = kplus.Node.tainted(kplus.NodeTaintQuery.is('key1', 'value1', {
|
|
236
|
+
  effect: kplus.TaintEffect.NO_SCHEDULE
|
|
237
|
+
}));
|
|
238
|
+
redis.scheduling.tolerate(node);
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
This example says that the `Redis` pod is able to tolerate
|
|
242
|
+
nodes tainted with `key1=value1:NoSchedule`.
|
|
243
|
+
|
|
244
|
+
### Pod Selection
|
|
245
|
+
|
|
246
|
+
Pod selection is the process of selecting which **nodes** should pods be scheduled on,
|
|
247
|
+
by looking at which other **pods** are already scheduled on those nodes.
|
|
248
|
+
|
|
249
|
+
The API's presented here interact either with specific pods,
|
|
250
|
+
i.e instances of `Pod` or `Workload` (e.g `Deployment`, `StatefulSet`, `Job`, ...), or with a group of pods, i.e ones that are identified by a set of [selectors](#select-pods).
|
|
251
|
+
|
|
252
|
+
#### Pod Co-location
|
|
253
|
+
|
|
254
|
+
Pod co-location is a way to tell the scheduler to place a pod in a *topology*
|
|
255
|
+
that already hosts other pods that meet some criteria.
|
|
256
|
+
|
|
257
|
+
A topology is expressed via the `topology` property, and
|
|
258
|
+
represents a failure domain that Kubernetes is aware of. It can be one of:
|
|
259
|
+
|
|
260
|
+
- `kplus.Topology.HOSTNAME`: A single node. This is the default value.
|
|
261
|
+
- `kplus.Topology.ZONE`: Multiple nodes in a single availability zone.
|
|
262
|
+
- `kplus.Topology.REGION`: Multiple nodes in a single region.
|
|
263
|
+
- `kplus.Topology.custom`: Any other configurable value.
|
|
264
|
+
|
|
265
|
+
Similarly to node attractions, co-location can also be either required, or preferred.
|
|
266
|
+
|
|
267
|
+
```ts
|
|
268
|
+
import * as k from 'cdk8s';
|
|
269
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
270
|
+
|
|
271
|
+
const app = new k.App();
|
|
272
|
+
const chart = new k.Chart(app, 'Chart');
|
|
273
|
+
|
|
274
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
275
|
+
containers: [{ image: 'redis' }]
|
|
276
|
+
});
|
|
277
|
+
const web = new kplus.Pod(chart, 'Web', {
|
|
278
|
+
containers: [{ image: 'web' }]
|
|
279
|
+
});
|
|
280
|
+
|
|
281
|
+
web.scheduling.colocate(redis);
|
|
282
|
+
```
|
|
283
|
+
|
|
284
|
+
This example will require the `Web` pod to be scheduled on the same node as
|
|
285
|
+
the `Redis` pod. To request a **preference**, specify the [`weight`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity-weight) property:
|
|
286
|
+
|
|
287
|
+
```ts
|
|
288
|
+
web.scheduling.colocate(redis, { weight: 50 });
|
|
289
|
+
```
|
|
290
|
+
|
|
291
|
+
To use a different topology, specify the `topology` property:
|
|
292
|
+
|
|
293
|
+
```ts
|
|
294
|
+
web.scheduling.colocate(redis, { weight: 50, topology: kplus.Topology.ZONE });
|
|
295
|
+
```
|
|
296
|
+
|
|
297
|
+
This scenario configures co-location between two pods that are defined and managed
|
|
298
|
+
in the same cdk8s application. You can also co-locate with an externally
|
|
299
|
+
managed pod, by specifying a pod selector:
|
|
300
|
+
|
|
301
|
+
```ts
|
|
302
|
+
const redis = kplus.Pods.select(this, 'Cache', {
|
|
303
|
+
labels: { app: 'cache' },
|
|
304
|
+
});
|
|
305
|
+
web.scheduling.colocate(redis);
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
This will co-locate the `Web` pod with pods that have the `app=cache` label, regardless of
|
|
309
|
+
whether they are defined in the cdk8s app or not.
|
|
310
|
+
|
|
311
|
+
> **Under the hood**: Co-location with managed pods will automatically
|
|
312
|
+
> extract its labels and form the appropriate pod selector.
|
|
313
|
+
|
|
314
|
+
#### Pod Separation
|
|
315
|
+
|
|
316
|
+
Pod separation (e.g anti co-location) is a way to tell the scheduler **not to** place a
|
|
317
|
+
pod in a *topology* that already hosts other pods that meet some criteria.
|
|
318
|
+
Similarly to co-location, separation can also be either required, or preferred.
|
|
319
|
+
|
|
320
|
+
```ts
|
|
321
|
+
import * as k from 'cdk8s';
|
|
322
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
323
|
+
|
|
324
|
+
const app = new k.App();
|
|
325
|
+
const chart = new k.Chart(app, 'Chart');
|
|
326
|
+
|
|
327
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
328
|
+
containers: [{ image: 'redis' }]
|
|
329
|
+
});
|
|
330
|
+
const web = new kplus.Pod(chart, 'Web', {
|
|
331
|
+
containers: [{ image: 'web' }]
|
|
332
|
+
});
|
|
333
|
+
|
|
334
|
+
web.scheduling.separate(redis);
|
|
335
|
+
```
|
|
336
|
+
|
|
337
|
+
This example will require the `Web` pod to **not be** scheduled on the same
|
|
338
|
+
node (because the default value of the topology is `HOSTNAME`) as the `Redis` pod.
|
|
339
|
+
To request a **preference**, specify the [`weight`](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity-weight) property:
|
|
340
|
+
|
|
341
|
+
```ts
|
|
342
|
+
web.scheduling.separate(redis, { weight: 50 });
|
|
343
|
+
```
|
|
344
|
+
|
|
345
|
+
To use a different topology, specify the `topology` property:
|
|
346
|
+
|
|
347
|
+
```ts
|
|
348
|
+
web.scheduling.separate(redis, { weight: 50, topology: kplus.Topology.ZONE });
|
|
349
|
+
```
|
|
350
|
+
|
|
351
|
+
This scenario configures separation between two pods that are defined and managed
|
|
352
|
+
in the same cdk8s application. You can also separate with an externally
|
|
353
|
+
managed pod, by specifying a pod selector:
|
|
354
|
+
|
|
355
|
+
```ts
|
|
356
|
+
const redis = kplus.Pods.select(this, 'Cache', {
|
|
357
|
+
labels: { app: 'cache' },
|
|
358
|
+
});
|
|
359
|
+
web.scheduling.separate(redis);
|
|
360
|
+
```
|
|
361
|
+
|
|
362
|
+
This will separate the `Web` pod from pods that have the `app=cache` label, regardless of
|
|
363
|
+
whether they are defined in the cdk8s app or not.
|
|
364
|
+
|
|
365
|
+
> **Under the hood**: Separation from managed pods will automatically
|
|
366
|
+
> extract its labels and form the appropriate pod selector.
|
|
367
|
+
|
|
368
|
+
## Connections
|
|
369
|
+
|
|
370
|
+
Pod connections offer a simplified API to automatically create [network policies](./network-policy.md) on
|
|
371
|
+
both ends of a connection. Accessing this API is done via the `connections` property
|
|
372
|
+
of a specific `Pod`, which serves as one end of the connection.
|
|
373
|
+
The other end is a network policy [peer](./network-policy.md#peers).
|
|
374
|
+
|
|
375
|
+
### Allow To
|
|
376
|
+
|
|
377
|
+
To allow connections from a `Pod` to a [peer](./network-policy.md#peers):
|
|
378
|
+
|
|
379
|
+
```ts
|
|
380
|
+
import * as k from 'cdk8s';
|
|
381
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
382
|
+
|
|
383
|
+
const app = new k.App();
|
|
384
|
+
const chart = new k.Chart(app, 'Chart');
|
|
385
|
+
|
|
386
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
387
|
+
containers: [{ image: 'redis', portNumber: 6379 }]
|
|
388
|
+
});
|
|
389
|
+
const web = new kplus.Pod(chart, 'Web', {
|
|
390
|
+
containers: [{ image: 'web' }]
|
|
391
|
+
});
|
|
392
|
+
|
|
393
|
+
web.connections.allowTo(redis);
|
|
394
|
+
```
|
|
395
|
+
|
|
396
|
+
This will allow the `web` pod to connect to the `redis` pod on port 6379,
|
|
397
|
+
and will allow the `redis` pod to accept connections from the `web` pod on port 6379.
|
|
398
|
+
Note that the port is not specified in the `allowTo` invocation, it is automatically
|
|
399
|
+
extracted from the `redis` pod definition.
|
|
400
|
+
|
|
401
|
+
You can also pass ports explicitly, overriding this extraction:
|
|
402
|
+
|
|
403
|
+
```ts
|
|
404
|
+
web.connections.allowTo(redis, { ports: [kplus.NetworkPolicyPort.tcp(4444)] });
|
|
405
|
+
```
|
|
406
|
+
|
|
407
|
+
### Allow From
|
|
408
|
+
|
|
409
|
+
To allow connections from a [peer](./network-policy.md#peers) to a `Pod`:
|
|
410
|
+
|
|
411
|
+
```ts
|
|
412
|
+
import * as k from 'cdk8s';
|
|
413
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
414
|
+
|
|
415
|
+
const app = new k.App();
|
|
416
|
+
const chart = new k.Chart(app, 'Chart');
|
|
417
|
+
|
|
418
|
+
const redis = new kplus.Pod(chart, 'Redis', {
|
|
419
|
+
containers: [{ image: 'redis', portNumber: 6379 }]
|
|
420
|
+
});
|
|
421
|
+
const web = new kplus.Pod(chart, 'Web', {
|
|
422
|
+
containers: [{ image: 'web' }]
|
|
423
|
+
});
|
|
424
|
+
|
|
425
|
+
redis.connections.allowFrom(web);
|
|
426
|
+
```
|
|
427
|
+
|
|
428
|
+
This will allow the `redis` pod to accept connection from the `web` pod on port 6379,
|
|
429
|
+
and will allow the `web` pod to connect to the `redis` pod on port 6379.
|
|
430
|
+
Note that the port is not specified in the `allowFrom` invocation, it is automatically
|
|
431
|
+
extracted from the `redis` pod definition.
|
|
432
|
+
|
|
433
|
+
### Isolation
|
|
434
|
+
|
|
435
|
+
By default, the `allowXXX` methods will create both an egress policy on the initiating end,
|
|
436
|
+
as well as an ingress policy on the accepting end of the connection.
|
|
437
|
+
|
|
438
|
+
This means that, if no other policies apply, both sides of the connection will be *isolated*,
|
|
439
|
+
each in the corresponding direction. In the above [example](#allow-to), if the `redis` pod
|
|
440
|
+
needs to be accessed from any pod other than `web`, an explicit policy needs to be applied,
|
|
441
|
+
because the default *non-isolated* behavior is now disabled.
|
|
442
|
+
|
|
443
|
+
To control the isolation this API incurs, you can use the `isolation` option. It accepts two
|
|
444
|
+
possible values:
|
|
445
|
+
|
|
446
|
+
- `PodConnectionsIsolation.POD`: Only isolate the pod that offers the `connections` API.
|
|
447
|
+
- `PodConnectionsIsolation.PEER`: Only isolate the peer the pod needs to communicate with.
|
|
448
|
+
|
|
449
|
+
```ts
|
|
450
|
+
// this will only create an egress policy on the 'web' pod.
|
|
451
|
+
web.connections.allowTo(redis, { isolation: PodConnectionsIsolation.POD });
|
|
452
|
+
|
|
453
|
+
// this will only create an ingress policy on the 'redis' pod.
|
|
454
|
+
web.connections.allowTo(redis, { isolation: PodConnectionsIsolation.PEER });
|
|
455
|
+
```
|
package/docs/plus/pv.md
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
# PersistentVolume
|
|
2
|
+
|
|
3
|
+
A `PersistentVolume` (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes.
|
|
4
|
+
|
|
5
|
+
!!! tip ""
|
|
6
|
+
[API Reference](../../reference/cdk8s-plus-33/typescript.md#persistent-volume)
|
|
7
|
+
|
|
8
|
+
PV's are used by pods via the pod's `volumes` spec, just like regular [volumes](./volume.md).
|
|
9
|
+
They are not intended to be interchangeable with volumes; you can think of a `PersistentVolume`
|
|
10
|
+
as a specific type of volume that is detached from a pod's lifecycle, and exists even if the pod is shut down.
|
|
11
|
+
|
|
12
|
+
The `PersistentVolume` construct represents a pre-existing volume in the cluster.
|
|
13
|
+
|
|
14
|
+
## Types
|
|
15
|
+
|
|
16
|
+
Each type is implemented as its own construct, exposing both common properties as well as type
|
|
17
|
+
specific ones. Currently the supported types are:
|
|
18
|
+
|
|
19
|
+
- `AwsElasticBlockStorePersistentVolume`
|
|
20
|
+
- `AzureDiskPersistentVolume`
|
|
21
|
+
- `GCEPersistentDiskPersistentVolume`
|
|
22
|
+
|
|
23
|
+
For example, to create a PV from an existing AWS EBS volume:
|
|
24
|
+
|
|
25
|
+
```ts
|
|
26
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
27
|
+
import * as cdk8s from 'cdk8s';
|
|
28
|
+
|
|
29
|
+
const vol = new kplus.AwsElasticBlockStorePersistentVolume(chart, 'Volume', {
|
|
30
|
+
// must exist in aws
|
|
31
|
+
volumeId: 'vol1234',
|
|
32
|
+
|
|
33
|
+
// assign the volume to small-ebs storage class
|
|
34
|
+
storageClassName: 'small-ebs',
|
|
35
|
+
|
|
36
|
+
// what is the volume storage
|
|
37
|
+
storage: cdk8s.Size.gibibytes(50),
|
|
38
|
+
});
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
Note that this **does not** actually create a new volume, it merely manifests an existing
|
|
42
|
+
volume in AWS as a Kubernetes resource.
|
|
43
|
+
|
|
44
|
+
## Reserve
|
|
45
|
+
|
|
46
|
+
> See https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reserving-a-persistentvolume
|
|
47
|
+
|
|
48
|
+
Once the PV is defined, you can reserve it:
|
|
49
|
+
|
|
50
|
+
```ts
|
|
51
|
+
const claim = vol.reserve();
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
This method creates a new `PersistentVolumeClaim` and performs a
|
|
55
|
+
bi-directional binding that reserves the volume for usage.
|
|
56
|
+
You can use the claim to mount a volume onto a container like usual:
|
|
57
|
+
|
|
58
|
+
```ts
|
|
59
|
+
container.mount('/data', kplus.Volume.fromPersistentVolumeClaim(claim));
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
You can also directly mount a persistent volume, which will implicitly reserve it
|
|
63
|
+
and create a volume from the created claim:
|
|
64
|
+
|
|
65
|
+
```ts
|
|
66
|
+
const vol = new kplus.AwsElasticBlockStorePersistentVolume(chart, 'Volume', { volumeId: 'vol1234' });
|
|
67
|
+
container.mount('/data', vol);
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
## Bind
|
|
71
|
+
|
|
72
|
+
Binding is a part of the reservation process, but it only creates a one directional link.
|
|
73
|
+
You can use it to bind a PV to an existing PVC. Note however that if the PVC is not bound to the PV,
|
|
74
|
+
there's no guarantee this volume will indeed be given that specific claim.
|
|
75
|
+
|
|
76
|
+
```ts
|
|
77
|
+
const claim = kplus.PersistentVolumeClaim.fromClaimName('claim');
|
|
78
|
+
|
|
79
|
+
// will modify the vol resource to refer to the claim.
|
|
80
|
+
// but not the other way around.
|
|
81
|
+
vol.bind(claim);
|
|
82
|
+
```
|
package/docs/plus/pvc.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
# PersistentVolumeClaim
|
|
2
|
+
|
|
3
|
+
A `PersistentVolumeClaim` (PVC) is a request for storage by a pod.
|
|
4
|
+
|
|
5
|
+
!!! tip ""
|
|
6
|
+
[API Reference](../../reference/cdk8s-plus-33/typescript.md#persistent-volume-claim)
|
|
7
|
+
|
|
8
|
+
A `PersistentVolumeClaim` contains the requirements of the request, and the Kubernetes control plane is responsible for providing a physical volume that satisfies the claim's requirements.
|
|
9
|
+
|
|
10
|
+
```ts
|
|
11
|
+
import * as kplus from 'cdk8s-plus-33';
|
|
12
|
+
import * as cdk8s from 'cdk8s';
|
|
13
|
+
|
|
14
|
+
const pod = new kplus.Pod(chart, 'Pod');
|
|
15
|
+
const container = pod.addContainer({ image: 'node' });
|
|
16
|
+
|
|
17
|
+
// create the storage request
|
|
18
|
+
const claim = new kplus.PersistentVolumeClaim(chart, 'Claim', {
|
|
19
|
+
storage: cdk8s.Size.gibibytes(50),
|
|
20
|
+
});
|
|
21
|
+
|
|
22
|
+
// mount a volume based on the request to the container
|
|
23
|
+
// this will also add the volume itself to the pod spec.
|
|
24
|
+
container.mount('/data', kplus.Volume.fromPersistentVolumeClaim(claim));
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Storage Class
|
|
28
|
+
|
|
29
|
+
By default, the `storageClassName` property of a claim is not set.
|
|
30
|
+
This means that the backing volume can be provided by one of two methods:
|
|
31
|
+
|
|
32
|
+
1. Dynamically provision a volume with the default storage class.
|
|
33
|
+
2. If a default storage class is not configured in the cluster, the backing
|
|
34
|
+
volume must pre-exist and not be assigned to any storage class.
|
|
35
|
+
|
|
36
|
+
> See [Provisioning](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#provisioning) for more details.
|
|
37
|
+
|
|
38
|
+
You can also provide an explicit storage class name,
|
|
39
|
+
|
|
40
|
+
```ts
|
|
41
|
+
const claim = new kplus.PersistentVolumeClaim(chart, 'Claim', {
|
|
42
|
+
storage: cdk8s.Size.gibibytes(50),
|
|
43
|
+
storageClassName: 'large-ebs',
|
|
44
|
+
});
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
In this case, the Kubernetes control plane will either locate an existing volume with the `large-ebs` storage class, or dynamically provision a new one using the appropriate provisioner.
|
|
48
|
+
|
|
49
|
+
You can also pass in a special `""` value, this means the volume must not be assigned to any storage class.
|
|
50
|
+
Since all dynamically provisioned volumes belong to a storage class, setting this value effectively disables
|
|
51
|
+
dynamic provisioning for this claim.
|
|
52
|
+
|
|
53
|
+
```ts
|
|
54
|
+
const claim = new kplus.PersistentVolumeClaim(chart, 'Claim', {
|
|
55
|
+
storage: cdk8s.Size.gibibytes(50),
|
|
56
|
+
// disable dynamic provisioning
|
|
57
|
+
storageClassName: "",
|
|
58
|
+
});
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
## Bind
|
|
62
|
+
|
|
63
|
+
Binding is a part of the reservation process, but it only creates a one directional link.
|
|
64
|
+
You can use it to bind a PVC to an existing PV. Note however that if the PV is not bound to the PVC,
|
|
65
|
+
there's no guarantee this claim will indeed be given to that specific volume.
|
|
66
|
+
|
|
67
|
+
```ts
|
|
68
|
+
const claim = new kplus.PersistentVolumeClaim(chart, 'Claim', {
|
|
69
|
+
storage: cdk8s.Size.gibibytes(50),
|
|
70
|
+
});
|
|
71
|
+
|
|
72
|
+
const vol = kplus.PersistentVolume.fromPersistentVolumeName('vol');
|
|
73
|
+
|
|
74
|
+
// will modify the claim resource to refer to the volume.
|
|
75
|
+
// but not the other way around.
|
|
76
|
+
claim.bind(vol);
|
|
77
|
+
```
|