---
systemd:
units:
- name: etcd-member.service
enable: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Unit]
Requires=create-etcd-config.service
After=create-etcd-config.service
[Service]
EnvironmentFile=/etc/kubernetes/etcd.config
Environment="ETCD_IMAGE_URL=docker://quay.io/coreos/etcd"
Environment="RKT_RUN_ARGS=--insecure-options=image"
Environment="ETCD_IMAGE_TAG=v3.4.9${etcd_arch_tag_suffix}"
Environment="ETCD_NAME=${etcd_name}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
Environment="ETCD_INITIAL_CLUSTER=${etcd_initial_cluster}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
Environment="ETCD_CLIENT_CERT_AUTH=true"
Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
Environment="${etcd_arch_options}"
ExecStopPost=-/opt/etcd-rejoin
- name: docker.service
enable: true
- name: locksmithd.service
mask: true
- name: coreos-metadata-sshkeys@core.service
mask: true
- name: wait-for-dns.service
enable: true
contents: |
[Unit]
Description=Wait for DNS entries
Wants=systemd-resolved.service
Before=kubelet.service etcd-member.service bootkube.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done; /opt/wait-for-dns ${dns_zone} ${cluster_name}-private 3600'
[Install]
RequiredBy=kubelet.service etcd-member.service bootkube.service
- name: create-etcd-config.service
# This service extracts the private interface IP address from the env var file `/run/metadata/flatcar`
# and assigns it to the variables stored in the file `/etc/kubernetes/etcd.config`,
# which is sourced by the etcd-member service.
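# The resulting file looks like this (the IP address is illustrative):
#   ETCD_LISTEN_CLIENT_URLS=https://10.80.0.2:2379
#   ETCD_LISTEN_PEER_URLS=https://10.80.0.2:2380
#   ETCD_LISTEN_METRICS_URLS=http://10.80.0.2:2381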
enable: true
contents: |
[Unit]
Description=Create an etcd env file so that etcd listens on the private interface
Before=etcd-member.service
Requires=coreos-metadata.service
After=coreos-metadata.service
[Service]
EnvironmentFile=/run/metadata/flatcar
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'echo "ETCD_LISTEN_CLIENT_URLS=https://$COREOS_PACKET_IPV4_PRIVATE_0:2379" > /etc/kubernetes/etcd.config && echo "ETCD_LISTEN_PEER_URLS=https://$COREOS_PACKET_IPV4_PRIVATE_0:2380" >> /etc/kubernetes/etcd.config && echo "ETCD_LISTEN_METRICS_URLS=http://$COREOS_PACKET_IPV4_PRIVATE_0:2381" >> /etc/kubernetes/etcd.config'
[Install]
RequiredBy=etcd-member.service
- name: coreos-metadata.service
enable: true
contents: |
[Unit]
Description=Flatcar Metadata Agent
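# Writes KEY=VALUE pairs such as COREOS_PACKET_IPV4_PRIVATE_0 and COREOS_PACKET_IPV4_PUBLIC_0
# to /run/metadata/flatcar, which other units consume as an EnvironmentFile.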
[Service]
Type=oneshot
Environment=COREOS_METADATA_OPT_PROVIDER=--cmdline
ExecStart=/usr/bin/coreos-metadata $${COREOS_METADATA_OPT_PROVIDER} --attributes=/run/metadata/flatcar
[Install]
RequiredBy=metadata.target
- name: kubelet.service
enable: true
contents: |
[Unit]
Description=Kubelet
Requires=coreos-metadata.service
After=coreos-metadata.service
[Service]
EnvironmentFile=/run/metadata/flatcar
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
--volume=resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume var-lib-calico,kind=host,source=/var/lib/calico \
--mount volume=var-lib-calico,target=/var/lib/calico \
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
--mount volume=opt-cni-bin,target=/opt/cni/bin \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log \
--volume etc-cni-netd,kind=host,source=/etc/cni/net.d \
--mount volume=etc-cni-netd,target=/etc/cni/net.d \
--insecure-options=image"
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
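# Extract the base64-encoded CA certificate from the kubeconfig into /etc/kubernetes/ca.crt,
# which --client-ca-file below points at.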
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
ExecStartPre=/etc/kubernetes/configure-kubelet-cgroup-driver
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--node-ip=$${COREOS_PACKET_IPV4_PRIVATE_0} \
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster-dns=${k8s_dns_service_ip} \
--cluster-domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/cni/net.d \
--config=/etc/kubernetes/kubelet.config \
--exit-on-lock-contention \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=$${NODE_LABELS} \
--node-labels=lokomotive.alpha.kinvolk.io/public-ipv4=$${COREOS_PACKET_IPV4_PUBLIC_0} \
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--register-with-taints=$${NODE_TAINTS} \
--address=$${COREOS_PACKET_IPV4_PRIVATE_0} \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes cluster
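# Run only once: ConditionPathExists guards on a file created by ExecStartPost after a successful bootstrap.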
ConditionPathExists=!/opt/bootkube/init_bootkube.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
[Install]
WantedBy=multi-user.target
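# Both `enable` and `enabled` are set on the units below, presumably to support
# multiple Container Linux Config transpiler versions (assumed intent).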
- name: "iptables-restore.service"
enabled: true
enable: true
- name: "ip6tables-restore.service"
enabled: true
enable: true
storage:
files:
- path: /etc/kubernetes/kubeconfig
filesystem: root
mode: 0644
contents:
inline: |
${kubeconfig}
- path: /etc/kubernetes/kubelet.env
filesystem: root
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=docker://quay.io/poseidon/kubelet
KUBELET_IMAGE_TAG=v1.18.6-${os_arch}
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/master,node.kubernetes.io/controller=true"
NODE_TAINTS="node-role.kubernetes.io/master=:NoSchedule"
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Pre-pull the kubelet image; pulling it later during bootstrap can take too long and time out.
docker pull quay.io/poseidon/kubelet:v1.18.6-${os_arch}
# Move experimental manifests
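# (the guard makes this a no-op when no manifests-* directories exist)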
[ -n "$(ls /opt/bootkube/assets/manifests-*/* 2>/dev/null)" ] && mv /opt/bootkube/assets/manifests-*/* /opt/bootkube/assets/manifests && rm -rf /opt/bootkube/assets/manifests-*
exec /usr/bin/rkt run \
--stage1-path=/usr/lib64/rkt/stage1-images/stage1-fly.aci \
--insecure-options=image \
--trust-keys-from-https \
--volume assets,kind=host,source=/opt/bootkube/assets \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$${RKT_OPTS} \
docker://quay.io/kinvolk/bootkube:v0.14.0-helm-${os_arch} \
--insecure-options=image \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
- path: /opt/etcd-rejoin
filesystem: root
mode: 0555
contents:
inline: |
#!/bin/bash
set -eou pipefail
# Rejoin the cluster as a fresh node when etcd cannot join
# (e.g., after reprovisioning, crashing, or the node being down).
# Set ExecStopPost=-/opt/etcd-rejoin so this runs when etcd fails,
# using the env vars of etcd-member.service.
# Skip if not provisioned
if [ ! -d "/etc/ssl/etcd/" ]; then exit 0; fi
# or got stopped.
if [ "$EXIT_CODE" = "killed" ]; then exit 0; fi
now=$(date +%s)
if [ -f /var/lib/etcd-last-fail ]; then
last=$(cat /var/lib/etcd-last-fail)
else
last=0
fi
echo "$now" > /var/lib/etcd-last-fail
let "d = $now - $last"
# Skip the rejoin and restart regularly if it did not fail again within 120s.
if [ "$d" -gt 120 ]; then exit 0; fi
export ETCDCTL_API=3
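# Derive client endpoints from ETCD_INITIAL_CLUSTER; e.g. (illustrative values)
# "one=https://one.example.com:2380,two=https://two.example.com:2380" yields
# "https://one.example.com:2379,https://two.example.com:2379" after the port substitution below.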
urls=$(echo "$ETCD_INITIAL_CLUSTER" | tr "," "\n" | cut -d "=" -f 2 | tr "\n" "," | head -c -1)
# $$ for terraform
endpoints="$${urls//2380/2379}"
ARGS="--cacert=/etc/ssl/etcd/etcd-client-ca.crt --cert=/etc/ssl/etcd/etcd-client.crt --key=/etc/ssl/etcd/etcd-client.key --endpoints=$endpoints"
# Check if unhealthy (should be because etcd is not running)
unhealthy=$((etcdctl endpoint health $ARGS 2> /dev/stdout | grep "is unhealthy" | grep "$ETCD_NAME") || true)
if [ -z "$unhealthy" ]; then exit 0; fi
# Remove old ID if still exists
ID=$((etcdctl member list $ARGS | grep "$ETCD_NAME" | cut -d "," -f 1) || true)
if [ -n "$ID" ]; then
etcdctl member remove "$ID" $ARGS
fi
# Re-add as new member
etcdctl member add "$ETCD_NAME" --peer-urls="$ETCD_INITIAL_ADVERTISE_PEER_URLS" $ARGS
# Join fresh without state
mv /var/lib/etcd "/var/lib/etcd-bkp-$(date +%s)" || true
if [ -z "$(grep ETCD_INITIAL_CLUSTER_STATE=existing /etc/systemd/system/etcd-member.service.d/40-etcd-cluster.conf)" ]; then
echo 'Environment="ETCD_INITIAL_CLUSTER_STATE=existing"' >> /etc/systemd/system/etcd-member.service.d/40-etcd-cluster.conf
# Apply change
systemctl daemon-reload
fi
# Restart unit (yes, within itself)
systemctl restart etcd-member &
- path: /var/lib/iptables/rules-save
filesystem: root
mode: 0644
contents:
inline: |
*filter
:INPUT DROP [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp --dport 22 -j ACCEPT
# Use 10.0.0.0/8, as this is the Packet private network CIDR.
# It will be locked down more tightly via Calico, whose rules are easy to update.
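# 179: Calico BGP; 2379/2380/2381: etcd client, peer and metrics ports.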
-A INPUT -s 10.0.0.0/8 -p tcp --dport 179 -j ACCEPT
-A INPUT -s 10.0.0.0/8 -p tcp --dport 2379 -j ACCEPT
-A INPUT -s 10.0.0.0/8 -p tcp --dport 2380 -j ACCEPT
-A INPUT -s 10.0.0.0/8 -p tcp --dport 2381 -j ACCEPT
-A INPUT -p tcp --dport 6443 -j ACCEPT
# With a single controller node setup, traffic from nodes to kube-apiserver may flow via either port 6443 or 7443.
# This is because the --advertise-address flag of kube-apiserver defines which IP address is added as an endpoint
# to the kubernetes.default.svc Service, where in-cluster traffic to the API goes. Its uniqueness, though, is based
# on the IP address and not on the IP+port combination. This is why we randomize the IP address to listen on and set
# --advertise-address to the node IP, to keep it deterministic. If we randomized the port instead, we would have to
# open all the ports in the range on the firewall to get it to work.
#
# So, as the uniqueness of the endpoint in the kubernetes.default.svc Service is based on the IP address, the address
# with port 6443 is added first, as this is how the bootstrap kube-apiserver binds. This port is later taken over
# by HAProxy. However, if you delete the kubernetes.default.svc Service (kubectl delete service -n default kubernetes),
# kube-apiserver re-creates it with its actual listening port, which is 7443, and then traffic flows directly
# into the kube-apiserver process. The removal should not happen under regular operation, but we should be prepared for it.
#
# That is why both ports 6443 and 7443 need to be open on the controller node.
%{~ if controller_count == "1" ~}
-A INPUT -s 10.0.0.0/8 -p tcp --dport 7443 -j ACCEPT
%{~ endif }
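# 10250: kubelet API; 10256: kube-proxy health checks.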
-A INPUT -s 10.0.0.0/8 -p tcp --dport 10250 -j ACCEPT
-A INPUT -s 10.0.0.0/8 -p tcp --dport 10256 -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
- path: /var/lib/ip6tables/rules-save
filesystem: root
mode: 0644
contents:
inline: |
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp --dport 22 -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
- path: /etc/kubernetes/configure-kubelet-cgroup-driver
filesystem: root
mode: 0744
contents:
inline: |
#!/bin/bash
set -e
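# The kubelet's cgroup driver must match Docker's, so detect it at runtime instead of hard-coding it.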
readonly docker_cgroup_driver="$(docker info -f '{{.CgroupDriver}}')"
cat <<EOF >/etc/kubernetes/kubelet.config
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "$${docker_cgroup_driver}"
EOF
- path: /opt/wait-for-dns
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash
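# Wait until <record>.<zone> resolves on all authoritative nameservers of <zone>.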
if [[ $# -ne 3 ]]; then
echo "Usage: $0 <zone> <record> <max_attempts>"
exit 1
fi
zone=$1
record=$2
max_attempts=$3
echo "Figuring out the nameservers for $zone"
nameservers=""
counter=0
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 "$zone" ns)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
nameservers=$out
break
fi
if [[ "$out" = "" ]]; then
echo "No nameservers found for $zone"
else
echo "dig failed with exit code $ret: $out"
fi
sleep 1
counter=$((counter+1))
done
if [[ "$nameservers" == "" ]]; then
echo "Could not resolve nameservers for $zone"
exit 1
fi
for ns in $nameservers; do
echo "Polling $ns for $record.$zone..."
counter=0
ok=false
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 @"$ns" "$record"."$zone" a)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
echo "Looks good!"
ok=true
break
fi
echo "Not available yet"
sleep 1
counter=$((counter+1))
done
if ! $ok; then
echo "$record.$zone didn't become available within the allowed time"
exit 1
fi
done
echo "$record.$zone is available on all nameservers"
exit 0
passwd:
users:
- name: core
ssh_authorized_keys: ${ssh_keys}