---
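# NOTE: Double-dollar sequences ($$) below are Terraform template escapes; they
# render as a single dollar sign so that systemd or the shell expands the
# variable at runtime rather than Terraform at provisioning time.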
systemd:
units:
- name: persist-data-raid.service
enable: true
contents: |
[Unit]
Description=Persist data RAID if it exists
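# ConditionPathExists skips this unit once /opt/persist-data-raid (defined
# below) has recorded the assembled array in /etc/mdadm.conf.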
ConditionPathExists=!/etc/mdadm.conf
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/opt/persist-data-raid
[Install]
WantedBy=multi-user.target
RequiredBy=kubelet.service
- name: docker.service
enable: true
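# locksmithd is masked to disable automatic update reboots; SSH keys are
# provisioned through the passwd section below, so the metadata sshkeys
# unit is masked as well.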
- name: locksmithd.service
mask: true
- name: coreos-metadata-sshkeys@core.service
mask: true
- name: wait-for-dns.service
enable: true
contents: |
[Unit]
Description=Wait for DNS entries
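# Holds back kubelet.service until /etc/resolv.conf is populated and the
# cluster's private DNS record resolves; see /opt/wait-for-dns below.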
Wants=systemd-resolved.service
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done; /opt/wait-for-dns ${dns_zone} ${cluster_name}-private 3600'
[Install]
RequiredBy=kubelet.service
- name: coreos-metadata.service
enable: true
contents: |
[Unit]
Description=Flatcar Container Linux Metadata Agent
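# Writes instance metadata (e.g. COREOS_PACKET_IPV4_PRIVATE_0) to
# /run/metadata/flatcar, which kubelet.service loads via EnvironmentFile.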
[Service]
Type=oneshot
Environment=COREOS_METADATA_OPT_PROVIDER=--cmdline
ExecStart=/usr/bin/coreos-metadata $${COREOS_METADATA_OPT_PROVIDER} --attributes=/run/metadata/flatcar
[Install]
RequiredBy=metadata.target
- name: kubelet.service
enable: true
contents: |
[Unit]
Description=Kubelet
Requires=coreos-metadata.service
After=coreos-metadata.service
[Service]
EnvironmentFile=/run/metadata/flatcar
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
--volume resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume var-lib-calico,kind=host,source=/var/lib/calico \
--mount volume=var-lib-calico,target=/var/lib/calico \
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
--mount volume=opt-cni-bin,target=/opt/cni/bin \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log \
--volume data,kind=host,source=/mnt \
--mount volume=data,target=/mnt \
--volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \
--mount volume=iscsiadm,target=/usr/sbin/iscsiadm \
--volume etc-cni-netd,kind=host,source=/etc/cni/net.d \
--mount volume=etc-cni-netd,target=/etc/cni/net.d \
--insecure-options=image"
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/cni/net.d
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/bin/mkdir -p /var/lib/calico
ExecStartPre=/bin/mkdir -p /var/lib/kubelet/volumeplugins
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
ExecStartPre=/etc/kubernetes/configure-kubelet-cgroup-driver
# TODO: Workaround until https://github.com/coreos/afterburn/pull/358
# makes it into Flatcar. Then we can read COREOS_PACKET_IPV4_PRIVATE_GATEWAY_0
# from /run/metadata/flatcar and disable the template conditionals below.
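# The %%{ if } blocks below are Terraform template directives, evaluated when
# this file is rendered; when BGP node labels are set, the peer address is
# taken from the gateway of the node's 10.0.0.0/8 route.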
ExecStart=/bin/sh -c \
"%{~ if bgp_node_labels != "" ~}
BGP_PEER_ADDRESS=$(ip route | grep '10.0.0.0/8' | awk '{print $3}'); \
%{~ endif ~}
/usr/lib/coreos/kubelet-wrapper \
--node-ip=$${COREOS_PACKET_IPV4_PRIVATE_0} \
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster-dns=${k8s_dns_service_ip} \
--cluster-domain=${cluster_domain_suffix} \
--cni-conf-dir=/etc/cni/net.d \
--config=/etc/kubernetes/kubelet.config \
--exit-on-lock-contention \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=$${NODE_LABELS} \
--node-labels=lokomotive.alpha.kinvolk.io/public-ipv4=$${COREOS_PACKET_IPV4_PUBLIC_0} \
%{~ if bgp_node_labels != "" ~}
--node-labels=$${BGP_NODE_LABELS},metallb.universe.tf/peer-address=$BGP_PEER_ADDRESS \
%{~ endif ~}
--pod-manifest-path=/etc/kubernetes/manifests \
--read-only-port=0 \
--register-with-taints=$${NODE_TAINTS} \
--address=$${COREOS_PACKET_IPV4_PRIVATE_0} \
--volume-plugin-dir=/var/lib/kubelet/volumeplugins"
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
- name: delete-node.service
enable: true
contents: |
[Unit]
Description=Waiting to delete Kubernetes node on shutdown
[Service]
Type=oneshot
RemainAfterExit=true
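# ExecStart is a no-op that keeps this oneshot unit active; the actual node
# deregistration happens in ExecStop at shutdown.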
ExecStart=/bin/true
ExecStop=/etc/kubernetes/delete-node
[Install]
WantedBy=multi-user.target
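# Both "enabled" and "enable" are set on the units below, presumably to cover
# Container Linux Config transpiler versions from before and after "enable"
# was deprecated in favor of "enabled".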
- name: "iptables-restore.service"
enabled: true
enable: true
- name: "ip6tables-restore.service"
enabled: true
enable: true
- name: "iscsid.service"
enabled: true
enable: true
storage:
filesystems:
- name: "OEM"
mount:
device: "/dev/disk/by-label/OEM"
format: "ext4"
files:
# XXX: We need to manually specify the RAID0 layout because of a kernel bug.
# We use default_layout=2 as **we assume all installations from now on are
# using a kernel >= 3.14** (released on 2014-03-30).
# For more info on this kernel bug, see:
# https://github.com/torvalds/linux/commit/c84a1372df929033cb1a0441fb57bd3932f39ac9
# The setting must be applied both on the kernel command line and as
# modprobe configuration; see the commit message above for details.
- path: /etc/modprobe.d/raid0.conf
filesystem: root
mode: 0644
contents:
inline: options raid0 default_layout=2
- path: /etc/modules-load.d/raid0.conf
filesystem: root
mode: 0644
contents:
inline: raid0
- filesystem: "OEM"
path: "/grub.cfg"
mode: 0644
append: true
contents:
inline: |
set linux_append="$linux_append raid0.default_layout=2"
- path: /opt/persist-data-raid
filesystem: root
mode: 0700
contents:
inline: |
#!/bin/bash -xe
# Create a RAID 0 from extra disks to be used for persistent container storage.
function create_data_raid() {
local major_numbers="$1"
# Disk type filter: -1 for no filtering, 1 for HDDs (rotational), 0 for SSDs
local disk_type="$2"
# RAID device path, starting with /dev/
local device_path="$3"
local setup_fs_on_raid="$4"
# Select disks for the RAID sorted by size, filter by disk type, and
# ignore disks that are already mounted (e.g., where Linux is installed)
local disks=$(lsblk -lnpd -x size -o path,rota -I "$${major_numbers}" \
| (
while IFS= read -r line; do
local drive=$(echo "$line" | awk '{print $1}')
local rota=$(echo "$line" | awk '{print $2}')
if [ $disk_type != -1 ] && [ $disk_type != $rota ]; then
continue
fi
local mountpoints=$(lsblk -ln -o mountpoint "$drive")
if [[ -z "$mountpoints" ]]; then
echo "$line"
fi
done) | awk '{print $1}' | tr '\n' ' ')
local count=$(echo "$disks" | wc -w)
# Return early if there are no disks to build an array from
[ $count -lt 1 ] && return 0
# Create, format and mount array.
local extra_opts=""
if [ $count -lt 2 ]; then
# Force array creation even with one disk
extra_opts="--force"
fi
# If the device_path is /dev/md/node-local-hdd-storage then the
# array name would be node-local-hdd-storage
array_name=$(basename "$${device_path}")
mdadm --create "$${device_path}" \
--homehost=any \
$extra_opts \
--verbose \
--name="$${array_name}" \
--level=0 \
--raid-devices="$${count}" \
$${disks}
cat /proc/mdstat
# Wait for udev to create the symlinks
while [ ! -L "$${device_path}" ]; do
echo "Waiting for $${device_path}"
sleep 1
done
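# Persist the array configuration in /etc/mdadm.conf; this also satisfies
# the ConditionPathExists guard of persist-data-raid.service on later boots.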
mdadm --detail --scan | grep "ARRAY $${device_path} " | tee -a /etc/mdadm.conf
if [ "$${setup_fs_on_raid}" = true ]; then
mkfs.ext4 "$${device_path}"
mount "$${device_path}" "/mnt/"
# Make mount persistent across reboots
echo "$${device_path} /mnt/ ext4 defaults,nofail,discard 0 0" | tee -a /etc/fstab
fi
}
# Do nothing if a RAID array is already configured (even if /etc/mdadm.conf was deleted)
already_has_raid_config=$(mdadm --detail --scan)
[ ! -z "$${already_has_raid_config}" ] && exit 0
# A comma-separated list of major device numbers. Modify to control which device types
# are considered for data RAID.
# https://www.kernel.org/doc/Documentation/admin-guide/devices.txt
major_numbers="8,259"
# XXX: These options are mutually exclusive, as only one filesystem can be
# mounted at /mnt/.
# This is partly because directories created inside /mnt to mount
# several paths (like /mnt/node-local-storage) are not visible
# to the pods. See this issue for more info:
# https://github.com/kinvolk/lokomotive-kubernetes/issues/73
#
# Variables replaced by Terraform
if [ ${setup_raid} = true ]; then
create_data_raid "$${major_numbers}" -1 /dev/md/node-local-storage true
elif [ ${setup_raid_hdd} = true ]; then
create_data_raid "$${major_numbers}" 1 /dev/md/node-local-hdd-storage true
elif [ ${setup_raid_ssd} = true ]; then
create_data_raid "$${major_numbers}" 0 /dev/md/node-local-ssd-storage ${setup_raid_ssd_fs}
fi
- path: /etc/kubernetes/kubeconfig
filesystem: root
mode: 0644
contents:
inline: |
${kubeconfig}
- path: /etc/kubernetes/kubelet.env
filesystem: root
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=docker://quay.io/poseidon/kubelet
KUBELET_IMAGE_TAG=v1.18.6-${os_arch}
KUBELET_IMAGE_ARGS="--exec=/usr/local/bin/kubelet"
NODE_LABELS="node.kubernetes.io/node,${node_labels}"
BGP_NODE_LABELS="${bgp_node_labels}"
NODE_TAINTS="${taints}"
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16384
- path: /etc/kubernetes/delete-node
filesystem: root
mode: 0744
contents:
inline: |
#!/bin/bash
set -e
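# stage1-fly runs the kubelet image directly on the host (no container
# isolation), so the kubectl call reaches the API server with host
# networking and DNS.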
exec /usr/bin/rkt run \
--stage1-path=/usr/lib64/rkt/stage1-images/stage1-fly.aci \
--trust-keys-from-https \
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
docker://quay.io/poseidon/kubelet:v1.18.6-${os_arch} \
--net=host \
--dns=host \
-- \
kubectl --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
- path: /var/lib/iptables/rules-save
filesystem: root
mode: 0644
contents:
inline: |
*filter
:INPUT DROP [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp --dport 22 -j ACCEPT
# Use 10.0.0.0/8 as this is the Packet private network CIDR.
# Access will be locked down more tightly via Calico, whose rules are easier to update.
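# 179 = BGP (Calico/MetalLB peering), 10250 = kubelet API, 10256 = kube-proxy
# health checks.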
-A INPUT -s 10.0.0.0/8 -p tcp --dport 179 -j ACCEPT
-A INPUT -s 10.0.0.0/8 -p tcp --dport 10250 -j ACCEPT
-A INPUT -s 10.0.0.0/8 -p tcp --dport 10256 -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
- path: /var/lib/ip6tables/rules-save
filesystem: root
mode: 0644
contents:
inline: |
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -p tcp --dport 22 -j ACCEPT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
- path: /etc/kubernetes/configure-kubelet-cgroup-driver
filesystem: root
mode: 0744
contents:
inline: |
#!/bin/bash
set -e
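# The kubelet's cgroupDriver must match the driver Docker is using, so query
# Docker at boot instead of hardcoding a value.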
readonly docker_cgroup_driver="$(docker info -f '{{.CgroupDriver}}')"
cat <<EOF >/etc/kubernetes/kubelet.config
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "$${docker_cgroup_driver}"
EOF
- path: /opt/wait-for-dns
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash
if [[ $# -ne 3 ]]; then
echo "Usage: $0 <zone> <record> <max_attempts>"
exit 1
fi
zone=$1
record=$2
max_attempts=$3
echo "Figuring out the nameservers for $zone"
nameservers=""
counter=0
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 "$zone" ns)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
nameservers=$out
break
fi
if [[ "$out" = "" ]]; then
echo "No nameservers found for $zone"
else
echo "dig failed with exit code $ret: $out"
fi
sleep 1
counter=$((counter+1))
done
if [[ "$nameservers" == "" ]]; then
echo "Could not resolve nameservers for $zone"
exit 1
fi
for ns in $nameservers; do
echo "Polling $ns for $record.$zone..."
counter=0
ok=false
while [[ $counter -lt $max_attempts ]]; do
out=$(dig +short +timeout=2 @"$ns" "$record"."$zone" a)
ret=$?
if [[ $ret -eq 0 && "$out" != "" ]]; then
echo "Looks good!"
ok=true
break
fi
echo "Not available yet"
sleep 1
counter=$((counter+1))
done
if ! $ok; then
echo "$record.$zone didn't become available within the allowed time"
exit 1
fi
done
echo "$record.$zone is available on all nameservers"
exit 0
passwd:
users:
- name: core
ssh_authorized_keys: ${ssh_keys}