# https://github.com/lima-vm/lima/blob/master/examples
#
# Example to use Docker instead of containerd & nerdctl, with binfmt for multi-arch, and k3s
# $ limactl start ./docker-k3s-multiarch-rootful.yaml
# The instance name defaults to the file basename, i.e. "docker-k3s-multiarch-rootful".
# $ limactl shell docker-k3s-multiarch-rootful docker run -it -v $HOME:$HOME --rm alpine
# To run `docker` on the host (assumes docker-cli is installed):
# $ export DOCKER_HOST=$(limactl list docker-k3s-multiarch-rootful --format 'unix://{{.Dir}}/sock/docker.sock')
# $ docker ...
# To run `kubectl` on the host (assumes kubectl is installed), first copy the kubeconfig
# out of the guest as described in the "message" section at the bottom of this file, then:
# $ export KUBECONFIG=$(limactl list docker-k3s-multiarch-rootful --format '{{.Dir}}/copied-from-guest/kubeconfig.yaml')
# $ kubectl get no
# NAME STATUS ROLES AGE VERSION
# lima-k3s Ready control-plane,master 69s v1.21.1+k3s1
# To merge the k3s kubeconfig into your existing ~/.kube/config:
# limactl shell docker-k3s-multiarch-rootful sudo kubectl config view --flatten > /tmp/config.tmp
# KUBECONFIG=~/.kube/config:/tmp/config.tmp kubectl config view --merge --flatten > ~/.kube/config.new
# mv ~/.kube/config ~/.kube/config.bak
# mv ~/.kube/config.new ~/.kube/config
# kubectl config get-contexts
# This example requires Lima v0.8.0 or later
images:
# Try to use release-yyyyMMdd image if available. Note that release-yyyyMMdd will be removed after several months.
#- location: "https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img"
# arch: "x86_64"
#- location: "https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-arm64.img"
# arch: "aarch64"
#- location: "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-amd64.img"
# arch: "x86_64"
#- location: "https://cloud-images.ubuntu.com/releases/focal/release/ubuntu-20.04-server-cloudimg-arm64.img"
# arch: "aarch64"
#- location: "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-amd64.img"
# arch: "x86_64"
#- location: "https://cloud-images.ubuntu.com/releases/22.04/release/ubuntu-22.04-server-cloudimg-arm64.img"
# arch: "aarch64"
- location: "https://cloud-images.ubuntu.com/releases/23.10/release/ubuntu-23.10-server-cloudimg-amd64.img"
arch: "x86_64"
- location: "https://cloud-images.ubuntu.com/releases/23.10/release/ubuntu-23.10-server-cloudimg-arm64.img"
arch: "aarch64"
cpus: 4
memory: "8GiB"
# mountType: "reverse-sshfs"
mountType: "9p"
mounts:
- location: "~"
writable: true
9p:
#cache: "fscache"
cache: "mmap"
- location: "/tmp/lima"
writable: true
9p:
cache: "mmap"
# external disk
# limactl disk create docker --size 10G
# will be available as /mnt/lima-docker
additionalDisks:
- docker
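# To confirm the disk exists before starting the instance, run on the host:
#   limactl disk list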
# containerd is managed by Docker, not by Lima, so the values are set to false here.
containerd:
system: false
user: false
provision:
- mode: system
  # bind-mount the Docker data directories onto the external disk for persistence
  # courtesy of jandubois
script: |
#!/bin/bash
set -eux -o pipefail
MNT=/mnt/lima-docker
ROOT=/var/lib/docker
USER=/home/${LIMA_CIDATA_USER}.linux/.local/share/docker
mkdir -p $MNT/root $MNT/user
mkdir -p $ROOT $USER
mount --bind /$MNT/root $ROOT
mount --bind /$MNT/user $USER
chown -R $LIMA_CIDATA_USER:$LIMA_CIDATA_USER /home/${LIMA_CIDATA_USER}.linux/.local
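# A quick way to check that the bind mounts above took effect, from inside the guest
# (findmnt shows the mount source, df shows usage of the external disk):
#   findmnt /var/lib/docker
#   df -h /mnt/lima-docker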
- mode: system
# This script defines the host.docker.internal hostname when hostResolver is disabled.
# It is also needed for lima 0.8.2 and earlier, which does not support hostResolver.hosts.
# Names defined in /etc/hosts inside the VM are not resolved inside containers when
# using the hostResolver; use hostResolver.hosts instead (requires lima 0.8.3 or later).
script: |
#!/bin/sh
sed -i 's/host.lima.internal.*/host.lima.internal host.docker.internal/' /etc/hosts
- mode: system
# sysctl changes
script: |
#!/bin/bash
    # NOTE: disabling IPv6 inside the guest breaks Kubernetes for some reason;
    # if IPv6 needs to be off, disable it on the macOS host (Apple menu -> System Settings) instead
#sysctl -w net.ipv6.conf.all.disable_ipv6=1
#sysctl -w net.ipv6.conf.default.disable_ipv6=1
sysctl -w vm.max_map_count=262144
cat <<-EOF >> /etc/sysctl.conf
#net.ipv6.conf.all.disable_ipv6=1
#net.ipv6.conf.default.disable_ipv6=1
vm.max_map_count=262144
EOF
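# To confirm the setting is active, from inside the guest:
#   sysctl vm.max_map_count   # expected: vm.max_map_count = 262144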
- mode: system
script: |
#!/bin/bash
set -eux -o pipefail
command -v docker >/dev/null 2>&1 && exit 0
if [ ! -e /etc/systemd/system/docker.socket.d/override.conf ]; then
mkdir -p /etc/systemd/system/docker.socket.d
# Alternatively we could just add the user to the "docker" group, but that requires restarting the user session
cat <<-EOF >/etc/systemd/system/docker.socket.d/override.conf
[Socket]
SocketUser=${LIMA_CIDATA_USER}
EOF
fi
export DEBIAN_FRONTEND=noninteractive
curl -fsSL https://get.docker.com | sh
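# Once provisioning finishes, a quick sanity check from inside the guest; thanks to the
# docker.socket override above, the default user can talk to the daemon without sudo:
#   docker version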
- mode: system
script: |
#!/bin/bash
set -eux -o pipefail
docker run --privileged --rm tonistiigi/binfmt --install all
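# With the binfmt handlers installed, foreign-architecture images should run.
# For example, on an x86_64 host this prints "aarch64" (run inside the guest, or
# from the host with DOCKER_HOST set):
#   docker run --rm --platform linux/arm64 alpine uname -m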
- mode: system
script: |
#!/bin/bash
mkdir -p /etc/rancher/k3s/
cat <<EOF > /etc/rancher/k3s/registries.yaml
mirrors:
localhost:
endpoint:
- "http://localhost:5000"
EOF
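# The mirror above points k3s at a registry on localhost:5000 inside the guest.
# One way to provide it, using the standard "registry" image (run inside the guest):
#   docker run -d --name registry -p 5000:5000 registry:2
# Images pushed there with docker can then be pulled by k3s through the mirror.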
- mode: system
script: |
#!/bin/sh
curl -sfL https://get.k3s.io | sh -
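# To confirm the node registered once cloud-init completes, from inside the guest
# (k3s bundles kubectl; sudo is needed to read /etc/rancher/k3s/k3s.yaml):
#   sudo kubectl get nodes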
networks:
# shared, bridged, host, user-v2
- lima: shared
probes:
- script: |
#!/bin/bash
set -eux -o pipefail
if ! timeout 30s bash -c "until command -v docker >/dev/null 2>&1; do sleep 3; done"; then
echo >&2 "docker is not installed yet"
exit 1
fi
if ! timeout 30s bash -c "until pgrep dockerd; do sleep 3; done"; then
echo >&2 "dockerd is not running"
exit 1
fi
  hint: See "/var/log/cloud-init-output.log" in the guest.
- script: |
#!/bin/bash
set -eux -o pipefail
if ! timeout 30s bash -c "until test -f /etc/rancher/k3s/k3s.yaml; do sleep 3; done"; then
echo >&2 "k3s is not running yet"
exit 1
fi
hint: |
The k3s kubeconfig file has not yet been created.
Run "limactl shell k3s sudo journalctl -u k3s" to check the log.
If that is still empty, check the bottom of the log at "/var/log/cloud-init-output.log".
hostResolver:
# hostResolver.hosts requires lima 0.8.3 or later. Names defined here will also
# resolve inside containers, and not just inside the VM itself.
hosts:
host.docker.internal: host.lima.internal
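# To check that the alias also resolves from inside a container (run inside the guest,
# or from the host with DOCKER_HOST set; assumes the default bridge network and DNS):
#   docker run --rm alpine nslookup host.docker.internal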
portForwards:
- guestSocket: "/var/run/docker.sock"
hostSocket: "{{.Dir}}/sock/docker.sock"
message: |
To run `docker` on the host (assumes docker-cli is installed), run the following commands:
------
docker context create lima-{{.Name}} --docker "host=unix://{{.Dir}}/sock/docker.sock"
docker context use lima-{{.Name}}
docker run hello-world
------
To run `kubectl` on the host (assumes kubectl is installed), run the following commands:
------
mkdir -p {{.Dir}}/copied-from-guest
limactl shell {{.Name}} sudo cat /etc/rancher/k3s/k3s.yaml > {{.Dir}}/copied-from-guest/kubeconfig.yaml
export KUBECONFIG="{{.Dir}}/copied-from-guest/kubeconfig.yaml"
kubectl ...
------