# LXC容器安装k8s
- 发布于:6504-–(原文日期已乱码,待核实)
- 作者(姓名):zhli
操作系统:centos 8 stream
在PVE的shell里执行以下代码
- 假设容器ID是111(特权容器),打开PVE Shell,执行以下操作:(1) 禁用AppArmor;(2) 允许容器的cgroup访问所有设备;(3) 防止丢弃容器的任何capability;(4) 在容器中将/proc和/sys挂载为读写
# Container ID of the target LXC guest (must be a privileged container).
vmid=111
# Stop the container, then append the extra LXC options to its Proxmox config.
# Expansions are quoted, and the quoted 'EOF' delimiter keeps the here-doc
# body literal (no shell expansion inside the config text).
pct stop "$vmid" && cat >> "/etc/pve/lxc/$vmid.conf" << 'EOF'
lxc.apparmor.profile: unconfined
lxc.cgroup.devices.allow: a
lxc.cap.drop:
lxc.mount.auto: "proc:rw sys:rw"
EOF
- 启动容器后,将内核启动配置推送到容器中
# Start the container, then copy the host kernel's build config into it.
# $(uname -r) runs on the PVE host; the LXC guest shares the host kernel,
# so the same config filename is what kubelet expects inside the container.
pct start "$vmid" && pct push "$vmid" "/boot/config-$(uname -r)" "/boot/config-$(uname -r)"
在所有节点上运行
- 开启ssh
# Install the OpenSSH server and start it now + on every boot.
yum install -y openssh-server && systemctl enable --now sshd
- 关闭swap
# Turn off swap immediately, then comment out swap entries in /etc/fstab so
# it stays off after reboot. [[:space:]] matches tabs as well as spaces —
# the original pattern '/ swap /' silently missed tab-separated fstab lines.
swapoff -a && sed -i '/[[:space:]]swap[[:space:]]/ s/^/#/' /etc/fstab
- 视情况关闭防火墙和selinux
# Disable the firewall (optional — skip on hardened hosts):
# systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux permanently. The substitution is anchored to the
# 'SELINUX=' line so it does not also rewrite the word 'enforcing' inside
# the explanatory comments of /etc/selinux/config:
# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
- 安装docker,版本20.10.9
# Install docker-ce 20.10.9 from the Aliyun mirror and start it on boot.
# wget is installed first to fetch the repo definition.
yum install -y wget && \
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo && \
yum install -y docker-ce-20.10.9-3.el8 && systemctl enable --now docker
- 在每个容器中,我们需要确保/dev/kmsg存在。 Kubelet 将它用于一些日志记录功能,默认情况下它不存在于容器中。
# Write a boot-time helper script: kubelet logs via /dev/kmsg, which does
# not exist inside an LXC container, so link it to /dev/console.
# The quoted 'EOF' delimiter writes the script body literally — a script-
# generating here-doc should never be left subject to shell expansion.
cat > /usr/local/bin/conf-kmsg.sh << 'EOF'
#!/bin/sh -e
if [ ! -e /dev/kmsg ]; then
ln -s /dev/console /dev/kmsg
fi
mount --make-rshared /
EOF
- 注册为服务
# Register conf-kmsg.sh as a systemd service.
# BUG FIX: the original here-doc was never terminated — without the closing
# 'EOF' line, everything up to the next 'EOF' in the document would have
# been written into the unit file.
cat > /etc/systemd/system/conf-kmsg.service << 'EOF'
[Unit]
Description=Make sure /dev/kmsg exists

[Service]
Type=simple
RemainAfterExit=yes
ExecStart=/usr/local/bin/conf-kmsg.sh
TimeoutStartSec=0

[Install]
WantedBy=default.target
EOF
- 启用该服务
# Make the helper executable, reload systemd units, and enable the service
# immediately and on boot.
chmod +x /usr/local/bin/conf-kmsg.sh \
  && systemctl daemon-reload \
  && systemctl enable --now conf-kmsg
- 安装k8s,版本1.23.0
# Add the Aliyun Kubernetes yum repository (the el7 packages also install
# on CentOS 8 Stream). The quoted 'EOF' keeps the repo file literal.
# NOTE(review): gpgcheck/repo_gpgcheck are disabled even though gpgkey is
# set — consider enabling them unless the mirror's signature metadata is
# known to be broken.
cat > /etc/yum.repos.d/kubernetes.repo << 'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install the Kubernetes components, all pinned to 1.23.0 so they match
# the version passed to kubeadm init, then start kubelet on boot.
yum install -y \
  kubelet-1.23.0 \
  kubeadm-1.23.0 \
  kubectl-1.23.0 \
  && systemctl enable --now kubelet
# Remove the packaged containerd config and restart containerd so it
# regenerates its defaults. NOTE(review): presumably the shipped config
# disables the CRI plugin — confirm on the target host.
rm -f -- /etc/containerd/config.toml && systemctl restart containerd
master节点执行以下代码
- 初始化
# Initialize the control plane. Replace master节点的IP with the real address.
# --kubernetes-version pins the release to the installed kubeadm/kubelet
# packages (1.23.0) and avoids a network lookup for the latest stable tag.
kubeadm init \
  --apiserver-advertise-address=master节点的IP \
  --kubernetes-version=v1.23.0 \
  --image-repository registry.aliyuncs.com/google_containers \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16
- 部署网络插件
# Deploy the flannel CNI. The manifest moved when the project left the
# coreos org — the old coreos/flannel master path no longer serves the file.
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
- 部署ingress-nginx,版本v1.6.4
# Download the ingress-nginx v1.6.4 "cloud" provider manifest; wget saves it
# as deploy.yaml in the current directory.
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.6.4/deploy/static/provider/cloud/deploy.yaml
# 文件下载后,需要把3处镜像地址换成国内可以访问的地址
# registry.k8s.io/ingress-nginx/controller:v1.6.4@sha256:15be4666c53052484dd2992efacf2f50ea77a78ae8aa21ccd91af6baaa7ea22f
# 修改为:giantswarm/ingress-nginx-controller:v1.6.4
# registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f
# 修改为:registry.aliyuncs.com/google_containers/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f
# 再设置下type为LoadBalancer的Service的externalIPs。如:
# Example: the ingress-nginx controller Service with externalIPs set.
# FIX: the original snippet had lost all YAML indentation (every key was at
# column 0, which is not a valid manifest); the structure below restores the
# standard ingress-nginx v1.6.4 Service layout.
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
    app.kubernetes.io/version: 1.6.4
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  externalTrafficPolicy: Cluster
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - appProtocol: http
      name: http
      port: 80
      protocol: TCP
      targetPort: http
    - appProtocol: https
      name: https
      port: 443
      protocol: TCP
      targetPort: https
  externalIPs:
    - 192.0.2.10   # placeholder — 这里可设置为master节点的IP
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  type: LoadBalancer
# Apply the patched ingress-nginx manifest (after swapping the image
# addresses and setting externalIPs as described above).
kubectl apply -f deploy.yaml
将节点加入到集群,
# TOKEN可以在master节点init之后的控制台输出的文字里找到,如果找不到了也可以执行kubeadm token create --print-join-command
# Join a worker node to the cluster. The API server endpoint is a positional
# argument and the flag is --token ('-token' is not a valid kubeadm flag).
# kubeadm 1.23 also requires CA pinning: pass the
# --discovery-token-ca-cert-hash printed by
# 'kubeadm token create --print-join-command'.
kubeadm join master节点的IP:6443 --token TOKEN --discovery-token-ca-cert-hash sha256:HASH