Deploying Kubernetes with kubeadm


Role assignment (Kubernetes version 1.14.5):

Three masters: 172.19.168.25-27
Three nodes: 172.19.168.28-30

Environment initialization

Set hostnames

Run the command on each host to set its hostname:

hostnamectl set-hostname test-k8s-master01
... ...
hostnamectl set-hostname test-k8s-node01
... ...

Configure passwordless SSH

[root@k8s01 ~]# cat sshconfig.sh

#!/bin/bash
yum -y install expect
# Generate a key pair if one does not already exist
[ ! -f /root/.ssh/id_rsa.pub ] && ssh-keygen -t rsa -P '' &>/dev/null
# Machines to set up passwordless login for: first 3 are masters, last 3 are nodes
SERVERS="172.19.168.25 172.19.168.26 172.19.168.27 172.19.168.28 172.19.168.29 172.19.168.30"
# All target machines must use the same password
PASSWORD=clPL35IZLKRRKyUp
PORT=9833
auto_ssh_copy_id() {
    expect -c "set timeout -1;
        spawn ssh-copy-id -p $3 $1;
        expect {
            *(yes/no)* {send -- yes\r;exp_continue;}
            *assword:* {send -- $2\r;exp_continue;}
            eof        {exit 0;}
        }";
}

ssh_copy_id_to_all() {
    for SERVER in $SERVERS
    do
        auto_ssh_copy_id $SERVER $PASSWORD $PORT
    done
}

ssh_copy_id_to_all

sh sshconfig.sh 
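A quick way to confirm the key distribution worked, assuming the same host list and SSH port as in the script; each host should print its hostname without asking for a password:

for N in `seq 25 30`;do ssh -p9833 -o BatchMode=yes root@172.19.168.$N hostname;done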

Disable SELinux, firewalld, and iptables

setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
systemctl stop firewalld.service iptables.service && systemctl disable firewalld.service iptables.service

Disable swap

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

Configure the host time zone and system locale

ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'LANG="en_US.UTF-8"' >> /etc/profile;source /etc/profile 

Synchronize the time

systemctl start chronyd.service && systemctl enable chronyd.service

Adjust the nofile limit

cat >> /etc/security/limits.conf <<EOF
* soft nofile 65535
* hard nofile 65536
EOF

Kernel performance tuning

echo "
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.ipv4.conf.all.forwarding = 1
net.ipv4.neigh.default.gc_thresh1 = 4096
net.ipv4.neigh.default.gc_thresh2 = 6144
net.ipv4.neigh.default.gc_thresh3 = 8192
net.ipv4.neigh.default.gc_interval=60
net.ipv4.neigh.default.gc_stale_time=120
" >> /etc/sysctl.conf
sysctl -p
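Note that the net.bridge.* keys only exist once the br_netfilter module is loaded, so sysctl -p may complain about them on a fresh host. A minimal sketch to load the module now and on every boot (the modules-load.d file name is arbitrary):

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p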

Enable the IPVS kernel modules

vi ipvs_mod.sh

#!/bin/sh
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir | grep -o "^[^.]*");do
  /sbin/modinfo -F filename $i &> /dev/null
  if [ $? -eq 0 ]; then
    /sbin/modprobe $i
  fi
done
# Distribute the script to the other hosts
for N in `seq 26 30`;do scp -P9833 -r ipvs_mod.sh 172.19.168.$N:/root;done
# Run the script on each host
sh ipvs_mod.sh

The following creates the /etc/sysconfig/modules/ipvs.modules file so that the required modules are loaded automatically after a node reboot.

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
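If kube-proxy is later switched to IPVS mode, the user-space tools are worth installing as well; this is an optional extra, not part of the original steps:

yum -y install ipset ipvsadm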

Install Docker

# Pin the version to install
export docker_version=18.06.3
# Step 1: install the required system tools
yum update -y;
yum install -y yum-utils device-mapper-persistent-data \
    lvm2 bash-completion;
# Step 2: add the repository
yum-config-manager --add-repo \
    http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo;
# Step 3: refresh the cache and install Docker CE
yum makecache all;
version=$(yum list docker-ce.x86_64 --showduplicates | sort -r|grep ${docker_version}|awk '{print $2}');
yum -y install --setopt=obsoletes=0 docker-ce-${version} docker-ce-selinux-${version};

Tune the Docker configuration

Limit concurrent image downloads and uploads
Set a registry mirror
Set the cgroup driver
Set the storage driver
Set the log driver

mkdir -p /etc/docker/
cat <<EOF > /etc/docker/daemon.json
{
"max-concurrent-downloads": 3,
"max-concurrent-uploads": 5,  
"registry-mirrors": ["https://7bezldxe.mirror.aliyuncs.com/"],
"exec-opts": ["native.cgroupdriver=systemd"],
"storage-driver": "overlay2",
"storage-opts": ["overlay2.override_kernel_check=true"],
"log-driver": "json-file",
"log-opts": {
    "max-size": "100m",
    "max-file": "3"
    }
}
EOF

Edit /usr/lib/systemd/system/docker.service and add the following under the [Service] section:

# Keep the docker daemon from being OOM-killed:
OOMScoreAdjust=-1000
# Open the iptables FORWARD chain:
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
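Edits to the packaged unit file are lost when the docker-ce RPM is upgraded. An alternative sketch that keeps the same two settings in a standard systemd drop-in (the file name override.conf is arbitrary):

mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF > /etc/systemd/system/docker.service.d/override.conf
[Service]
OOMScoreAdjust=-1000
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
EOF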

Distribute it to the other nodes

for N in `seq 26 30`;do scp -P9833 -r /usr/lib/systemd/system/docker.service 172.19.168.$N:/usr/lib/systemd/system/;done
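The loop above only copies the unit file; presumably /etc/docker/daemon.json also needs to reach the other nodes, for example:

for N in `seq 26 30`;do ssh -p9833 172.19.168.$N "mkdir -p /etc/docker"; scp -P9833 /etc/docker/daemon.json 172.19.168.$N:/etc/docker/;done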

Enable Docker on boot

systemctl daemon-reload
systemctl restart docker
systemctl enable docker
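To confirm the daemon picked up the configuration, docker info should report the expected drivers; the commented lines below are what the daemon.json above should produce:

docker info | grep -iE 'cgroup driver|storage driver'
# Storage Driver: overlay2
# Cgroup Driver: systemd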

Update /etc/hosts

cat <<EOF >> /etc/hosts
172.19.168.25 test-k8s-master01 k8s-api2.hhotel.com
172.19.168.26 test-k8s-master02 k8s-api2.hhotel.com
172.19.168.27 test-k8s-master03 k8s-api2.hhotel.com
172.19.168.28 test-k8s-node01
172.19.168.29 test-k8s-node02
172.19.168.30 test-k8s-node03
EOF
for N in `seq 25 30`;do scp -P9833 -r /etc/hosts 172.19.168.$N:/etc;done

Configure the Aliyun Kubernetes yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Deploy Kubernetes

Install kubeadm, kubelet, and kubectl on all nodes (kubelet will keep restarting until kubeadm init or join has run on the node; that is expected).

# List the available versions; the "echo y" auto-accepts importing the repo GPG keys
echo y | yum list kubeadm --showduplicates
yum -y install kubeadm-1.14.5 kubelet-1.14.5 kubectl-1.14.5
systemctl enable kubelet.service
systemctl restart kubelet.service

Create the kubeadm configuration file

cat <<EOF > kubeadm-config.yaml
### 1.15.1+ uses apiVersion v1beta2; 1.14.5 uses v1beta1
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.5
imageRepository: gcr.azk8s.cn/google_containers
apiServer:
  certSANs:
  - "k8s-api2.hhotel.com"
### cluster access address of the apiserver
controlPlaneEndpoint: "k8s-api2.hhotel.com:6443"
### subnet for the network plugin
networking:
  podSubnet: "192.168.0.0/16"
  dnsDomain: cluster.local
  serviceSubnet: "10.96.0.0/12"
EOF
for N in `seq 26 30`;do scp -P9833 -r kubeadm-config.yaml 172.19.168.$N:~;done

Pull the required images on every machine by running:

kubeadm config images pull --config kubeadm-config.yaml
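To preview which images will be pulled before doing so, kubeadm can list them from the same config:

kubeadm config images list --config kubeadm-config.yaml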

Initialize the cluster with kubeadm; run this on master01:

kubeadm init --config kubeadm-config.yaml --experimental-upload-certs

Output:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join api.k8s.hhotel.com:6443 --token 9n6g0e.v7u8nao1isirg2yt \
    --discovery-token-ca-cert-hash sha256:0b1d6f35288fb2640b54e9a0f4017d43ca39420911e9d15a0b1f779a078ae02c \
    --control-plane --certificate-key 6c07dd5fe113850b24e273721a91eaed97ffd520a31b36eee709e983b4c5d58b

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use 
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join api.k8s.hhotel.com:6443 --token 9n6g0e.v7u8nao1isirg2yt \
    --discovery-token-ca-cert-hash sha256:0b1d6f35288fb2640b54e9a0f4017d43ca39420911e9d15a0b1f779a078ae02c 

The first join command adds a control-plane (master) node; the second adds a worker node. On master01, set up kubectl for the current user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Copy the kubeconfig to the node machines so kubectl can be used there as well
for N in `seq 28 30`;do scp -P9833 -r $HOME/.kube 172.19.168.$N:~;done

Install the Canal network plugin

Change the pod network CIDR in the manifest so that it matches the podSubnet used at kubeadm init (192.168.0.0/16):

wget https://docs.projectcalico.org/v3.8/manifests/canal.yaml
sed -i s@"10.244.0.0/16"@"192.168.0.0/16"@g canal.yaml
kubectl apply -f canal.yaml
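A quick sanity check after applying the manifest (pod names will vary); the canal pods should reach Running and the nodes should turn Ready:

kubectl get pods -n kube-system -o wide | grep canal
kubectl get nodes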

Remove the master taint so that masters can also act as worker nodes; once a machine has joined as a master and the taint is removed, it serves as both master and node.

kubectl taint nodes --all node-role.kubernetes.io/master-
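If a master should later stop scheduling ordinary workloads again, the taint can be put back; the node name below is just an example from this cluster:

kubectl taint nodes test-k8s-master01 node-role.kubernetes.io/master=:NoSchedule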

Regenerating the token and keys after they expire

# The default token is valid for 24 hours; once it has expired, create a new one
kubeadm token create
# Generate the value for --discovery-token-ca-cert-hash
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
# The certificate-key lets additional master nodes fetch the certificates; it is valid for 2 hours and must be regenerated after that
kubeadm init phase upload-certs --experimental-upload-certs
# Then join a master or node with kubeadm join
kubeadm join api.k8s.hhotel.com:6443 --token dhn1rz.ij36pu94qpkj43xk \
--discovery-token-ca-cert-hash sha256:0b1d6f35288fb2640b54e9a0f4017d43ca39420911e9d15a0b1f779a078ae02c \
--control-plane --certificate-key e5264a589b2133d328b599dc9c94e3d4f8c51dd6e5e98877528a22b9af5bc622
# Print only the join command for worker nodes
kubeadm token create --print-join-command

If something goes wrong during the deployment, kubeadm can be reset on the affected node:

kubeadm reset
rm -rf ~/.kube/*
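kubeadm reset does not clean up iptables/IPVS rules or CNI configuration on its own; a cleanup sketch for the node being reset (run with care):

iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm --clear
rm -rf /etc/cni/net.d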