zl程序教程

您现在的位置是:首页 >  其他

当前栏目

kubernetes v1.16.0 部署实践

2023-03-14 22:49:50 时间

和以往一样,记录一下kubernetes的部署过程;环境采用Centos7.6版本

三台主机

  • 192.168.0.10 node1
  • 192.168.0.11 node2
  • 192.168.0.12 node3

安装master节点的过程

#!/usr/bin/bash

##################
# Author: 风清扬的角色
# Date:   2019.11.11
##################
# Strict mode: exit on any error (-e), treat unset variables as errors (-u),
# and make a pipeline fail if any stage fails (-o pipefail).
set -euo pipefail

preTools(){
    # Refresh the system and pull in the base utilities the later steps rely on.
    yum update -y
    yum install -y wget vim net-tools epel-release
    printf '%s\n' "##################" "# 基础组件安装完成 #" "##################"
}

closeFw(){
    # Stop firewalld now and keep it from coming back on boot.
    systemctl disable firewalld
    systemctl stop firewalld
    printf '%s\n' "##################" "# 防火墙已关闭 #" "##################"
}

closeSelinux(){
    # Permanently disable SELinux (takes effect after a reboot).
    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
    # Also switch the running system to permissive if it is currently enforcing.
    # FIX: the original if/else was never closed with `fi`, which is a syntax
    # error that prevented the whole script from parsing; the banner echos were
    # also trapped inside the dangling else branch.
    if [ "$(getenforce)" == "Enforcing" ]; then
        setenforce 0
    else
        echo "current selinux status..." "$(getenforce)"
    fi
    echo "##################"
    echo "# selinux已关闭 #"
    echo "##################"
}

closeSwap(){
    # Turn swap off for this boot and comment out every swap entry in fstab
    # so it stays off (the kubelet refuses to start with swap enabled by default).
    swapoff -a
    sed -i 's/.*swap.*/#&/' /etc/fstab

    printf '%s\n' "##################" "# swap已关闭 #" "##################"
}

initHosts(){
    # Map the three cluster node names to their IPs for local name resolution.
    # FIX: the original appended to /etc/host (nonexistent path); the hosts
    # database lives in /etc/hosts, so the mappings were silently ineffective.
    cat << EOF >> /etc/hosts
192.168.0.10    node1
192.168.0.11    node2
192.168.0.12    node3
EOF
    echo "##################"
    echo "# 本地DNS解析设置完成 #"
    echo "##################"
}

updateKernelPerms(){
    # Make bridged traffic visible to iptables (required by kube-proxy/CNI),
    # then reload all sysctl settings.
    printf '%s\n' \
        'net.bridge.bridge-nf-call-ip6tables = 1' \
        'net.bridge.bridge-nf-call-iptables = 1' > /etc/sysctl.d/k8s.conf
    sysctl --system
    printf '%s\n' "##################" "# 内核参数优化完成 #" "##################"
}

updateYumRepository(){
    # Point the CentOS base repo at the Aliyun mirror (backing up the original,
    # date-stamped) and add the Aliyun Kubernetes repo, then refresh metadata.
    mv /etc/yum.repos.d/CentOS-Base.repo "/etc/yum.repos.d/CentOS-Base.repo.$(date +%F).backup"
    wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    yum makecache fast
    # gpgcheck is disabled for the mirror; repo content is trusted by choice here.
    {
        echo '[kubernetes]'
        echo 'name=Kubernetes'
        echo 'baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64'
        echo 'enabled=1'
        echo 'gpgcheck=0'
    } > /etc/yum.repos.d/kubernetes.repo
    yum clean all
    yum makecache fast
    yum -y update
    printf '%s\n' "##################" "# 包管理器更新完成 #" "##################"
}

installDocker(){
    # Install a pinned docker-ce 18.09.9 from the Aliyun mirror and configure
    # the daemon: registry mirror, systemd cgroup driver (matches kubelet),
    # json-file log rotation at 100m, and the overlay2 storage driver.
    yum -y install yum-utils device-mapper-persistent-data lvm2
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum install -y docker-ce-18.09.9-3.el7
    mkdir -pv /etc/docker
    # Quoted delimiter: the JSON below is written out literally, no expansion.
    cat << 'EOF' > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://52szbbbs.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
    systemctl enable --now docker.service
    printf '%s\n' "##################" "# Docker安装完成 #" "##################"
}

installK8sClient(){
    # Install kubeadm/kubelet pinned to the cluster version being deployed.
    # FIX: the original installed unpinned packages, which pulls the latest
    # release — kubeadm's version-skew policy means a newer kubeadm cannot
    # bootstrap the v1.16.0 control plane this script targets.
    yum install -y kubeadm-1.16.0 kubelet-1.16.0
    # kubelet must be enabled so kubeadm init can start and manage it.
    systemctl enable kubelet
    echo "##################"
    echo "# k8s管理工具安装完成 #"
    echo "##################"
}

getK8sComponentImages(){
    # Pre-pull the v1.16.0 control-plane images from the Aliyun mirror and
    # retag them as k8s.gcr.io/* so `kubeadm init` finds them in the local
    # cache (k8s.gcr.io is not reachable from mainland China).
    # Idiom fixes: variables are now `local` (the original leaked them as
    # globals) and every expansion is quoted.
    local KUBE_VERSION=v1.16.0
    local KUBE_PAUSE_VERSION=3.1
    local ETCD_VERSION=3.3.15-0
    local CORE_DNS_VERSION=1.6.2
    local GCR_URL=k8s.gcr.io
    local ALIYUN_URL=registry.cn-hangzhou.aliyuncs.com/google_containers
    local images=(
        "kube-proxy:${KUBE_VERSION}"
        "kube-scheduler:${KUBE_VERSION}"
        "kube-controller-manager:${KUBE_VERSION}"
        "kube-apiserver:${KUBE_VERSION}"
        "pause:${KUBE_PAUSE_VERSION}"
        "etcd:${ETCD_VERSION}"
        "coredns:${CORE_DNS_VERSION}"
    )
    local imageName
    for imageName in "${images[@]}"; do
        docker pull "${ALIYUN_URL}/${imageName}"
        # Retag under the name kubeadm expects, then drop the mirror tag.
        docker tag  "${ALIYUN_URL}/${imageName}" "${GCR_URL}/${imageName}"
        docker rmi "${ALIYUN_URL}/${imageName}"
    done
    docker images
    echo "##################"
    echo "# master镜像拉取完成 #"
    echo "##################"
}

installK8s(){
    # Initialize the control plane on this node (192.168.0.10 / node1) with
    # a pod CIDR that matches flannel's default (10.244.0.0/16).
    # FIX: the original was missing the line-continuation backslashes, so
    # `kubeadm init` ran with no flags and each `--flag` line was then
    # executed as a separate (nonexistent) command.
    sudo kubeadm init \
        --apiserver-advertise-address 192.168.0.10 \
        --kubernetes-version=v1.16.0 \
        --pod-network-cidr=10.244.0.0/16
    echo "##################"
    echo "# kubernetes安装完成 #"
    echo "##################"
}

perpare(){
    # Copy the admin kubeconfig into the invoking user's home so kubectl
    # talks to the new cluster without extra flags.
    # NOTE(review): the name is a typo of "prepare", kept because the call
    # site at the bottom of the script uses it.
    mkdir -p "$HOME/.kube"
    sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
    sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
    printf '%s\n' "##################" "# k8s配置文件准备完成 #" "##################"
}

installFlannel(){
    # Download the flannel manifest, pre-pull its v0.11.0 images for every
    # architecture from the Qiniu mirror (quay.io is hard to reach from
    # mainland China), retag them as quay.io/coreos/*, then apply the manifest.
    wget -O /opt/kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
    FLANNEL_VERSION=v0.11.0
    QUAY_URL=quay.io/coreos
    QINIU_URL=quay-mirror.qiniu.com/coreos
    images=()
    local arch
    for arch in amd64 arm64 arm ppc64le s390x; do
        images+=("flannel:${FLANNEL_VERSION}-${arch}")
    done
    local imageName
    for imageName in "${images[@]}"; do
        docker pull "${QINIU_URL}/${imageName}"
        docker tag  "${QINIU_URL}/${imageName}" "${QUAY_URL}/${imageName}"
        docker rmi "${QINIU_URL}/${imageName}"
    done
    kubectl apply -f /opt/kube-flannel.yml
}


# --- Master-node execution order ---
# 1) System preparation: base packages, then disable firewall/SELinux/swap.
preTools
closeFw
closeSelinux
closeSwap

# 2) Cluster prerequisites: host-name resolution, bridge sysctls, mirrors,
#    container runtime, k8s tooling, pre-pulled control-plane images.
initHosts
updateKernelPerms
updateYumRepository
installDocker
installK8sClient
getK8sComponentImages
# 3) Bootstrap the control plane, then copy the admin kubeconfig.
installK8s
# installFlannel
perpare

大概的安装步骤如上;在安装集群的时候,要注意Kubernetes对依赖的运行时环境版本的要求。安装完成后,可以用下面的命令检查集群状态:

kubectl get cm # 查看集群中的 ConfigMap
kubectl get nodes  # 查看节点的信息
kubectl cluster-info # 查看集群的信息

最后通过kubeadm添加节点

kubeadm join 192.168.0.10:6443 --token kkijby.e2x4j6clya5zron5     --discovery-token-ca-cert-hash sha256:4ca0408560f1008c147b43ead8da2cc55f064e74863aa8462966a134aa182545

在准备work节点的时候,可以通过master部署脚本中获取镜像的方式拉取镜像,也可以通过导入导出的方式获取

docker save -o pause.tar k8s.gcr.io/pause:3.1 
scp # 传输给work节点
docker load -i pause.tar # 导入镜像

最后就是安装flannel

wget -O ./kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
watch -n1 "kubectl get pods --all-namespaces" # 查看是否运行起来了

在添加节点之后,可能遇到下面的错误:

runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized Addresses:

解决方式为

kubectl delete -f kube-flannel.yml
cat << EOF > /var/lib/kubelet/kubeadm-flags.env
KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --pod-infra-container-image=k8s.gcr.io/pause:3.1"
EOF
systemctl restart kubelet.service

最后,我们再重新安装flannel网络插件

kubectl apply -f kube-flannel.yml