OS: CentOS 7.9 minimal installation
The Aliyun base and epel yum repositories are already configured
Bash
10.0.0.201 master
10.0.0.204 node01
10.0.0.205 node02
10.0.0.206 node03
Bash
[root@master ~]# echo "10.0.0.201 master" >> /etc/hosts
[root@master ~]# echo "10.0.0.204 node01" >> /etc/hosts
[root@master ~]# echo "10.0.0.205 node02" >> /etc/hosts
[root@master ~]# echo "10.0.0.206 node03" >> /etc/hosts
Bash
[root@node01 ~]# echo "10.0.0.201 master" >> /etc/hosts
[root@node01 ~]# echo "10.0.0.204 node01" >> /etc/hosts
[root@node01 ~]# echo "10.0.0.205 node02" >> /etc/hosts
[root@node01 ~]# echo "10.0.0.206 node03" >> /etc/hosts
Bash
[root@node02 ~]# echo "10.0.0.201 master" >> /etc/hosts
[root@node02 ~]# echo "10.0.0.204 node01" >> /etc/hosts
[root@node02 ~]# echo "10.0.0.205 node02" >> /etc/hosts
[root@node02 ~]# echo "10.0.0.206 node03" >> /etc/hosts
Bash
[root@node03 ~]# echo "10.0.0.201 master" >> /etc/hosts
[root@node03 ~]# echo "10.0.0.204 node01" >> /etc/hosts
[root@node03 ~]# echo "10.0.0.205 node02" >> /etc/hosts
[root@node03 ~]# echo "10.0.0.206 node03" >> /etc/hosts
Bash
[root@master ~]# swapoff -a
[root@master ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
Bash
[root@node01 ~]# swapoff -a
[root@node01 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
Bash
[root@node02 ~]# swapoff -a
[root@node02 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
Bash
[root@node03 ~]# swapoff -a
[root@node03 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
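Optionally, you can confirm on each host that swap really is off; a quick check (a sketch, run on any of the four hosts) might look like this:
Bash
# The Swap line should show 0 total/used once swapoff has run
free -m
# Should print no active swap devices
swapon -s
# The swap entry in /etc/fstab should now be commented out
grep swap /etc/fstab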
Bash
[root@master ~]# modprobe br_netfilter
[root@master ~]# echo "modprobe br_netfilter" >> /etc/profile
Create the file /etc/sysctl.d/k8s.conf and add the following content:
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
Apply the configuration:
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf
Bash
[root@node01 ~]# modprobe br_netfilter
[root@node01 ~]# echo "modprobe br_netfilter" >> /etc/profile
Create the file /etc/sysctl.d/k8s.conf and add the following content:
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
Apply the configuration:
[root@node01 ~]# sysctl -p /etc/sysctl.d/k8s.conf
Bash
[root@node02 ~]# modprobe br_netfilter
[root@node02 ~]# echo "modprobe br_netfilter" >> /etc/profile
Create the file /etc/sysctl.d/k8s.conf and add the following content:
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
Apply the configuration:
[root@node02 ~]# sysctl -p /etc/sysctl.d/k8s.conf
Bash
[root@node03 ~]# modprobe br_netfilter
[root@node03 ~]# echo "modprobe br_netfilter" >> /etc/profile
Create the file /etc/sysctl.d/k8s.conf and add the following content:
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
Apply the configuration:
[root@node03 ~]# sysctl -p /etc/sysctl.d/k8s.conf
Bash
bridge-nf allows netfilter to filter IPv4/ARP/IPv6 packets passing over a Linux bridge. For example, with net.bridge.bridge-nf-call-iptables=1 set, packets forwarded by a layer-2 bridge are also filtered by the iptables FORWARD rules. The commonly used options are:
net.bridge.bridge-nf-call-arptables: whether bridged ARP packets are filtered by the arptables FORWARD chain
net.bridge.bridge-nf-call-ip6tables: whether bridged IPv6 packets are filtered by the ip6tables chains
net.bridge.bridge-nf-call-iptables: whether bridged IPv4 packets are filtered by the iptables chains
net.bridge.bridge-nf-filter-vlan-tagged: whether VLAN-tagged packets are filtered by iptables/arptables
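To confirm these settings took effect (the bridge-nf keys only exist once the br_netfilter module is loaded), a quick check could look like this:
Bash
# br_netfilter must be loaded, otherwise the net.bridge.* keys do not exist
lsmod | grep br_netfilter
# All three values should print 1 after sysctl -p
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward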
1.5.1 To make it easier to inspect the IPVS proxy rules, install the management tool ipvsadm.
Bash
[root@master ~]# yum install ipset ipvsadm -y
Install the IPVS tools on node01
Bash
[root@node01 ~]# yum install ipset ipvsadm -y
Install the IPVS tools on node02
Bash
[root@node02 ~]# yum install ipset ipvsadm -y
Install the IPVS tools on node03
Bash
[root@node03 ~]# yum install ipset ipvsadm -y
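With ipvsadm installed you can list the IPVS virtual servers and their real servers. Note that this guide leaves kube-proxy in its default iptables mode, so the table will stay empty unless kube-proxy is later switched to IPVS; the command below is only a sketch of how to inspect the rules:
Bash
# List all IPVS virtual/real servers with numeric addresses
ipvsadm -Ln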
Bash
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
Output:
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
Run on node01:
Bash
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@node01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
Output:
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
Run on node02:
Bash
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@node02 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
Output:
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
Run on node03:
Bash
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@node03 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
Output:
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
Bash
Run on all four hosts:
[root@master ~]# yum install ntpdate -y
[root@node01 ~]# yum install ntpdate -y
[root@node02 ~]# yum install ntpdate -y
[root@node03 ~]# yum install ntpdate -y
[root@master ~]# ntpdate cn.pool.ntp.org
[root@node01 ~]# ntpdate cn.pool.ntp.org
[root@node02 ~]# ntpdate cn.pool.ntp.org
[root@node03 ~]# ntpdate cn.pool.ntp.org
Add a cron job on each host to keep the time in sync
[root@master ~]# crontab -e
Add the following line:
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@node01 ~]# crontab -e
Add the following line:
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@node02 ~]# crontab -e
Add the following line:
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@node03 ~]# crontab -e
Add the following line:
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
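You can verify that the entry was saved; with the schedule above, ntpdate runs once per hour:
Bash
# Show the current user's crontab; the ntpdate line should be listed
crontab -l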
Install base packages
Bash
[root@master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@node01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@node02 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@node03 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
Configure the Aliyun docker-ce repository
Bash
[root@master ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node01 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node02 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node03 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Point the repository at the Aliyun docker-ce mirror:
[root@master ~]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
[root@node01 ~]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
[root@node02 ~]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
[root@node03 ~]# sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
Install docker-ce
[root@master ~]# yum install docker-ce-19.03.15 -y
[root@node01 ~]# yum install docker-ce-19.03.15 -y
[root@node02 ~]# yum install docker-ce-19.03.15 -y
[root@node03 ~]# yum install docker-ce-19.03.15 -y
Start docker and enable it at boot
[root@master ~]# systemctl enable docker.service --now
[root@node01 ~]# systemctl enable docker.service --now
[root@node02 ~]# systemctl enable docker.service --now
[root@node03 ~]# systemctl enable docker.service --now
Edit the docker configuration file /etc/docker/daemon.json on all four hosts.
Open /etc/docker/daemon.json with vim and add the content below:
[root@master ~]# vim /etc/docker/daemon.json
[root@node01 ~]# vim /etc/docker/daemon.json
[root@node02 ~]# vim /etc/docker/daemon.json
[root@node03 ~]# vim /etc/docker/daemon.json
Add the following content:
{
  "registry-mirrors": [
    "https://rsbud4vc.mirror.aliyuncs.com",
    "https://registry.docker-cn.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://dockerhub.azk8s.cn",
    "http://hub-mirror.c.163.com",
    "http://qtid6917.mirror.aliyuncs.com",
    "https://rncxm540.mirror.aliyuncs.com"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
Restart docker
[root@master ~]# systemctl restart docker
[root@node01 ~]# systemctl restart docker
[root@node02 ~]# systemctl restart docker
[root@node03 ~]# systemctl restart docker
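After the restart you can confirm that the daemon.json settings were picked up; something like the following should show the systemd cgroup driver and the configured registry mirrors:
Bash
# Should print: Cgroup Driver: systemd
docker info 2>/dev/null | grep -i "cgroup driver"
# The registry mirrors from daemon.json should be listed here
docker info 2>/dev/null | grep -iA8 "registry mirrors"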
Bash
Configure the Aliyun Kubernetes repository
Add the Aliyun mirror repository on all hosts. Run the following on every host.
Run on master:
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Run on node01:
[root@node01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Run on node02:
[root@node02 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Run on node03:
[root@node03 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
kubeadm: the command used to bootstrap the cluster.
kubelet: runs on every node in the cluster and is responsible for starting Pods and containers.
kubectl: the command-line tool used to talk to the cluster.
Install the Kubernetes components
[root@master ~]# yum install -y kubelet-1.22.3 kubeadm-1.22.3 kubectl-1.22.3
[root@node01 ~]# yum install -y kubelet-1.22.3 kubeadm-1.22.3 kubectl-1.22.3
[root@node02 ~]# yum install -y kubelet-1.22.3 kubeadm-1.22.3 kubectl-1.22.3
[root@node03 ~]# yum install -y kubelet-1.22.3 kubeadm-1.22.3 kubectl-1.22.3
Enable kubelet and set it to start at boot
[root@master ~]# systemctl enable kubelet --now
[root@node01 ~]# systemctl enable kubelet --now
[root@node02 ~]# systemctl enable kubelet --now
[root@node03 ~]# systemctl enable kubelet --now
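At this point kubelet will keep restarting until kubeadm init (or kubeadm join) has run; that is expected. You can check the installed versions on each host, for example:
Bash
kubeadm version -o short          # should print v1.22.3
kubelet --version                 # should print Kubernetes v1.22.3
kubectl version --client --short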
Initialize the cluster. This step pulls images, so it may take a while.
Run this on master only.
Before initializing the cluster you can pre-pull the container images Kubernetes needs. The official images are hosted abroad and cannot be reached directly, so they are pulled from a domestic mirror repository instead.
[root@master ~]# kubeadm config images pull --image-repository oldxu3957 --kubernetes-version v1.22.3
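If you want to see exactly which images this pulls, kubeadm can print the list first (the oldxu3957 repository is the one used throughout this guide):
Bash
# List the control-plane images kubeadm expects for v1.22.3
kubeadm config images list --image-repository oldxu3957 --kubernetes-version v1.22.3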
Initialize:
[root@master ~]# kubeadm init --apiserver-advertise-address=10.0.0.201 --image-repository oldxu3957 --kubernetes-version=1.22.3 --service-cidr=10.96.0.0/16 --pod-network-cidr=192.168.0.0/16
Flag explanations:
# --apiserver-advertise-address  the address the API server advertises on
# --image-repository  the registry to pull the control-plane images from
# --kubernetes-version  the Kubernetes version to run
# --service-cidr  the Service network range (internal load-balancing network)
# --pod-network-cidr  the Pod network range
Output:
[init] Using Kubernetes version: v1.22.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master] and IPs [10.96.0.1 10.0.0.201]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master] and IPs [10.0.0.201 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master] and IPs [10.0.0.201 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 6.502901 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.22" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 3k05db.moq6imynpolhl18z
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.0.0.201:6443 --token 3k05db.moq6imynpolhl18z \
--discovery-token-ca-cert-hash sha256:f3f1c2329869c0452414159f11e454c51f21dd1fbc8968c8d832e8500c50e5e4
!!! This line is the command used to join the cluster !!!
Configure the kubeconfig for the user so that kubectl commands can be used
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
!!! Only add the environment variable below if the kubectl commands above do not work !!!
Add it to the system environment variables and reload them:
[root@master ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
[root@master ~]# source /etc/profile
!!! Only add the environment variable if the kubectl commands do not work !!!
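Before joining the worker nodes you can quickly confirm that kubectl can reach the API server from master, for example:
Bash
# Should print the control-plane and CoreDNS endpoints
kubectl cluster-info
# Only the master node is listed at this point, and it stays NotReady until a network plugin is installed
kubectl get nodes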
Join node01, node02 and node03 to the cluster
Run the following command on master, copy the output, and run it on node01, node02 and node03:
[root@master ~]# kubeadm token create --print-join-command
!! Copy your own output, not the one shown here !!
Output:
kubeadm join 10.0.0.201:6443 --token 3k05db.moq6imynpolhl18z \
--discovery-token-ca-cert-hash sha256:f3f1c2329869c0452414159f11e454c51f21dd1fbc8968c8d832e8500c50e5e4
Run on node01:
[root@node01 ~]# kubeadm join 10.0.0.201:6443 --token 3k05db.moq6imynpolhl18z \
> --discovery-token-ca-cert-hash sha256:f3f1c2329869c0452414159f11e454c51f21dd1fbc8968c8d832e8500c50e5e4
Output:
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Output like the above means the node joined the cluster successfully.
Run on node02:
[root@node02 ~]# kubeadm join 10.0.0.201:6443 --token 3k05db.moq6imynpolhl18z \
> --discovery-token-ca-cert-hash sha256:f3f1c2329869c0452414159f11e454c51f21dd1fbc8968c8d832e8500c50e5e4
Output:
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Output like the above means the node joined the cluster successfully.
Run on node03:
[root@node03 ~]# kubeadm join 10.0.0.201:6443 --token 3k05db.moq6imynpolhl18z \
> --discovery-token-ca-cert-hash sha256:f3f1c2329869c0452414159f11e454c51f21dd1fbc8968c8d832e8500c50e5e4
Output:
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Output like the above means the node joined the cluster successfully.
On master, check the cluster status to confirm that node01, node02 and node03 have joined the cluster.
[root@master ~]# kubectl get nodes
Output:
NAME STATUS ROLES AGE VERSION
master NotReady control-plane,master 17m v1.22.3
node01 NotReady <none> 7m9s v1.22.3
node02 NotReady <none> 7m6s v1.22.3
node03 NotReady <none> 7m4s v1.22.3
Bash
On master, download the kube-flannel.yml manifest for the Flannel network plugin
[root@master ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml --no-check-certificate
Change the Pod network to 192.168.0.0/16 (to match --pod-network-cidr used during kubeadm init):
[root@master ~]# sed -i 's#10.244.0.0/16#192.168.0.0/16#g' kube-flannel.yml
Apply kube-flannel.yml:
[root@master ~]# kubectl apply -f kube-flannel.yml
Output:
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
Wait a few minutes, then run the following command again to check the cluster status. A STATUS of Ready means the network plugin was applied successfully.
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 32m v1.22.3
node01 Ready <none> 22m v1.22.3
node02 Ready <none> 22m v1.22.3
node03 Ready <none> 22m v1.22.3
Note: a STATUS of Ready means the network plugin was applied successfully.
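If a node stays NotReady, it can help to check that the Flannel Pods are running on every node and that each node has been assigned a Pod CIDR. A sketch of such a check (the app=flannel label is assumed to match the kube-flannel.yml manifest used here):
Bash
# One kube-flannel-ds Pod should be Running on each of the four nodes
kubectl get pods -n kube-system -l app=flannel -o wide
# Each node should have a subnet of 192.168.0.0/16 assigned
kubectl get nodes -o custom-columns=NAME:.metadata.name,PODCIDR:.spec.podCIDR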
List the Pods running in the kube-system namespace
[root@master ~]# kubectl get pod -n kube-system
Output:
NAME READY STATUS RESTARTS AGE
coredns-589d89566f-hw7fl 1/1 Running 0 35m
coredns-589d89566f-nz8lk 1/1 Running 0 35m
etcd-master 1/1 Running 0 35m
kube-apiserver-master 1/1 Running 0 35m
kube-controller-manager-master 1/1 Running 0 35m
kube-flannel-ds-2t9nz 1/1 Running 0 5m54s
kube-flannel-ds-jhw9b 1/1 Running 0 5m54s
kube-flannel-ds-nrtw6 1/1 Running 0 5m54s
kube-flannel-ds-rxmqt 1/1 Running 0 5m54s
kube-proxy-9dmc4 1/1 Running 0 25m
kube-proxy-kmtjw 1/1 Running 0 25m
kube-proxy-n88qb 1/1 Running 0 35m
kube-proxy-tpvgj 1/1 Running 0 25m
kube-scheduler-master 1/1 Running 0 35m
Bash
Install kubectl command completion
[root@master ~]# yum install bash-completion -y
[root@master ~]# echo 'source <(kubectl completion bash)' >> ~/.bashrc
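To make completion available in the current shell immediately (and, optionally, for all users), something like this can be used:
Bash
# Load completion in the current shell
source ~/.bashrc
# Optional: install completion system-wide via the bash-completion package
kubectl completion bash > /etc/bash_completion.d/kubectl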
Bash
If you run into other problems during the cluster installation, you can reset with the following commands:
[root@master ~]# kubeadm reset
[root@master ~]# ifconfig tunl0 down && ip link delete tunl0
[root@master ~]# rm -rf /var/lib/cni/
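On worker nodes that were already joined, a similar cleanup is usually needed before re-joining. A sketch of the commonly used commands (adjust to your environment):
Bash
kubeadm reset -f
# Remove leftover CNI and kubeconfig state
rm -rf /etc/cni/net.d /var/lib/cni/ $HOME/.kube/config
# Flush iptables rules and any IPVS rules created by kube-proxy
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm -C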