Courseware:
Link: https://pan.baidu.com/s/1P8xhfPGyPvkZ4_vV_Ek-yg
Extraction code: xrwg
Single-master setup
IP plan (configure each address as a static IP)
Disk: 60 GB or more
192.168.1.30  k8s-1  master  CentOS 7.6, 4 GB RAM
192.168.1.31  k8s-2  worker  CentOS 7.6, 4 GB RAM
192.168.1.32  k8s-3  worker  CentOS 7.6, 4 GB RAM
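Since the plan above calls for static IPs, here is a minimal sketch of a static-IP configuration for CentOS 7, shown for k8s-1; the interface name ens33, gateway, and DNS server are assumptions, so adjust them to your environment and repeat on the other nodes with the matching IPADDR.
# /etc/sysconfig/network-scripts/ifcfg-ens33   (interface name assumed)
TYPE=Ethernet
BOOTPROTO=static
NAME=ens33
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.1.30
NETMASK=255.255.255.0
GATEWAY=192.168.1.1          # assumed gateway
DNS1=114.114.114.114         # assumed DNS server
# apply the change:
systemctl restart network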
Configure /etc/hosts resolution on all three machines (.30, .31, and .32)
vim /etc/hosts
Append the following lines at the end of the file:
192.168.1.30 k8s-1
192.168.1.31 k8s-2
192.168.1.32 k8s-3
Set up passwordless SSH between the nodes
On k8s-1
Generate a key pair
[root@k8s-1 ~]# ssh-keygen     (press Enter at every prompt)
Copy the public key to every machine
[root@k8s-1 ~]# ssh-copy-id k8s-1
[root@k8s-1 ~]# ssh-copy-id k8s-2
[root@k8s-1 ~]# ssh-copy-id k8s-3
On k8s-2
Generate a key pair
[root@k8s-2 ~]# ssh-keygen     (press Enter at every prompt)
Copy the public key to every machine
[root@k8s-2 ~]# ssh-copy-id k8s-1
[root@k8s-2 ~]# ssh-copy-id k8s-2
[root@k8s-2 ~]# ssh-copy-id k8s-3
On k8s-3
Generate a key pair
[root@k8s-3 ~]# ssh-keygen     (press Enter at every prompt)
Copy the public key to every machine
[root@k8s-3 ~]# ssh-copy-id k8s-1
[root@k8s-3 ~]# ssh-copy-id k8s-2
[root@k8s-3 ~]# ssh-copy-id k8s-3
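A quick, optional way to confirm that passwordless SSH works is to loop over the hosts from any one node; each iteration should print the remote hostname without prompting for a password (a sketch):
[root@k8s-1 ~]# for h in k8s-1 k8s-2 k8s-3; do ssh root@$h hostname; done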
Disable the firewall and SELinux
Disable the firewall
[root@k8s-1 ~]# systemctl disable firewalld --now
[root@k8s-2 ~]# systemctl disable firewalld --now
[root@k8s-3 ~]# systemctl disable firewalld --now
Disable SELinux
[root@k8s-1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@k8s-2 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@k8s-3 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@k8s-1 ~]# setenforce 0
[root@k8s-2 ~]# setenforce 0
[root@k8s-3 ~]# setenforce 0
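Optional check that SELinux is off for the current boot and will stay off after a reboot:
[root@k8s-1 ~]# getenforce                             # should print Permissive (Disabled after a reboot)
[root@k8s-1 ~]# grep '^SELINUX=' /etc/selinux/config   # should print SELINUX=disabled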
Disable the swap partition
Turn it off for the current boot
[root@k8s-1 ~]# swapoff -a
[root@k8s-2 ~]# swapoff -a
[root@k8s-3 ~]# swapoff -a
Turn it off permanently
vim /etc/fstab
Comment out the line that contains swap
Example:
#UUID=8f5ada1f-0e4f-4e45-ae8c-0d5d6f2e90ef swap swap defaults 0 0
The UUID will differ on your machine; what matters is commenting out the line that references swap.
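If you prefer not to edit /etc/fstab by hand, the sed one-liner below (a sketch; it comments out any line mentioning swap) does the same thing, and free confirms that swap is off:
[root@k8s-1 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab   # comment out the swap entry
[root@k8s-1 ~]# free -m                               # the Swap line should show 0 total after swapoff -a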
Q1: What is swap?
When memory runs low, Linux automatically uses swap, moving part of the in-memory data to disk, which degrades performance.
Q2: Why disable the swap partition?
Disabling swap is mainly a performance decision; Kubernetes was designed with performance in mind.
If your machine has little memory and you do not want to disable swap, you can pass --ignore-preflight-errors=Swap
to kubeadm during installation instead.
Tune kernel parameters
[root@k8s-1 ~]# modprobe br_netfilter
[root@k8s-2 ~]# modprobe br_netfilter
[root@k8s-3 ~]# modprobe br_netfilter
[root@k8s-1 ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@k8s-2 ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@k8s-3 ~]# echo "modprobe br_netfilter" >> /etc/profile
Run the following on all three machines:
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@k8s-1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
[root@k8s-2 ~]# sysctl -p /etc/sysctl.d/k8s.conf
[root@k8s-3 ~]# sysctl -p /etc/sysctl.d/k8s.conf
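Optional check that the module is loaded and the parameters took effect on each machine:
[root@k8s-1 ~]# lsmod | grep br_netfilter
[root@k8s-1 ~]# sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward   # both should print 1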
Q3: Why enable ip_forward?
If ip_forward is not enabled on the host, containers running on that host cannot be reached from other hosts.
Q4: Why enable net.bridge.bridge-nf-call-ip6tables?
By default, traffic sent from a container to the default bridge is not forwarded to the outside. To enable forwarding, set:
net.bridge.bridge-nf-call-ip6tables = 1
Q5: Why load the br_netfilter module?
If you add the following to /etc/sysctl.conf:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
and run sysctl -p before the module is loaded, it fails with an error such as "sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory".
Fix:
modprobe br_netfilter
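Note that appending modprobe to /etc/profile (as done above) only runs when someone logs in; a more conventional way to load br_netfilter at every boot on a systemd system is a modules-load.d drop-in (a sketch; either approach works):
[root@k8s-1 ~]# echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf   # systemd loads the modules listed here at boot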
Configure the Aliyun yum repository
Back up the original repo file
[root@k8s-1 ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
[root@k8s-2 ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
[root@k8s-3 ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
Download the Aliyun mirror repo file
[root@k8s-1 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-2 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-3 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
Configure the Aliyun Docker repo
[root@k8s-1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-2 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-3 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Configure the Aliyun repo used to install the Kubernetes packages
[root@k8s-1 ~]# tee /etc/yum.repos.d/kubernetes.repo <<-'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
[root@k8s-2 ~]# tee /etc/yum.repos.d/kubernetes.repo <<-'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
[root@k8s-3 ~]# tee /etc/yum.repos.d/kubernetes.repo <<-'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
Synchronize the time
Install ntpdate
[root@k8s-1 ~]# yum install ntpdate -y
[root@k8s-2 ~]# yum install ntpdate -y
[root@k8s-3 ~]# yum install ntpdate -y
[root@k8s-1 ~]# ntpdate cn.pool.ntp.org
[root@k8s-2 ~]# ntpdate cn.pool.ntp.org
[root@k8s-3 ~]# ntpdate cn.pool.ntp.org
Add a cron job so the time stays in sync
[root@k8s-1 ~]# crontab -e
Add the following line (sync once per hour):
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@k8s-2 ~]# crontab -e
Add the following line (sync once per hour):
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@k8s-3 ~]# crontab -e
Add the following line (sync once per hour):
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
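Instead of opening crontab -e interactively on every node, the same entry can be appended non-interactively; the sketch below preserves any existing crontab lines:
[root@k8s-1 ~]# (crontab -l 2>/dev/null; echo "0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org") | crontab -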
Enable IPVS. If IPVS is not enabled, kube-proxy falls back to iptables, which is less efficient, so loading the IPVS kernel modules is recommended.
Upload ipvs.modules to the /etc/sysconfig/modules/ directory on k8s-1, k8s-2, and k8s-3.
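If you do not have the courseware copy of ipvs.modules at hand, a typical version of the script looks like the sketch below (module names assumed for the CentOS 7.6 kernel; newer kernels use nf_conntrack instead of nf_conntrack_ipv4):
#!/bin/bash
# load the IPVS scheduler modules and the connection-tracking module
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4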
[root@k8s-1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
[root@k8s-2 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
[root@k8s-3 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
Install base packages
[root@k8s-1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
[root@k8s-2 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
[root@k8s-3 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
Install docker-ce and start Docker
[root@k8s-1 ~]# yum install docker-ce docker-ce-cli containerd.io -y && systemctl enable docker.service --now
[root@k8s-2 ~]# yum install docker-ce docker-ce-cli containerd.io -y && systemctl enable docker.service --now
[root@k8s-3 ~]# yum install docker-ce docker-ce-cli containerd.io -y && systemctl enable docker.service --now
Run the following on all three machines.
Edit /etc/docker/daemon.json with vim and add the following content:
{
  "registry-mirrors": ["https://rsbud4vc.mirror.aliyuncs.com", "https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn", "https://dockerhub.azk8s.cn", "http://hub-mirror.c.163.com", "http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
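Because passwordless SSH was configured earlier, the same daemon.json can also be written once on k8s-1 and pushed to the other two nodes instead of editing it by hand three times (a sketch):
[root@k8s-1 ~]# for h in k8s-2 k8s-3; do scp /etc/docker/daemon.json root@$h:/etc/docker/daemon.json; done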
Reload systemd
[root@k8s-1 ~]# systemctl daemon-reload
[root@k8s-2 ~]# systemctl daemon-reload
[root@k8s-3 ~]# systemctl daemon-reload
Restart Docker
[root@k8s-1 ~]# systemctl restart docker
[root@k8s-2 ~]# systemctl restart docker
[root@k8s-3 ~]# systemctl restart docker
Q: Why specify native.cgroupdriver=systemd?
Without it, the Kubernetes installation may fail with:
failed to create kubelet: misconfiguration: kubelet cgroup driver: "cgroupfs" is
different from docker cgroup driver: "systemd"
The kubelet and Docker must use the same cgroup driver; if one uses cgroupfs and the other systemd, the mismatch prevents containers from starting.
Check with docker info; it should report:
Cgroup Driver: systemd
To change Docker:
Edit or create /etc/docker/daemon.json and add the following line:
"exec-opts": ["native.cgroupdriver=systemd"]
Then restart Docker.
Install the components needed to initialize Kubernetes
Install the kubeadm packages on the master and on the worker nodes; they are used later to set up Kubernetes (the cluster itself is not installed yet). Note:
the kubelet package is already included in our offline yum repository and can be installed directly; to install another Kubernetes version, configure the online repository instead.
[root@k8s-1 ~]# yum install -y kubelet-1.20.4 kubeadm-1.20.4 kubectl-1.20.4
[root@k8s-2 ~]# yum install -y kubelet-1.20.4 kubeadm-1.20.4 kubectl-1.20.4
[root@k8s-3 ~]# yum install -y kubelet-1.20.4 kubeadm-1.20.4 kubectl-1.20.4
Enable and start the service at boot
[root@k8s-1 ~]# systemctl enable kubelet --now
[root@k8s-2 ~]# systemctl enable kubelet --now
[root@k8s-3 ~]# systemctl enable kubelet --now
Note: what each package does
kubelet: runs on every node in the cluster; it is the agent that starts Pods and containers
kubeadm: the command-line tool used to initialize and bootstrap the cluster
kubectl: the command-line client for talking to the cluster; with kubectl you can deploy and manage applications, view resources, and create, delete, and update components
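A quick, optional check that all three tools were installed at the expected version:
[root@k8s-1 ~]# kubeadm version -o short           # should print v1.20.4
[root@k8s-1 ~]# kubectl version --client --short
[root@k8s-1 ~]# kubelet --version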
Initialize the cluster
On k8s-1, import the Docker images offline. k8s-images-v1.20.4.tar.gz is in the courseware; upload it to k8s-1 and load it on all three machines so that the worker nodes do not fail to find the images.
[root@k8s-1 ~]# docker load -i k8s-images-v1.20.4.tar.gz
[root@k8s-1 ~]# gzip -dc k8s-images-v1.20.4.tar.gz |ssh root@k8s-2 'cat | docker load'
[root@k8s-1 ~]# gzip -dc k8s-images-v1.20.4.tar.gz |ssh root@k8s-3 'cat | docker load'
Create the kubeadm-config.yaml file on k8s-1:
[root@k8s-1 ~]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.4
controlPlaneEndpoint: 192.168.1.29:16443
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - 192.168.1.30
  - 192.168.1.31
  - 192.168.1.32
  - 192.168.1.29
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.10.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
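The kubeadm-config.yaml file above and the flag-based command in the next step are two alternative ways of driving kubeadm init; only one of them is needed. If you want to use the file instead of the flags, the call would look like the sketch below (note that controlPlaneEndpoint and the 192.168.1.29 certSAN only make sense if you actually have such a virtual IP):
[root@k8s-1 ~]# kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors=SystemVerification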
Initialize the Kubernetes cluster with kubeadm
kubeadm init --kubernetes-version=1.20.4 --apiserver-advertise-address=192.168.1.30 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=SystemVerification
Explanation of the flags:
--kubernetes-version=1.20.4
    the Kubernetes version to install
--apiserver-advertise-address=192.168.1.30
    the IP address of the control-plane node
--image-repository registry.aliyuncs.com/google_containers
    the registry the images come from (the same images that were imported above)
--pod-network-cidr=10.244.0.0/16
    the network segment used for Pod IPs
Output:
[init] Using Kubernetes version: v1.20.4
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.12. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.30]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-1 localhost] and IPs [192.168.1.30 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-1 localhost] and IPs [192.168.1.30 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 84.502166 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-1 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node k8s-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: dqkcc3.wwwsz0tbkyzgvmmv
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.30:6443 --token dqkcc3.wwwsz0tbkyzgvmmv \
--discovery-token-ca-cert-hash sha256:0ea06cc8b8f1fcf982d0413912fcd99f3c44b905c3b8eb98937ee2768db69a44
Run the following so that kubectl can talk to the cluster:
[root@k8s-1 ~]# mkdir -p $HOME/.kube
[root@k8s-1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
!!! Only add the environment variable below if the kubectl command still does not work !!!
Add it to the system environment variables and reload them
[root@k8s-1 ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
[root@k8s-1 ~]# source /etc/profile
Check the cluster nodes:
[root@k8s-1 ~]# kubectl get nodes
Output:
NAME STATUS ROLES AGE VERSION
k8s-1 NotReady control-plane,master 19m v1.20.4
Explanation:
NotReady
    the node has no Pod network yet because no network plugin has been installed
control-plane,master
    this is the control-plane node
VERSION
    the Kubernetes version
Join k8s-2 and k8s-3 to the cluster
Run the following command on k8s-1, then copy the join command it prints and run it on k8s-2 and on k8s-3:
[root@k8s-1 ~]# kubeadm token create --print-join-command
!! Copy your own output, do not copy the example below !!
Output:
kubeadm join 192.168.1.30:6443 --token lqf9bz.wchtyz3nfywc9vk8 --discovery-token-ca-cert-hash sha256:0ea06cc8b8f1fcf982d0413912fcd99f3c44b905c3b8eb98937ee2768db69a44
!!!!!!!!!!!!!!!!!!!!
By default, the token expires after 24 hours. To add a new node to the cluster after the token has expired,
generate a new token and compute the sha256 hash of the CA certificate:
[root@k8s-1 ~]# kubeadm token create
[root@k8s-1 ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
[root@k8s-1 ~]# kubeadm join 192.168.1.30:6443 --token <token from the first command> --discovery-token-ca-cert-hash sha256:<hash from the second command>
!!!!!!!!!!!!!!!!!!!!
Run on the k8s-2 node:
[root@k8s-2 ~]# kubeadm join 192.168.1.30:6443 --token lqf9bz.wchtyz3nfywc9vk8 --discovery-token-ca-cert-hash sha256:0ea06cc8b8f1fcf982d0413912fcd99f3c44b905c3b8eb98937ee2768db69a44
Output:
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.12. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Output like the above means the node joined the cluster successfully.
Run on the k8s-3 node.
Note: kubeadm token create --print-join-command only works where the admin kubeconfig exists (the control-plane node); running it on a worker fails as shown below, so use the join command generated on k8s-1.
[root@k8s-3 ~]# kubeadm token create --print-join-command
failed to load admin kubeconfig: open /root/.kube/config: no such file or directory
To see the stack trace of this error execute with --v=5 or higher
[root@k8s-3 ~]# kubeadm join 192.168.1.30:6443 --token lqf9bz.wchtyz3nfywc9vk8 --discovery-token-ca-cert-hash sha256:0ea06cc8b8f1fcf982d0413912fcd99f3c44b905c3b8eb98937ee2768db69a44
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.12. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Output like the above means the node joined the cluster successfully.
On k8s-1, check whether k8s-2 and k8s-3 have joined the cluster:
[root@k8s-1 ~]# kubectl get nodes
Output:
NAME STATUS ROLES AGE VERSION
k8s-1 NotReady control-plane,master 83m v1.20.4
k8s-2 NotReady <none> 6m19s v1.20.4
k8s-3 NotReady <none> 4m48s v1.20.4
Explanation:
Both nodes now appear in the output, which means they were added successfully. <none> in the ROLES column simply means no role label has been set yet; the displayed role can be customized.
k8s-2 NotReady <none> 6m19s v1.20.4
k8s-3 NotReady <none> 4m48s v1.20.4
Change the <none> role to worker, which is just a matter of adding a label
Label k8s-2
[root@k8s-1 ~]# kubectl label node k8s-2 node-role.kubernetes.io/worker=worker
Label k8s-3
[root@k8s-1 ~]# kubectl label node k8s-3 node-role.kubernetes.io/worker=worker
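As an aside, a label that was set by mistake can be removed by appending a minus sign to the key, for example:
[root@k8s-1 ~]# kubectl label node k8s-2 node-role.kubernetes.io/worker-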
Check the node list again
[root@k8s-1 ~]# kubectl get nodes
Output: the former <none> role is now shown as worker
NAME STATUS ROLES AGE VERSION
k8s-1 NotReady control-plane,master 90m v1.20.4
k8s-2 NotReady worker 13m v1.20.4
k8s-3 NotReady worker 11m v1.20.4
Install the Kubernetes network plugin: Calico
Upload calico.yaml (included in the courseware) to k8s-1 and install the Calico network plugin from the YAML file:
[root@k8s-1 ~]# kubectl apply -f calico.yaml
Output:
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
Output like the above means the installation succeeded.
Check the running Pods
[root@k8s-1 ~]# kubectl get pods -n kube-system
Output:
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6949477b58-qc6k4 1/1 Running 0 4m59s
calico-node-tn6pj 1/1 Running 0 4m58s
calico-node-vz4mg 1/1 Running 0 4m58s
calico-node-z4wx5 1/1 Running 0 4m58s
coredns-7f89b7bc75-l2m2k 1/1 Running 0 122m
coredns-7f89b7bc75-mrrsj 1/1 Running 0 122m
etcd-k8s-1 1/1 Running 0 122m
kube-apiserver-k8s-1 1/1 Running 0 122m
kube-controller-manager-k8s-1 1/1 Running 0 122m
kube-proxy-d284v 1/1 Running 0 122m
kube-proxy-dgqj9 1/1 Running 0 45m
kube-proxy-q6rfr 1/1 Running 0 43m
kube-scheduler-k8s-1 1/1 Running 0 122m
Here you can see calico-kube-controllers-6949477b58-qc6k4 running; this component implements network policy.
calico-node-tn6pj
calico-node-vz4mg
calico-node-z4wx5
One calico-node Pod runs on every node.
View Pod details such as the node each Pod is running on and its IP:
[root@k8s-1 ~]# kubectl get pods -n kube-system -o wide
Output:
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-6949477b58-qc6k4 1/1 Running 0 12m 10.244.13.65 k8s-3 <none> <none>
calico-node-tn6pj 1/1 Running 0 11m 192.168.1.32 k8s-3 <none> <none>
calico-node-vz4mg 1/1 Running 0 11m 192.168.1.30 k8s-1 <none> <none>
calico-node-z4wx5 1/1 Running 0 11m 192.168.1.31 k8s-2 <none> <none>
coredns-7f89b7bc75-l2m2k 1/1 Running 0 129m 10.244.13.67 k8s-3 <none> <none>
coredns-7f89b7bc75-mrrsj 1/1 Running 0 129m 10.244.13.66 k8s-3 <none> <none>
etcd-k8s-1 1/1 Running 0 129m 192.168.1.30 k8s-1 <none> <none>
kube-apiserver-k8s-1 1/1 Running 0 129m 192.168.1.30 k8s-1 <none> <none>
kube-controller-manager-k8s-1 1/1 Running 0 129m 192.168.1.30 k8s-1 <none> <none>
kube-proxy-d284v 1/1 Running 0 129m 192.168.1.30 k8s-1 <none> <none>
kube-proxy-dgqj9 1/1 Running 0 52m 192.168.1.31 k8s-2 <none> <none>
kube-proxy-q6rfr 1/1 Running 0 50m 192.168.1.32 k8s-3 <none> <none>
kube-scheduler-k8s-1 1/1 Running 0 129m 192.168.1.30 k8s-1 <none> <none>
Check that the STATUS of every node is now Ready
[root@k8s-1 ~]# kubectl get nodes
Output:
NAME STATUS ROLES AGE VERSION
k8s-1 Ready control-plane,master 134m v1.20.4
k8s-2 Ready worker 57m v1.20.4
k8s-3 Ready worker 55m v1.20.4
Test that the Pod network works
Import the test image
[root@k8s-1 ~]# docker load -i busybox-1-28.tar.gz
Start the container; because of --rm it is deleted as soon as you exit
[root@k8s-1 ~]# kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
Inside the container, check whether www.baidu.com can be pinged:
/ # ping www.baidu.com
Output:
PING www.baidu.com (110.242.68.4): 56 data bytes
64 bytes from 110.242.68.4: seq=0 ttl=53 time=21.119 ms
64 bytes from 110.242.68.4: seq=1 ttl=53 time=23.346 ms
64 bytes from 110.242.68.4: seq=2 ttl=53 time=21.632 ms
^C
--- www.baidu.com ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 21.119/22.032/23.346 ms
Test that cluster DNS works
Output:
/ # nslookup kubernetes.default.svc.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
Exit the container
/ # exit