K8S-1.28.2-部署
一、主机规划
- 10.0.0.40 k8s-master
- 10.0.0.41 k8s-node1
- 10.0.0.42 k8s-node2
二、修改证书有效期
1、源码包准备
kubernetes-1.28.2.tar.gz
go1.20.8.linux-amd64.tar.gz
2、解压源码包
shell
cd /usr/local/src
tar zxvf kubernetes-1.28.2.tar.gz
3、修改CA有效期为100年
shell
cd kubernetes-1.28.2/
vim staging/src/k8s.io/client-go/util/cert/cert.go
go
// 这个方法里面 NotAfter: now.Add(duration365d * 10).UTC()
// 默认有效期就是 10 年,改成 100 年
// 输入 /NotAfter 查找,回车定位
// NewSelfSignedCACert builds and returns a self-signed CA certificate from the
// supplied config (CommonName/Organization) and signer key.
// Tutorial modification: the NotAfter validity below is changed from the
// upstream default of 10 years to 100 years.
func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
now := time.Now()
tmpl := x509.Certificate{
SerialNumber: new(big.Int).SetInt64(0),
Subject: pkix.Name{
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
NotBefore: now.UTC(),
// upstream default (10 years), kept for reference:
// NotAfter: now.Add(duration365d * 10).UTC(),
NotAfter: now.Add(duration365d * 100).UTC(), // modified: 100-year CA validity
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true, // marks the certificate as a CA
}
// Self-signed: template and parent are the same certificate.
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
if err != nil {
return nil, err
}
return x509.ParseCertificate(certDERBytes)
}
4、修改证书有效期为100年
go
vim cmd/kubeadm/app/constants/constants.go
// 就是这个常量定义 CertificateValidity,改成 * 100 年
// 输入 /CertificateValidity 查找,回车定位
const (
// KubernetesDir is the directory Kubernetes owns for storing various configuration files
KubernetesDir = "/etc/kubernetes"
// ManifestsSubDirName defines directory name to store manifests
ManifestsSubDirName = "manifests"
// TempDirForKubeadm defines temporary directory for kubeadm
// should be joined with KubernetesDir.
TempDirForKubeadm = "tmp"
// CertificateValidity defines the validity for all the signed certificates generated by kubeadm
// CertificateValidity = time.Hour * 24 * 365
CertificateValidity = time.Hour * 24 * 365 * 100
// CACertAndKeyBaseName defines certificate authority base name
CACertAndKeyBaseName = "ca"
// CACertName defines certificate name
CACertName = "ca.crt"
// CAKeyName defines certificate name
CAKeyName = "ca.key"
5、查看1.28.2版本用的go语言版本
shell
cat build/build-image/cross/VERSION
# 如:v1.28.0-go1.20.8-bullseye.0,说明所用语言版本为go1.20.8
6、安装编译用到的依赖
shell
yum install -y gcc make rsync jq
7、安装go环境
shell
# 解压包
tar xzvf go1.20.8.linux-amd64.tar.gz -C /usr/local
# 配置环境变量,编辑 /etc/profile 文件添加如下
# (EOF 必须加单引号,否则 $PATH、$GOROOT 在写入时被提前展开,此时 $GOROOT 还是空值)
cat >> /etc/profile << 'EOF'
export GOROOT=/usr/local/go
export GOPATH=/usr/local/gopath
export PATH=$PATH:$GOROOT/bin
EOF
# 生效配置
source /etc/profile
# 执行 go version 验证,有如下输出证明go语言环境安装成功
go version go1.20.8 linux/amd64
8、编译kubeadm
shell
cd /usr/local/src/kubernetes-1.28.2/
# 编译 kubeadm, 这里主要编译 kubeadm 即可
make all WHAT=cmd/kubeadm GOFLAGS=-v
# 编译 kubelet
# make all WHAT=cmd/kubelet GOFLAGS=-v
# 编译 kubectl
# make all WHAT=cmd/kubectl GOFLAGS=-v
## 编译完产物在 _output/bin/kubeadm 目录下,其中 bin 是使用了软连接
## 真实路径是_output/local/bin/linux/amd64/kubeadm
# 验证
cd _output/bin/
./kubeadm version
# 输出如下:
kubeadm version: &version.Info{Major:"1", Minor:"28", GitVersion:"v1.28.2", GitCommit:"89a4ea3e1e4ddd7f7572286090359983e0387b2f", GitTreeState:"archive", BuildDate:"2024-04-01T05:32:41Z", GoVersion:"go1.20.8", Compiler:"gc", Platform:"linux/amd64"}
三、服务器初始化(所有主机均执行)
1、升级内核(可选)
shell
#安装YUM源
#导入ELRepo软件仓库的公钥
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
#安装elrepo YUM源仓库
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
#升级内核版本,默认安装为最新内核
#kernel-lt:表示longterm,即长期支持的内核
#kernel-ml:表示mainline,即当前主线的内核
yum -y --enablerepo=elrepo-kernel install kernel-ml
#查看系统可用内核,并设置启动项
sudo awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
0 : CentOS Linux (6.8.2-1.el7.elrepo.x86_64) 7 (Core)
1 : CentOS Linux (3.10.0-1160.el7.x86_64) 7 (Core)
2 : CentOS Linux (0-rescue-12e6f1eb3f18477ea0b4379f69302451) 7 (Core)
#指定开机启动内核版本
grub2-set-default 0
#生成 grub 配置文件
grub2-mkconfig -o /etc/grub2.cfg
# 或
grub2-mkconfig -o /boot/grub2/grub.cfg
#使用下面命令看看确认下是否启动默认内核指向上面安装的内核
grubby --default-kernel
#这里的输出结果应该为我们升级后的内核信息
reboot
yum update -y
2、卸载集群 & 清理环境
shell
kubeadm reset -f
modprobe -r ipip
lsmod
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
yum -y remove kubeadm* kubectl* kubelet* docker*
reboot
3、服务器配置IP地址
shell
vim /etc/sysconfig/network-scripts/ifcfg-ens33
systemctl restart network
4、关闭防火墙
shell
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
5、关闭selinux
shell
# 临时关闭
setenforce 0
# 查看临时关闭结果
getenforce
# 永久关闭(重启服务器生效,若不想重启,可同时设置临时和永久关闭)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# 查看永久关闭
cat /etc/selinux/config
6、关闭swap
shell
# 临时关闭
swapoff -a
# 永久关闭
sed -ri 's/.*swap.*/#&/' /etc/fstab
# 验证
free -m
7、设置主机名 & 配置hosts
shell
# 根据规划设置主机名
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
# 在master添加hosts
cat >> /etc/hosts << EOF
10.0.0.40 k8s-master
10.0.0.41 k8s-node1
10.0.0.42 k8s-node2
EOF
8、配置时间同步
shell
yum install ntpdate -y
# 一次性,时间久了可能导致三台服务器时间不一致
ntpdate time.windows.com
# 永久生效,设置计划任务
crontab -e
*/2 * * * * /usr/sbin/ntpdate time.windows.com
9、添加必需的内核模块
shell
# 准备配置文件(主机重新启动时自动加载)
cat >> /etc/modules-load.d/k8s.conf << EOF
overlay
br_netfilter
EOF
# 手动加载,验证
modprobe overlay
modprobe br_netfilter
lsmod | grep -E "overlay|br_netfilter"
# ipvs内核模块
cat >> /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
# 注意:4.19 及以上内核(如前文升级的 kernel-ml)已将 nf_conntrack_ipv4 合并为 nf_conntrack,
# 若 modprobe 报错,请将上面配置文件及此命令中的模块名改为 nf_conntrack
modprobe nf_conntrack_ipv4
lsmod | grep -E "ip_vs|ip_vs_rr|ip_vs_wrr|ip_vs_sh|nf_conntrack_ipv4"
10、添加必需的内核参数
shell
# 准备配置文件
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# 生效配置
sysctl --system
# 查看结果
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
11、安装docker和docker-cri
shell
# 添加YUM仓库
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum -y install docker-ce
systemctl enable docker && systemctl start docker
# 准备docker-cri文件(cri-dockerd-0.3.12-3.el7.x86_64.rpm)
yum install -y cri-dockerd-0.3.12-3.el7.x86_64.rpm
# 指定镜像依赖源
vi /usr/lib/systemd/system/cri-docker.service
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
## pause版本可以通过kubernetes对应的kubeadm查看
kubeadm config images list --kubernetes-version=v1.28.2
# registry.k8s.io/kube-apiserver:v1.28.0
# registry.k8s.io/kube-controller-manager:v1.28.0
# registry.k8s.io/kube-scheduler:v1.28.0
# registry.k8s.io/kube-proxy:v1.28.0
# registry.k8s.io/pause:3.9
# registry.k8s.io/etcd:3.5.9-0
# registry.k8s.io/coredns/coredns:v1.10.1
systemctl daemon-reload
systemctl enable cri-docker && systemctl start cri-docker
12、配置kubeadm
shell
# 添加Kubernetes YUM仓库
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 安装kubeadm (安装kubeadm时,kubelet-1.28.2和kubectl-1.28.2会作为依赖,自动安装)
yum install -y kubeadm-1.28.2
# 备份默认安装的kubeadm可执行文件,替换为编译的kubeadm
mv /usr/bin/kubeadm /usr/bin/kubeadm.bak
cp -arpf /usr/local/src/kubernetes-1.28.2/_output/bin/kubeadm /usr/bin/kubeadm
# 另外两节点可以通过scp命令发送再拷贝到对应目录
scp /usr/local/src/kubernetes-1.28.2/_output/bin/kubeadm root@10.0.0.41:/tmp/
scp /usr/local/src/kubernetes-1.28.2/_output/bin/kubeadm root@10.0.0.42:/tmp/
# 设置containerd和kubelet开机自启动
systemctl start containerd
systemctl enable containerd
systemctl enable kubelet   # kubelet不启动,初始化集群时会自动启动
四、初始化集群
1、准备kubeadm配置文件
shell
kubeadm config print init-defaults --component-configs KubeProxyConfiguration,KubeletConfiguration > kubeadm-config.yaml
# 修改完如下:
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.0.0.40
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/cri-dockerd.sock
imagePullPolicy: IfNotPresent
name: k8s-master
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
---
apiServer:
timeoutForControlPlane: 4m0s
certSANs:
- k8s-master
- 10.0.0.40
apiVersion: kubeadm.k8s.io/v1beta3
controlPlaneEndpoint: "10.0.0.40:6443"
certificatesDir: /etc/kubernetes/pki
clusterName: Dev-kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
acceptContentTypes: ""
burst: 0
contentType: ""
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
qps: 0
clusterCIDR: ""
configSyncPeriod: 0s
conntrack:
maxPerCore: null
min: null
tcpCloseWaitTimeout: null
tcpEstablishedTimeout: null
detectLocal:
bridgeInterface: ""
interfaceNamePrefix: ""
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
localhostNodePorts: null
masqueradeAll: false
masqueradeBit: null
minSyncPeriod: 0s
syncPeriod: 0s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
strictARP: false
syncPeriod: 0s
tcpFinTimeout: 0s
tcpTimeout: 0s
udpTimeout: 0s
kind: KubeProxyConfiguration
logging:
flushFrequency: 0
options:
json:
infoBufferSize: "0"
verbosity: 0
metricsBindAddress: ""
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
showHiddenMetricsForVersion: ""
winkernel:
enableDSR: false
forwardHealthCheckVip: false
networkName: ""
rootHnsEndpointName: ""
sourceVip: ""
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 0s
cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerRuntimeEndpoint: ""
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
flushFrequency: 0
options:
json:
infoBufferSize: "0"
verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
2、初始化控制平面
shell
kubeadm init --config ./kubeadm-config.yaml --upload-certs --ignore-preflight-errors=all
# --upload-certs:上传证书到集群内
# --ignore-preflight-errors=all:忽略一些不重要的错误,如coredns镜像拉取失败
3、配置kubeconfig
shell
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
五、添加node节点
1、添加节点
shell
# node1和node2
kubeadm join 10.0.0.40:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:8b96cca7b3574b9cd8a617743c3f5d2dfe459990631d74c1a1a188e06ae88709 --cri-socket unix:///var/run/cri-dockerd.sock
# --cri-socket,指定容器运行时
# 查看节点(节点状态时显示为NotReady是因为还未部署网络组件)
kubectl get node
2、设置节点role
shell
kubectl label node k8s-master node-role.kubernetes.io/master=
kubectl label node k8s-node1 node-role.kubernetes.io/worker=
kubectl label node k8s-node2 node-role.kubernetes.io/worker=
六、安装网络插件calico
1、下载calico文件
下载地址为:(可能需要科学上网提前下载好,该地址不一定能正常访问)
https://raw.githubusercontent.com/projectcalico/calico/v3.27.0/manifests/calico.yaml
2、修改配置(打开注释,修改为初始化集群时的pod地址段)
yaml
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
3、部署calico相关资源
shell
kubectl apply -f calico.yaml
# 等到calico相关资源部署成功后,再次执行kubectl get node,节点状态均为Ready