Installing Kubernetes 1.30 with kubeadm

Author: 猴君

Test OS: CentOS 7.9

Test environment: single-node virtual machine

Configure a domestic (Aliyun) yum mirror

[root@k8s-master1 ~]# mv /etc/yum.repos.d/* /data
[root@k8s-master1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-master1 ~]# yum makecache
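
A quick sanity check that the new mirror is active (the exact repo list depends on what the mirror currently ships):

[root@k8s-master1 ~]# yum repolist enabled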

Add a hosts entry

[root@k8s-master1 ~]# vi /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.29.1.101 k8s-master1
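
Verify that the new entry resolves (getent reads /etc/hosts through NSS, so this is a reliable check):

[root@k8s-master1 ~]# getent hosts k8s-master1
172.29.1.101    k8s-master1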

Disable the firewall and SELinux

[root@k8s-master1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@k8s-master1 ~]# setenforce 0    # takes effect immediately, lasts until reboot
# set SELINUX=disabled in the config file to make the change permanent across reboots
[root@k8s-master1 ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted
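
Until the next reboot, you can confirm the runtime mode was relaxed by setenforce:

[root@k8s-master1 ~]# getenforce
Permissive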

Disable swap

[root@k8s-master1 ~]# swapoff -a    # temporary, lasts until reboot
[root@k8s-master1 ~]# vim /etc/fstab    # comment out the swap line to disable it permanently
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point>   <type>  <options>       <dump>  <pass>
# / was on /dev/sda3 during installation
UUID=af1f3f13-f592-42af-a4c1-fa38c19e4fda /               ext4    errors=remount-ro 0       1
# /boot/efi was on /dev/sda2 during installation
UUID=0FF3-84A3  /boot/efi       vfat    umask=0077      0       1
# /swapfile                                 none            swap    sw              0       0
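
The kubelet refuses to start with swap enabled (unless explicitly configured to tolerate it), so confirm swap is really off:

[root@k8s-master1 ~]# free -h | grep -i swap
Swap:            0B          0B          0B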

Install Docker

[root@k8s-master1 ~]# cat /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
# install docker-ce from the repo above; plain "docker" would pull the legacy 1.13 package from CentOS extras
[root@k8s-master1 ~]# yum install -y docker-ce docker-ce-cli containerd.io
[root@k8s-master1 ~]# systemctl start docker && systemctl enable docker
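
Quick check that the daemon is up (the reported version depends on what the mirror currently ships):

[root@k8s-master1 ~]# docker version --format '{{.Server.Version}}'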

Configure a registry mirror (image acceleration)

# substitute your own Aliyun accelerator address; JSON does not allow comments, so keep daemon.json pure JSON
[root@k8s-master1 ~]# vi /etc/docker/daemon.json
{
  "registry-mirrors": ["https://8740sp47.mirror.aliyuncs.com"]
}
[root@k8s-master1 ~]# systemctl daemon-reload
[root@k8s-master1 ~]# systemctl restart docker
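
docker info should now report the mirror (the daemon normalizes it with a trailing slash):

[root@k8s-master1 ~]# docker info | grep -A1 'Registry Mirrors'
 Registry Mirrors:
  https://8740sp47.mirror.aliyuncs.com/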

Install containerd

[root@k8s-master1 ~]# yum install -y containerd.io    # the docker-ce repo names the package containerd.io
[root@k8s-master1 ~]# mkdir -p /etc/containerd
[root@k8s-master1 ~]# containerd config default | tee /etc/containerd/config.toml
[root@k8s-master1 ~]# vi /etc/containerd/config.toml
# set (or add) this option so containerd uses the systemd cgroup driver, matching the kubelet's default
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true
# point the sandbox image at the Aliyun mirror; kubeadm v1.30 pairs with pause 3.9
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
[root@k8s-master1 ~]# systemctl restart containerd
[root@k8s-master1 ~]# systemctl enable containerd
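
To confirm both edits took effect after the restart, grep the live configuration:

[root@k8s-master1 ~]# containerd config dump | grep -E 'SystemdCgroup|sandbox_image'
# expect SystemdCgroup = true and the Aliyun pause image in the output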

Configure the Kubernetes repository, install kubelet, kubeadm, and kubectl, and pin their versions

[root@k8s-master1 ~]# vi /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
# the exclude line pins the packages so a routine "yum update" cannot upgrade them;
# bypass it explicitly when installing
[root@k8s-master1 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
[root@k8s-master1 ~]# systemctl enable --now kubelet
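
Before running kubeadm init, make sure the kernel prerequisites are in place: kubeadm 1.30's preflight checks require IPv4 forwarding, and the br_netfilter module lets iptables see bridged pod traffic. These are the standard settings from the Kubernetes documentation:

[root@k8s-master1 ~]# cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
[root@k8s-master1 ~]# modprobe overlay && modprobe br_netfilter
[root@k8s-master1 ~]# cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF
[root@k8s-master1 ~]# sysctl --system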

Initialize the control-plane node

# replace the advertise address with this node's own IP;
# --pod-network-cidr matches the default flannel network used below
[root@k8s-master1 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address 172.29.1.101 --image-repository registry.aliyuncs.com/google_containers
[root@k8s-master1 ~]# mkdir -p $HOME/.kube
[root@k8s-master1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master1 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
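
Right after init the node reports NotReady and CoreDNS stays Pending; both resolve once a CNI plugin is installed in the next step. The rest of the control plane should already be Running:

[root@k8s-master1 ~]# kubectl get pods -n kube-system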

Install the network plugin (flannel)

[root@k8s-master1 ~]# vi kube-flannel.yaml
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  # "Network" below must match the --pod-network-cidr passed to kubeadm init
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: docker.io/flannel/flannel:v0.24.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: docker.io/flannel/flannel:v0.24.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
[root@k8s-master1 ~]# kubectl apply -f kube-flannel.yaml
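
The DaemonSet should come up within a minute or so; watch for the kube-flannel pod to reach Running:

[root@k8s-master1 ~]# kubectl get pods -n kube-flannel -w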

Check node status and generate the join command

[root@k8s-master1 ~]# kubectl get nodes
NAME          STATUS   ROLES           AGE   VERSION
k8s-master1   Ready    control-plane   22m   v1.30.0
[root@k8s-master1 ~]# kubeadm token create --print-join-command    # prints the command for joining worker nodes
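
The printed command has roughly this shape (the token and CA cert hash below are placeholders; copy the exact command from your own output and run it on each worker node):

kubeadm join 172.29.1.101:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>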

Remove the taint from the master node

On a single-node cluster the control-plane taint must be removed so that ordinary workloads can be scheduled here:

[root@k8s-master1 ~]# kubectl taint nodes k8s-master1 node-role.kubernetes.io/control-plane:NoSchedule-
node/k8s-master1 untainted
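
Confirm that no taints remain and that a test pod schedules (the nginx image is only an example):

[root@k8s-master1 ~]# kubectl describe node k8s-master1 | grep Taints
Taints:             <none>
[root@k8s-master1 ~]# kubectl run test-nginx --image=nginx
[root@k8s-master1 ~]# kubectl get pod test-nginx -o wide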
