1 Kubernetes v1.26 + Containerd

Created by nohi, 2023-01-06

See also: Deploying Kubernetes 1.26 with kubeadm

Environment configuration (run on all nodes)

# vi /etc/hosts
10.0.0.216 k8s-m1
10.0.0.217 k8s-n1
10.0.0.218 k8s-n2

# Adjust the NIC configuration and disable IPv6

Firewall configuration

Run on all hosts.

Disable the existing firewalld firewall:
# systemctl disable firewalld
# systemctl stop firewalld
# firewall-cmd --state
not running

SELinux configuration

Run on all hosts. Changing the SELinux configuration requires rebooting the operating system.

setenforce 0 # takes effect immediately but only until reboot; the command below makes it permanent

# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
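
To double-check the SELinux change (standard commands; getenforce should now report Permissive, and the config file should contain SELINUX=disabled):

getenforce
grep '^SELINUX=' /etc/selinux/config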

Time synchronization

Run on all hosts. On a minimal OS install, the time-synchronization package (chrony is used here) must be installed first.

systemctl start chronyd
systemctl enable chronyd
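
To verify that time synchronization is actually working, chrony's client can be queried (standard chronyc subcommands):

chronyc sources -v
chronyc tracking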

Create the /etc/modules-load.d/containerd.conf configuration file:

cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF


# Run the following commands to load the modules immediately:
modprobe overlay
modprobe br_netfilter
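
To confirm the modules are actually loaded (a standard lsmod check):

lsmod | grep -E 'overlay|br_netfilter'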

Create the /etc/sysctl.d/99-kubernetes-cri.conf configuration file:

cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
vm.swappiness=0
EOF

# Run the following command to apply the settings:
sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
  • CentOS 8.5: changes when IPv6 is disabled

    cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
    net.ipv6.conf.all.disable_ipv6 = 1
    net.ipv6.conf.default.disable_ipv6 = 1
    net.ipv4.ip_forward = 1
    user.max_user_namespaces=28633
    vm.swappiness=0
    EOF
    
    # Set all IPv6 options to no
    vi /etc/sysconfig/network-scripts/ifcfg-ens192
    TYPE=Ethernet
    PROXY_METHOD=none
    BROWSER_ONLY=no
    BOOTPROTO=none
    DEFROUTE=yes
    IPV4_FAILURE_FATAL=no
    IPV6INIT=no
    IPV6_AUTOCONF=no
    IPV6_DEFROUTE=no
    IPV6_FAILURE_FATAL=no
    NAME=ens192
    UUID=35cc807a-2de1-453c-984d-94a33ec11431
    DEVICE=ens192
    ONBOOT=yes
    IPV6_PRIVACY=no
    IPADDR=10.0.0.216
    PREFIX=24
    GATEWAY=10.0.0.1
    
    # Reload the NIC: nmcli c reload <interface name>
    nmcli c reload ens192
    

Prerequisites for enabling IPVS

Run on all hosts.

# On CentOS 8.5, change nf_conntrack_ipv4 to nf_conntrack (same below)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF


# Make the script executable and run it
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

# Check that the modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Install ipvsadm

yum install -y ipset ipvsadm
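
Once kube-proxy is running in ipvs mode later on, the virtual servers it creates can be inspected with ipvsadm (a quick check; ipvsadm -Ln just lists the IPVS rules):

ipvsadm -Ln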

Disable the swap partition

After editing /etc/fstab the operating system must be rebooted; without a reboot, swap can be disabled temporarily with swapoff -a.

Permanently disable the swap partition (requires rebooting the OS):
# cat /etc/fstab
......
# /dev/mapper/centos-swap swap                    swap    defaults        0 0
Add a # at the beginning of the line above to comment it out
# Temporarily disable
swapoff -a  # disable swap
free -h # check memory and swap usage
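
The fstab edit can also be done non-interactively; a small sketch, assuming the swap entry is the only line in /etc/fstab with a whitespace-delimited swap field:

sed -ri '/\sswap\s/ s/^#?/#/' /etc/fstab
swapoff -a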

1.1 Installing containerd

1.1.1 Download the binary package

wget https://github.com/containerd/containerd/releases/download/v1.6.14/cri-containerd-cni-1.6.14-linux-amd64.tar.gz
# Extract
tar -zxvf cri-containerd-cni-1.6.14-linux-amd64.tar.gz -C /

Note: testing showed that the runc bundled in the cri-containerd-cni tarball has dynamic-linking problems on CentOS 7, so download runc separately from its GitHub releases and use it to replace the runc installed by the containerd bundle above:

wget https://github.com/opencontainers/runc/releases/download/v1.1.2/runc.amd64
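
Then replace the bundled runc with the downloaded binary; a minimal sketch, assuming the cri-containerd-cni tarball installed runc to /usr/local/sbin/runc:

install -m 755 runc.amd64 /usr/local/sbin/runc
runc --version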

1.1.2 Generate the containerd configuration file

mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
  • Edit the generated configuration file /etc/containerd/config.toml (a sed sketch for these edits follows after this list):

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]  
    ...  
    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]    
    SystemdCgroup = true
    
  • Also change the following in /etc/containerd/config.toml:

    [plugins."io.containerd.grpc.v1.cri"]  
     ...  
     # sandbox_image = "k8s.gcr.io/pause:3.6"  
     sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9" 
    
  • Enable containerd at boot and start it:

    systemctl enable containerd --now
    
  • Test with crictl and make sure it prints version information without any error output:

    crictl version
    
    Version:  0.1.0
    RuntimeName:  containerd
    RuntimeVersion:  v1.6.14
    RuntimeApiVersion:  v1
    
  • If the error "unknown service runtime.v1alpha2.RuntimeService" appears, remove the configuration and restart containerd (regenerate the file and re-apply the edits above afterwards):

    rm /etc/containerd/config.toml
    systemctl restart containerd
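
  • The two config.toml edits above (SystemdCgroup and sandbox_image) can also be applied non-interactively; a small sed sketch, assuming the file was freshly generated by containerd config default:

    sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
    sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
    systemctl restart containerd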
    

1.2 Deploying Kubernetes 1.26 with kubeadm

1.2.1 Aliyun yum repository

cat > /etc/yum.repos.d/kubernetes.repo  << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Check which kubeadm versions are available (to confirm the latest): yum list kubeadm.x86_64 --showduplicates | sort -f

1.2.2 Install

Install either the latest version or a specific version (choose one).

yum makecache
# Install the latest version
yum -y install kubelet kubeadm kubectl
# Install a specific version
yum -y install kubelet-1.26.X kubeadm-1.26.X kubectl-1.26.X
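
To confirm what was installed (standard version commands):

kubeadm version -o short
kubelet --version
kubectl version --client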

1.2.3 Cluster initialization

  • Enable the kubelet service on every node:

    systemctl enable kubelet.service
    
  • Generate the configuration file kubeadm.yaml:

    kubeadm config print init-defaults --component-configs KubeletConfiguration prints the default configuration used for cluster initialization.

    kubeadm.yaml is as follows:

    apiVersion: kubeadm.k8s.io/v1beta3
    kind: InitConfiguration
    localAPIEndpoint:
      # the master node IP
      advertiseAddress: 10.0.0.216
      bindPort: 6443
    nodeRegistration:
      criSocket: unix:///run/containerd/containerd.sock
      taints:
      - effect: PreferNoSchedule
        key: node-role.kubernetes.io/master
    ---
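    # Note (added): kubeadm 1.26 warns that kubeadm.k8s.io/v1beta2 is deprecated (see the init output below);
    # 'kubeadm config migrate' can rewrite this section to kubeadm.k8s.io/v1beta3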
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterConfiguration
    kubernetesVersion: 1.26.0
    # image repository
    imageRepository: registry.aliyuncs.com/google_containers
    networking:
      podSubnet: 10.243.0.0/16
    ---
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    cgroupDriver: systemd
    failSwapOn: false
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    
    • imageRepository is set to the Aliyun registry mirror
    • advertiseAddress: 10.0.0.216 is the master node IP
  • Initialize:

    kubeadm init --config kubeadm.yaml
    
    • To reset (if the initialization needs to be redone):

      kubeadm reset --cri-socket unix:///run/containerd/containerd.sock
      
      • On success:

        [root@k8s-m1 ~]# kubeadm init --config kubeadm.yaml
        W0109 13:46:19.711370   52328 common.go:84] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta2". Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
        [init] Using Kubernetes version: v1.26.0
        [preflight] Running pre-flight checks
        	[WARNING FileExisting-tc]: tc not found in system path
        [preflight] Pulling images required for setting up a Kubernetes cluster
        [preflight] This might take a minute or two, depending on the speed of your internet connection
        [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
        [certs] Using certificateDir folder "/etc/kubernetes/pki"
        [certs] Generating "ca" certificate and key
        [certs] Generating "apiserver" certificate and key
        [certs] apiserver serving cert is signed for DNS names [k8s-m1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.216]
        [certs] Generating "apiserver-kubelet-client" certificate and key
        [certs] Generating "front-proxy-ca" certificate and key
        [certs] Generating "front-proxy-client" certificate and key
        [certs] Generating "etcd/ca" certificate and key
        [certs] Generating "etcd/server" certificate and key
        [certs] etcd/server serving cert is signed for DNS names [k8s-m1 localhost] and IPs [10.0.0.216 127.0.0.1 ::1]
        [certs] Generating "etcd/peer" certificate and key
        [certs] etcd/peer serving cert is signed for DNS names [k8s-m1 localhost] and IPs [10.0.0.216 127.0.0.1 ::1]
        [certs] Generating "etcd/healthcheck-client" certificate and key
        [certs] Generating "apiserver-etcd-client" certificate and key
        [certs] Generating "sa" key and public key
        [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
        [kubeconfig] Writing "admin.conf" kubeconfig file
        [kubeconfig] Writing "kubelet.conf" kubeconfig file
        [kubeconfig] Writing "controller-manager.conf" kubeconfig file
        [kubeconfig] Writing "scheduler.conf" kubeconfig file
        [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
        [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
        [kubelet-start] Starting the kubelet
        [control-plane] Using manifest folder "/etc/kubernetes/manifests"
        [control-plane] Creating static Pod manifest for "kube-apiserver"
        [control-plane] Creating static Pod manifest for "kube-controller-manager"
        [control-plane] Creating static Pod manifest for "kube-scheduler"
        [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
        [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
        [apiclient] All control plane components are healthy after 6.002364 seconds
        [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
        [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
        [upload-certs] Skipping phase. Please see --upload-certs
        [mark-control-plane] Marking the node k8s-m1 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
        [mark-control-plane] Marking the node k8s-m1 as control-plane by adding the taints [node-role.kubernetes.io/master:PreferNoSchedule]
        [bootstrap-token] Using token: c7t582.f1g50dlhliwmvr9a
        [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
        [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
        [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
        [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
        [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
        [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
        [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
        [addons] Applied essential addon: CoreDNS
        [addons] Applied essential addon: kube-proxy
        
        Your Kubernetes control-plane has initialized successfully!
        
        To start using your cluster, you need to run the following as a regular user:
        
          mkdir -p $HOME/.kube
          sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
          sudo chown $(id -u):$(id -g) $HOME/.kube/config
        
        Alternatively, if you are the root user, you can run:
        
          export KUBECONFIG=/etc/kubernetes/admin.conf
        
        You should now deploy a pod network to the cluster.
        Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
          https://kubernetes.io/docs/concepts/cluster-administration/addons/
        
        Then you can join any number of worker nodes by running the following on each as root:
        
        kubeadm join 10.0.0.216:6443 --token c7t582.f1g50dlhliwmvr9a \
        	--discovery-token-ca-cert-hash sha256:3c55755a429e2057793afedef0dd4faf878d4df3727ff464365d43264428a679
        
    • The output above records the complete initialization; from it you can see the key steps required to bootstrap a Kubernetes cluster by hand. The key phases are:

      • [certs] generates the various certificates
      • [kubeconfig] generates the kubeconfig files
      • [kubelet-start] writes the kubelet configuration file "/var/lib/kubelet/config.yaml"
      • [control-plane] creates static pods for the apiserver, controller-manager and scheduler from the YAML files in /etc/kubernetes/manifests
      • [bootstrap-token] generates the bootstrap token; record it, as it is needed later when adding nodes with kubeadm join
      • [addons] installs the essential add-ons CoreDNS and kube-proxy. The commands below configure kubectl access for a regular user:
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
      
  • Join the other nodes to the cluster (this command is printed when initialization succeeds):

    kubeadm join 10.0.0.216:6443 --token c7t582.f1g50dlhliwmvr9a \
    	--discovery-token-ca-cert-hash sha256:3c55755a429e2057793afedef0dd4faf878d4df3727ff464365d43264428a679
    
  • Check the cluster status and confirm that each component is Healthy:

    kubectl get cs
    
    Warning: v1 ComponentStatus is deprecated in v1.19+
    NAME                 STATUS    MESSAGE                         ERROR
    controller-manager   Healthy   ok
    scheduler            Healthy   ok
    etcd-0               Healthy   {"health":"true","reason":""}
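
  • It can also help to check the nodes and system pods at this point; before the pod network add-on (next section) is installed, nodes typically show NotReady and the CoreDNS pods stay Pending (standard kubectl commands):

    kubectl get nodes -o wide
    kubectl get pods -n kube-system -o wide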
    

1.3 Installing the package manager Helm 3

Helm is the package manager for Kubernetes, and the following steps use Helm to install common Kubernetes components. Install helm on the master node first.

wget https://get.helm.sh/helm-v3.10.3-linux-amd64.tar.gz
tar -zxvf helm-v3.10.3-linux-amd64.tar.gz
mv linux-amd64/helm  /usr/local/bin/

Run helm list and confirm there is no error output.

1.4 Deploying the pod network add-on Calico

Calico is chosen as the pod network add-on for the cluster; the steps below install calico with helm.

Download the tigera-operator helm chart:

wget https://github.com/projectcalico/calico/releases/download/v3.24.5/tigera-operator-v3.24.5.tgz

View the chart's customizable values:

helm show values tigera-operator-v3.24.5.tgz

imagePullSecrets: {}

installation:
  enabled: true
  kubernetesProvider: ""

apiServer:
  enabled: true

certs:
  node:
    key:
    cert:
    commonName:
  typha:
    key:
    cert:
    commonName:
    caBundle:

# Resource requests and limits for the tigera/operator pod.
resources: {}

# Tolerations for the tigera/operator pod.
tolerations:
- effect: NoExecute
  operator: Exists
- effect: NoSchedule
  operator: Exists

# NodeSelector for the tigera/operator pod.
nodeSelector:
  kubernetes.io/os: linux

# Custom annotations for the tigera/operator pod.
podAnnotations: {}

# Custom labels for the tigera/operator pod.
podLabels: {}

# Image and registry configuration for the tigera/operator pod.
tigeraOperator:
  image: tigera/operator
  version: v1.28.5
  registry: quay.io
calicoctl:
  image: docker.io/calico/ctl
  tag: v3.24.5
  • The customized calico_values.yaml is as follows:

    # The values above can be customized, e.g. pulling the calico images from a private registry.
    # This is just a local environment for testing the new k8s release, so only the lines below are set.
    apiServer:
      enabled: false
    
  • Install calico with helm:

    helm install calico tigera-operator-v3.24.5.tgz -n kube-system  --create-namespace -f calico_values.yaml
    
  • Wait for and confirm that all pods are in the Running state:

    kubectl get pod -n kube-system | grep tigera-operator
    tigera-operator-5fb55776df-wxbph   1/1     Running   0             5m10s
    
    kubectl get pods -n calico-system
    NAME                                       READY   STATUS    RESTARTS   AGE
    calico-kube-controllers-68884f975d-5d7p9   1/1     Running   0          5m24s
    calico-node-twbdh                          1/1     Running   0          5m24s
    calico-typha-7b4bdd99c5-ssdn2              1/1     Running   0          5m24s
    
  • Check the API resources that calico added to the cluster:

    kubectl api-resources | grep calico
    bgpconfigurations                              crd.projectcalico.org/v1               false        BGPConfiguration
    bgppeers                                       crd.projectcalico.org/v1               false        BGPPeer
    blockaffinities                                crd.projectcalico.org/v1               false        BlockAffinity
    caliconodestatuses                             crd.projectcalico.org/v1               false        CalicoNodeStatus
    clusterinformations                            crd.projectcalico.org/v1               false        ClusterInformation
    felixconfigurations                            crd.projectcalico.org/v1               false        FelixConfiguration
    globalnetworkpolicies                          crd.projectcalico.org/v1               false        GlobalNetworkPolicy
    globalnetworksets                              crd.projectcalico.org/v1               false        GlobalNetworkSet
    hostendpoints                                  crd.projectcalico.org/v1               false        HostEndpoint
    ipamblocks                                     crd.projectcalico.org/v1               false        IPAMBlock
    ipamconfigs                                    crd.projectcalico.org/v1               false        IPAMConfig
    ipamhandles                                    crd.projectcalico.org/v1               false        IPAMHandle
    ippools                                        crd.projectcalico.org/v1               false        IPPool
    ipreservations                                 crd.projectcalico.org/v1               false        IPReservation
    kubecontrollersconfigurations                  crd.projectcalico.org/v1               false        KubeControllersConfiguration
    networkpolicies                                crd.projectcalico.org/v1               true         NetworkPolicy
    networksets                                    crd.projectcalico.org/v1               true         NetworkSet
    
  • These API resources belong to calico, so it is not recommended to manage them directly with kubectl; use calicoctl instead. Install calicoctl as a kubectl plugin:

cd /usr/local/bin
curl -o kubectl-calico -L "https://github.com/projectcalico/calicoctl/releases/download/v3.21.5/calicoctl-linux-amd64"
chmod +x kubectl-calico
  • Verify that the plugin works:
kubectl calico -h

1.5 Verify that cluster DNS works

kubectl run curl --image=radial/busyboxplus:curl -it
If you don't see a command prompt, try pressing enter.
[ root@curl:/ ]$

# If the pod is already running, exec into it instead
kubectl exec -it curl -- /bin/sh
  • Inside the pod, run nslookup kubernetes.default and confirm name resolution works:

    $ nslookup kubernetes.default
    Server:    10.96.0.10
    Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
    
    Name:      kubernetes.default
    Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
    
  • 2023-01-08

    On CentOS 8.5 the lookup initially failed as shown below; after disabling IPv6 (see above) and re-running kubeadm init, it resolved successfully.

    [ root@curl:/ ]$ nslookup kubernetes.default
    Server:    10.96.0.10
    Address 1: 10.96.0.10
    
    nslookup: can't resolve 'kubernetes.default'
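
  • After testing, the temporary pod can be removed (standard cleanup):

    kubectl delete pod curl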
    

2 Deploying common Kubernetes components

2.1 Deploying ingress-nginx with Helm

To expose services inside the cluster to the outside world, an Ingress is needed. The following uses Helm to deploy ingress-nginx onto Kubernetes; the Nginx Ingress Controller is deployed on an edge node of the cluster.

  • kubectl get node

    NAME     STATUS   ROLES           AGE     VERSION
    k8s-m1   Ready    control-plane   6h39m   v1.26.0
    k8s-n1   Ready    <none>          6h28m   v1.26.0
    k8s-n2   Ready    <none>          6h27m   v1.26.0
    
  • Use k8s-m1 (10.0.0.216) as the edge node and label it:

    kubectl label node k8s-m1 node-role.kubernetes.io/edge=
    
  • Download the ingress-nginx helm chart:

    wget https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.2/ingress-nginx-4.4.2.tgz
    
  • View the customizable values of the ingress-nginx-4.4.2.tgz chart:

    helm show values ingress-nginx-4.4.2.tgz
    
  • Customize ingress_values.yaml as follows:

    controller:
      ingressClassResource:
        name: nginx
        enabled: true
        default: true
        controllerValue: "k8s.io/ingress-nginx"
      admissionWebhooks:
        enabled: false
      replicaCount: 1
      image:
        # registry: registry.k8s.io
        # image: ingress-nginx/controller
        # tag: "v1.5.1"
        registry: docker.io
        image: unreachableg/registry.k8s.io_ingress-nginx_controller
        tag: "v1.5.1"
        digest: sha256:97fa1ff828554ff4ee1b0416e54ae2238b27d1faa6d314d5a94a92f1f99cf767
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/edge: ''
      affinity:
        podAntiAffinity:
            requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - nginx-ingress
                - key: component
                  operator: In
                  values:
                  - controller
              topologyKey: kubernetes.io/hostname
      tolerations:
          - key: node-role.kubernetes.io/master
            operator: Exists
            effect: NoSchedule
          - key: node-role.kubernetes.io/master
            operator: Exists
            effect: PreferNoSchedule
    

    The nginx ingress controller replicaCount is 1, and it will be scheduled onto the edge node k8s-m1. No externalIPs are set for the nginx ingress controller service; instead, hostNetwork: true makes the controller use the host network. Because registry.k8s.io (formerly k8s.gcr.io) is blocked, the image is replaced with unreachableg/registry.k8s.io_ingress-nginx_controller; pull it in advance:

  • Pull the image:

    crictl pull unreachableg/registry.k8s.io_ingress-nginx_controller:v1.5.1
    
  • Install:

    helm install ingress-nginx ingress-nginx-4.4.2.tgz --create-namespace -n ingress-nginx -f ingress_values.yaml
    
  • Check the running status:

    $ kubectl get pod -n ingress-nginx
    NAME                                       READY   STATUS    RESTARTS   AGE
    ingress-nginx-controller-7c96f857f-8f5ls   1/1     Running   0          2m3s
    
  • Visit http://10.0.0.216 (k8s-m1); it should return the nginx 404 page (a curl check sketch follows below).
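
  • The same check can be scripted with curl, run from any host that can reach the edge node; a 404 from the default backend is the expected response (a small sketch):

    curl -s -o /dev/null -w "%{http_code}\n" http://10.0.0.216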

2.2 Deploying the dashboard with Helm

metrics-server

  • Deploy metrics-server first:

    wget https://github.com/kubernetes-sigs/metrics-server/releases/download/metrics-server-helm-chart-3.8.3/components.yaml
    

    Change the image in components.yaml to docker.io/unreachableg/k8s.gcr.io_metrics-server_metrics-server:v0.6.2, and add --kubelet-insecure-tls to the container's startup arguments:

        spec:
          containers:
          - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
            - --metric-resolution=15s
            # newly added argument below; indent with spaces, not tabs (remove this comment line)
            - --kubelet-insecure-tls  
    
    kubectl apply -f components.yaml
    
  • Once the metrics-server pod starts normally, after a short while kubectl top can show metrics for the cluster and pods:

    [root@k8s-m1 ~]# kubectl top node
    NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
    k8s-m1   406m         10%    1197Mi          69%
    k8s-n1   199m         4%     990Mi           57%
    k8s-n2   99m          2%     957Mi           55%
    
    [root@k8s-m1 ~]# kubectl top pod -n kube-system
    NAME                               CPU(cores)   MEMORY(bytes)
    coredns-5bbd96d687-52ttw           3m           22Mi
    coredns-5bbd96d687-prtk8           7m           24Mi
    etcd-k8s-m1                        47m          74Mi
    kube-apiserver-k8s-m1              122m         401Mi
    kube-controller-manager-k8s-m1     41m          76Mi
    kube-proxy-5vp7z                   9m           31Mi
    kube-proxy-8vj5c                   16m          33Mi
    kube-proxy-qwmzl                   15m          24Mi
    kube-scheduler-k8s-m1              19m          38Mi
    metrics-server-5d466b9d66-2wkrv    7m           15Mi
    tigera-operator-7795f5d79b-rhht5   3m           49Mi
    

dashboard

  • Deploy the Kubernetes dashboard with helm; first add the chart repo:

    [root@k8s-m1 ~]# helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
    "kubernetes-dashboard" has been added to your repositories
    [root@k8s-m1 ~]# helm repo update
    Hang tight while we grab the latest from your chart repositories...
    ...Successfully got an update from the "kubernetes-dashboard" chart repository
    Update Complete. ⎈Happy Helming!⎈
    
  • View the chart's customizable values:

    helm show values kubernetes-dashboard/kubernetes-dashboard
    
  • Customize dashboard_values.yaml as follows:

    image:
      repository: kubernetesui/dashboard
      tag: v2.7.0
    ingress:
      enabled: true
      annotations:
        nginx.ingress.kubernetes.io/ssl-redirect: "true"
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
      hosts:
      - k8s.thisisnohi.com
      tls:
        - secretName: example-com-tls-secret
          hosts:
          - k8s.thisisnohi.com
    metricsScraper:
      enabled: true
    

    In the configuration above, the deployed dashboard is exposed through ingress at the domain k8s.thisisnohi.com, with HTTPS enabled for that domain.

    To enable HTTPS, an SSL certificate must be obtained (or self-signed) for this domain; the certificate and private key files used here are thisisnohi.crt and thisisnohi.key, generated below.

    • Configure the TLS certificate (reference):

      # 1. Run the following command to generate a self-signed TLS certificate.
      openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout thisisnohi.key -out thisisnohi.crt -subj "/CN=k8s.thisisnohi.com/O=k8s.thisisnohi.com"
      
  • First create the secret that stores the SSL certificate for k8s.thisisnohi.com:

    kubectl create secret tls example-com-tls-secret \
      --cert=thisisnohi.crt \
      --key=thisisnohi.key \
      -n kube-system
    # kubectl create secret tls cert-example --key tls.key --cert tls.crt  
    # Check the newly created TLS secret
    kubectl get secret -A
    
  • Deploy the dashboard with helm:

    helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard \
    -n kube-system \
    -f dashboard_values.yaml
    

**Confirm that the command above deployed successfully.**

  • Create an admin ServiceAccount:

    kubectl create serviceaccount kube-dashboard-admin-sa -n kube-system
    
    kubectl create clusterrolebinding kube-dashboard-admin-sa \
    --clusterrole=cluster-admin --serviceaccount=kube-system:kube-dashboard-admin-sa
    
  • Create the token the cluster admin needs to log in to the dashboard:

    kubectl create token kube-dashboard-admin-sa -n kube-system --duration=87600h
    
    eyJhbGciOiJSUzI1NiIsImtpZCI6IlFKZzhKeVJQVFFXb2l1XzZLWE41V2JNR0R5WGx4dEZoZE5UVmtvNS1OQ3MifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxOTg4Njg4NTI2LCJpYXQiOjE2NzMzMjg1MjYsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJrdWJlLWRhc2hib2FyZC1hZG1pbi1zYSIsInVpZCI6ImY4NmI0MjdlLTljZjQtNDk5MS04MmU5LWYxYWRhNDUyZDg5ZSJ9fSwibmJmIjoxNjczMzI4NTI2LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06a3ViZS1kYXNoYm9hcmQtYWRtaW4tc2EifQ.wQg7_3BV_jtDUyjFaTqs6ekSu3iBOwLGT5BDp6YaRwMdebotcUE2OYyTthKsjyKs9WEXGDPEE5fwPDtlHq7f2UUbF1s2vt9suHJKEvVhNvCS4TbBKRJljy0G4DbsLenAo9Cf3acNVH_ES_fRBTpoiRRuBBrW5_qUUY1320-LnR-7h_YGvdbSwdNtkZa3KELQsdEhlkvYgC34_JOUONWw6-xGPJ02WjKWzIBtfYlNNPTKC8C23676m67JTn7zMhEKJ-aMCXvA_CLSoq0hlui0PBOO6mpv0SeIgy-uG8XqSSbHOiOO_VMwOJVinVDVanaf_7FOp3JxdKdCE30nghJqtg
    
  • Use the token above to log in to the k8s dashboard.
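
  • To reach the dashboard by its ingress host name from a workstation, point the domain at the edge node; a sketch assuming a Linux/macOS client and the self-signed certificate from above (hence curl -k):

    echo "10.0.0.216 k8s.thisisnohi.com" >> /etc/hosts
    curl -k -s -o /dev/null -w "%{http_code}\n" https://k8s.thisisnohi.com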
