规划
IP |
Hostname |
配置 |
系统 |
192.168.0.11 |
master1 |
4c8g |
Centos7.6 |
192.168.0.12 |
node1 |
4c8g |
Centos7.6 |
192.168.0.13 |
node2 |
4c8g |
Centos7.6 |
hosts文件
1 2 3 4
cat /etc/hosts
192.168.0.11 master1
192.168.0.12 node1
192.168.0.13 node2
|
关闭防火墙及selinux
1 2 3 4 5
systemctl stop firewalld
systemctl disable firewalld

setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
|
加载内核模块
1 2 3 4 5 6 7 8 9 10 11 12
|
modprobe br_netfilter
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
sysctl --system
sysctl -p
|
安装ipvs
1 2 3 4 5 6 7 8 9 10 11
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
|
安装必要软件
1 2 3 4 5 6 7 8 9 10 11
| yum install ipset ipvsadm wget vim net-tools curl chrony -y
systemctl enable chronyd
systemctl start chronyd
chronyc sources

swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab   # 永久关闭swap，否则重启后kubelet无法启动
|
安装Containerd
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
|
wget https://download.fastgit.org/containerd/containerd/releases/download/v1.5.5/cri-containerd-cni-1.5.5-linux-amd64.tar.gz
tar -C / -xzf cri-containerd-cni-1.5.5-linux-amd64.tar.gz
echo 'export PATH=$PATH:/usr/local/bin:/usr/local/sbin' >> /root/.bashrc
source ~/.bashrc
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true

[plugins."io.containerd.grpc.v1.cri"]
  ...
  sandbox_image = "registry.aliyuncs.com/k8sxio/pause:3.5"
  ...
  [plugins."io.containerd.grpc.v1.cri".registry]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
        endpoint = ["https://bqr1dr1n.mirror.aliyuncs.com"]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
        endpoint = ["https://registry.aliyuncs.com/k8sxio"]
systemctl daemon-reload
systemctl enable containerd --now
ctr version crictl version
|
下载kubeadm、kubelet
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum makecache fast -y
yum install -y kubelet-1.22.2 kubeadm-1.22.2 kubectl-1.22.2 --disableexcludes=kubernetes
kubeadm version
systemctl enable --now kubelet
|
初始化集群
1
| kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm.yaml
|
kubeadm.yaml
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.11
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master
  taints:
  - effect: "NoSchedule"
    key: "node-role.kubernetes.io/master"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/k8sxio
kind: ClusterConfiguration
kubernetesVersion: 1.22.2
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
cgroupDriver: systemd
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
|
下载镜像
1 2 3 4 5 6 7 8
| kubeadm config images pull --config kubeadm.yaml
ctr -n k8s.io i pull docker.io/coredns/coredns:1.8.4
ctr -n k8s.io i tag docker.io/coredns/coredns:1.8.4 registry.aliyuncs.com/k8sxio/coredns:v1.8.4
|
初始化集群
1 2 3 4 5 6 7 8 9
| kubeadm init --config kubeadm.yaml
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# 在node节点执行kubeadm init输出的join命令，地址为master的192.168.0.11
kubeadm join 192.168.0.11:6443 --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:ca0c87226c69309d7779096c15b6a41e14b077baf4650bfdb6f9d3178d4da645
|
查看是否初始化成功
安装flannel
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
| wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
vim kube-flannel.yml
......
containers:
- name: kube-flannel
  image: quay.io/coreos/flannel:v0.15.0
  command:
  - /opt/bin/flanneld
  args:
  - --ip-masq
  - --kube-subnet-mgr
  - --iface=eth0
......
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
|
Dashboard安装
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
vi recommended.yaml
......
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
......
kubectl apply -f recommended.yaml
kubectl get pods -n kubernetes-dashboard -o wide
|
更换cni网络
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
| mv /etc/cni/net.d/10-containerd-net.conflist /etc/cni/net.d/10-containerd-net.conflist.bak
ifconfig cni0 down && ip link delete cni0
systemctl daemon-reload
systemctl restart containerd kubelet
kubectl -n kube-system delete pod coredns-7568f67dbd-9wcv4
kubectl delete -f recommended.yaml kubectl apply -f recommended.yaml
|
进入Dashboard
1 2 3
| kubectl get svc -n kubernetes-dashboard
|
创建权限
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: admin
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kubernetes-dashboard
|
创建并生成token
1 2 3 4 5 6 7 8 9 10 11
| kubectl apply -f admin.yaml
kubectl get secret -n kubernetes-dashboard|grep admin-token
kubectl get secret {admin-token-xxx} -o jsonpath={.data.token} -n kubernetes-dashboard |base64 -d
|
kubectl命令补全
1 2 3
yum install -y bash-completion*
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
|
master污点
1 2 3 4 5
| kubectl taint node master node-role.kubernetes.io/master-
kubectl taint node master node-role.kubernetes.io/master="":NoSchedule
|
剔除节点并重新加入
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
|
kubectl drain node3 --delete-emptydir-data --ignore-daemonsets --force
kubectl delete nodes node3
kubeadm token create --print-join-command
kubeadm reset
kubeadm join xxx
|
kubectl远程
mac(zsh下)操作
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28
| brew install kubectl
echo 'source <(kubectl completion zsh)' >> ~/.zshrc
mkdir ~/.kube
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text ... DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:master1, IP Address:10.96.0.1, IP Address:192.168.0.21 Signature Algorithm: sha256WithRSAEncryption ...
cat ~/.kube/config ... server: https://master1:6443 ...
cat /etc/hosts 139.155.237.70 master1
这个时候就可以愉快的本地操作k8s集群了
|
brew安装
1 2 3 4 5 6 7 8 9 10 11 12 13 14
| https://zhuanlan.zhihu.com/p/111014448
/bin/zsh -c "$(curl -fsSL https://gitee.com/cunkai/HomebrewCN/raw/master/Homebrew.sh)"
/bin/zsh -c "$(curl -fsSL https://gitee.com/cunkai/HomebrewCN/raw/master/Homebrew.sh)" speed
/bin/zsh -c "$(curl -fsSL https://gitee.com/cunkai/HomebrewCN/raw/master/HomebrewUninstall.sh)"
https://gitee.com/cunkai/HomebrewCN/blob/master/error.md
|
文档整理:https://www.qikqiak.com/k3s/