From d6b1fd3ba34af5112aca7c81431458e84eb30dc6 Mon Sep 17 00:00:00 2001 From: "huanqing.shao" Date: Fri, 20 Sep 2019 09:09:32 +0800 Subject: [PATCH] v1.16.0 --- .../install-script/v1.16.0/init-master.sh | 33 ++ .../install-script/v1.16.0/install-kubelet.sh | 97 ++++ .../install-script/v1.16.0/nginx-ingress.yaml | 168 +++++++ install/history-k8s/install-k8s-1.15.4.md | 476 ++++++++++++++++++ install/install-k8s.md | 37 +- 5 files changed, 793 insertions(+), 18 deletions(-) create mode 100644 .vuepress/public/install-script/v1.16.0/init-master.sh create mode 100644 .vuepress/public/install-script/v1.16.0/install-kubelet.sh create mode 100644 .vuepress/public/install-script/v1.16.0/nginx-ingress.yaml create mode 100644 install/history-k8s/install-k8s-1.15.4.md diff --git a/.vuepress/public/install-script/v1.16.0/init-master.sh b/.vuepress/public/install-script/v1.16.0/init-master.sh new file mode 100644 index 0000000..f15a383 --- /dev/null +++ b/.vuepress/public/install-script/v1.16.0/init-master.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# 只在 master 节点执行 + +# 查看完整配置选项 https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 +rm -f ./kubeadm-config.yaml +cat <<EOF > ./kubeadm-config.yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: v1.16.0 +imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers +controlPlaneEndpoint: "${APISERVER_NAME}:6443" +networking: + serviceSubnet: "10.96.0.0/12" + podSubnet: "${POD_SUBNET}" + dnsDomain: "cluster.local" +EOF + +# kubeadm init +# 根据您服务器网速的情况,您需要等候 3 - 10 分钟 +kubeadm init --config=kubeadm-config.yaml --upload-certs + +# 配置 kubectl +rm -rf /root/.kube/ +mkdir /root/.kube/ +cp -i /etc/kubernetes/admin.conf /root/.kube/config + +# 安装 calico 网络插件 +# 参考文档 https://docs.projectcalico.org/v3.8/getting-started/kubernetes/ +rm -f calico.yaml +wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml +sed -i "s#192\.168\.0\.0/16#${POD_SUBNET}#" calico.yaml +kubectl apply -f 
calico.yaml diff --git a/.vuepress/public/install-script/v1.16.0/install-kubelet.sh b/.vuepress/public/install-script/v1.16.0/install-kubelet.sh new file mode 100644 index 0000000..f4d90e5 --- /dev/null +++ b/.vuepress/public/install-script/v1.16.0/install-kubelet.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +# 在 master 节点和 worker 节点都要执行 + +# 安装 docker +# 参考文档如下 +# https://docs.docker.com/install/linux/docker-ce/centos/ +# https://docs.docker.com/install/linux/linux-postinstall/ + +# 卸载旧版本 +yum remove -y docker \ +docker-client \ +docker-client-latest \ +docker-common \ +docker-latest \ +docker-latest-logrotate \ +docker-logrotate \ +docker-selinux \ +docker-engine-selinux \ +docker-engine + +# 设置 yum repository +yum install -y yum-utils \ +device-mapper-persistent-data \ +lvm2 +yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo + +# 安装并启动 docker +yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io +systemctl enable docker +systemctl start docker + +# 安装 nfs-utils +# 必须先安装 nfs-utils 才能挂载 nfs 网络存储 +yum install -y nfs-utils + +# 关闭 防火墙 +systemctl stop firewalld +systemctl disable firewalld + +# 关闭 SeLinux +setenforce 0 +sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config + +# 关闭 swap +swapoff -a +yes | cp /etc/fstab /etc/fstab_bak +cat /etc/fstab_bak |grep -v swap > /etc/fstab + +# 修改 /etc/sysctl.conf +# 如果有配置,则修改 +sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf +sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf +sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf +# 可能没有,追加 +echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf +echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf +echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf +# 执行命令以应用 +sysctl -p + +# 配置K8S的yum源 +cat <<EOF > /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes 
+baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 +gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg + http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg +EOF + +# 卸载旧版本 +yum remove -y kubelet kubeadm kubectl + +# 安装kubelet、kubeadm、kubectl +yum install -y kubelet-1.16.0 kubeadm-1.16.0 kubectl-1.16.0 + +# 修改docker Cgroup Driver为systemd +# # 将/usr/lib/systemd/system/docker.service文件中的这一行 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock +# # 修改为 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd +# 如果不修改,在添加 worker 节点时可能会碰到如下错误 +# [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". +# Please follow the guide at https://kubernetes.io/docs/setup/cri/ +sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service + +# 设置 docker 镜像,提高 docker 镜像下载速度和稳定性 +# 如果您访问 https://hub.docker.io 速度非常稳定,亦可以跳过这个步骤 +curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io + +# 重启 docker,并启动 kubelet +systemctl daemon-reload +systemctl restart docker +systemctl enable kubelet && systemctl start kubelet + +docker version diff --git a/.vuepress/public/install-script/v1.16.0/nginx-ingress.yaml b/.vuepress/public/install-script/v1.16.0/nginx-ingress.yaml new file mode 100644 index 0000000..0866dbd --- /dev/null +++ b/.vuepress/public/install-script/v1.16.0/nginx-ingress.yaml @@ -0,0 +1,168 @@ +# 如果打算用于生产环境,请参考 https://github.com/nginxinc/kubernetes-ingress/blob/v1.5.5/docs/installation.md 并根据您自己的情况做进一步定制 + +apiVersion: v1 +kind: Namespace +metadata: + name: nginx-ingress + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress + namespace: 
nginx-ingress + +--- +apiVersion: v1 +kind: Secret +metadata: + name: default-server-secret + namespace: nginx-ingress +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN2akNDQWFZQ0NRREFPRjl0THNhWFhEQU5CZ2txaGtpRzl3MEJBUXNGQURBaE1SOHdIUVlEVlFRRERCWk8KUjBsT1dFbHVaM0psYzNORGIyNTBjbTlzYkdWeU1CNFhEVEU0TURreE1qRTRNRE16TlZvWERUSXpNRGt4TVRFNApNRE16TlZvd0lURWZNQjBHQTFVRUF3d1dUa2RKVGxoSmJtZHlaWE56UTI5dWRISnZiR3hsY2pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvN2hIUEtFWGRMdjNyaUM3QlBrMTNpWkt5eTlyQ08KR2xZUXYyK2EzUDF0azIrS3YwVGF5aGRCbDRrcnNUcTZzZm8vWUk1Y2Vhbkw4WGM3U1pyQkVRYm9EN2REbWs1Qgo4eDZLS2xHWU5IWlg0Rm5UZ0VPaStlM2ptTFFxRlBSY1kzVnNPazFFeUZBL0JnWlJVbkNHZUtGeERSN0tQdGhyCmtqSXVuektURXUyaDU4Tlp0S21ScUJHdDEwcTNRYzhZT3ExM2FnbmovUWRjc0ZYYTJnMjB1K1lYZDdoZ3krZksKWk4vVUkxQUQ0YzZyM1lma1ZWUmVHd1lxQVp1WXN2V0RKbW1GNWRwdEMzN011cDBPRUxVTExSakZJOTZXNXIwSAo1TmdPc25NWFJNV1hYVlpiNWRxT3R0SmRtS3FhZ25TZ1JQQVpQN2MwQjFQU2FqYzZjNGZRVXpNQ0F3RUFBVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQWpLb2tRdGRPcEsrTzhibWVPc3lySmdJSXJycVFVY2ZOUitjb0hZVUoKdGhrYnhITFMzR3VBTWI5dm15VExPY2xxeC9aYzJPblEwMEJCLzlTb0swcitFZ1U2UlVrRWtWcitTTFA3NTdUWgozZWI4dmdPdEduMS9ienM3bzNBaS9kclkrcUI5Q2k1S3lPc3FHTG1US2xFaUtOYkcyR1ZyTWxjS0ZYQU80YTY3Cklnc1hzYktNbTQwV1U3cG9mcGltU1ZmaXFSdkV5YmN3N0NYODF6cFErUyt1eHRYK2VBZ3V0NHh3VlI5d2IyVXYKelhuZk9HbWhWNThDd1dIQnNKa0kxNXhaa2VUWXdSN0diaEFMSkZUUkk3dkhvQXprTWIzbjAxQjQyWjNrN3RXNQpJUDFmTlpIOFUvOWxiUHNoT21FRFZkdjF5ZytVRVJxbStGSis2R0oxeFJGcGZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdi91RWM4b1JkMHUvZXVJTHNFK1RYZUprckxMMnNJNGFWaEMvYjVyYy9XMlRiNHEvClJOcktGMEdYaVN1eE9ycXgrajlnamx4NXFjdnhkenRKbXNFUkJ1Z1B0ME9hVGtIekhvb3FVWmcwZGxmZ1dkT0EKUTZMNTdlT1l0Q29VOUZ4amRXdzZUVVRJVUQ4R0JsRlNjSVo0b1hFTkhzbysyR3VTTWk2Zk1wTVM3YUhudzFtMApxWkdvRWEzWFNyZEJ6eGc2clhkcUNlUDlCMXl3VmRyYURiUzc1aGQzdUdETDU4cGszOVFqVUFQaHpxdmRoK1JWClZGNGJCaW9CbTVpeTlZTW1hWVhsMm0wTGZzeTZuUTRRdFFzdEdNVWozcGJtdlFmazJBNnljeGRFeFpkZFZsdmwKMm82MjBsMllxcHFDZEtCRThCay90elFIVTlKcU56cHpoOUJUTXdJREFRQUJBb0lCQVFDZklHbXowOHhRVmorNwpLZnZJUXQwQ0YzR2MxNld6eDhVNml4MHg4Mm15d1kxUUNlL3BzWE9LZlRxT1h1SENyUlp5TnUvZ2IvUUQ4bUFOCmxOMjRZTWl0TWRJODg5TEZoTkp3QU5OODJDeTczckM5bzVvUDlkazAvYzRIbjAzSkVYNzZ5QjgzQm9rR1FvYksKMjhMNk0rdHUzUmFqNjd6Vmc2d2szaEhrU0pXSzBwV1YrSjdrUkRWYmhDYUZhNk5nMUZNRWxhTlozVDhhUUtyQgpDUDNDeEFTdjYxWTk5TEI4KzNXWVFIK3NYaTVGM01pYVNBZ1BkQUk3WEh1dXFET1lvMU5PL0JoSGt1aVg2QnRtCnorNTZud2pZMy8yUytSRmNBc3JMTnIwMDJZZi9oY0IraVlDNzVWYmcydVd6WTY3TWdOTGQ5VW9RU3BDRkYrVm4KM0cyUnhybnhBb0dCQU40U3M0ZVlPU2huMVpQQjdhTUZsY0k2RHR2S2ErTGZTTXFyY2pOZjJlSEpZNnhubmxKdgpGenpGL2RiVWVTbWxSekR0WkdlcXZXaHFISy9iTjIyeWJhOU1WMDlRQ0JFTk5jNmtWajJTVHpUWkJVbEx4QzYrCk93Z0wyZHhKendWelU0VC84ajdHalRUN05BZVpFS2FvRHFyRG5BYWkyaW5oZU1JVWZHRXFGKzJyQW9HQkFOMVAKK0tZL0lsS3RWRzRKSklQNzBjUis3RmpyeXJpY05iWCtQVzUvOXFHaWxnY2grZ3l4b25BWlBpd2NpeDN3QVpGdwpaZC96ZFB2aTBkWEppc1BSZjRMazg5b2pCUmpiRmRmc2l5UmJYbyt3TFU4NUhRU2NGMnN5aUFPaTVBRHdVU0FkCm45YWFweUNweEFkREtERHdObit3ZFhtaTZ0OHRpSFRkK3RoVDhkaVpBb0dCQUt6Wis1bG9OOTBtYlF4VVh5YUwKMjFSUm9tMGJjcndsTmVCaWNFSmlzaEhYa2xpSVVxZ3hSZklNM2hhUVRUcklKZENFaHFsV01aV0xPb2I2NTNyZgo3aFlMSXM1ZUtka3o0aFRVdnpldm9TMHVXcm9CV2xOVHlGanIrSWhKZnZUc0hpOGdsU3FkbXgySkJhZUFVWUNXCndNdlQ4NmNLclNyNkQrZG8wS05FZzFsL0FvR0FlMkFVdHVFbFNqLzBmRzgrV3hHc1RFV1JqclRNUzRSUjhRWXQKeXdjdFA4aDZxTGxKUTRCWGxQU05rMXZLTmtOUkxIb2pZT2pCQTViYjhibXNVU1BlV09NNENoaFJ4QnlHbmR2eAphYkJDRkFwY0IvbEg4d1R0alVZYlN5T294ZGt5OEp0ek90ajJhS0FiZHd6NlArWDZDODhjZmxYVFo5MWpYL3RMCjF3TmRKS2tDZ1lCbyt0UzB5TzJ2SWFmK2UwSkN5TGhzVDQ5cTN3
Zis2QWVqWGx2WDJ1VnRYejN5QTZnbXo5aCsKcDNlK2JMRUxwb3B0WFhNdUFRR0xhUkcrYlNNcjR5dERYbE5ZSndUeThXczNKY3dlSTdqZVp2b0ZpbmNvVlVIMwphdmxoTUVCRGYxSjltSDB5cDBwWUNaS2ROdHNvZEZtQktzVEtQMjJhTmtsVVhCS3gyZzR6cFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-config + namespace: nginx-ingress +data: + server-names-hash-bucket-size: "1024" + + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: nginx-ingress +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - update + - create +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - list + - watch + - get +- apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - k8s.nginx.org + resources: + - virtualservers + - virtualserverroutes + verbs: + - list + - watch + - get + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: nginx-ingress +subjects: +- kind: ServiceAccount + name: nginx-ingress + namespace: nginx-ingress +roleRef: + kind: ClusterRole + name: nginx-ingress + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: nginx-ingress + namespace: nginx-ingress + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9113" +spec: + selector: + matchLabels: + app: nginx-ingress + template: + metadata: + labels: + app: nginx-ingress + spec: + serviceAccountName: nginx-ingress + containers: + - image: nginx/nginx-ingress:1.5.5 + name: nginx-ingress + ports: + - name: http + containerPort: 80 + hostPort: 80 + - name: 
https + containerPort: 443 + hostPort: 443 + - name: prometheus + containerPort: 9113 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + args: + - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config + - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret + #- -v=3 # Enables extensive logging. Useful for troubleshooting. + #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election + - -enable-prometheus-metrics + #- -enable-custom-resources diff --git a/install/history-k8s/install-k8s-1.15.4.md b/install/history-k8s/install-k8s-1.15.4.md new file mode 100644 index 0000000..cceaff9 --- /dev/null +++ b/install/history-k8s/install-k8s-1.15.4.md @@ -0,0 +1,476 @@ +--- +# layout: StepLayout +description: Kubernetes 最新稳定版 v1.15.4 的快速安装文档。该文档由众多网友验证并在线提出修改意见、持续不断地更新和完善、并且通过 QQ 群提供免费在线答疑的服务。 +storyBook: + title: '使用 kubeadm 安装 kubernetes v1.15.4(单Master节点)' + initial: StoryBook + pages: + - name: introduction + title: 文档特点 + - name: overview + title: 配置要求 + - name: step1 + title: 检查环境 + - name: step2 + title: 安装 docker/kubelet + - name: step3 + title: 初始化 master 节点 + - name: step4 + title: 初始化 worker 节点 + - name: step5 + title: 安装 Ingress Controller + - name: step6 + title: 总结 +--- + +# 使用 kubeadm 安装 kubernetes v1.15.4 + + + + +
+ +## 文档特点 + +**网上那么多 Kubernetes 安装文档,为什么这篇文档更有参考价值?** + +* **众多网友验证** + * 每天有超过 200 人参照此文档完成 Kubernetes 安装 + * 不断有网友对安装文档提出改进意见 + +* **持续更新和完善** + * 始终有最新的 Kubernetes 稳定版安装文档,当前版本 v1.15.4 + * 当前已更新了 50 次 , [查看更新历史](https://github.com/eip-work/kuboard-press/commits/master/install/install-k8s.md) + +* **在线答疑** + + 也可以扫描二维码加群 +

+ +

+ + +
+
+ +## 配置要求 + +对于 Kubernetes 初学者,推荐在阿里云采购如下配置:(您也可以使用自己的虚拟机、私有云等您最容易获得的 Linux 环境) +[领取阿里云最高2000元红包](https://promotion.aliyun.com/ntms/yunparter/invite.html?userCode=obezo3pg) + +* 3台 **2核4G** 的ECS(突发性能实例 t5 ecs.t5-c1m2.large或同等配置,单台约 0.4元/小时,停机时不收费) +* **Cent OS 7.6** + +**安装后的软件版本为** + +* Kubernetes v1.15.4 + * calico 3.8.2 + * nginx-ingress 1.5.3 +* Docker 18.09.7 + +> 如果要安装 Kubernetes 历史版本,请参考: +> * [安装 Kubernetes v1.15.3 单Master节点](/install/history-k8s/install-k8s-1.15.3.html) +> * [安装 Kubernetes v1.15.2 单Master节点](/install/history-k8s/install-k8s-1.15.2.html) +> * [安装 Kubernetes v1.15.1 单Master节点](/install/history-k8s/install-k8s-1.15.1.html) + +安装后的拓扑图如下:下载拓扑图源文件 使用Axure RP 9.0可打开该文件 + +![image-20190826000521999](/images/topology/k8s.png) + + + + + +::: tip +**关于二进制安装** + +鉴于目前已经有比较方便的办法获得 kubernetes 镜像,我将回避 ***二进制*** 安装是否更好的争论。本文采用 kubernetes.io 官方推荐的 kubeadm 工具安装 kubernetes 集群。 + +::: + +
+
+ +## 检查 centos / hostname + +``` sh +# 在 master 节点和 worker 节点都要执行 +cat /etc/redhat-release + +# 此处 hostname 的输出将会是该机器在 Kubernetes 集群中的节点名字 +# 不能使用 localhost 作为节点的名字 +hostname + +# 请使用 lscpu 命令,核对 CPU 信息 +# Architecture: x86_64 本安装文档不支持 arm 架构 +# CPU(s): 2 CPU 内核数量不能低于 2 +lscpu +``` + +**操作系统兼容性** + +| CentOS 版本 | 本文档是否兼容 | 备注 | +| ----------- | --------------------------------------- | ----------------------------------- | +| 7.6 | 😄 | 已验证 | +| 7.5 | 😄 | 已验证 | +| 7.4 | 🤔 | 待验证 | +| 7.3 | 🤔 | 待验证 | +| 7.2 | 😞 | 已证实会出现 kubelet 无法启动的问题 | + +::: tip 修改 hostname +如果您需要修改 hostname,可执行如下指令: +``` sh +# 修改 hostname +hostnamectl set-hostname your-new-host-name +# 查看修改结果 +hostnamectl status +# 设置 hostname 解析 +echo "127.0.0.1 $(hostname)" >> /etc/hosts +``` +::: + +
+
+ + + +
  • 我的任意节点 centos 版本在兼容列表中
  • +
  • 我的任意节点 hostname 不是 localhost
  • +
  • 我的任意节点 CPU 内核数量大于等于 2
  • +
    +
    +
    +
    + +
    + +
    + +## 安装 docker / kubelet + +使用 root 身份在所有节点执行如下代码,以安装软件: +- docker +- nfs-utils +- kubectl / kubeadm / kubelet + +:::: tabs type:border-card + +::: tab 快速安装 lazy + +``` sh +# 在 master 节点和 worker 节点都要执行 + +curl -sSL https://kuboard.cn/install-script/v1.15.4/install-kubelet.sh | sh + +``` + +::: + +::: tab 手动安装 lazy + +手动执行以下代码,效果与快速安装完全相同。 + +<<< @/.vuepress/public/install-script/v1.15.4/install-kubelet.sh + +::: warning +如果此时执行 `service status kubelet` 命令,将得到 kubelet 启动失败的错误提示,请忽略此错误,因为必须完成后续步骤中 kubeadm init 的操作,kubelet 才能正常启动 +::: + +:::: + +
    + +
    + +## 初始化 master 节点 + +::: tip +* 以 root 身份在 demo-master-a-1 机器上执行 +* 初始化 master 节点时,如果因为中间某些步骤的配置出错,想要重新初始化 master 节点,请先执行 `kubeadm reset` 操作 +::: + +::: warning +* POD_SUBNET 所使用的网段不能与 ***master节点/worker节点*** 所在的网段重叠。该字段的取值为一个 CIDR 值,如果您对 CIDR 这个概念还不熟悉,请不要修改这个字段的取值 10.100.0.1/20 +::: + +:::: tabs type:border-card + +::: tab 快速初始化 lazy + +``` sh +# 只在 master 节点执行 +# 替换 x.x.x.x 为 master 节点实际 IP(请使用内网 IP) +# export 命令只在当前 shell 会话中有效,开启新的 shell 窗口后,如果要继续安装过程,请重新执行此处的 export 命令 +export MASTER_IP=x.x.x.x +# 替换 apiserver.demo 为 您想要的 dnsName (不建议使用 master 的 hostname 作为 APISERVER_NAME) +export APISERVER_NAME=apiserver.demo +# Kubernetes 容器组所在的网段,该网段安装完成后,由 kubernetes 创建,事先并不存在于您的物理网络中 +export POD_SUBNET=10.100.0.1/20 +echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts +curl -sSL https://kuboard.cn/install-script/v1.15.4/init-master.sh | sh +``` + +::: + +::: tab 手工初始化 lazy + +``` sh +# 只在 master 节点执行 +# 替换 x.x.x.x 为 master 节点实际 IP(请使用内网 IP) +# export 命令只在当前 shell 会话中有效,开启新的 shell 窗口后,如果要继续安装过程,请重新执行此处的 export 命令 +export MASTER_IP=x.x.x.x +# 替换 apiserver.demo 为 您想要的 dnsName (不建议使用 master 的 hostname 作为 APISERVER_NAME) +export APISERVER_NAME=apiserver.demo +# Kubernetes 容器组所在的网段,该网段安装完成后,由 kubernetes 创建,事先并不存在于您的物理网络中 +export POD_SUBNET=10.100.0.1/20 +echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts +``` + +<<< @/.vuepress/public/install-script/v1.15.4/init-master.sh + +::: + +:::: + + +**检查 master 初始化结果** + +``` sh +# 只在 master 节点执行 + +# 执行如下命令,等待 3-10 分钟,直到所有的容器组处于 Running 状态 +watch kubectl get pod -n kube-system -o wide + +# 查看 master 节点初始化结果 +kubectl get nodes -o wide +``` + +
    + +
    + +## 初始化 worker节点 + +### 获得 join命令参数 + +**在 master 节点上执行** + +``` sh +# 只在 master 节点执行 +kubeadm token create --print-join-command +``` + +可获取kubeadm join 命令及参数,如下所示 + +``` sh +# kubeadm token create 命令的输出 +kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303 +``` + + +### 初始化worker + +**针对所有的 worker 节点执行** + +``` sh +# 只在 worker 节点执行 +# 替换 ${MASTER_IP} 为 master 节点实际 IP +# 替换 ${APISERVER_NAME} 为初始化 master 节点时所使用的 APISERVER_NAME +echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts + +# 替换为 master 节点上 kubeadm token create 命令的输出 +kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303 +``` + +### 检查初始化结果 + +在 master 节点上执行 + +``` sh +# 只在 master 节点执行 +kubectl get nodes +``` +输出结果如下所示: +```sh +[root@demo-master-a-1 ~]# kubectl get nodes +NAME STATUS ROLES AGE VERSION +demo-master-a-1 Ready master 5m3s v1.15.4 +demo-worker-a-1 Ready 2m26s v1.15.4 +demo-worker-a-2 Ready 3m56s v1.15.4 +``` + + + +## 移除 worker 节点 + +::: warning +正常情况下,您无需移除 worker 节点,如果添加到集群出错,您可以移除 worker 节点,再重新尝试添加 +::: + +在准备移除的 worker 节点上执行 + +``` sh +# 只在 worker 节点执行 +kubeadm reset +``` + +在 master 节点 demo-master-a-1 上执行 + +``` sh +# 只在 master 节点执行 +kubectl delete node demo-worker-x-x +``` + +::: tip +* 将 demo-worker-x-x 替换为要移除的 worker 节点的名字 +* worker 节点的名字可以通过在节点 demo-master-a-1 上执行 kubectl get nodes 命令获得 +::: + +
    + +
    + +## 安装 Ingress Controller + +:::: tabs type:border-card + +::: tab 安装IngressController lazy + +**在 master 节点上执行** + +``` sh +# 只在 master 节点执行 +kubectl apply -f https://kuboard.cn/install-script/v1.15.4/nginx-ingress.yaml +``` + +::: + +::: tab 卸载IngressController lazy + +**在 master 节点上执行** + +只在您想选择其他 Ingress Controller 的情况下卸载 + +``` sh +# 只在 master 节点执行 +kubectl delete -f https://kuboard.cn/install-script/v1.15.4/nginx-ingress.yaml +``` + +::: + +::: tab YAML文件 lazy + +<<< @/.vuepress/public/install-script/v1.15.4/nginx-ingress.yaml + +::: + +:::: + + +**配置域名解析** + +将域名 *.demo.yourdomain.com 解析到 demo-worker-a-2 的 IP 地址 z.z.z.z (也可以是 demo-worker-a-1 的地址 y.y.y.y) + +**验证配置** + +在浏览器访问 a.demo.yourdomain.com,将得到 404 NotFound 错误页面 + +::: tip 提示 + +许多初学者在安装 Ingress Controller 时会碰到问题,请不要灰心,可暂时跳过 ***安装 Ingress Controller*** 这个部分,等您学完 www.kuboard.cn 上 [Kubernetes 入门](/learning/k8s-basics/kubernetes-basics.html) 以及 [通过互联网访问您的应用程序](/learning/k8s-intermediate/ingress.html) 这两部分内容后,再来回顾 Ingress Controller 的安装。 + +::: + +::: warning +如果您打算将 Kubernetes 用于生产环境,请参考此文档 [Installing Ingress Controller](https://github.com/nginxinc/kubernetes-ingress/blob/v1.5.3/docs/installation.md),完善 Ingress 的配置 +::: + + +
    + +
    + + +## 下一步 +:tada: :tada: :tada: + +您已经完成了 Kubernetes 集群的安装,下一步请: + +[安装 Kuboard](/install/install-dashboard.html) + +安装 Kuboard 之前先 + + 在线体验 Kuboard + + +::: tip +* Kubernetes 初学者,[点击这里获取 Kubernetes 学习路径](/overview/#kubernetes-%E5%88%9D%E5%AD%A6%E8%80%85) +::: + + +
    +
    diff --git a/install/install-k8s.md b/install/install-k8s.md index cceaff9..d6cca68 100644 --- a/install/install-k8s.md +++ b/install/install-k8s.md @@ -1,8 +1,8 @@ --- # layout: StepLayout -description: Kubernetes 最新稳定版 v1.15.4 的快速安装文档。该文档由众多网友验证并在线提出修改意见、持续不断地更新和完善、并且通过 QQ 群提供免费在线答疑的服务。 +description: Kubernetes 最新稳定版 v1.16.0 的快速安装文档。该文档由众多网友验证并在线提出修改意见、持续不断地更新和完善、并且通过 QQ 群提供免费在线答疑的服务。 storyBook: - title: '使用 kubeadm 安装 kubernetes v1.15.4(单Master节点)' + title: '使用 kubeadm 安装 kubernetes v1.16.0(单Master节点)' initial: StoryBook pages: - name: introduction @@ -23,7 +23,7 @@ storyBook: title: 总结 --- -# 使用 kubeadm 安装 kubernetes v1.15.4 +# 使用 kubeadm 安装 kubernetes v1.16.0