From bbc54e5b840beb47d5e621144b8d48ab56d0ede7 Mon Sep 17 00:00:00 2001 From: "huanqing.shao" Date: Thu, 12 Dec 2019 23:03:27 +0800 Subject: [PATCH] v1.17.x --- .../install-script/calico/calico-3.10.2.yaml | 786 ++++++++++++++++++ .../install-script/v1.17.x/init-master.sh | 34 + .../install-script/v1.17.x/init_master.sh | 45 + .../install-script/v1.17.x/install-kubelet.sh | 98 +++ .../install-script/v1.17.x/install_kubelet.sh | 98 +++ .../install-script/v1.17.x/nginx-ingress.yaml | 168 ++++ install/faq/logs-terminal.md | 3 +- install/history-k8s/install-k8s-1.16.3.md | 516 ++++++++++++ install/install-dashboard.md | 36 +- install/install-k8s.md | 53 +- .../k8s-intermediate/service/connecting.md | 2 +- .../micro-service/kuboard-view-of-k8s.md | 2 +- support/change-log/change-log-on-the-way.md | 3 + support/change-log/v1.0.x.md | 13 + 14 files changed, 1799 insertions(+), 58 deletions(-) create mode 100644 .vuepress/public/install-script/calico/calico-3.10.2.yaml create mode 100644 .vuepress/public/install-script/v1.17.x/init-master.sh create mode 100644 .vuepress/public/install-script/v1.17.x/init_master.sh create mode 100644 .vuepress/public/install-script/v1.17.x/install-kubelet.sh create mode 100644 .vuepress/public/install-script/v1.17.x/install_kubelet.sh create mode 100644 .vuepress/public/install-script/v1.17.x/nginx-ingress.yaml create mode 100644 install/history-k8s/install-k8s-1.16.3.md diff --git a/.vuepress/public/install-script/calico/calico-3.10.2.yaml b/.vuepress/public/install-script/calico/calico-3.10.2.yaml new file mode 100644 index 0000000..9f6d697 --- /dev/null +++ b/.vuepress/public/install-script/calico/calico-3.10.2.yaml @@ -0,0 +1,786 @@ +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use + veth_mtu: "1440" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. 
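+  # (The install-cni init container below substitutes __KUBERNETES_NODE_NAME__,
+  # __CNI_MTU__ and __KUBECONFIG_FILEPATH__ with real values when it writes
+  # this config to /etc/cni/net.d on each host.)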
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } + +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + scope: Cluster + 
group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. 
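+  # (Read-only access; the create/update rights Calico needs for a subset of
+  # these CRDs at startup are granted by a separate rule further below.)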
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only requried for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. 
+ # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: calico/cni:v3.10.2 + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: calico/cni:v3.10.2 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: calico/pod2daemon-flexvol:v3.10.2 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: calico/node:v3.10.2 + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. 
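+            # (While this is "false", Felix programs no IPv6 routes or rules;
+            # enabling IPv6 would additionally require an IPv6 IP pool, which
+            # is out of scope for this manifest.)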
+ - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +--- +# Source: calico/templates/calico-kube-controllers.yaml + +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: calico/kube-controllers:v3.10.2 + env: + # Choose which controllers to run. 
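+          # ("node" enables only the node controller, which watches for node
+          # deletions and cleans up the Calico IPAM data they leave behind;
+          # see the calico-kube-controllers ClusterRole earlier in this file.)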
+ - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-etcd-secrets.yaml + +--- +# Source: calico/templates/calico-typha.yaml + +--- +# Source: calico/templates/configure-canal.yaml + + diff --git a/.vuepress/public/install-script/v1.17.x/init-master.sh b/.vuepress/public/install-script/v1.17.x/init-master.sh new file mode 100644 index 0000000..e242ebe --- /dev/null +++ b/.vuepress/public/install-script/v1.17.x/init-master.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# 只在 master 节点执行 + +# 查看完整配置选项 https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 +rm -f ./kubeadm-config.yaml +cat < ./kubeadm-config.yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: v${1} +imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers +controlPlaneEndpoint: "${APISERVER_NAME}:6443" +networking: + serviceSubnet: "10.96.0.0/16" + podSubnet: "${POD_SUBNET}" + dnsDomain: "cluster.local" +EOF + +# kubeadm init +# 根据您服务器网速的情况,您需要等候 3 - 10 分钟 +kubeadm init --config=kubeadm-config.yaml --upload-certs + +# 配置 kubectl +rm -rf /root/.kube/ +mkdir /root/.kube/ +cp -i /etc/kubernetes/admin.conf /root/.kube/config + +# 安装 calico 网络插件 +# 参考文档 https://docs.projectcalico.org/v3.10/getting-started/kubernetes/ +echo "安装calico-3.10.2" +rm -f calico-3.10.2.yaml +wget https://kuboard.cn/install-script/calico/calico-3.10.2.yaml +sed -i "s#192\.168\.0\.0/16#${POD_SUBNET}#" calico-3.10.2.yaml +kubectl apply -f calico-3.10.2.yaml diff --git a/.vuepress/public/install-script/v1.17.x/init_master.sh b/.vuepress/public/install-script/v1.17.x/init_master.sh new file mode 100644 index 0000000..066e056 --- /dev/null +++ b/.vuepress/public/install-script/v1.17.x/init_master.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +# 只在 master 节点执行 + +# 脚本出错时终止执行 +set -e + +if [ ${#POD_SUBNET} -eq 0 ] || [ ${#APISERVER_NAME} -eq 0 ]; then + echo -e "\033[31;1m请确保您已经设置了环境变量 POD_SUBNET 和 APISERVER_NAME \033[0m" + echo 当前POD_SUBNET=$POD_SUBNET + echo 当前APISERVER_NAME=$APISERVER_NAME + exit 1 +fi + + +# 查看完整配置选项 https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 +rm -f ./kubeadm-config.yaml +cat < ./kubeadm-config.yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +kubernetesVersion: v${1} +imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers +controlPlaneEndpoint: "${APISERVER_NAME}:6443" +networking: + serviceSubnet: "10.96.0.0/16" + podSubnet: "${POD_SUBNET}" + dnsDomain: "cluster.local" +EOF + +# kubeadm init +# 根据您服务器网速的情况,您需要等候 3 - 10 分钟 +kubeadm init --config=kubeadm-config.yaml --upload-certs + +# 配置 kubectl +rm -rf /root/.kube/ +mkdir /root/.kube/ +cp -i /etc/kubernetes/admin.conf /root/.kube/config + +# 安装 calico 网络插件 +# 参考文档 https://docs.projectcalico.org/v3.10/getting-started/kubernetes/ +echo "安装calico-3.10.2" +rm -f calico-3.10.2.yaml +wget https://kuboard.cn/install-script/calico/calico-3.10.2.yaml +sed -i "s#192\.168\.0\.0/16#${POD_SUBNET}#" calico-3.10.2.yaml +kubectl apply -f calico-3.10.2.yaml diff --git a/.vuepress/public/install-script/v1.17.x/install-kubelet.sh b/.vuepress/public/install-script/v1.17.x/install-kubelet.sh new file mode 100644 index 0000000..9fd51a7 --- /dev/null +++ b/.vuepress/public/install-script/v1.17.x/install-kubelet.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# 在 
master 节点和 worker 节点都要执行 + +# 安装 docker +# 参考文档如下 +# https://docs.docker.com/install/linux/docker-ce/centos/ +# https://docs.docker.com/install/linux/linux-postinstall/ + +# 卸载旧版本 +yum remove -y docker \ +docker-client \ +docker-client-latest \ +docker-common \ +docker-latest \ +docker-latest-logrotate \ +docker-logrotate \ +docker-selinux \ +docker-engine-selinux \ +docker-engine + +# 设置 yum repository +yum install -y yum-utils \ +device-mapper-persistent-data \ +lvm2 +yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo + +# 安装并启动 docker +yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io +systemctl enable docker +systemctl start docker + +# 安装 nfs-utils +# 必须先安装 nfs-utils 才能挂载 nfs 网络存储 +yum install -y nfs-utils +yum install -y wget + +# 关闭 防火墙 +systemctl stop firewalld +systemctl disable firewalld + +# 关闭 SeLinux +setenforce 0 +sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config + +# 关闭 swap +swapoff -a +yes | cp /etc/fstab /etc/fstab_bak +cat /etc/fstab_bak |grep -v swap > /etc/fstab + +# 修改 /etc/sysctl.conf +# 如果有配置,则修改 +sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf +sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf +sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf +# 可能没有,追加 +echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf +echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf +echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf +# 执行命令以应用 +sysctl -p + +# 配置K8S的yum源 +cat < /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 +gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg + http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg +EOF + +# 卸载旧版本 +yum remove -y kubelet kubeadm kubectl + +# 安装kubelet、kubeadm、kubectl +yum install -y kubelet-${1} kubeadm-${1} kubectl-${1} + +# 修改docker Cgroup Driver为systemd +# # 将/usr/lib/systemd/system/docker.service文件中的这一行 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock +# # 修改为 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd +# 如果不修改,在添加 worker 节点时可能会碰到如下错误 +# [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
+# Please follow the guide at https://kubernetes.io/docs/setup/cri/ +sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service + +# 设置 docker 镜像,提高 docker 镜像下载速度和稳定性 +# 如果您访问 https://hub.docker.io 速度非常稳定,亦可以跳过这个步骤 +curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io + +# 重启 docker,并启动 kubelet +systemctl daemon-reload +systemctl restart docker +systemctl enable kubelet && systemctl start kubelet + +docker version diff --git a/.vuepress/public/install-script/v1.17.x/install_kubelet.sh b/.vuepress/public/install-script/v1.17.x/install_kubelet.sh new file mode 100644 index 0000000..9fd51a7 --- /dev/null +++ b/.vuepress/public/install-script/v1.17.x/install_kubelet.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# 在 master 节点和 worker 节点都要执行 + +# 安装 docker +# 参考文档如下 +# https://docs.docker.com/install/linux/docker-ce/centos/ +# https://docs.docker.com/install/linux/linux-postinstall/ + +# 卸载旧版本 +yum remove -y docker \ +docker-client \ +docker-client-latest \ +docker-common \ +docker-latest \ +docker-latest-logrotate \ +docker-logrotate \ +docker-selinux \ +docker-engine-selinux \ +docker-engine + +# 设置 yum repository +yum install -y yum-utils \ +device-mapper-persistent-data \ +lvm2 +yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo + +# 安装并启动 docker +yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io +systemctl enable docker +systemctl start docker + +# 安装 nfs-utils +# 必须先安装 nfs-utils 才能挂载 nfs 网络存储 +yum install -y nfs-utils +yum install -y wget + +# 关闭 防火墙 +systemctl stop firewalld +systemctl disable firewalld + +# 关闭 SeLinux +setenforce 0 +sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config + +# 关闭 swap +swapoff -a +yes | cp /etc/fstab /etc/fstab_bak +cat /etc/fstab_bak |grep -v swap > /etc/fstab + +# 修改 /etc/sysctl.conf +# 如果有配置,则修改 +sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf +sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf +sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf +# 可能没有,追加 +echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf +echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf +echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf +# 执行命令以应用 +sysctl -p + +# 配置K8S的yum源 +cat < /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 +gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg + http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg +EOF + +# 卸载旧版本 +yum remove -y kubelet kubeadm kubectl + +# 安装kubelet、kubeadm、kubectl +yum install -y kubelet-${1} kubeadm-${1} kubectl-${1} + +# 修改docker Cgroup Driver为systemd +# # 将/usr/lib/systemd/system/docker.service文件中的这一行 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock +# # 修改为 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd +# 如果不修改,在添加 worker 节点时可能会碰到如下错误 +# [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
+# Please follow the guide at https://kubernetes.io/docs/setup/cri/ +sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service + +# 设置 docker 镜像,提高 docker 镜像下载速度和稳定性 +# 如果您访问 https://hub.docker.io 速度非常稳定,亦可以跳过这个步骤 +curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io + +# 重启 docker,并启动 kubelet +systemctl daemon-reload +systemctl restart docker +systemctl enable kubelet && systemctl start kubelet + +docker version diff --git a/.vuepress/public/install-script/v1.17.x/nginx-ingress.yaml b/.vuepress/public/install-script/v1.17.x/nginx-ingress.yaml new file mode 100644 index 0000000..06d4d8e --- /dev/null +++ b/.vuepress/public/install-script/v1.17.x/nginx-ingress.yaml @@ -0,0 +1,168 @@ +# 如果打算用于生产环境,请参考 https://github.com/nginxinc/kubernetes-ingress/blob/v1.5.5/docs/installation.md 并根据您自己的情况做进一步定制 + +apiVersion: v1 +kind: Namespace +metadata: + name: nginx-ingress + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress + namespace: nginx-ingress + +--- +apiVersion: v1 +kind: Secret +metadata: + name: default-server-secret + namespace: nginx-ingress +type: Opaque +data: + tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN2akNDQWFZQ0NRREFPRjl0THNhWFhEQU5CZ2txaGtpRzl3MEJBUXNGQURBaE1SOHdIUVlEVlFRRERCWk8KUjBsT1dFbHVaM0psYzNORGIyNTBjbTlzYkdWeU1CNFhEVEU0TURreE1qRTRNRE16TlZvWERUSXpNRGt4TVRFNApNRE16TlZvd0lURWZNQjBHQTFVRUF3d1dUa2RKVGxoSmJtZHlaWE56UTI5dWRISnZiR3hsY2pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUwvN2hIUEtFWGRMdjNyaUM3QlBrMTNpWkt5eTlyQ08KR2xZUXYyK2EzUDF0azIrS3YwVGF5aGRCbDRrcnNUcTZzZm8vWUk1Y2Vhbkw4WGM3U1pyQkVRYm9EN2REbWs1Qgo4eDZLS2xHWU5IWlg0Rm5UZ0VPaStlM2ptTFFxRlBSY1kzVnNPazFFeUZBL0JnWlJVbkNHZUtGeERSN0tQdGhyCmtqSXVuektURXUyaDU4Tlp0S21ScUJHdDEwcTNRYzhZT3ExM2FnbmovUWRjc0ZYYTJnMjB1K1lYZDdoZ3krZksKWk4vVUkxQUQ0YzZyM1lma1ZWUmVHd1lxQVp1WXN2V0RKbW1GNWRwdEMzN011cDBPRUxVTExSakZJOTZXNXIwSAo1TmdPc25NWFJNV1hYVlpiNWRxT3R0SmRtS3FhZ25TZ1JQQVpQN2MwQjFQU2FqYzZjNGZRVXpNQ0F3RUFBVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQWpLb2tRdGRPcEsrTzhibWVPc3lySmdJSXJycVFVY2ZOUitjb0hZVUoKdGhrYnhITFMzR3VBTWI5dm15VExPY2xxeC9aYzJPblEwMEJCLzlTb0swcitFZ1U2UlVrRWtWcitTTFA3NTdUWgozZWI4dmdPdEduMS9ienM3bzNBaS9kclkrcUI5Q2k1S3lPc3FHTG1US2xFaUtOYkcyR1ZyTWxjS0ZYQU80YTY3Cklnc1hzYktNbTQwV1U3cG9mcGltU1ZmaXFSdkV5YmN3N0NYODF6cFErUyt1eHRYK2VBZ3V0NHh3VlI5d2IyVXYKelhuZk9HbWhWNThDd1dIQnNKa0kxNXhaa2VUWXdSN0diaEFMSkZUUkk3dkhvQXprTWIzbjAxQjQyWjNrN3RXNQpJUDFmTlpIOFUvOWxiUHNoT21FRFZkdjF5ZytVRVJxbStGSis2R0oxeFJGcGZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdi91RWM4b1JkMHUvZXVJTHNFK1RYZUprckxMMnNJNGFWaEMvYjVyYy9XMlRiNHEvClJOcktGMEdYaVN1eE9ycXgrajlnamx4NXFjdnhkenRKbXNFUkJ1Z1B0ME9hVGtIekhvb3FVWmcwZGxmZ1dkT0EKUTZMNTdlT1l0Q29VOUZ4amRXdzZUVVRJVUQ4R0JsRlNjSVo0b1hFTkhzbysyR3VTTWk2Zk1wTVM3YUhudzFtMApxWkdvRWEzWFNyZEJ6eGc2clhkcUNlUDlCMXl3VmRyYURiUzc1aGQzdUdETDU4cGszOVFqVUFQaHpxdmRoK1JWClZGNGJCaW9CbTVpeTlZTW1hWVhsMm0wTGZzeTZuUTRRdFFzdEdNVWozcGJtdlFmazJBNnljeGRFeFpkZFZsdmwKMm82MjBsMllxcHFDZEtCRThCay90elFIVTlKcU56cHpoOUJUTXdJREFRQUJBb0lCQVFDZklHbXowOHhRVmorNwpLZnZJUXQwQ0YzR2MxNld6eDhVNml4MHg4Mm15d1kxUUNlL3BzWE9LZlRxT1h1SENyUlp5TnUvZ2IvUUQ4bUFOCmxOMjRZTWl0TWRJODg5TEZoTkp3QU5OODJDeTczckM5bzVvUDlkazAvYzRIbjAzSkVYNzZ5QjgzQm9rR1FvYksKMjhMNk0rdHUzUmFqNjd6Vmc2d2szaEhrU0pXSzBwV1YrSjdrUkRWYmhDYUZhNk5nMUZNRWxhTlozVDhhUUtyQgpDUDNDeEFTdjYxWTk5TEI4KzNXWVFIK3NYaTVGM01pYVNBZ1BkQUk3WEh1dXFET1lvMU5PL0JoSGt1aVg2QnRtCnorNTZud2pZMy8yUytSRmNBc3JMTnIwMDJZZi9oY0IraVlDNzVWYmcydVd6WTY3TWdOTGQ5VW9RU3BDRkYrVm4KM0cyUnhybnhBb0dCQU40U3M0ZVlPU2huMVpQQjdhTUZsY0k2RHR2S2ErTGZTTXFyY2pOZjJlSEpZNnhubmxKdgpGenpGL2RiVWVTbWxSekR0WkdlcXZXaHFISy9iTjIyeWJhOU1WMDlRQ0JFTk5jNmtWajJTVHpUWkJVbEx4QzYrCk93Z0wyZHhKendWelU0VC84ajdHalRUN05BZVpFS2FvRHFyRG5BYWkyaW5oZU1JVWZHRXFGKzJyQW9HQkFOMVAKK0tZL0lsS3RWRzRKSklQNzBjUis3RmpyeXJpY05iWCtQVzUvOXFHaWxnY2grZ3l4b25BWlBpd2NpeDN3QVpGdwpaZC96ZFB2aTBkWEppc1BSZjRMazg5b2pCUmpiRmRmc2l5UmJYbyt3TFU4NUhRU2NGMnN5aUFPaTVBRHdVU0FkCm45YWFweUNweEFkREtERHdObit3ZFhtaTZ0OHRpSFRkK3RoVDhkaVpBb0dCQUt6Wis1bG9OOTBtYlF4VVh5YUwKMjFSUm9tMGJjcndsTmVCaWNFSmlzaEhYa2xpSVVxZ3hSZklNM2hhUVRUcklKZENFaHFsV01aV0xPb2I2NTNyZgo3aFlMSXM1ZUtka3o0aFRVdnpldm9TMHVXcm9CV2xOVHlGanIrSWhKZnZUc0hpOGdsU3FkbXgySkJhZUFVWUNXCndNdlQ4NmNLclNyNkQrZG8wS05FZzFsL0FvR0FlMkFVdHVFbFNqLzBmRzgrV3hHc1RFV1JqclRNUzRSUjhRWXQKeXdjdFA4aDZxTGxKUTRCWGxQU05rMXZLTmtOUkxIb2pZT2pCQTViYjhibXNVU1BlV09NNENoaFJ4QnlHbmR2eAphYkJDRkFwY0IvbEg4d1R0alVZYlN5T294ZGt5OEp0ek90ajJhS0FiZHd6NlArWDZDODhjZmxYVFo5MWpYL3RMCjF3TmRKS2tDZ1lCbyt0UzB5TzJ2SWFmK2UwSkN5TGhzVDQ5cTN3Zis2QWVqWGx2WDJ1VnRYejN5QTZnbXo5aCsKcDNlK2JMRUxwb3B0WFhNdUFRR0xhUkcrYlNNcjR5dERYbE5ZSndUeThXczNKY3dlSTdqZVp2b0ZpbmNvVlVIMwphdmxoTUVCRGYxSjltSDB5cDBwWUNaS2ROdHNvZEZtQktzVEtQMjJhTmtsVVhCS3gyZzR6cFE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: nginx-config + namespace: nginx-ingress +data: + server-names-hash-bucket-size: "1024" + + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: nginx-ingress +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - update + - create +- apiGroups: + - "" + resources: + - pods + verbs: + - list +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - list + - watch + - get +- apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update +- apiGroups: + - k8s.nginx.org + resources: + - virtualservers + - virtualserverroutes + verbs: + - list + - watch + - get + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: nginx-ingress +subjects: +- kind: ServiceAccount + name: nginx-ingress + namespace: nginx-ingress +roleRef: + kind: ClusterRole + name: nginx-ingress + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apps/v1 +kind: 
DaemonSet +metadata: + name: nginx-ingress + namespace: nginx-ingress + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9113" +spec: + selector: + matchLabels: + app: nginx-ingress + template: + metadata: + labels: + app: nginx-ingress + spec: + serviceAccountName: nginx-ingress + containers: + - image: nginx/nginx-ingress:1.5.5 + name: nginx-ingress + ports: + - name: http + containerPort: 80 + hostPort: 80 + - name: https + containerPort: 443 + hostPort: 443 + - name: prometheus + containerPort: 9113 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + args: + - -nginx-configmaps=$(POD_NAMESPACE)/nginx-config + - -default-server-tls-secret=$(POD_NAMESPACE)/default-server-secret + #- -v=3 # Enables extensive logging. Useful for troubleshooting. + #- -report-ingress-status + #- -external-service=nginx-ingress + #- -enable-leader-election + - -enable-prometheus-metrics + #- -enable-custom-resources diff --git a/install/faq/logs-terminal.md b/install/faq/logs-terminal.md index b88edba..470f19c 100644 --- a/install/faq/logs-terminal.md +++ b/install/faq/logs-terminal.md @@ -19,7 +19,8 @@ Kuboard 日志界面和终端界面都使用了 websocket 与服务器端通信 * 您当前使用的浏览器不支持 WebSocket,推荐使用最新版本的 chrome 浏览器,也可以尝试最新版本的 firefox 如果您还有问题,请尝试: -* 清空浏览器缓存,重新登录 Kuboard +* (如果刚完成 Kuboard 的升级)退出 Kuboard 登录,重新输入 token 登录 Kuboard +* 清空浏览器缓存 ## 第二步 diff --git a/install/history-k8s/install-k8s-1.16.3.md b/install/history-k8s/install-k8s-1.16.3.md new file mode 100644 index 0000000..99436f6 --- /dev/null +++ b/install/history-k8s/install-k8s-1.16.3.md @@ -0,0 +1,516 @@ +--- +vssueId: 15 +# layout: StepLayout +sharingTitle: K8S入门第一步---安装,装不好还有人免费远程协助,更有K8S免费教程提供,你还在等什么? +description: Kubernete安装文档_Kubernetes最新稳定版v1.16.3的快速安装文档_该文档由众多网友验证并在线提出修改意见_持续不断地更新和完善_并且通过QQ群提供免费在线答疑的服务 +meta: + - name: keywords + content: Kubernetes安装,K8S安装,kubeadm,Kubernetes 安装,K8S 安装,k8s搭建 +--- + +# 使用kubeadm安装kubernetes_v1.16.3 + + + +## 文档特点 + +
+ +
+ +## 配置要求 + +对于 Kubernetes 初学者,在搭建K8S集群时,推荐在阿里云或腾讯云采购如下配置:(您也可以使用自己的虚拟机、私有云等您最容易获得的 Linux 环境) + +* 至少2台 **2核4G** 的服务器 +* **Cent OS 7.6** + + + +[腾讯云11.11爆款1核2G云服务器首购88元,免费领9888元代金券,百款云产品一折起](https://cloud.tencent.com/act/cps/redirect?redirect=1050&cps_key=2ee6baa049659f4713ddc55a51314372&from=console) + + + +[阿里云双十一,All in Cloud,低至一折](https://www.aliyun.com/1111/2019/home?userCode=obezo3pg) + + + +::: danger 警告 +* 因为双十一,许多网友使用腾讯云、阿里云不同的账号各买了一台优惠价格的机器。目前我没有找到方法将不同局域网内的机器通过公网连接组成K8S集群,请谨慎。 +* 建议的做法是:腾讯云(或阿里云)采购一台优惠价格的包年实例作为 Master,同时再采购一台竞价实例(腾讯云)或抢占式实例(阿里云)作为 Worker 节点。竞价实例(抢占式实例)按分钟付费,以阿里云为例,一台2核8G的机器一天下来的费用大概是 1.2 - 1.5元。 +* 再次强调:按照本文档进行安装时,所有节点必须在同一个局域网内 +::: + +**安装后的软件版本为** + +* Kubernetes v1.16.3 + * calico 3.9 + * nginx-ingress 1.5.5 +* Docker 18.09.7 + +> 如果要安装 Kubernetes 历史版本,请参考: +> * [安装 Kubernetes v1.16.2 单Master节点](/install/history-k8s/install-k8s-1.16.2.html) +> * [安装 Kubernetes v1.16.1 单Master节点](/install/history-k8s/install-k8s-1.16.1.html) +> * [安装 Kubernetes v1.16.0 单Master节点](/install/history-k8s/install-k8s-1.16.0.html) +> * [安装 Kubernetes v1.15.4 单Master节点](/install/history-k8s/install-k8s-1.15.4.html) +> * [安装 Kubernetes v1.15.3 单Master节点](/install/history-k8s/install-k8s-1.15.3.html) +> * [安装 Kubernetes v1.15.2 单Master节点](/install/history-k8s/install-k8s-1.15.2.html) +> * [安装 Kubernetes v1.15.1 单Master节点](/install/history-k8s/install-k8s-1.15.1.html) + + + +安装后的拓扑图如下:下载拓扑图源文件 使用Axure RP 9.0可打开该文件 + +强烈建议初学者先按照此文档完成安装,在对 K8S 有更多理解后,再参考文档 [安装Kubernetes高可用](./install-kubernetes.html) + +

+(图:Kubernetes安装拓扑图)

+ +::: tip 关于二进制安装 + +kubeadm 是 Kubernetes 官方支持的安装方式,“二进制” 不是。本文档采用 kubernetes.io 官方推荐的 kubeadm 工具安装 kubernetes 集群。 + +::: + + + +## 检查 centos / hostname + +``` sh +# 在 master 节点和 worker 节点都要执行 +cat /etc/redhat-release + +# 此处 hostname 的输出将会是该机器在 Kubernetes 集群中的节点名字 +# 不能使用 localhost 作为节点的名字 +hostname + +# 请使用 lscpu 命令,核对 CPU 信息 +# Architecture: x86_64 本安装文档不支持 arm 架构 +# CPU(s): 2 CPU 内核数量不能低于 2 +lscpu +``` + +**操作系统兼容性** + + + + +
+ +| CentOS 版本 | 本文档是否兼容 | 备注 | +| ----------- | --------------------------------------- | ----------------------------------- | +| 7.7 | 😄 | 已验证 | +| 7.6 | 😄 | 已验证 | +| 7.5 | 😞 | 已证实会出现 kubelet 无法启动的问题 | +| 7.4 | 😞 | 同上 | +| 7.3 | 😞 | 同上 | +| 7.2 | 😞 | 同上 | + +
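+
+下面给出一个检查脚本的示例(仅为示意,依据上表以及前文 lscpu 检查的要求整理),可在安装前快速核对操作系统版本、CPU 核数与 CPU 架构:
+
+``` sh
+# 示例:安装前环境核对(仅供参考)
+# 上表中仅 CentOS 7.6 / 7.7 已验证通过
+if ! grep -qE "release 7\.[67]" /etc/redhat-release; then
+  echo "警告:本文档仅在 CentOS 7.6 / 7.7 上完成验证,其他版本可能出现 kubelet 无法启动的问题"
+fi
+# CPU 内核数量不能低于 2
+if [ "$(nproc)" -lt 2 ]; then
+  echo "错误:CPU 内核数量不能低于 2"
+fi
+# 本安装文档不支持 arm 架构
+if [ "$(uname -m)" != "x86_64" ]; then
+  echo "错误:本安装文档不支持 $(uname -m) 架构"
+fi
+```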
+
+ + + +
+ +::: tip 修改 hostname +如果您需要修改 hostname,可执行如下指令: +``` sh +# 修改 hostname +hostnamectl set-hostname your-new-host-name +# 查看修改结果 +hostnamectl status +# 设置 hostname 解析 +echo "127.0.0.1 $(hostname)" >> /etc/hosts +``` +::: + +## 检查网络 + +在所有节点执行命令 +``` {2,11,13} +[root@demo-master-a-1 ~]$ ip route show +default via 172.21.0.1 dev eth0 +169.254.0.0/16 dev eth0 scope link metric 1002 +172.21.0.0/20 dev eth0 proto kernel scope link src 172.21.0.12 + +[root@demo-master-a-1 ~]$ ip address +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever +2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 + link/ether 00:16:3e:12:a4:1b brd ff:ff:ff:ff:ff:ff + inet 172.17.216.80/20 brd 172.17.223.255 scope global dynamic eth0 + valid_lft 305741654sec preferred_lft 305741654sec +``` +::: tip kubelet使用的IP地址 +* `ip route show` 命令中,可以知道机器的默认网卡,通常是 `eth0`,如 ***default via 172.21.0.23 dev eth0*** +* `ip address` 命令中,可显示默认网卡的 IP 地址,Kubernetes 将使用此 IP 地址与集群内的其他节点通信,如 `172.17.216.80` +* 所有节点上 Kubernetes 所使用的 IP 地址必须可以互通(无需 NAT 映射、无安全组或防火墙隔离) +::: + + +## 安装docker及kubelet + + + + + +使用 root 身份在所有节点执行如下代码,以安装软件: +- docker +- nfs-utils +- kubectl / kubeadm / kubelet + + + + + + + +``` sh +# 在 master 节点和 worker 节点都要执行 + +curl -sSL https://kuboard.cn/install-script/v1.16.3/install_kubelet.sh | sh + +``` + + + + +手动执行以下代码,效果与快速安装完全相同。 + +<<< @/.vuepress/public/install-script/v1.16.3/install_kubelet.sh + +::: warning +如果此时执行 `service status kubelet` 命令,将得到 kubelet 启动失败的错误提示,请忽略此错误,因为必须完成后续步骤中 kubeadm init 的操作,kubelet 才能正常启动 +::: + + + + + + + + + + + +## 初始化 master 节点 + +::: danger 关于初始化时用到的环境变量 +* **APISERVER_NAME** 不能是 master 的 hostname +* **APISERVER_NAME** 必须全为小写字母、数字、小数点,不能包含减号 +* **POD_SUBNET** 所使用的网段不能与 ***master节点/worker节点*** 所在的网段重叠。该字段的取值为一个 CIDR 值,如果您对 CIDR 这个概念还不熟悉,请仍然执行 export POD_SUBNET=10.100.0.1/16 命令,不做修改 +::: + + + + + +``` sh +# 只在 master 节点执行 +# 替换 x.x.x.x 为 master 节点实际 IP(请使用内网 IP) +# export 命令只在当前 shell 会话中有效,开启新的 shell 窗口后,如果要继续安装过程,请重新执行此处的 export 命令 +export MASTER_IP=x.x.x.x +# 替换 apiserver.demo 为 您想要的 dnsName +export APISERVER_NAME=apiserver.demo +# Kubernetes 容器组所在的网段,该网段安装完成后,由 kubernetes 创建,事先并不存在于您的物理网络中 +export POD_SUBNET=10.100.0.1/16 +echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts +curl -sSL https://kuboard.cn/install-script/v1.16.3/init_master.sh | sh +``` + + + + +``` sh +# 只在 master 节点执行 +# 替换 x.x.x.x 为 master 节点的内网IP +# export 命令只在当前 shell 会话中有效,开启新的 shell 窗口后,如果要继续安装过程,请重新执行此处的 export 命令 +export MASTER_IP=x.x.x.x +# 替换 apiserver.demo 为 您想要的 dnsName +export APISERVER_NAME=apiserver.demo +# Kubernetes 容器组所在的网段,该网段安装完成后,由 kubernetes 创建,事先并不存在于您的物理网络中 +export POD_SUBNET=10.100.0.1/16 +echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts +``` + +<<< @/.vuepress/public/install-script/v1.16.3/init_master.sh {22} + + + + + +如果出错点这里 + + + +* 请确保您的环境符合 [安装docker及kubelet](#安装docker及kubelet) 中所有勾选框的要求 +* 请确保您使用 root 用户执行初始化命令 +* 不能下载 kubernetes 的 docker 镜像 + * 安装文档中,默认使用阿里云的 docker 镜像仓库,然而,有时候,该镜像会罢工 + * 如碰到不能下载 docker 镜像的情况,请尝试手工初始化,并修改手工初始化脚本里的第22行(文档中已高亮)为: + ```yaml + imageRepository: gcr.azk8s.cn/google-containers + ``` +* 检查环境变量,执行如下命令 + ``` sh + echo MASTER_IP=${MASTER_IP} && echo APISERVER_NAME=${APISERVER_NAME} && echo POD_SUBNET=${POD_SUBNET} + ``` + 请验证如下几点: + * 环境变量 ***MASTER_IP*** 的值应该为 master 节点的 **内网IP**,如果不是,请重新 export + * **APISERVER_NAME** 不能是 master 的 hostname + * **APISERVER_NAME** 必须全为小写字母、数字、小数点,不能包含减号 + * 
**POD_SUBNET** 所使用的网段不能与 ***master节点/worker节点*** 所在的网段重叠。该字段的取值为一个 CIDR 值,如果您对 CIDR 这个概念还不熟悉,请仍然执行 export POD_SUBNET=10.100.0.1/16 命令,不做修改 +* 重新初始化 master 节点前,请先执行 `kubeadm reset -f` 操作 + + + + +**检查 master 初始化结果** + +``` sh +# 只在 master 节点执行 + +# 执行如下命令,等待 3-10 分钟,直到所有的容器组处于 Running 状态 +watch kubectl get pod -n kube-system -o wide + +# 查看 master 节点初始化结果 +kubectl get nodes -o wide +``` + + + +## 初始化 worker节点 + +### 获得 join命令参数 + +**在 master 节点上执行** + +``` sh +# 只在 master 节点执行 +kubeadm token create --print-join-command +``` + +可获取kubeadm join 命令及参数,如下所示 + +``` sh +# kubeadm token create 命令的输出 +kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303 +``` + +::: tip 有效时间 +该 token 的有效时间为 2 个小时,2小时内,您可以使用此 token 初始化任意数量的 worker 节点。 +::: + + +### 初始化worker + +**针对所有的 worker 节点执行** + +``` sh +# 只在 worker 节点执行 +# 替换 x.x.x.x 为 master 节点的内网 IP +export MASTER_IP=x.x.x.x +# 替换 apiserver.demo 为初始化 master 节点时所使用的 APISERVER_NAME +export APISERVER_NAME=apiserver.demo +echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts + +# 替换为 master 节点上 kubeadm token create 命令的输出 +kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303 +``` + +如果出错点这里 + + + +### 常见错误原因 + +经常在群里提问为什么 join 不成功的情况大致有这几种: + +#### worker 节点不能访问 apiserver + + 在worker节点执行以下语句可验证worker节点是否能访问 apiserver + ``` sh + curl -ik https://apiserver.demo:6443 + ``` + 如果不能,请在 master 节点上验证 + ``` sh + curl -ik https://localhost:6443 + ``` + 正常输出结果如下所示: + ``` {1} + HTTP/1.1 403 Forbidden + Cache-Control: no-cache, private + Content-Type: application/json + X-Content-Type-Options: nosniff + Date: Fri, 15 Nov 2019 04:34:40 GMT + Content-Length: 233 + + { + "kind": "Status", + "apiVersion": "v1", + "metadata": { + ... + ``` + ::: tip 可能原因 + * 如果 master 节点能够访问 apiserver、而 worker 节点不能,则请检查自己的网络设置 + * /etc/hosts 是否正确设置? + * 是否有安全组或防火墙的限制? 
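+  * 下面的示例命令可在 worker 节点快速核对域名解析(示意,假设 APISERVER_NAME 为 apiserver.demo,请替换为您实际使用的域名):
+    ``` sh
+    # 核对 /etc/hosts 中 apiserver 域名的解析结果
+    getent hosts apiserver.demo
+    # 若无任何输出,说明 /etc/hosts 未正确设置
+    # 请重新执行 echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts
+    ```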
+ ::: + +#### worker 节点默认网卡 + + * [Kubelet使用的 IP 地址](#检查网络) 与 master 节点可互通(无需 NAT 映射),且没有防火墙、安全组隔离 + * 如果你使用 vmware 或 virtualbox 创建虚拟机用于 K8S 学习,可以尝试 NAT 模式的网络,而不是桥接模式的网络 + +### 移除worker节点并重试 + +::: warning +正常情况下,您无需移除 worker 节点,如果添加到集群出错,您可以移除 worker 节点,再重新尝试添加 +::: + +在准备移除的 worker 节点上执行 + +``` sh +# 只在 worker 节点执行 +kubeadm reset -f +``` + +在 master 节点 demo-master-a-1 上执行 + +```sh +# 只在 master 节点执行 +kubectl get nodes -o wide +``` +如果列表中没有您要移除的节点,则忽略下一个步骤 + +``` sh +# 只在 master 节点执行 +kubectl delete node demo-worker-x-x +``` + +::: tip +* 将 demo-worker-x-x 替换为要移除的 worker 节点的名字 +* worker 节点的名字可以通过在节点 demo-master-a-1 上执行 kubectl get nodes 命令获得 +::: + + + + +### 检查初始化结果 + +在 master 节点上执行 + +``` sh +# 只在 master 节点执行 +kubectl get nodes -o wide +``` +输出结果如下所示: +```sh +[root@demo-master-a-1 ~]# kubectl get nodes +NAME STATUS ROLES AGE VERSION +demo-master-a-1 Ready master 5m3s v1.16.3 +demo-worker-a-1 Ready 2m26s v1.16.3 +demo-worker-a-2 Ready 3m56s v1.16.3 +``` + + + + +## 安装 Ingress Controller + + + + + +**在 master 节点上执行** + +``` sh +# 只在 master 节点执行 +kubectl apply -f https://kuboard.cn/install-script/v1.16.3/nginx-ingress.yaml +``` + + + + + +**在 master 节点上执行** + +只在您想选择其他 Ingress Controller 的情况下卸载 + +``` sh +# 只在 master 节点执行 +kubectl delete -f https://kuboard.cn/install-script/v1.16.3/nginx-ingress.yaml +``` + + + + +<<< @/.vuepress/public/install-script/v1.16.3/nginx-ingress.yaml + + + + + + +**配置域名解析** + +将域名 *.demo.yourdomain.com 解析到 demo-worker-a-2 的 IP 地址 z.z.z.z (也可以是 demo-worker-a-1 的地址 y.y.y.y) + +**验证配置** + +在浏览器访问 a.demo.yourdomain.com,将得到 404 NotFound 错误页面 + +::: tip 提示 + +许多初学者在安装 Ingress Controller 时会碰到问题,请不要灰心,可暂时跳过 ***安装 Ingress Controller*** 这个部分,等您学完 www.kuboard.cn 上 [Kubernetes 入门](/learning/k8s-basics/kubernetes-basics.html) 以及 [通过互联网访问您的应用程序](/learning/k8s-intermediate/service/ingress.html) 这两部分内容后,再来回顾 Ingress Controller 的安装。 + +::: + +::: warning +如果您打算将 Kubernetes 用于生产环境,请参考此文档 [Installing Ingress Controller](https://github.com/nginxinc/kubernetes-ingress/blob/v1.5.3/docs/installation.md),完善 Ingress 的配置 +::: + + + + + +## 下一步 + +如果您使用自己笔记本上的虚拟机安装的集群,将来打算重启虚拟机,请参考 [重启Kubernetes集群](./k8s-restart.html) + +:tada: :tada: :tada: + +您已经完成了 Kubernetes 集群的安装,下一步请: + +点击此处,给个GitHub Star +支持一下吧,这么多人都 star 了呢,怎么能少得了您呢? 
+ +[安装 Kuboard - 微服务管理界面](/install/install-dashboard.html) + +[获取 Kubernetes 免费教程](/learning/) + + + diff --git a/install/install-dashboard.md b/install/install-dashboard.md index 9b7cd6b..6747754 100644 --- a/install/install-dashboard.md +++ b/install/install-dashboard.md @@ -96,26 +96,14 @@ kubectl delete -f https://kuboard.cn/install-script/kuboard.yaml ```bash # 如果您参考 www.kuboard.cn 提供的文档安装 Kuberenetes,可在第一个 Master 节点上执行此命令 -kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') +kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d ``` **输出** 取输出信息中 token 字段 -```{13} -Name: admin-user-token-g8hxb -Namespace: kube-system -Labels: -Annotations: [kubernetes.io/service-account.name](http://kubernetes.io/service-account.name): Kuboard-user -[kubernetes.io/service-account.uid](http://kubernetes.io/service-account.uid): 948bb5e6-8cdc-11e9-b67e-fa163e5f7a0f - -Type: [kubernetes.io/service-account-token](http://kubernetes.io/service-account-token) - -Data -==== -ca.crt: 1025 bytes -namespace: 11 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWc4aHhiIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5NDhiYjVlNi04Y2RjLTExZTktYjY3ZS1mYTE2M2U1ZjdhMGYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.DZ6dMTr8GExo5IH_vCWdB_MDfQaNognjfZKl0E5VW8vUFMVvALwo0BS-6Qsqpfxrlz87oE9yGVCpBYV0D00811bLhHIg-IR_MiBneadcqdQ_TGm_a0Pz0RbIzqJlRPiyMSxk1eXhmayfPn01upPdVCQj6D3vAY77dpcGplu3p5wE6vsNWAvrQ2d_V1KhR03IB1jJZkYwrI8FHCq_5YuzkPfHsgZ9MBQgH-jqqNXs6r8aoUZIbLsYcMHkin2vzRsMy_tjMCI9yXGiOqI-E5efTb-_KbDVwV5cbdqEIegdtYZ2J3mlrFQlmPGYTwFI8Ba9LleSYbCi4o0k74568KcN_w +```{1} +eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWc4aHhiIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5NDhiYjVlNi04Y2RjLTExZTktYjY3ZS1mYTE2M2U1ZjdhMGYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.DZ6dMTr8GExo5IH_vCWdB_MDfQaNognjfZKl0E5VW8vUFMVvALwo0BS-6Qsqpfxrlz87oE9yGVCpBYV0D00811bLhHIg-IR_MiBneadcqdQ_TGm_a0Pz0RbIzqJlRPiyMSxk1eXhmayfPn01upPdVCQj6D3vAY77dpcGplu3p5wE6vsNWAvrQ2d_V1KhR03IB1jJZkYwrI8FHCq_5YuzkPfHsgZ9MBQgH-jqqNXs6r8aoUZIbLsYcMHkin2vzRsMy_tjMCI9yXGiOqI-E5efTb-_KbDVwV5cbdqEIegdtYZ2J3mlrFQlmPGYTwFI8Ba9LleSYbCi4o0k74568KcN_w ``` @@ -137,26 +125,14 @@ token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2Nv ```bash # 如果您参考 www.kuboard.cn 提供的文档安装 Kuberenetes,可在第一个 Master 节点上执行此命令 -kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kuboard-viewer | awk '{print $1}') +kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-viewer | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d ``` **输出** 取输出信息中 token 字段 -``` {13} -Name: admin-user-token-g8hxb -Namespace: kube-system -Labels: -Annotations: 
[kubernetes.io/service-account.name](http://kubernetes.io/service-account.name): Kuboard-viewer -[kubernetes.io/service-account.uid](http://kubernetes.io/service-account.uid): 948bb5e6-8cdc-11e9-b67e-fa163e5f7a0f - -Type: [kubernetes.io/service-account-token](http://kubernetes.io/service-account-token) - -Data -==== -ca.crt: 1025 bytes -namespace: 11 bytes -token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWc4aHhiIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5NDhiYjVlNi04Y2RjLTExZTktYjY3ZS1mYTE2M2U1ZjdhMGYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.DZ6dMTr8GExo5IH_vCWdB_MDfQaNognjfZKl0E5VW8vUFMVvALwo0BS-6Qsqpfxrlz87oE9yGVCpBYV0D00811bLhHIg-IR_MiBneadcqdQ_TGm_a0Pz0RbIzqJlRPiyMSxk1eXhmayfPn01upPdVCQj6D3vAY77dpcGplu3p5wE6vsNWAvrQ2d_V1KhR03IB1jJZkYwrI8FHCq_5YuzkPfHsgZ9MBQgH-jqqNXs6r8aoUZIbLsYcMHkin2vzRsMy_tjMCI9yXGiOqI-E5efTb-_KbDVwV5cbdqEIegdtYZ2J3mlrFQlmPGYTwFI8Ba9LleSYbCi4o0k74568KcN_w +``` {1} +eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWc4aHhiIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5NDhiYjVlNi04Y2RjLTExZTktYjY3ZS1mYTE2M2U1ZjdhMGYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.DZ6dMTr8GExo5IH_vCWdB_MDfQaNognjfZKl0E5VW8vUFMVvALwo0BS-6Qsqpfxrlz87oE9yGVCpBYV0D00811bLhHIg-IR_MiBneadcqdQ_TGm_a0Pz0RbIzqJlRPiyMSxk1eXhmayfPn01upPdVCQj6D3vAY77dpcGplu3p5wE6vsNWAvrQ2d_V1KhR03IB1jJZkYwrI8FHCq_5YuzkPfHsgZ9MBQgH-jqqNXs6r8aoUZIbLsYcMHkin2vzRsMy_tjMCI9yXGiOqI-E5efTb-_KbDVwV5cbdqEIegdtYZ2J3mlrFQlmPGYTwFI8Ba9LleSYbCi4o0k74568KcN_w ``` diff --git a/install/install-k8s.md b/install/install-k8s.md index 99436f6..873f3bd 100644 --- a/install/install-k8s.md +++ b/install/install-k8s.md @@ -2,20 +2,20 @@ vssueId: 15 # layout: StepLayout sharingTitle: K8S入门第一步---安装,装不好还有人免费远程协助,更有K8S免费教程提供,你还在等什么? -description: Kubernete安装文档_Kubernetes最新稳定版v1.16.3的快速安装文档_该文档由众多网友验证并在线提出修改意见_持续不断地更新和完善_并且通过QQ群提供免费在线答疑的服务 +description: Kubernete安装文档_Kubernetes最新稳定版v1.17.x的快速安装文档_该文档由众多网友验证并在线提出修改意见_持续不断地更新和完善_并且通过QQ群提供免费在线答疑的服务 meta: - name: keywords content: Kubernetes安装,K8S安装,kubeadm,Kubernetes 安装,K8S 安装,k8s搭建 --- -# 使用kubeadm安装kubernetes_v1.16.3 +# 使用kubeadm安装kubernetes_v1.17.x ## 文档特点
- +
## 配置要求 @@ -30,28 +30,23 @@ meta:
--> -[腾讯云11.11爆款1核2G云服务器首购88元,免费领9888元代金券,百款云产品一折起](https://cloud.tencent.com/act/cps/redirect?redirect=1050&cps_key=2ee6baa049659f4713ddc55a51314372&from=console) +[腾讯云,热门云产品限量特惠秒杀,云服务器1核2G,99元/1年](https://cloud.tencent.com/act/cps/redirect?redirect=1052&cps_key=2ee6baa049659f4713ddc55a51314372&from=console) -[阿里云双十一,All in Cloud,低至一折](https://www.aliyun.com/1111/2019/home?userCode=obezo3pg) +[阿里云,双十二主会场,低至一折](https://www.aliyun.com/1212/2019/home?userCode=obezo3pg) -::: danger 警告 -* 因为双十一,许多网友使用腾讯云、阿里云不同的账号各买了一台优惠价格的机器。目前我没有找到方法将不同局域网内的机器通过公网连接组成K8S集群,请谨慎。 -* 建议的做法是:腾讯云(或阿里云)采购一台优惠价格的包年实例作为 Master,同时再采购一台竞价实例(腾讯云)或抢占式实例(阿里云)作为 Worker 节点。竞价实例(抢占式实例)按分钟付费,以阿里云为例,一台2核8G的机器一天下来的费用大概是 1.2 - 1.5元。 -* 再次强调:按照本文档进行安装时,所有节点必须在同一个局域网内 -::: - **安装后的软件版本为** -* Kubernetes v1.16.3 - * calico 3.9 +* Kubernetes v1.17.x + * calico 3.10.2 * nginx-ingress 1.5.5 * Docker 18.09.7 > 如果要安装 Kubernetes 历史版本,请参考: +> * [安装 Kubernetes v1.16.3 单Master节点](/install/history-k8s/install-k8s-1.16.3.html) > * [安装 Kubernetes v1.16.2 单Master节点](/install/history-k8s/install-k8s-1.16.2.html) > * [安装 Kubernetes v1.16.1 单Master节点](/install/history-k8s/install-k8s-1.16.1.html) > * [安装 Kubernetes v1.16.0 单Master节点](/install/history-k8s/install-k8s-1.16.0.html) @@ -178,20 +173,23 @@ default via 172.21.0.1 dev eth0 +**请将脚本最后的 1.17.0 替换成您需要的版本号,** +脚本中间的 v1.17.x 不要替换 ``` sh # 在 master 节点和 worker 节点都要执行 +# 最后一个参数 1.17.0 用于指定 kubenetes 版本,支持所有 1.17.x 版本的安装 -curl -sSL https://kuboard.cn/install-script/v1.16.3/install_kubelet.sh | sh +curl -sSL https://kuboard.cn/install-script/v1.17.x/install_kubelet.sh | sh -s 1.17.0 ``` -手动执行以下代码,效果与快速安装完全相同。 +手动执行以下代码,结果与快速安装相同。***请将脚本第79行(已高亮)的 ${1} 替换成您需要的版本号,例如 1.17.0*** -<<< @/.vuepress/public/install-script/v1.16.3/install_kubelet.sh +<<< @/.vuepress/public/install-script/v1.17.x/install_kubelet.sh {79} ::: warning 如果此时执行 `service status kubelet` 命令,将得到 kubelet 启动失败的错误提示,请忽略此错误,因为必须完成后续步骤中 kubeadm init 的操作,kubelet 才能正常启动 @@ -221,7 +219,10 @@ curl -sSL https://kuboard.cn/install-script/v1.16.3/install_kubelet.sh | sh -``` sh +**请将脚本最后的 1.17.0 替换成您需要的版本号,** +脚本中间的 v1.17.x 不要替换 + +``` sh {10} # 只在 master 节点执行 # 替换 x.x.x.x 为 master 节点实际 IP(请使用内网 IP) # export 命令只在当前 shell 会话中有效,开启新的 shell 窗口后,如果要继续安装过程,请重新执行此处的 export 命令 @@ -231,12 +232,14 @@ export APISERVER_NAME=apiserver.demo # Kubernetes 容器组所在的网段,该网段安装完成后,由 kubernetes 创建,事先并不存在于您的物理网络中 export POD_SUBNET=10.100.0.1/16 echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts -curl -sSL https://kuboard.cn/install-script/v1.16.3/init_master.sh | sh +curl -sSL https://kuboard.cn/install-script/v1.17.x/init_master.sh | sh -s 1.17.0 ``` +手动执行以下代码,结果与快速初始化相同。***请将脚本第21行(已高亮)的 ${1} 替换成您需要的版本号,例如 1.17.0*** + ``` sh # 只在 master 节点执行 # 替换 x.x.x.x 为 master 节点的内网IP @@ -249,7 +252,7 @@ export POD_SUBNET=10.100.0.1/16 echo "${MASTER_IP} ${APISERVER_NAME}" >> /etc/hosts ``` -<<< @/.vuepress/public/install-script/v1.16.3/init_master.sh {22} +<<< @/.vuepress/public/install-script/v1.17.x/init_master.sh {21} @@ -426,9 +429,9 @@ kubectl get nodes -o wide ```sh [root@demo-master-a-1 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION -demo-master-a-1 Ready master 5m3s v1.16.3 -demo-worker-a-1 Ready 2m26s v1.16.3 -demo-worker-a-2 Ready 3m56s v1.16.3 +demo-master-a-1 Ready master 5m3s v1.17.x +demo-worker-a-1 Ready 2m26s v1.17.x +demo-worker-a-2 Ready 3m56s v1.17.x ``` @@ -446,7 +449,7 @@ demo-worker-a-2 Ready 3m56s v1.16.3 ``` sh # 只在 master 节点执行 -kubectl apply -f https://kuboard.cn/install-script/v1.16.3/nginx-ingress.yaml +kubectl apply -f 
https://kuboard.cn/install-script/v1.17.x/nginx-ingress.yaml ``` @@ -459,13 +462,13 @@ kubectl apply -f https://kuboard.cn/install-script/v1.16.3/nginx-ingress.yaml ``` sh # 只在 master 节点执行 -kubectl delete -f https://kuboard.cn/install-script/v1.16.3/nginx-ingress.yaml +kubectl delete -f https://kuboard.cn/install-script/v1.17.x/nginx-ingress.yaml ``` -<<< @/.vuepress/public/install-script/v1.16.3/nginx-ingress.yaml +<<< @/.vuepress/public/install-script/v1.17.x/nginx-ingress.yaml diff --git a/learning/k8s-intermediate/service/connecting.md b/learning/k8s-intermediate/service/connecting.md index 6647849..9079dcd 100644 --- a/learning/k8s-intermediate/service/connecting.md +++ b/learning/k8s-intermediate/service/connecting.md @@ -268,7 +268,7 @@ Hit enter for command prompt cat /d/tmp/nginx.crt | base64 cat /d/tmp/nginx.key | base64 ``` -* 创建一个如下格式的 nginxsecrets.yaml 文件,使用前面命令输出的 base64 编码替换其中的内容(base64编码内容不能换行) +* 创建一个如下格式的 nginxsecrets.yaml 文件,使用前面命令输出的 base64 编码替换其中的内容(base64编码内容不能换行)(请使用前面两行命令生成的结果替换 nginx.crt 和 nginx.key 的内容,) ```yaml apiVersion: "v1" kind: "Secret" diff --git a/learning/k8s-practice/micro-service/kuboard-view-of-k8s.md b/learning/k8s-practice/micro-service/kuboard-view-of-k8s.md index 5901e21..15c822a 100644 --- a/learning/k8s-practice/micro-service/kuboard-view-of-k8s.md +++ b/learning/k8s-practice/micro-service/kuboard-view-of-k8s.md @@ -188,7 +188,7 @@ Kuboard 认为,掌握这些概念并正确理解这些概念的关系之后, * 资源层监控:Prometheus + Grafana + Node Exporter 等组件,监控Kubernetes 节点的 CPU、内存、网络、磁盘等使用情况 * 中间件层监控:Prometheus + Grafana + MySQL Exporter + Nginx Exporter + JVM Exporter 等,监控 MySQL、Nginx、JVM 等中间件的使用情况 -* 链路/APM监控:Pinpoint / SkyWorking 等监控工具,监控应用程序的性能表现 +* 链路/APM监控:Pinpoint / Skywalking 等监控工具,监控应用程序的性能表现 各种监控系统各有侧重,如果想要取得比较好的监控效果,必须克服如下几个困难: diff --git a/support/change-log/change-log-on-the-way.md b/support/change-log/change-log-on-the-way.md index edb5bb9..04c7431 100644 --- a/support/change-log/change-log-on-the-way.md +++ b/support/change-log/change-log-on-the-way.md @@ -1,6 +1,9 @@ Kuboard v1.0.x 的更新说明 + +* 更新版本时,可以通过下拉列表选择仓库中的版本号 * 导入导出时,需要支持 nfs 等类型的数据卷 +* subPathExpr https://kuboard.cn/learning/k8s-intermediate/persistent/volume-mount-point.html#%E6%95%B0%E6%8D%AE%E5%8D%B7%E5%86%85%E5%AD%90%E8%B7%AF%E5%BE%84 * 工作负载查看 --> 未显示 SecurityContext * EndPoint diff --git a/support/change-log/v1.0.x.md b/support/change-log/v1.0.x.md index c1090d4..a45cd42 100644 --- a/support/change-log/v1.0.x.md +++ b/support/change-log/v1.0.x.md @@ -13,6 +13,19 @@ eipwork/kuboard:latest 当前对应的版本是 kuboard v1.0.5.1 Kuboard v1.0.x 的更新说明 +## v1.0.5.2 + +**发布日期** + +2019年12月12日 + +**优化** +* 日志及终端 --> 将字体设置为 'Melon, Ubuntu Mono, courier-new, courier, monospace' +* 集群概览页 --> 页头显示 Kubernetes/Kuboard 版本 + +**Bug修复** +* CI/CD集成脚本里,当端口号不存在时,url显示不正确 + ## v1.0.5.1 **发布日期**