diff --git a/.vuepress/config-sidebar.js b/.vuepress/config-sidebar.js index 4781f75..b3a4a62 100644 --- a/.vuepress/config-sidebar.js +++ b/.vuepress/config-sidebar.js @@ -280,6 +280,8 @@ let sidebar = { 'k8s-intermediate/persistent/volume-mount-point.html', 'k8s-intermediate/persistent/pv', 'k8s-intermediate/persistent/storage-class', + 'k8s-intermediate/persistent/ceph/k8s-config', + 'k8s-intermediate/persistent/ceph/rook-config', 'k8s-intermediate/persistent/nfs', 'k8s-intermediate/persistent/limits', ] diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20200930180512585.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20200930180512585.png deleted file mode 100644 index e797bd0..0000000 Binary files a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20200930180512585.png and /dev/null differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006091349104.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006091349104.png new file mode 100644 index 0000000..0b2a2a7 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006091349104.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006092244551.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006092244551.png new file mode 100644 index 0000000..81c4f3b Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006092244551.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006093422107.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006093422107.png new file mode 100644 index 0000000..3a1f8fa Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006093422107.png differ diff --git 
a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006095554079.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006095554079.png new file mode 100644 index 0000000..e5b41c6 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006095554079.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006100157721.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006100157721.png new file mode 100644 index 0000000..9ffbfe7 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006100157721.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006103006219.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006103006219.png new file mode 100644 index 0000000..af32486 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006103006219.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006104518556.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006104518556.png new file mode 100644 index 0000000..0e35c3d Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006104518556.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006105449536.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006105449536.png new file mode 100644 index 0000000..97a049f Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006105449536.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006105641944.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006105641944.png new file mode 100644 
index 0000000..e402d62 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006105641944.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115317391.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115317391.png new file mode 100644 index 0000000..492a322 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115317391.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115504088.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115504088.png new file mode 100644 index 0000000..b9b81ad Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115504088.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115932809.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115932809.png new file mode 100644 index 0000000..013431c Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006115932809.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006120055445.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006120055445.png new file mode 100644 index 0000000..4067c26 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006120055445.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006120247227.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006120247227.png new file mode 100644 index 0000000..64c5d92 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006120247227.png differ diff --git 
a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121320474.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121320474.png new file mode 100644 index 0000000..7d4b2ff Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121320474.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121547422.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121547422.png new file mode 100644 index 0000000..b84cb25 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121547422.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121702450.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121702450.png new file mode 100644 index 0000000..1341f95 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006121702450.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124153316.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124153316.png new file mode 100644 index 0000000..396ac88 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124153316.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124504263.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124504263.png new file mode 100644 index 0000000..5a22708 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124504263.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124650466.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124650466.png new file mode 100644 
index 0000000..f3e38d5 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006124650466.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006125702865.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006125702865.png new file mode 100644 index 0000000..71a4487 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006125702865.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006140701887.png b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006140701887.png new file mode 100644 index 0000000..b63a5b4 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/k8s-config.assets/image-20201006140701887.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/k8s-config.md b/learning/k8s-intermediate/persistent/ceph/k8s-config.md index 4642923..a021a28 100644 --- a/learning/k8s-intermediate/persistent/ceph/k8s-config.md +++ b/learning/k8s-intermediate/persistent/ceph/k8s-config.md @@ -7,39 +7,232 @@ meta: content: Kubernetes教程,K8S教程,StorageClass,CephFS --- -# CephFS 作为存储卷 +# 使用 CephFS 作为存储类 -### Ceph 集群信息 +本文描述了如何使用 Kuboard / Kuberenetes 对接 CephFS 作为存储类,并完成如下场景: -在浏览器打开 `http://your-node-ip:32567/namespace/kube-system/settings/storage/ceph-csi`,可以添加 Ceph 集群的连接参数信息,如下图所示: +* [动态提供存储卷](../pv.html#提供-provisioning) +* 绑定 PVC 到 Pod +* PVC 相关操作 + * 扩容 + * 克隆 + * 快照 + * 从快照恢复 -

- -

+## 前提条件 -在 Ceph 集群的节点上执行如下指令, +* 您已经安装了 Kubernetes 集群,且集群版本不低于 v1.17.0,安装方法请参考 [安装 Kubernetes 集群](/install/install-k8s.html); -``` sh -ceph mon dump -``` +* 您已经安装了 Kuboard,且 Kuboard 版本不低于 v2.0.5,安装方法请参考 [安装 Kuboard](/install/install-dashboard.html); -输出结果如下所示: +* 您已经安装了 Ceph 集群, Ceph 集群版本不低于 v15.2.3,且已经在集群中创建了一个 FileSystem,安装方法请参考 [Deploying a new Ceph Cluster with cephadm](https://docs.ceph.com/en/latest/cephadm/install/) -``` {3,7,8,9} -dumped monmap epoch 3 -epoch 3 -fsid 652e06d4-0199-11eb-ac32-ad26de1a6bb7 -last_changed 2020-09-28T15:00:22.672327+0000 -created 2020-09-28T14:46:38.231478+0000 -min_mon_release 15 (octopus) -0: [v2:192.168.2.201:3300/0,v1:192.168.2.201:6789/0] mon.raspberry-01 -1: [v2:192.168.2.202:3300/0,v1:192.168.2.202:6789/0] mon.raspberry-02 -2: [v2:192.168.2.203:3300/0,v1:192.168.2.203:6789/0] mon.raspberry-03 -``` + > * Ceph 当前推荐的集群安装方式有 [Deploying a new Ceph Cluster with cephadm](https://docs.ceph.com/en/latest/cephadm/install/) 和 [Rook](https://rook.io/docs/rook/v1.4/ceph-quickstart.html); + > * Kuboard 可以同时兼容两种形式安装的 Ceph 集群,本文描述了如何对接使用 Cephadm 安装的 Ceph 集群,如果想了解如何对接 Rook 安装的 Ceph 集群,请参考文档 [CephFS 存储类 - Rook](./rook-config.html); + > * Kuboard 也可以兼容其他形式安装的 Ceph 集群,例如 Ceph-ansible、DeepSea、puppet-ceph、ceph-deploy; + > * Kuboard 2.0.5 也可以对接更低版本的 Kubernetes 或 Ceph 集群,但是会有一部分功能受到限制,请参考 [Ceph CSI 功能特性对照表](https://github.com/ceph/ceph-csi) 中关于 CephFS 的部分。 -* 其中第三行 fsid 即为 clusterID -* 第31、32、33 行的 `v1:` 与 `/0` 之间的字符串(例如:`192.168.2.201:6789`)即为 monitors 的连接信息 -https://github.com/ceph/ceph-csi/blob/master/docs/capabilities.md + +## 初始化 CephFS CSI 插件 + +在第一次创建 CephFS StorageClass 时,Kuboard 界面会引导您完成一系列对集群的设置工作,每个集群中,此初始化设置只需要执行一次即可。 + +* 创建快照 CRD + + 打开 Kuboard 集群概览页,按照下图的步骤,在界面的引导下,可以完成快照 CRD 的创建。 + + > * 此步骤只在第一次创建 CephFS StorageClass 时需要执行。 + + ![创建CephFS StorageClass](./k8s-config.assets/image-20201006091349104.png) + +* 初始化 Ceph CSI 插件 + + 创建好了快照 CRD 之后,再次尝试创建存储类时,Kuboard 界面将引导您初始化 Ceph CSI 插件,按照下图所示步骤,可以打开 Ceph CSI 插件管理页面: + + 
![image-20201006092244551](./k8s-config.assets/image-20201006092244551.png) + + Ceph CSI 插件管理页面的初始状态如下图所示: + + > * 如果您的集群节点不能访问互联网,则您必须执行下图中的第二个步骤; + > * 在您的集群节点可以访问互联网的情况下,此步骤为可选,如果执行此步骤,可以加快插件所需镜像的加载速度。 + + ![image-20201006093422107](./k8s-config.assets/image-20201006093422107.png) + +* 添加 Ceph 集群基本信息 + + 在完成上面步骤中 Ceph CSI 插件的安装以后,该页面的页尾会显示 Ceph 集群信息 维护的区域,点击页尾的编辑按钮,如下图所示: + + ![CephFS StorageClass](./k8s-config.assets/image-20201006095554079.png) + + 点击编辑按钮之后,可以添加 Ceph 集群的基本信息: + + ![Kubernetes CephFS StorageClass](./k8s-config.assets/image-20201006100157721.png) + + ::: tip 获取 ceph 集群参数信息 + + 您必须能够通过 `ceph` 指令连接到 Ceph 集群,才可以获得上图中所需要的参数,具体步骤描述如下: + + 在 Ceph 集群的节点上执行如下指令: + + ``` sh + ceph mon dump + ``` + + 输出结果如下所示: + + ``` {3,7,8,9} + dumped monmap epoch 3 + epoch 3 + fsid 652e06d4-0199-11eb-ac32-ad26de1a6bb7 + last_changed 2020-09-28T15:00:22.672327+0000 + created 2020-09-28T14:46:38.231478+0000 + min_mon_release 15 (octopus) + 0: [v2:192.168.2.201:3300/0,v1:192.168.2.201:6789/0] mon.raspberry-01 + 1: [v2:192.168.2.202:3300/0,v1:192.168.2.202:6789/0] mon.raspberry-02 + 2: [v2:192.168.2.203:3300/0,v1:192.168.2.203:6789/0] mon.raspberry-03 + ``` + + * 其中第三行 fsid 即为 clusterID + * 第7、8、9 行的 `v1:` 与 `/0` 之间的字符串(例如:`192.168.2.201:6789`)即为 monitors 的连接信息 + + ::: + + + +## 创建 CephFS StorageClass + +完成上述初始化 CephFS CSI 插件的操作以后,您就可以创建 CephFS StorageClass 了,具体步骤如下所示: + +![Kubernetes CephFS StorageClass](./k8s-config.assets/image-20201006103006219.png) + +::: tip CephFS 参数获取方式 + +您必须能够使用 `ceph` 指令连接到 Ceph 集群,才能获得 CephFS 所需的参数,获取方式如下所述: + +* UserID / UserKey / AdminID / AdminKey + + 假设您想要获取 `admin` 用户的 AdminKey,执行指令 + + ```sh + ceph auth get client.admin + ``` + + 输出结果如下所示: + + ``` + exported keyring for client.admin + [client.admin] + key = AQBM93FfRzIsGxAAWR0bSgAnhFNRxqE9Rjil5w== + caps mds = "allow *" + caps mgr = "allow *" + caps mon = "allow *" + caps osd = "allow *" + ``` + + 您还可以执行 `ceph auth ls` 指令查看 Ceph 集群中的用户列表,执行 `ceph auth get-or-create` 指令创建 Ceph 用户,更多与 Ceph
用户管理相关的信息,请参考 [Ceph User Management](https://docs.ceph.com/en/latest/rados/operations/user-management/) + +* File System Name / pool + + 执行 `ceph fs ls` 指令,该指令输出结果如下所示: + + ``` + name: cephfs-raspberry, metadata pool: cephfs.cephfs-raspberry.meta, data pools: [cephfs.cephfs-raspberry.data ] + ``` + + > 输出结果中: + > + > * name 字段作为 `File System Name` 的取值; + > * data pools 字段中的一个作为 `pool` 字段的值。 + +::: + + + +## 创建 PVC + +完成 CephFS StorageClass 的创建后,进入名称空间界面, + +* 创建 PVC: + + 按照下图所示步骤创建 PVC + + ![Kubernetes CephFS StorageClass PVC](./k8s-config.assets/image-20201006104518556.png) + +* 创建工作负载挂载该 PVC: + + 按照下图所示步骤,可以进入创建工作负载的界面: + + ![创建工作负载并挂载 PVC](./k8s-config.assets/image-20201006105641944.png) + + 按照下图所示步骤,创建一个工作负载用于测试 CephFS 存储: + + ![挂载PVC](./k8s-config.assets/image-20201006105449536.png) + +* 向存储卷中写入内容 + + 按照下图所示,进入刚才所创建工作负载的文件浏览器: + + ![Kubernetes 文件浏览器](./k8s-config.assets/image-20201006115317391.png) + + 点击 ***日志/终端*** 按钮后,将显示该工作负载的所有 Pod / Container,如下图所示: + + ![从Pod 列表中打开文件浏览器](./k8s-config.assets/image-20201006115504088.png) + + 点击文件浏览器按钮后,按照下图描述的步骤创建文件: + + ![image-20201006115932809](./k8s-config.assets/image-20201006115932809.png) + + 创建完成后,可以查看到 `/cephfs` 目录的文件列表如下图所示: + + ![image-20201006120055445](./k8s-config.assets/image-20201006120055445.png) + + 点击编辑按钮后,按照下图描述的步骤往 hello.txt 文件中写入一些内容: + + ![image-20201006120247227](./k8s-config.assets/image-20201006120247227.png) + +* 查看存储卷内容: + + 切换到集群概览页,按照下图所示操作,可以查看到刚才写入到 CephFS 存储卷中的信息: + + ![Kubernetes PV 浏览器](./k8s-config.assets/image-20201006121320474.png) + + 您也可以查看 PersistentVolume 的 YAML 信息,获得该存储卷的 subVolume,然后通过 `cephfs-shell` 指令查看 Ceph 集群中存储的内容: + + ![image-20201006121547422](./k8s-config.assets/image-20201006121547422.png) + + `cephfs-shell` 的执行结果可以参考下图所示:关于如何使用 `cephfs-shell` 请参考文档 [CEPHFS SHELL](https://docs.ceph.com/en/latest/cephfs/cephfs-shell/) + + ![cephfs shell](./k8s-config.assets/image-20201006121702450.png) + +## 对 PVC 执行操作 + + + +### 扩容 + +按照如下步骤,可以为存储卷执行扩容的操作: + +![Kubernetes 存储卷声明 
扩容](./k8s-config.assets/image-20201006124153316.png) + +### 克隆 + +按照如下步骤,可以克隆存储卷 + +![image-20201006124504263](./k8s-config.assets/image-20201006124504263.png) + +### 快照 + +按照如下步骤,进入快照页面: + +![Kubernetes 创建快照](./k8s-config.assets/image-20201006124650466.png) + +![Kubernetes 创建存储卷快照](./k8s-config.assets/image-20201006125702865.png) + +### 从快照恢复 + +在上面的步骤中创建快照以后,点击快照列表中的 ***恢复*** 按钮,可以将快照恢复到一个新建的 PVC 中。 + +![Kubernetes 从快照中恢复](./k8s-config.assets/image-20201006140701887.png) diff --git a/learning/k8s-intermediate/persistent/ceph/rook-config.assets/image-20201006185022912.png b/learning/k8s-intermediate/persistent/ceph/rook-config.assets/image-20201006185022912.png new file mode 100644 index 0000000..7f1e262 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/rook-config.assets/image-20201006185022912.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/rook-config.assets/image-20201006185624187.png b/learning/k8s-intermediate/persistent/ceph/rook-config.assets/image-20201006185624187.png new file mode 100644 index 0000000..2355891 Binary files /dev/null and b/learning/k8s-intermediate/persistent/ceph/rook-config.assets/image-20201006185624187.png differ diff --git a/learning/k8s-intermediate/persistent/ceph/rook-config.md b/learning/k8s-intermediate/persistent/ceph/rook-config.md new file mode 100644 index 0000000..3625f60 --- /dev/null +++ b/learning/k8s-intermediate/persistent/ceph/rook-config.md @@ -0,0 +1,167 @@ +--- +# vssueId: 91 +layout: LearningLayout +description: Kubernetes教程_本文描述如何在 Kuboard 中配置 StorageClass 连接 CephFS on Rook +meta: + - name: keywords + content: Kubernetes教程,K8S教程,StorageClass,CephFS +--- + +# 使用 CephFS 作为存储类 - Rook + + + +本文描述了如何使用 Kuboard / Kubernetes 对接 Rook 安装的 CephFS 作为存储类,并完成如下场景: + +* 安装 Rook - Ceph +* 创建 StorageClass,[动态提供存储卷](../pv.html#提供-provisioning) +* 绑定 PVC 到 Pod +* PVC 相关操作 + * 扩容 + * 克隆 + * 快照 + * 从快照恢复 + +## 前提条件 + +* 您已经安装了 Kubernetes 集群,且集群版本不低于 v1.17.0,安装方法请参考 [安装 Kubernetes
集群](/install/install-k8s.html); + + * Kubernetes 集群有至少 3 个工作节点,且每个工作节点都有一块初系统盘以外的 **未格式化** 的裸盘(工作节点是虚拟机时,未格式化的裸盘可以是虚拟磁盘),用于创建 3 个 Ceph OSD; + + * 也可以只有 1 个工作节点,挂载了一块 **未格式化** 的裸盘; + + * 在节点机器上执行 `lsblk -f ` 指令可以查看磁盘是否需被格式化,输出结果如下: + + ``` sh + lsblk -f + NAME FSTYPE LABEL UUID MOUNTPOINT + vda + └─vda1 LVM2_member eSO50t-GkUV-YKTH-WsGq-hNJY-eKNf-3i07IB + ├─ubuntu--vg-root ext4 c2366f76-6e21-4f10-a8f3-6776212e2fe4 / + └─ubuntu--vg-swap_1 swap 9492a3dc-ad75-47cd-9596-678e8cf17ff9 [SWAP] + vdb + ``` + + 如果 `FSTYPE` 字段不为空,则表示该磁盘上已经被格式化。在上面的例子中,可以将磁盘 `vdb` 用于 Ceph 的 OSD,而磁盘 `vda` 及其分区则不能用做 Ceph 的 OSD。 + +* 您已经安装了 Kuboard,且 Kuboard 版本不低于 v2.0.5,安装方法请参考 [安装 Kuboard](/install/install-dashboard.html); + +## 安装 Rook - Ceph + +本章节参考 [Rook Ceph Storage Quickstart](https://rook.io/docs/rook/v1.4/ceph-quickstart.html) 在 Kubernetes 集群上快速安装了一个 Ceph 集群。 + +* 执行如下命令安装 Rook - Ceph 集群 + + ``` sh + kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.5/common.yaml + kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.5/operator.yaml + kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.5/cluster.yaml + ``` + + 在执行上述指令之前,可以使用 [docker-image-loader](https://github.com/eip-work/docker-image-loader) 提前将所需要的镜像加载到所有节点机器上,相比较每个节点分别从公网抓取镜像会更快一些;如果您的集群在内网环境,也可以使用 docker-image-loader 加载镜像到集群节点。需要加载的镜像如下: + + ``` + quay.io/cephcsi/cephcsi:v3.1.1 + quay.io/k8scsi/csi-node-driver-registrar:v1.2.0 + quay.io/k8scsi/csi-attacher:v2.1.0 + quay.io/k8scsi/csi-snapshotter:v2.1.1 + quay.io/k8scsi/csi-resizer:v0.4.0 + quay.io/k8scsi/csi-provisioner:v1.6.0 + rook/ceph:v1.4.5 + ceph/ceph:v15.2.4 + ``` + +* 执行 `watch kubectl get pods -n rook-ceph` 指令,直到所有的 Pod 处于 `Running` 或者 `Completed` 状态,如下所示: + + ```sh {1} + watch kubectl get pods -n rook-ceph + NAME READY STATUS RESTARTS AGE + csi-cephfsplugin-5hfb7 3/3 Running 0 39m + csi-cephfsplugin-5xdz4 3/3 Running 0 39m + csi-cephfsplugin-provisioner-5c65b94c8d-9txpv 6/6 Running 0 39m + 
csi-cephfsplugin-provisioner-5c65b94c8d-rt4fp 6/6 Running 0 39m + csi-cephfsplugin-pstw9 3/3 Running 0 39m + csi-rbdplugin-ft4dk 3/3 Running 0 39m + csi-rbdplugin-fxj9n 3/3 Running 0 39m + csi-rbdplugin-provisioner-569c75558-h2jv7 6/6 Running 0 39m + csi-rbdplugin-provisioner-569c75558-q4fkt 6/6 Running 0 39m + csi-rbdplugin-rw7jn 3/3 Running 0 39m + rook-ceph-crashcollector-k8s-node-01-6fbb5cb4b8-nwvhj 1/1 Running 0 35m + rook-ceph-crashcollector-k8s-node-02-5c67f6f9f5-qm47c 1/1 Running 0 37m + rook-ceph-crashcollector-k8s-node-03-7f6cfc655b-b8cv2 1/1 Running 0 40m + rook-ceph-mgr-a-5844874f9c-rqggg 1/1 Running 0 35m + rook-ceph-mon-a-67b6865644-4bkm9 1/1 Running 0 40m + rook-ceph-mon-b-59f855c47d-tg44q 1/1 Running 0 40m + rook-ceph-mon-d-7576586cc9-nn94w 1/1 Running 0 37m + rook-ceph-operator-6db6f67cd4-smhz8 1/1 Running 0 41m + rook-ceph-osd-prepare-k8s-node-01-x2p74 0/1 Completed 0 35m + rook-ceph-osd-prepare-k8s-node-02-7j4s7 0/1 Completed 0 35m + rook-ceph-osd-prepare-k8s-node-03-w2mgf 0/1 Completed 0 35m + rook-discover-9hzds 1/1 Running 0 41m + rook-discover-gz7xv 1/1 Running 0 41m + rook-discover-hljrn 1/1 Running 0 41m + ``` + +* Ceph 集群部署好以后,可以通过 Ceph 提供 块存储、文件存储和对象存储。此处,我们通过如下指令来创建文件存储服务: + + > 参考文档 [Rook Ceph-FileSystem](https://rook.io/docs/rook/v1.4/ceph-filesystem.html) + + ``` sh + cat > myfs.yaml < 此步骤只在第一次创建 CephFS StorageClass 时需要执行 + + ![Kubernetes CephFS StorageClass](./rook-config.assets/image-20201006185022912.png) + +## 创建 CephFS Rook StorageClass + +完成上述初始化快照 CRD 的操作以后,您就可以创建 CephFS Rook StorageClass 了,具体步骤如下图所示: + +![image-20201006185624187](rook-config.assets/image-20201006185624187.png) + +## 创建 PVC + +创建 PVC、将 PVC 挂载到 Pod、并向存储卷中写入内容等操作步骤请参考 [创建 PVC](./k8s-config.html#创建-pvc) + + + +## PVC 相关操作 + +PVC 创建以后,可以通过 Kuboard 界面执行如下操作: + +* 扩容 +* 克隆 +* 快照 +* 从快照恢复 + +具体操作步骤请参考 [对 PVC 执行操作](./k8s-config.html#对-pvc-执行操作) \ No newline at end of file diff --git a/learning/k8s-intermediate/persistent/pv.md 
b/learning/k8s-intermediate/persistent/pv.md index 94cfd25..591bbc2 100644 --- a/learning/k8s-intermediate/persistent/pv.md +++ b/learning/k8s-intermediate/persistent/pv.md @@ -124,7 +124,7 @@ PersistentVolumeClaim 将始终停留在 ***未绑定 unbound*** 状态,直到 动态提供的 PersistentVolume 将从其对应的 StorageClass 继承回收策略的属性。 ::: -### 扩展 Expanding Persistent Volumes Claims +### 扩容 Expanding Persistent Volumes Claims Kubernetes v1.15 及以上版本 该特性只针对极少数的 PersistentVolume 类型有效。请参考 [Expanding Persistent Volumes Claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims) @@ -187,7 +187,7 @@ PersistentVolume 字段描述如下表所示: 在 Kuboard 中查看存储卷声明的界面如下图所示: -![Kubernetes教程:存储卷PersistentVolume-在Kuboard中查看存储卷声明PersistentVolumeClaims](pv.assets/image-20200913195026464.png) +![Kubernetes教程:存储卷PersistentVolume-在Kuboard中查看存储卷声明PersistentVolumeClaims](./pv.assets/image-20200913195026464.png) | 字段名称 | 可选项/备注 | | --------------------- | ------------------------------------------------------------ |