From cf1c9150efefdf6f005eeb973ebb2ffdbf600d14 Mon Sep 17 00:00:00 2001
From: "huanqing.shao"
Date: Tue, 29 Dec 2020 21:07:10 +0800
Subject: [PATCH] rook-1.5.4

---
 .../learning/ceph/rook-1.5.4/cluster.yaml     |  252 +++
 .../learning/ceph/rook-1.5.4/common.yaml      | 1134 ++++++++++++++
 .../learning/ceph/rook-1.5.4/crds.yaml        | 1371 +++++++++++++++++
 .../learning/ceph/rook-1.5.4/operator.yaml    |  470 ++++++
 .../persistent/ceph/rook-config-1.4.7.md      |  167 ++
 .../persistent/ceph/rook-config.md            |   24 +-
 6 files changed, 3400 insertions(+), 18 deletions(-)
 create mode 100644 .vuepress/public/statics/learning/ceph/rook-1.5.4/cluster.yaml
 create mode 100644 .vuepress/public/statics/learning/ceph/rook-1.5.4/common.yaml
 create mode 100644 .vuepress/public/statics/learning/ceph/rook-1.5.4/crds.yaml
 create mode 100644 .vuepress/public/statics/learning/ceph/rook-1.5.4/operator.yaml
 create mode 100644 learning/k8s-intermediate/persistent/ceph/rook-config-1.4.7.md

diff --git a/.vuepress/public/statics/learning/ceph/rook-1.5.4/cluster.yaml b/.vuepress/public/statics/learning/ceph/rook-1.5.4/cluster.yaml
new file mode 100644
index 0000000..0fda1e9
--- /dev/null
+++ b/.vuepress/public/statics/learning/ceph/rook-1.5.4/cluster.yaml
@@ -0,0 +1,252 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster with common settings for a production cluster.
+# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
+# in this example. See the documentation for more details on storage settings available.
+
+# For example, to create the cluster:
+#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+#   kubectl create -f cluster.yaml
+#################################################################################################################
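+# A quick way to sanity-check the result of the commands above (assuming the
+# default "rook-ceph" namespace and cluster name used throughout this file) is
+# to watch the pods start and poll the CephCluster status the operator reports:
+#   kubectl -n rook-ceph get pod -w
+#   kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.status.phase}'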
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph # namespace:cluster
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v13 is mimic, v14 is nautilus, and v15 is octopus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    # If you want to be more precise, you can always use a timestamp tag such as ceph/ceph:v15.2.8-20201217
+    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
+    image: ceph/ceph:v15.2.8
+    # Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported.
+    # Future versions such as `pacific` would require this to be set to `true`.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. Must be specified.
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
+  dataDirHostPath: /var/lib/rook
+  # Whether or not the upgrade should continue even if a check fails.
+  # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise.
+  # Use at your OWN risk.
+  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
+  skipUpgradeChecks: false
+  # Whether or not to continue if PGs are not clean during an upgrade
+  continueUpgradeAfterChecksEvenIfNotHealthy: false
+  mon:
+    # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3.
+    count: 3
+    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
+    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
+    allowMultiplePerNode: false
+  mgr:
+    modules:
+    # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
+    # are already enabled by other settings in the cluster CR.
+    - name: pg_autoscaler
+      enabled: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    ssl: true
+  # enable prometheus alerting for cluster
+  monitoring:
+    # requires Prometheus to be pre-installed
+    enabled: false
+    # namespace to deploy prometheusRule in. If empty, the namespace of the cluster will be used.
+    # Recommended:
+    # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
+    # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, the namespace with prometheus
+    # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
+    rulesNamespace: rook-ceph
+  network:
+    # enable host networking
+    #provider: host
+    # EXPERIMENTAL: enable the Multus network provider
+    #provider: multus
+    #selectors:
+      # The selector keys are required to be `public` and `cluster`.
+      # Based on the configuration, the operator will do the following:
+      #   1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
+      #   2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
+      #
+      # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
+      #
+      #public: public-conf --> NetworkAttachmentDefinition object name in Multus
+      #cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
+    # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
+    #ipFamily: "IPv6"
+  # enable the crash collector for ceph daemon crash collection
+  crashCollector:
+    disable: false
+    # Uncomment daysToRetain to prune ceph crash entries older than the
+    # specified number of days.
+    #daysToRetain: 30
+  # enable the log collector; daemons will log to files, which will be rotated
+  # logCollector:
+  #   enabled: true
+  #   periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
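+  # One common way to reach the dashboard enabled above for testing is a
+  # port-forward; the service name below is the one Rook creates by default,
+  # so adjust it if your deployment differs:
+  #   kubectl -n rook-ceph port-forward svc/rook-ceph-mgr-dashboard 8443:8443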
+  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+  cleanupPolicy:
+    # Since cluster cleanup is destructive to data, confirmation is required.
+    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
+    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
+    # Rook will immediately stop configuring the cluster and only wait for the delete command.
+    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
+    confirmation: ""
+    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
+    sanitizeDisks:
+      # method indicates if the entire disk should be sanitized or simply ceph's metadata
+      # in both cases, re-install is possible
+      # possible choices are 'complete' or 'quick' (default)
+      method: quick
+      # dataSource indicates where to get random bytes from to write on the disk
+      # possible choices are 'zero' (default) or 'random'
+      # using random sources will consume entropy from the system and will take much more time than the zero source
+      dataSource: zero
+      # iteration overwrites N times instead of the default (1)
+      # takes an integer value
+      iteration: 1
+    # allowUninstallWithVolumes defines how the uninstall should be performed
+    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
+    allowUninstallWithVolumes: false
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      topologySpreadConstraints:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+# Monitor deployments may contain an anti-affinity rule for avoiding monitor
+# collocation on the same node. This is a required rule when host network is used
+# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
+# preferred rule with weight: 50.
+#    osd:
+#    mgr:
+#    cleanup:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+#    cleanup:
+#    prepareosd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#    mgr:
+  labels:
+#    all:
+#    mon:
+#    osd:
+#    cleanup:
+#    mgr:
+#    prepareosd:
+  resources:
+# The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+#    prepareosd:
+#    crashcollector:
+#    logcollector:
+#    cleanup:
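+  # When tearing down the cluster, the cleanupPolicy confirmation above is
+  # typically set just before deletion (the namespace and name are assumed to
+  # match this file's metadata):
+  #   kubectl -n rook-ceph patch cephcluster rook-ceph --type merge \
+  #     -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'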
+  # The option to automatically remove OSDs that are out and are safe to destroy.
+  removeOSDsIfOutAndSafeToRemove: false
+#  priorityClassNames:
+#    all: rook-ceph-default-priority-class
+#    mon: rook-ceph-mon-priority-class
+#    osd: rook-ceph-osd-priority-class
+#    mgr: rook-ceph-mgr-priority-class
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: true
+    #deviceFilter:
+    config:
+      # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
+      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+      # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
+      # journalSizeMB: "1024"  # uncomment if the disks are 20 GB or smaller
+      # osdsPerDevice: "1" # this value can be overridden at the node or device level
+      # encryptedDevice: "true" # the default value for this option is "false"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
+#        config: # configuration can be specified at the node level which overrides the cluster level config
+#          storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: "^sd."
+  # The section for configuring management of daemon disruptions during upgrade or fencing.
+  disruptionManagement:
+    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
+    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
+    # block eviction of OSDs by default and unblock them safely when drains are detected.
+    managePodBudgets: false
+    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
+    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
+    osdMaintenanceTimeout: 30
+    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
+    # The operator will continue with the next drain if this timeout is exceeded. It only works if `managePodBudgets` is `true`.
+    # No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
+    pgHealthCheckTimeout: 0
+    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
+    # Only available on OpenShift.
+    manageMachineDisruptionBudgets: false
+    # Namespace in which to watch for the MachineDisruptionBudgets.
+    machineDisruptionBudgetNamespace: openshift-machine-api
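+  # If managePodBudgets above is set to true, the PodDisruptionBudgets the
+  # operator maintains can be inspected with plain kubectl:
+  #   kubectl -n rook-ceph get poddisruptionbudgets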
+
+  # healthChecks
+  # Valid values for daemons are 'mon', 'osd', 'status'
+  healthCheck:
+    daemonHealth:
+      mon:
+        disabled: false
+        interval: 45s
+      osd:
+        disabled: false
+        interval: 60s
+      status:
+        disabled: false
+        interval: 60s
+    # Change the pod liveness probe; it works for all mon, mgr, and osd daemons
+    livenessProbe:
+      mon:
+        disabled: false
+      mgr:
+        disabled: false
+      osd:
+        disabled: false
diff --git a/.vuepress/public/statics/learning/ceph/rook-1.5.4/common.yaml b/.vuepress/public/statics/learning/ceph/rook-1.5.4/common.yaml
new file mode 100644
index 0000000..6ccbc74
--- /dev/null
+++ b/.vuepress/public/statics/learning/ceph/rook-1.5.4/common.yaml
@@ -0,0 +1,1134 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster CRD in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+#
+# Most of the sections are prefixed with an 'OLM' keyword, which is used to build our CSV for an OLM (Operator Lifecycle Manager).
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph # namespace:cluster
+# OLM: BEGIN OBJECTBUCKET ROLEBINDING
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rook-ceph-object-bucket
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-object-bucket
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-system
+    namespace: rook-ceph # namespace:operator
+# OLM: END OBJECTBUCKET ROLEBINDING
+# OLM: BEGIN OPERATOR ROLE
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-admission-controller
+  namespace: rook-ceph # namespace:operator
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rook-ceph-admission-controller-role
+rules:
+  - apiGroups: ["ceph.rook.io"]
+    resources: ["*"]
+    verbs: ["get", "watch", "list"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rook-ceph-admission-controller-rolebinding
+subjects:
+  - kind: ServiceAccount
+    name: rook-ceph-admission-controller
+    apiGroup: ""
+    namespace: rook-ceph # namespace:operator
+roleRef:
+  kind: ClusterRole
+  name: rook-ceph-admission-controller-role
+  apiGroup: rbac.authorization.k8s.io
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  - apps
+  - extensions
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+---
+# The role for the
operator to manage resources in its own namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - pods + - configmaps + - services + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - statefulsets + - deployments + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - batch + resources: + - cronjobs + verbs: + - delete +--- +# The cluster role for managing the Rook CRDs +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + # Pod access is needed for fencing + - pods + # Node access is needed for determining nodes where mons should run + - nodes + - nodes/proxy + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + # PVs and PVCs are managed by the Rook provisioner + - persistentvolumes + - persistentvolumeclaims + - endpoints + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - rook.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - policy + - apps + - extensions + resources: + # This is for the clusterdisruption controller + - poddisruptionbudgets + # This is for both clusterdisruption and nodedrain controllers + - deployments + - replicasets + verbs: + - "*" +- apiGroups: + - healthchecking.openshift.io + resources: + - machinedisruptionbudgets + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - machine.openshift.io + resources: + - machines + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - get + - update +- apiGroups: + - k8s.cni.cncf.io + resources: + - network-attachment-definitions + verbs: + - get +--- +# Aspects of ceph-mgr that require cluster-wide access +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - configmaps + - nodes + - nodes/proxy + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - list + - get + - watch +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-object-bucket + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + verbs: + - "*" + resources: + - secrets + - configmaps +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - "objectbucket.io" + verbs: + - "*" + resources: + - "*" +# OLM: END OPERATOR ROLE +# OLM: BEGIN SERVICE ACCOUNT SYSTEM +--- +# The rook system service account used by the operator, agent, and discovery pods +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + 
operator: rook + storage-backend: ceph +# imagePullSecrets: +# - name: my-registry-secret + +# OLM: END SERVICE ACCOUNT SYSTEM +# OLM: BEGIN OPERATOR ROLEBINDING +--- +# Grant the operator, agent, and discovery agents access to resources in the namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-system + namespace: rook-ceph # namespace:operator + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-system +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-global +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +# OLM: END OPERATOR ROLEBINDING +################################################################################################################# +# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph" +# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles +# and bindings accordingly. +################################################################################################################# +# Service account for the Ceph OSDs. Must exist and cannot be renamed. +# OLM: BEGIN SERVICE ACCOUNT OSD +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +# imagePullSecrets: +# - name: my-registry-secret + +# OLM: END SERVICE ACCOUNT OSD +# OLM: BEGIN SERVICE ACCOUNT MGR +--- +# Service account for the Ceph Mgr. Must exist and cannot be renamed. 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +# imagePullSecrets: +# - name: my-registry-secret + +# OLM: END SERVICE ACCOUNT MGR +# OLM: BEGIN CMD REPORTER SERVICE ACCOUNT +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +# OLM: END CMD REPORTER SERVICE ACCOUNT +# OLM: BEGIN CLUSTER ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: [ "get", "list", "watch", "create", "update", "delete" ] +- apiGroups: ["ceph.rook.io"] + resources: ["cephclusters", "cephclusters/finalizers"] + verbs: [ "get", "list", "create", "update", "delete" ] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list +--- +# Aspects of ceph-mgr that require access to the system namespace +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Aspects of ceph-mgr that operate within the cluster's namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +rules: +- apiGroups: + - "" + resources: + - pods + - services + - pods/log + verbs: + - get + - list + - watch + - delete +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" +# OLM: END CLUSTER ROLE +# OLM: BEGIN CMD REPORTER ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +rules: +- apiGroups: + - "" + resources: + - pods + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete +# OLM: END CMD REPORTER ROLE +# OLM: BEGIN CLUSTER ROLEBINDING +--- +# Allow the operator to create resources in this cluster's namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cluster-mgmt + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-cluster-mgmt +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +# Allow the osd pods in this namespace to work with configmaps +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-osd +subjects: +- kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Allow the ceph mgr to access the rook system resources necessary for the mgr modules +kind: RoleBinding +apiVersion: 
rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-system + namespace: rook-ceph # namespace:operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-system +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-mgr-cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-cluster +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster + +--- +# Allow the ceph osd to access cluster-wide resources necessary for determining their topology location +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-osd +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-osd +subjects: +- kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster + +# OLM: END CLUSTER ROLEBINDING +# OLM: BEGIN CMD REPORTER ROLEBINDING +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-cmd-reporter +subjects: +- kind: ServiceAccount + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +# OLM: END CMD REPORTER ROLEBINDING +################################################################################################################# +# Beginning of pod security policy resources. The example will assume the cluster will be created in the +# "rook-ceph" namespace. If you want to create the cluster in a different namespace, you will need to modify +# the roles and bindings accordingly. +################################################################################################################# +# OLM: BEGIN CLUSTER POD SECURITY POLICY +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + # Note: Kubernetes matches PSPs to deployments alphabetically. In some environments, this PSP may + # need to be renamed with a value that will match before others. 
+ name: 00-rook-privileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' +spec: + privileged: true + allowedCapabilities: + # required by CSI + - SYS_ADMIN + # fsGroup - the flexVolume agent has fsGroup capabilities and could potentially be any group + fsGroup: + rule: RunAsAny + # runAsUser, supplementalGroups - Rook needs to run some pods as root + # Ceph pods could be run as the Ceph user, but that user isn't always known ahead of time + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + # seLinux - seLinux context is unknown ahead of time; set if this is well-known + seLinux: + rule: RunAsAny + volumes: + # recommended minimum set + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - secret + - projected + # required for Rook + - hostPath + - flexVolume + # allowedHostPaths can be set to Rook's known host volume mount points when they are fully-known + # allowedHostPaths: + # - pathPrefix: "/run/udev" # for OSD prep + # readOnly: false + # - pathPrefix: "/dev" # for OSD prep + # readOnly: false + # - pathPrefix: "/var/lib/rook" # or whatever the dataDirHostPath value is set to + # readOnly: false + # Ceph requires host IPC for setting up encrypted devices + hostIPC: true + # Ceph OSDs need to share the same PID namespace + hostPID: true + # hostNetwork can be set to 'false' if host networking isn't used + hostNetwork: true + hostPorts: + # Ceph messenger protocol v1 + - min: 6789 + max: 6790 # <- support old default port + # Ceph messenger protocol v2 + - min: 3300 + max: 3300 + # Ceph RADOS ports for OSDs, MDSes + - min: 6800 + max: 7300 + # # Ceph dashboard port HTTP (not recommended) + # - min: 7000 + # max: 7000 + # Ceph dashboard port HTTPS + - min: 8443 + max: 8443 + # Ceph mgr Prometheus Metrics + - min: 9283 + max: 9283 +# OLM: END CLUSTER POD SECURITY POLICY +# OLM: BEGIN POD SECURITY POLICY BINDINGS +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: 'psp:rook' +rules: + - apiGroups: + - policy + resources: + - podsecuritypolicies + resourceNames: + - 00-rook-privileged + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-ceph-system-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-default-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: +- kind: ServiceAccount + name: default + namespace: rook-ceph # namespace:cluster +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-osd-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: +- kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph # namespace:cluster +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-mgr-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph # namespace:cluster +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rook-ceph-cmd-reporter-psp + namespace: rook-ceph # namespace:cluster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp:rook +subjects: +- kind: ServiceAccount + name: rook-ceph-cmd-reporter + namespace: rook-ceph # namespace:cluster +# OLM: END CLUSTER POD SECURITY POLICY BINDINGS +# OLM: BEGIN CSI CEPHFS SERVICE ACCOUNT +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +# OLM: END CSI CEPHFS SERVICE ACCOUNT +# OLM: BEGIN CSI CEPHFS ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-external-provisioner-cfg + namespace: rook-ceph # namespace:operator +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "create", "delete"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +# OLM: END CSI CEPHFS ROLE +# OLM: BEGIN CSI CEPHFS ROLEBINDING +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: cephfs-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI CEPHFS ROLEBINDING +# OLM: BEGIN CSI CEPHFS CLUSTER ROLE +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", 
"list", "watch", "delete", "get", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] +# OLM: END CSI CEPHFS CLUSTER ROLE +# OLM: BEGIN CSI CEPHFS CLUSTER ROLEBINDING +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-cephfs-plugin-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-cephfs-provisioner-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cephfs-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-cephfs-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: cephfs-external-provisioner-runner + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI CEPHFS CLUSTER ROLEBINDING +# OLM: BEGIN CSI RBD SERVICE ACCOUNT +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +# OLM: END CSI RBD SERVICE ACCOUNT +# OLM: BEGIN CSI RBD ROLE +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-external-provisioner-cfg + namespace: rook-ceph # namespace:operator +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +# OLM: END CSI RBD ROLE +# OLM: BEGIN CSI RBD ROLEBINDING +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role-cfg + namespace: rook-ceph # namespace:operator +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: Role + name: rbd-external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io +# OLM: END CSI RBD ROLEBINDING +# OLM: BEGIN CSI RBD CLUSTER ROLE +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - 
apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "update"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list"] +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-external-provisioner-runner +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] +# OLM: END CSI RBD CLUSTER ROLE +# OLM: BEGIN CSI RBD CLUSTER ROLEBINDING +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-rbd-plugin-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rook-csi-rbd-provisioner-sa-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 'psp:rook' +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-nodeplugin +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-plugin-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-csi-nodeplugin + apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rbd-csi-provisioner-role +subjects: + - kind: ServiceAccount + name: rook-csi-rbd-provisioner-sa + namespace: rook-ceph # namespace:operator +roleRef: + kind: ClusterRole + name: rbd-external-provisioner-runner + apiGroup: 
rbac.authorization.k8s.io +# OLM: END CSI RBD CLUSTER ROLEBINDING diff --git a/.vuepress/public/statics/learning/ceph/rook-1.5.4/crds.yaml b/.vuepress/public/statics/learning/ceph/rook-1.5.4/crds.yaml new file mode 100644 index 0000000..2eb77a5 --- /dev/null +++ b/.vuepress/public/statics/learning/ceph/rook-1.5.4/crds.yaml @@ -0,0 +1,1371 @@ +################################################################################################################### +# Create the CRDs that are necessary before creating your Rook cluster. +# These resources *must* be created before the cluster.yaml or their variants. +################################################################################################################### +# OLM: BEGIN CEPH CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephclusters.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephCluster + plural: cephclusters + singular: cephcluster + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cephVersion: + type: object + properties: + allowUnsupported: + type: boolean + image: + type: string + dashboard: + type: object + properties: + enabled: + type: boolean + urlPrefix: + type: string + port: + type: integer + minimum: 0 + maximum: 65535 + ssl: + type: boolean + dataDirHostPath: + pattern: ^/(\S+) + type: string + disruptionManagement: + type: object + properties: + machineDisruptionBudgetNamespace: + type: string + managePodBudgets: + type: boolean + osdMaintenanceTimeout: + type: integer + pgHealthCheckTimeout: + type: integer + manageMachineDisruptionBudgets: + type: boolean + skipUpgradeChecks: + type: boolean + continueUpgradeAfterChecksEvenIfNotHealthy: + type: boolean + mon: + type: object + properties: + allowMultiplePerNode: + type: boolean + count: + maximum: 9 + minimum: 0 + type: integer + volumeClaimTemplate: + type: object + x-kubernetes-preserve-unknown-fields: true + stretchCluster: + type: object + nullable: true + properties: + failureDomainLabel: + type: string + subFailureDomain: + type: string + zones: + type: array + items: + type: object + properties: + name: + type: string + arbiter: + type: boolean + volumeClaimTemplate: + type: object + x-kubernetes-preserve-unknown-fields: true + mgr: + type: object + properties: + modules: + type: array + items: + type: object + properties: + name: + type: string + enabled: + type: boolean + crashCollector: + type: object + properties: + disable: + type: boolean + daysToRetain: + type: integer + network: + type: object + nullable: true + properties: + hostNetwork: + type: boolean + provider: + type: string + selectors: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + ipFamily: + type: string + storage: + type: object + properties: + disruptionManagement: + type: object + nullable: true + properties: + machineDisruptionBudgetNamespace: + type: string + managePodBudgets: + type: boolean + osdMaintenanceTimeout: + type: integer + pgHealthCheckTimeout: + type: integer + manageMachineDisruptionBudgets: + type: boolean + useAllNodes: + type: boolean + nodes: + type: array + nullable: true + items: + type: object + properties: + name: + type: string + config: + type: object + nullable: true + properties: + metadataDevice: + type: string + storeType: + type: string + pattern: ^(bluestore)$ + databaseSizeMB: + type: string + walSizeMB: + type: string + journalSizeMB: + 
type: string + osdsPerDevice: + type: string + encryptedDevice: + type: string + pattern: ^(true|false)$ + useAllDevices: + type: boolean + deviceFilter: + type: string + nullable: true + devicePathFilter: + type: string + devices: + type: array + items: + type: object + properties: + name: + type: string + config: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + useAllDevices: + type: boolean + deviceFilter: + type: string + nullable: true + devicePathFilter: + type: string + config: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + storageClassDeviceSets: + type: array + nullable: true + items: + type: object + properties: + name: + type: string + count: + type: integer + format: int32 + portable: + type: boolean + tuneDeviceClass: + type: boolean + tuneFastDeviceClass: + type: boolean + encrypted: + type: boolean + schedulerName: + type: string + config: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + preparePlacement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + volumeClaimTemplates: + type: array + items: + type: object + x-kubernetes-preserve-unknown-fields: true + driveGroups: + type: array + nullable: true + items: + type: object + properties: + name: + type: string + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + required: + - name + - spec + monitoring: + type: object + properties: + enabled: + type: boolean + rulesNamespace: + type: string + externalMgrEndpoints: + type: array + items: + type: object + properties: + ip: + type: string + externalMgrPrometheusPort: + type: integer + minimum: 0 + maximum: 65535 + removeOSDsIfOutAndSafeToRemove: + type: boolean + external: + type: object + properties: + enable: + type: boolean + cleanupPolicy: + type: object + properties: + allowUninstallWithVolumes: + type: boolean + confirmation: + type: string + pattern: ^$|^yes-really-destroy-data$ + sanitizeDisks: + type: object + properties: + method: + type: string + pattern: ^(complete|quick)$ + dataSource: + type: string + pattern: ^(zero|random)$ + iteration: + type: integer + format: int32 + security: + type: object + properties: + kms: + type: object + properties: + connectionDetails: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + tokenSecretName: + type: string + logCollector: + type: object + properties: + enabled: + type: boolean + periodicity: + type: string + annotations: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + labels: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + healthCheck: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + priorityClassNames: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: DataDirHostPath + type: string + description: 
Directory used on the K8s nodes + jsonPath: .spec.dataDirHostPath + - name: MonCount + type: string + description: Number of MONs + jsonPath: .spec.mon.count + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + - name: Phase + type: string + description: Phase + jsonPath: .status.phase + - name: Message + type: string + description: Message + jsonPath: .status.message + - name: Health + type: string + description: Ceph Health + jsonPath: .status.ceph.health + subresources: + status: {} +# OLM: END CEPH CRD +# OLM: BEGIN CEPH CLIENT CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephclients.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephClient + listKind: CephClientList + plural: cephclients + singular: cephclient + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + caps: + type: object + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH CLIENT CRD +# OLM: BEGIN CEPH RBD MIRROR CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephrbdmirrors.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephRBDMirror + listKind: CephRBDMirrorList + plural: cephrbdmirrors + singular: cephrbdmirror + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + count: + type: integer + minimum: 1 + maximum: 100 + peers: + type: object + properties: + secretNames: + type: array + items: + type: string + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + type: string + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + annotations: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + +# OLM: END CEPH RBD MIRROR CRD +# OLM: BEGIN CEPH FS CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephfilesystems.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystem + listKind: CephFilesystemList + plural: cephfilesystems + singular: cephfilesystem + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + metadataServer: + type: object + properties: + activeCount: + minimum: 1 + maximum: 10 + type: integer + activeStandby: + type: boolean + annotations: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + type: string + labels: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + metadataPool: + type: object + nullable: true + properties: + failureDomain: + type: string + deviceClass: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + minimum: 0 + maximum: 10 + type: integer + requireSafeReplicaSize: + type: boolean + replicasPerFailureDomain: + 
type: integer + subFailureDomain: + type: string + parameters: + type: object + x-kubernetes-preserve-unknown-fields: true + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + minimum: 0 + maximum: 10 + type: integer + codingChunks: + minimum: 0 + maximum: 10 + type: integer + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + dataPools: + type: array + nullable: true + items: + type: object + properties: + failureDomain: + type: string + deviceClass: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + minimum: 0 + maximum: 10 + type: integer + requireSafeReplicaSize: + type: boolean + replicasPerFailureDomain: + type: integer + subFailureDomain: + type: string + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + minimum: 0 + maximum: 10 + type: integer + codingChunks: + minimum: 0 + maximum: 10 + type: integer + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + parameters: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + preservePoolsOnDelete: + type: boolean + preserveFilesystemOnDelete: + type: boolean + status: + type: object + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: ActiveMDS + type: string + description: Number of desired active MDS daemons + jsonPath: .spec.metadataServer.activeCount + - name: Age + type: date + jsonPath: .metadata.creationTimestamp + subresources: + status: {} +# OLM: END CEPH FS CRD +# OLM: BEGIN CEPH NFS CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephnfses.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephNFS + listKind: CephNFSList + plural: cephnfses + singular: cephnfs + shortNames: + - nfs + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + rados: + type: object + properties: + pool: + type: string + namespace: + type: string + server: + type: object + properties: + active: + type: integer + annotations: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + type: string + logLevel: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH NFS CRD +# OLM: BEGIN CEPH OBJECT STORE CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephobjectstores.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStore + listKind: CephObjectStoreList + plural: cephobjectstores + singular: cephobjectstore + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + gateway: + type: object + properties: + type: + type: string + sslCertificateRef: + type: string + nullable: true + port: + type: integer + minimum: 0 + maximum: 65535 + securePort: + type: integer + minimum: 0 + maximum: 65535 + instances: + type: integer + externalRgwEndpoints: + type: array + nullable: true + items: + type: object + properties: + ip: + type: string + annotations: + type: object + 
nullable: true + x-kubernetes-preserve-unknown-fields: true + placement: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + resources: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + labels: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + priorityClassName: + type: string + metadataPool: + type: object + nullable: true + properties: + failureDomain: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + type: integer + requireSafeReplicaSize: + type: boolean + replicasPerFailureDomain: + type: integer + subFailureDomain: + type: string + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + type: integer + codingChunks: + type: integer + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + parameters: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + zone: + type: object + properties: + name: + type: string + dataPool: + type: object + nullable: true + properties: + failureDomain: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + type: integer + requireSafeReplicaSize: + type: boolean + replicasPerFailureDomain: + type: integer + subFailureDomain: + type: string + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + type: integer + codingChunks: + type: integer + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + parameters: + type: object + x-kubernetes-preserve-unknown-fields: true + preservePoolsOnDelete: + type: boolean + healthCheck: + type: object + nullable: true + properties: + bucket: + type: object + nullable: true + properties: + enabled: + type: boolean + interval: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + +# OLM: END CEPH OBJECT STORE CRD +# OLM: BEGIN CEPH OBJECT STORE USERS CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephobjectstoreusers.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStoreUser + listKind: CephObjectStoreUserList + plural: cephobjectstoreusers + singular: cephobjectstoreuser + shortNames: + - rcou + - objectuser + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + store: + type: string + displayName: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH OBJECT STORE USERS CRD +# OLM: BEGIN CEPH OBJECT REALM CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephobjectrealms.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectRealm + listKind: CephObjectRealmList + plural: cephobjectrealms + singular: cephobjectrealm + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + pull: + type: object + properties: + endpoint: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH OBJECT REALM CRD +# OLM: BEGIN CEPH OBJECT ZONEGROUP CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition 
+metadata: + name: cephobjectzonegroups.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectZoneGroup + listKind: CephObjectZoneGroupList + plural: cephobjectzonegroups + singular: cephobjectzonegroup + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + realm: + type: string + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH OBJECT ZONEGROUP CRD +# OLM: BEGIN CEPH OBJECT ZONE CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephobjectzones.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectZone + listKind: CephObjectZoneList + plural: cephobjectzones + singular: cephobjectzone + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + zoneGroup: + type: string + metadataPool: + type: object + nullable: true + properties: + failureDomain: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + type: integer + requireSafeReplicaSize: + type: boolean + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + type: integer + codingChunks: + type: integer + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + parameters: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + dataPool: + type: object + properties: + failureDomain: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + type: integer + requireSafeReplicaSize: + type: boolean + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + type: integer + codingChunks: + type: integer + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + parameters: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + preservePoolsOnDelete: + type: boolean + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH OBJECT ZONE CRD +# OLM: BEGIN CEPH BLOCK POOL CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cephblockpools.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPool + listKind: CephBlockPoolList + plural: cephblockpools + singular: cephblockpool + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + failureDomain: + type: string + deviceClass: + type: string + crushRoot: + type: string + replicated: + type: object + nullable: true + properties: + size: + type: integer + minimum: 0 + maximum: 9 + targetSizeRatio: + type: number + requireSafeReplicaSize: + type: boolean + replicasPerFailureDomain: + type: integer + subFailureDomain: + type: string + erasureCoded: + type: object + nullable: true + properties: + dataChunks: + type: integer + minimum: 0 + maximum: 9 + codingChunks: + type: integer + minimum: 0 + maximum: 9 + compressionMode: + type: string + enum: + - "" + - none + - passive + - aggressive + - force + enableRBDStats: + description: + EnableRBDStats is used to enable gathering of statistics + for all RBD images in the pool + type: boolean + 
parameters: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + mirroring: + type: object + nullable: true + properties: + enabled: + type: boolean + mode: + type: string + enum: + - image + - pool + snapshotSchedules: + type: array + items: + type: object + nullable: true + properties: + interval: + type: string + startTime: + type: string + statusCheck: + type: object + x-kubernetes-preserve-unknown-fields: true + annotations: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + +# OLM: END CEPH BLOCK POOL CRD +# OLM: BEGIN CEPH VOLUME POOL CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumes.rook.io +spec: + group: rook.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + singular: volume + shortNames: + - rv + scope: Namespaced + versions: + - name: v1alpha2 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + attachments: + type: array + items: + type: object + properties: + node: + type: string + podNamespace: + type: string + podName: + type: string + clusterName: + type: string + mountDir: + type: string + readOnly: + type: boolean + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END CEPH VOLUME POOL CRD +# OLM: BEGIN OBJECTBUCKET CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: objectbuckets.objectbucket.io +spec: + group: objectbucket.io + names: + kind: ObjectBucket + listKind: ObjectBucketList + plural: objectbuckets + singular: objectbucket + shortNames: + - ob + - obs + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + storageClassName: + type: string + endpoint: + type: object + nullable: true + properties: + bucketHost: + type: string + bucketPort: + type: integer + format: int32 + bucketName: + type: string + region: + type: string + subRegion: + type: string + additionalConfig: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + authentication: + type: object + nullable: true + items: + type: object + x-kubernetes-preserve-unknown-fields: true + additionalState: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + reclaimPolicy: + type: string + claimRef: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} +# OLM: END OBJECTBUCKET CRD +# OLM: BEGIN OBJECTBUCKETCLAIM CRD +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: objectbucketclaims.objectbucket.io +spec: + group: objectbucket.io + names: + kind: ObjectBucketClaim + listKind: ObjectBucketClaimList + plural: objectbucketclaims + singular: objectbucketclaim + shortNames: + - obc + - obcs + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + storageClassName: + type: string + bucketName: + type: string + generateBucketName: + type: string + additionalConfig: + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + objectBucketName: + type: string + status: + type: object + 
              x-kubernetes-preserve-unknown-fields: true
+      subresources:
+        status: {}
+# OLM: END OBJECTBUCKETCLAIM CRD
diff --git a/.vuepress/public/statics/learning/ceph/rook-1.5.4/operator.yaml b/.vuepress/public/statics/learning/ceph/rook-1.5.4/operator.yaml
new file mode 100644
index 0000000..7ce2c67
--- /dev/null
+++ b/.vuepress/public/statics/learning/ceph/rook-1.5.4/operator.yaml
@@ -0,0 +1,470 @@
+#################################################################################################################
+# The deployment for the rook operator
+# Contains the common settings for most Kubernetes deployments.
+# For example, to create the rook-ceph cluster:
+# kubectl create -f crds.yaml -f common.yaml -f operator.yaml
+# kubectl create -f cluster.yaml
+#
+# Also see other operator sample files for variations of operator.yaml:
+# - operator-openshift.yaml: Common settings for running in OpenShift
+###############################################################################################################
+
+# Rook Ceph Operator Config ConfigMap
+# Use this ConfigMap to override Rook-Ceph Operator configurations.
+# NOTE! Precedence will be given to this config if the same Env Var config also exists in the
+# Operator Deployment.
+# To move a configuration from the Operator Deployment to this ConfigMap, add it
+# here. It is recommended to then remove it from the Deployment to eliminate any future confusion.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: rook-ceph-operator-config
+  # should be in the namespace of the operator
+  namespace: rook-ceph # namespace:operator
+data:
+  # Enable the CSI driver.
+  # To run the non-default version of the CSI driver, see the override-able image properties in operator.yaml
+  ROOK_CSI_ENABLE_CEPHFS: "true"
+  # Enable the default version of the CSI RBD driver. To start another version of the CSI driver, see image properties below.
+  ROOK_CSI_ENABLE_RBD: "true"
+  ROOK_CSI_ENABLE_GRPC_METRICS: "true"
+
+  # Set the logging level for the CSI containers.
+  # Supported values are 0 to 5: 0 for generally useful logs, 5 for trace-level verbosity.
+  # CSI_LOG_LEVEL: "0"
+
+  # The OMAP generator generates the omap mapping between the PV name and the RBD image.
+  # CSI_ENABLE_OMAP_GENERATOR needs to be enabled when the RBD mirroring feature is used.
+  # By default the OMAP generator sidecar is deployed with the CSI provisioner pod; to disable
+  # it, set this to false.
+  # CSI_ENABLE_OMAP_GENERATOR: "false"
+
+  # Enable the cephfs kernel driver instead of ceph-fuse.
+  # If you disable the kernel client, your application may be disrupted during upgrade.
+  # See the upgrade guide: https://rook.io/docs/rook/master/ceph-upgrade.html
+  # NOTE! cephfs quota is not supported in kernel versions < 4.17
+  CSI_FORCE_CEPHFS_KERNEL_CLIENT: "true"
+
+  # (Optional) Allow starting an unsupported ceph-csi image
+  ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: "false"
+  # The default version of CSI supported by Rook will be started. To change the version
+  # of the CSI driver to something other than what is officially supported, change
+  # these images to the desired release of the CSI driver.
+  ROOK_CSI_CEPH_IMAGE: "swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/cephcsi:v3.2.0"
+  ROOK_CSI_REGISTRAR_IMAGE: "swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/csi-node-driver-registrar:v2.1.0"
+  ROOK_CSI_RESIZER_IMAGE: "swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/csi-resizer:v1.1.0"
+  ROOK_CSI_PROVISIONER_IMAGE: "swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/csi-provisioner:v2.1.0"
+  ROOK_CSI_SNAPSHOTTER_IMAGE: "swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/csi-snapshotter:v3.0.3"
+  ROOK_CSI_ATTACHER_IMAGE: "swr.cn-east-2.myhuaweicloud.com/kuboard-dependency/csi-attacher:v3.1.0"
+
+  # (Optional) set user created priorityclassName for csi plugin pods.
+  # CSI_PLUGIN_PRIORITY_CLASSNAME: "system-node-critical"
+
+  # (Optional) set user created priorityclassName for csi provisioner pods.
+  # CSI_PROVISIONER_PRIORITY_CLASSNAME: "system-cluster-critical"
+
+  # CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+  # Default value is RollingUpdate.
+  # CSI_CEPHFS_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+  # CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate.
+  # Default value is RollingUpdate.
+  # CSI_RBD_PLUGIN_UPDATE_STRATEGY: "OnDelete"
+
+  # kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path.
+  # ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubelet"
+
+  # Labels to add to the CSI CephFS Deployments and DaemonSets Pods.
+  # ROOK_CSI_CEPHFS_POD_LABELS: "key1=value1,key2=value2"
+  # Labels to add to the CSI RBD Deployments and DaemonSets Pods.
+  # ROOK_CSI_RBD_POD_LABELS: "key1=value1,key2=value2"
+
+  # (Optional) Ceph Provisioner NodeAffinity.
+  # CSI_PROVISIONER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  # (Optional) CEPH CSI provisioner tolerations list. Put here list of taints you want to tolerate in YAML format.
+  # CSI provisioner would be best to start on the same nodes as other ceph daemons.
+  # CSI_PROVISIONER_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+  # (Optional) Ceph CSI plugin NodeAffinity.
+  # CSI_PLUGIN_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  # (Optional) CEPH CSI plugin tolerations list. Put here list of taints you want to tolerate in YAML format.
+  # CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+  # CSI_PLUGIN_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+
+  # (Optional) CEPH CSI RBD provisioner resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the provisioner pod.
+  # CSI_RBD_PROVISIONER_RESOURCE: |
+  #  - name : csi-provisioner
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-resizer
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-attacher
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-snapshotter
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-rbdplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) CEPH CSI RBD plugin resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the plugin pod.
+  # CSI_RBD_PLUGIN_RESOURCE: |
+  #  - name : driver-registrar
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  #  - name : csi-rbdplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) CEPH CSI CephFS provisioner resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the provisioner pod.
+  # CSI_CEPHFS_PROVISIONER_RESOURCE: |
+  #  - name : csi-provisioner
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-resizer
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-attacher
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 100m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 200m
+  #  - name : csi-cephfsplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  # (Optional) CEPH CSI CephFS plugin resource requirements. Put here the list of resource
+  # requests and limits you want to apply to the plugin pod.
+  # CSI_CEPHFS_PLUGIN_RESOURCE: |
+  #  - name : driver-registrar
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+  #  - name : csi-cephfsplugin
+  #    resource:
+  #      requests:
+  #        memory: 512Mi
+  #        cpu: 250m
+  #      limits:
+  #        memory: 1Gi
+  #        cpu: 500m
+  #  - name : liveness-prometheus
+  #    resource:
+  #      requests:
+  #        memory: 128Mi
+  #        cpu: 50m
+  #      limits:
+  #        memory: 256Mi
+  #        cpu: 100m
+
+  # Configure CSI CephFS grpc and liveness metrics port
+  # CSI_CEPHFS_GRPC_METRICS_PORT: "9091"
+  # CSI_CEPHFS_LIVENESS_METRICS_PORT: "9081"
+  # Configure CSI RBD grpc and liveness metrics port
+  # CSI_RBD_GRPC_METRICS_PORT: "9090"
+  # CSI_RBD_LIVENESS_METRICS_PORT: "9080"
+
+  # Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
+  ROOK_OBC_WATCH_OPERATOR_NAMESPACE: "true"
+
+  # (Optional) Admission controller NodeAffinity.
+  # ADMISSION_CONTROLLER_NODE_AFFINITY: "role=storage-node; storage=rook, ceph"
+  # (Optional) Admission controller tolerations list. Put here list of taints you want to tolerate in YAML format.
+  # Admission controller would be best to start on the same nodes as other ceph daemons.
+  # ADMISSION_CONTROLLER_TOLERATIONS: |
+  #   - effect: NoSchedule
+  #     key: node-role.kubernetes.io/controlplane
+  #     operator: Exists
+  #   - effect: NoExecute
+  #     key: node-role.kubernetes.io/etcd
+  #     operator: Exists
+---
+# OLM: BEGIN OPERATOR DEPLOYMENT
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: rook-ceph # namespace:operator
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+        - name: rook-ceph-operator
+          image: rook/ceph:master
+          args: ["ceph", "operator"]
+          volumeMounts:
+            - mountPath: /var/lib/rook
+              name: rook-config
+            - mountPath: /etc/ceph
+              name: default-config-dir
+          env:
+            # If the operator should only watch for cluster CRDs in the same namespace, set this to "true".
+            # If this is not set to true, the operator will watch for cluster CRDs in all namespaces.
+            - name: ROOK_CURRENT_NAMESPACE_ONLY
+              value: "false"
+            # To disable RBAC, uncomment the following:
+            # - name: RBAC_ENABLED
+            #   value: "false"
+            # Rook Agent toleration. Will tolerate all taints with all keys.
+            # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+            # - name: AGENT_TOLERATION
+            #   value: "NoSchedule"
+            # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate
+            # - name: AGENT_TOLERATION_KEY
+            #   value: ""
+            # (Optional) Rook Agent tolerations list. Put here list of taints you want to tolerate in YAML format.
+            # - name: AGENT_TOLERATIONS
+            #   value: |
+            #     - effect: NoSchedule
+            #       key: node-role.kubernetes.io/controlplane
+            #       operator: Exists
+            #     - effect: NoExecute
+            #       key: node-role.kubernetes.io/etcd
+            #       operator: Exists
+            # (Optional) Rook Agent priority class name to set on the pod(s)
+            # - name: AGENT_PRIORITY_CLASS_NAME
+            #   value: ""
+            # (Optional) Rook Agent NodeAffinity.
+            # - name: AGENT_NODE_AFFINITY
+            #   value: "role=storage-node; storage=rook,ceph"
+            # (Optional) Rook Agent mount security mode. Can be `Any` or `Restricted`.
+            # `Any` uses Ceph admin credentials by default/fallback.
+            # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from,
+            # and set `mountUser` to the Ceph user and `mountSecret` to the name of the Kubernetes secret
+            # in that namespace.
+            # - name: AGENT_MOUNT_SECURITY_MODE
+            #   value: "Any"
+            # Set the path where the Rook agent can find the flex volumes
+            # - name: FLEXVOLUME_DIR_PATH
+            #   value: ""
+            # Set the path where kernel modules can be found
+            # - name: LIB_MODULES_DIR_PATH
+            #   value: ""
+            # Mount any extra directories into the agent container
+            # - name: AGENT_MOUNTS
+            #   value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2"
+            # Rook Discover toleration. Will tolerate all taints with all keys.
+            # Choose between NoSchedule, PreferNoSchedule and NoExecute:
+            # - name: DISCOVER_TOLERATION
+            #   value: "NoSchedule"
+            # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate
+            # - name: DISCOVER_TOLERATION_KEY
+            #   value: ""
+            # (Optional) Rook Discover tolerations list. Put here list of taints you want to tolerate in YAML format.
+            # - name: DISCOVER_TOLERATIONS
+            #   value: |
+            #     - effect: NoSchedule
+            #       key: node-role.kubernetes.io/controlplane
+            #       operator: Exists
+            #     - effect: NoExecute
+            #       key: node-role.kubernetes.io/etcd
+            #       operator: Exists
+            # (Optional) Rook Discover priority class name to set on the pod(s)
+            # - name: DISCOVER_PRIORITY_CLASS_NAME
+            #   value: ""
+            # (Optional) Discover Agent NodeAffinity.
+            # - name: DISCOVER_AGENT_NODE_AFFINITY
+            #   value: "role=storage-node; storage=rook, ceph"
+            # (Optional) Discover Agent Pod Labels.
+            # - name: DISCOVER_AGENT_POD_LABELS
+            #   value: "key1=value1,key2=value2"
+            # Allow rook to create multiple file systems. Note: This is considered
+            # an experimental feature in Ceph as described at
+            # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster
+            # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027
+            - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS
+              value: "false"
+
+            # The logging level for the operator: INFO | DEBUG
+            - name: ROOK_LOG_LEVEL
+              value: "INFO"
+
+            # The duration between discovering devices in the rook-discover daemonset.
+            - name: ROOK_DISCOVER_DEVICES_INTERVAL
+              value: "60m"
+
+            # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods.
+            # Set this to true if SELinux is enabled (e.g. OpenShift) to workaround the anyuid issues.
+            # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641
+            - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
+              value: "false"
+
+            # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+            # Disable it here if you have similar issues.
+            # For more details see https://github.com/rook/rook/issues/2417
+            - name: ROOK_ENABLE_SELINUX_RELABELING
+              value: "true"
+
+            # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues.
+            # For more details see https://github.com/rook/rook/issues/2254
+            - name: ROOK_ENABLE_FSGROUP
+              value: "true"
+
+            # Disable automatic orchestration when new devices are discovered
+            - name: ROOK_DISABLE_DEVICE_HOTPLUG
+              value: "false"
+
+            # Provide a customised regex for blacklisting devices, as comma-separated values.
+            # For example, the regex for RBD-based volumes looks like "(?i)rbd[0-9]+".
+            # If more than one regex is needed, separate them with commas.
+            # The default regex is "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+".
+            # To blacklist an additional disk, append its regex after a comma.
+            # If the value is empty, the default regex will be used.
+            - name: DISCOVER_DAEMON_UDEV_BLACKLIST
+              value: "(?i)dm-[0-9]+,(?i)rbd[0-9]+,(?i)nbd[0-9]+"
+
+            # Whether to enable the flex driver. By default it is enabled and is fully supported, but will be deprecated in some future release
+            # in favor of the CSI driver.
+            - name: ROOK_ENABLE_FLEX_DRIVER
+              value: "false"
+
+            # Whether to start the discovery daemon to watch for raw storage devices on nodes in the cluster.
+            # This daemon does not need to run if you are only going to create your OSDs based on StorageClassDeviceSets with PVCs.
+            - name: ROOK_ENABLE_DISCOVERY_DAEMON
+              value: "false"
+
+            # Time to wait until the node controller will move Rook pods to other
+            # nodes after detecting an unreachable node.
+            # Pods affected by this setting are:
+            # mgr, rbd, mds, rgw, nfs, PVC based mons and osds, and ceph toolbox
+            # The value used in this variable replaces the default value of 300 secs
+            # added automatically by k8s as Toleration for
+            # node.kubernetes.io/unreachable
+            # The total amount of time to reschedule Rook pods in healthy nodes
+            # before detecting a node.kubernetes.io/unreachable node condition will be the sum of:
+            #  --> node-monitor-grace-period: 40 seconds (k8s kube-controller-manager flag)
+            #  --> ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS: 5 seconds
+            - name: ROOK_UNREACHABLE_NODE_TOLERATION_SECONDS
+              value: "5"
+
+            # The name of the node to pass with the downward API
+            - name: NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            # The pod name to pass with the downward API
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            # The pod namespace to pass with the downward API
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+
+            # Uncomment to run the lib bucket provisioner in multithreaded mode
+            #- name: LIB_BUCKET_PROVISIONER_THREADS
+            #  value: "5"
+
+      # Uncomment to run the rook operator on the host network
+      #hostNetwork: true
+      volumes:
+        - name: rook-config
+          emptyDir: {}
+        - name: default-config-dir
+          emptyDir: {}
+# OLM: END OPERATOR DEPLOYMENT
diff --git a/learning/k8s-intermediate/persistent/ceph/rook-config-1.4.7.md b/learning/k8s-intermediate/persistent/ceph/rook-config-1.4.7.md
new file mode 100644
index 0000000..729383b
--- /dev/null
+++ b/learning/k8s-intermediate/persistent/ceph/rook-config-1.4.7.md
@@ -0,0 +1,167 @@
+---
+# vssueId: 91
+layout: LearningLayout
+description: Kubernetes tutorial - how to configure a StorageClass in Kuboard that connects to CephFS on Rook
+meta:
+  - name: keywords
+    content: Kubernetes tutorial,K8S tutorial,StorageClass,CephFS
+---
+
+# Using CephFS as a Storage Class - Rook
+
+This article describes how to use Kuboard / Kubernetes with CephFS installed by Rook as a storage class, and covers the following scenarios:
+
+* Install Rook - Ceph
+* Create a StorageClass for [dynamic volume provisioning](../pv.html#提供-provisioning)
+* Bind a PVC to a Pod
+* PVC operations
+  * Expand
+  * Clone
+  * Snapshot
+  * Restore from snapshot
+
+## Prerequisites
+
+* You have installed a Kubernetes cluster at version v1.17.0 or above; for instructions see [Install a Kubernetes cluster](/install/install-k8s.html);
+
+  * The Kubernetes cluster has at least 3 worker nodes, and each worker node has one **unformatted** raw disk in addition to the system disk (when the worker nodes are virtual machines, the raw disk can be a virtual disk), used to create 3 Ceph OSDs;
+
+  * Alternatively, a single worker node with one **unformatted** raw disk attached also works;
+
+  * Run the `lsblk -f` command on a node to check whether a disk has already been formatted; the output looks like this:
+
+    ``` sh
+    lsblk -f
+    NAME                  FSTYPE      LABEL UUID                                   MOUNTPOINT
+    vda
+    └─vda1                LVM2_member       eSO50t-GkUV-YKTH-WsGq-hNJY-eKNf-3i07IB
+      ├─ubuntu--vg-root   ext4              c2366f76-6e21-4f10-a8f3-6776212e2fe4   /
+      └─ubuntu--vg-swap_1 swap              9492a3dc-ad75-47cd-9596-678e8cf17ff9   [SWAP]
+    vdb
+    ```
+
+    If the `FSTYPE` field is not empty, the disk has already been formatted. In the example above, the disk `vdb` can be used as a Ceph OSD, while the disk `vda` and its partitions cannot.
+
+* You have installed Kuboard at version v2.0.5 or above; for instructions see [Install Kuboard](/install/install-dashboard.html);
+
+## Install Rook - Ceph
+
+This section follows the [Rook Ceph Storage Quickstart](https://rook.io/docs/rook/v1.4/ceph-quickstart.html) to quickly install a Ceph cluster on the Kubernetes cluster.
+
+* Run the following commands to install the Rook - Ceph cluster
+
+  ``` sh
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.7/common.yaml
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.7/operator.yaml
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.7/cluster.yaml
+  ```
+
+  Before running the commands above, you can use [docker-image-loader](https://github.com/eip-work/docker-image-loader) to preload the required images onto all the node machines, which is faster than having each node pull the images from the public network separately; if your cluster is on an isolated network, you can also use docker-image-loader to load the images onto the cluster nodes. The images to load are:
+
+  ```
+  quay.io/cephcsi/cephcsi:v3.1.1
+  quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
+  quay.io/k8scsi/csi-attacher:v2.1.0
+  quay.io/k8scsi/csi-snapshotter:v2.1.1
+  quay.io/k8scsi/csi-resizer:v0.4.0
+  quay.io/k8scsi/csi-provisioner:v1.6.0
+  rook/ceph:v1.4.5
+  ceph/ceph:v15.2.4
+  ```
+
+* Run the `watch kubectl get pods -n rook-ceph` command until all the Pods are in the `Running` or `Completed` state, as shown below:
+
+  ```sh {1}
+  watch kubectl get pods -n rook-ceph
+  NAME                                                    READY   STATUS      RESTARTS   AGE
+  csi-cephfsplugin-5hfb7                                  3/3     Running     0          39m
+  csi-cephfsplugin-5xdz4                                  3/3     Running     0          39m
+  csi-cephfsplugin-provisioner-5c65b94c8d-9txpv           6/6     Running     0          39m
+  csi-cephfsplugin-provisioner-5c65b94c8d-rt4fp           6/6     Running     0          39m
+  csi-cephfsplugin-pstw9                                  3/3     Running     0          39m
+  csi-rbdplugin-ft4dk                                     3/3     Running     0          39m
+  csi-rbdplugin-fxj9n                                     3/3     Running     0          39m
+  csi-rbdplugin-provisioner-569c75558-h2jv7               6/6     Running     0          39m
+  csi-rbdplugin-provisioner-569c75558-q4fkt               6/6     Running     0          39m
+  csi-rbdplugin-rw7jn                                     3/3     Running     0          39m
+  rook-ceph-crashcollector-k8s-node-01-6fbb5cb4b8-nwvhj   1/1     Running     0          35m
+  rook-ceph-crashcollector-k8s-node-02-5c67f6f9f5-qm47c   1/1     Running     0          37m
+  rook-ceph-crashcollector-k8s-node-03-7f6cfc655b-b8cv2   1/1     Running     0          40m
+  rook-ceph-mgr-a-5844874f9c-rqggg                        1/1     Running     0          35m
+  rook-ceph-mon-a-67b6865644-4bkm9                        1/1     Running     0          40m
+  rook-ceph-mon-b-59f855c47d-tg44q                        1/1     Running     0          40m
+  rook-ceph-mon-d-7576586cc9-nn94w                        1/1     Running     0          37m
+  rook-ceph-operator-6db6f67cd4-smhz8                     1/1     Running     0          41m
+  rook-ceph-osd-prepare-k8s-node-01-x2p74                 0/1     Completed   0          35m
+  rook-ceph-osd-prepare-k8s-node-02-7j4s7                 0/1     Completed   0          35m
+  rook-ceph-osd-prepare-k8s-node-03-w2mgf                 0/1     Completed   0          35m
+  rook-discover-9hzds                                     1/1     Running     0          41m
+  rook-discover-gz7xv                                     1/1     Running     0          41m
+  rook-discover-hljrn                                     1/1     Running     0          41m
+  ```
+
+* Once the Ceph cluster is deployed, Ceph can provide block storage, file storage and object storage. Here, we create the file storage service with the following commands:
+
+  > Reference: [Rook Ceph-FileSystem](https://rook.io/docs/rook/v1.4/ceph-filesystem.html)
+
+  ``` sh
+  cat > myfs.yaml <
+  ```
+
+  (See the sketch after the screenshot below for the typical content of `myfs.yaml`.)
+
+> This step (initializing the snapshot CRDs) only needs to be performed the first time you create a CephFS StorageClass
+
+![Kubernetes CephFS StorageClass](./rook-config.assets/image-20201006185022912.png)
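+The `myfs.yaml` heredoc above is truncated here. As a sketch of what such a file typically contains — modeled on the [Rook Ceph-FileSystem](https://rook.io/docs/rook/v1.4/ceph-filesystem.html) example it references, so the replica counts and MDS settings below are illustrative assumptions rather than this guide's exact file:
+
+``` yaml
+apiVersion: ceph.rook.io/v1
+kind: CephFilesystem
+metadata:
+  name: myfs               # pool names are derived from this: myfs-metadata, myfs-data0
+  namespace: rook-ceph     # must match the namespace of the CephCluster
+spec:
+  metadataPool:
+    replicated:
+      size: 3              # 3 copies of the filesystem metadata (assumes 3 OSDs)
+  dataPools:
+    - replicated:
+        size: 3            # 3 copies of the file data
+  preserveFilesystemOnDelete: true
+  metadataServer:
+    activeCount: 1         # number of active MDS daemons
+    activeStandby: true    # keep a standby MDS ready to take over
+```
+
+After applying such a file with `kubectl apply -f myfs.yaml`, the operator creates the pools and starts the MDS Pods (`rook-ceph-mds-myfs-*`).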
+## Create a CephFS Rook StorageClass
+
+After completing the snapshot CRD initialization above, you can create the CephFS Rook StorageClass; the steps are shown in the following screenshot:
+
+![Kubernetes CephFS StorageClass](./rook-config.assets/image-20201006185624187.png)
+
+## Create a PVC
+
+For the steps to create a PVC, mount it into a Pod, and write content to the volume, see [Create a PVC](./k8s-config.html#创建-pvc)
+
+## PVC operations
+
+Once a PVC is created, the following operations can be performed from the Kuboard interface:
+
+* Expand
+* Clone
+* Snapshot
+* Restore from snapshot
+
+For the detailed steps, see [Operations on a PVC](./k8s-config.html#对-pvc-执行操作)
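+
+## Appendix: equivalent StorageClass YAML
+
+If you prefer `kubectl` to the Kuboard screens above, the StorageClass created in the screenshot corresponds roughly to the manifest below. This is a sketch based on the standard Rook CSI CephFS example, not the exact object Kuboard creates: the secret names assume a default `rook-ceph` install, and `myfs` / `myfs-data0` assume the filesystem created earlier in this guide.
+
+``` yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: rook-cephfs
+# The provisioner name is "<operator-namespace>.cephfs.csi.ceph.com"
+provisioner: rook-ceph.cephfs.csi.ceph.com
+parameters:
+  # Namespace of the Ceph cluster
+  clusterID: rook-ceph
+  # Name of the CephFilesystem and of its first data pool
+  fsName: myfs
+  pool: myfs-data0
+  # Secrets created by the operator for the CSI driver
+  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
+  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
+  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+reclaimPolicy: Delete
+# Required for the PVC expansion scenario above
+allowVolumeExpansion: true
+---
+# A PVC that uses the StorageClass, for reference
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: cephfs-pvc
+spec:
+  accessModes:
+    - ReadWriteMany   # CephFS volumes can be mounted read-write by many Pods
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: rook-cephfs
+```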
diff --git a/learning/k8s-intermediate/persistent/ceph/rook-config.md b/learning/k8s-intermediate/persistent/ceph/rook-config.md
index 729383b..323531a 100644
--- a/learning/k8s-intermediate/persistent/ceph/rook-config.md
+++ b/learning/k8s-intermediate/persistent/ceph/rook-config.md
@@ -48,27 +48,15 @@ meta:
 
 ## Install Rook - Ceph
 
-This section follows the [Rook Ceph Storage Quickstart](https://rook.io/docs/rook/v1.4/ceph-quickstart.html) to quickly install a Ceph cluster on the Kubernetes cluster.
+This section follows the [Rook Ceph Storage Quickstart](https://rook.io/docs/rook/v1.5/ceph-quickstart.html) to quickly install a Ceph cluster on the Kubernetes cluster.
 
 * Run the following commands to install the Rook - Ceph cluster
 
   ``` sh
-  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.7/common.yaml
-  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.7/operator.yaml
-  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.4.7/cluster.yaml
-  ```
-
-  Before running the commands above, you can use [docker-image-loader](https://github.com/eip-work/docker-image-loader) to preload the required images onto all the node machines, which is faster than having each node pull the images from the public network separately; if your cluster is on an isolated network, you can also use docker-image-loader to load the images onto the cluster nodes. The images to load are:
-
-  ```
-  quay.io/cephcsi/cephcsi:v3.1.1
-  quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
-  quay.io/k8scsi/csi-attacher:v2.1.0
-  quay.io/k8scsi/csi-snapshotter:v2.1.1
-  quay.io/k8scsi/csi-resizer:v0.4.0
-  quay.io/k8scsi/csi-provisioner:v1.6.0
-  rook/ceph:v1.4.5
-  ceph/ceph:v15.2.4
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.5.4/crds.yaml
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.5.4/common.yaml
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.5.4/operator.yaml
+  kubectl create -f https://kuboard.cn/statics/learning/ceph/rook-1.5.4/cluster.yaml
   ```
 
 * Run the `watch kubectl get pods -n rook-ceph` command until all the Pods are in the `Running` or `Completed` state, as shown below:
@@ -104,7 +92,7 @@ meta:
 * Once the Ceph cluster is deployed, Ceph can provide block storage, file storage and object storage. Here, we create the file storage service with the following commands:
 
-  > Reference: [Rook Ceph-FileSystem](https://rook.io/docs/rook/v1.4/ceph-filesystem.html)
+  > Reference: [Rook Ceph-FileSystem](https://rook.io/docs/rook/v1.5/ceph-filesystem.html)
 
   ``` sh
   cat > myfs.yaml <