From 3c9e4bd486f4c0370f8fa0a9b49388d2977bbc42 Mon Sep 17 00:00:00 2001
From: lorellalou
Date: Fri, 11 Oct 2024 17:26:10 +0200
Subject: [PATCH] docs: add an example of how to use OpenEBS LVM

Signed-off-by: Laurent Rolaz
---
 deploy/openebs/lvm/README                      | 123 ++++++++++++++++++
 .../clickhouse-installation-with-openebs.yaml  |  41 ++++++
 deploy/openebs/lvm/delete-openebs-lvm.sh       |  13 ++
 deploy/openebs/lvm/install-openebs-lvm.sh      | 105 +++++++++++++++
 deploy/openebs/lvm/openebs-lvm-pvc-test.yaml   |  12 ++
 .../openebs/lvm/openebs-lvm-storageclass.yaml  |  12 ++
 6 files changed, 306 insertions(+)
 create mode 100644 deploy/openebs/lvm/README
 create mode 100644 deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml
 create mode 100755 deploy/openebs/lvm/delete-openebs-lvm.sh
 create mode 100755 deploy/openebs/lvm/install-openebs-lvm.sh
 create mode 100644 deploy/openebs/lvm/openebs-lvm-pvc-test.yaml
 create mode 100644 deploy/openebs/lvm/openebs-lvm-storageclass.yaml

diff --git a/deploy/openebs/lvm/README b/deploy/openebs/lvm/README
new file mode 100644
index 000000000..890dadd2a
--- /dev/null
+++ b/deploy/openebs/lvm/README
@@ -0,0 +1,123 @@
+## OpenEBS - LocalPV-LVM CSI Driver with ClickHouse
+The LocalPV-LVM CSI driver became GA in August 2021 (with release v0.8.0). It is now a very mature product and a core component of the OpenEBS storage platform.
+Due to the wide adoption of LocalPV-LVM (50,000+ users), this data engine has been unified and integrated into the core OpenEBS storage platform instead of being maintained as an external data engine within our project.
+
+## Setup
+
+Find the disk you want to use for LocalPV-LVM. Note: for testing you can use a loopback device.
+
+```
+truncate -s 1024G /tmp/disk.img
+sudo losetup -f /tmp/disk.img --show
+```
+
+> [!NOTE]
+> - This is the old manual configuration process.
+> - LocalPV-LVM will now dynamically provision the VG for you.
+> - The PV, VG and LV names will be dynamically provisioned by OpenEBS LocalPV-LVM as K8s-unique entities (for safety, you cannot provide your own PV, VG or LV names).
+
+Create the volume group on all nodes; it will be used by the LVM2 driver for provisioning the volumes:
+
+```
+sudo pvcreate /dev/loop0
+sudo vgcreate vg-test /dev/loop0 ## here vg-test is the name of the volume group to be created
+```
+
+Display the volume group:
+
+```
+vgdisplay
+```
+
+## Installation
+
+Install the latest release of the OpenEBS LocalPV-LVM driver by running the following commands. Note: all nodes must be running the same versions of LocalPV-LVM, LVM2, device-mapper and dm-snapshot.
+
+Create a variable containing the K8s namespace:
+
+```bash
+OPENEBS_NAMESPACE=openebs
+```
+
+Install the OpenEBS components without the replicated engine (Mayastor) and without ZFS:
+
+```bash
+helm repo add openebs https://openebs.github.io/openebs
+helm repo update
+helm install openebs --namespace ${OPENEBS_NAMESPACE} openebs/openebs --set engines.replicated.mayastor.enabled=false --set engines.local.zfs.enabled=false --create-namespace --version 4.1.1
+```
+
+List the OpenEBS chart and check that it is deployed (the chart version and date will reflect your installation):
+
+```bash
+helm list -n ${OPENEBS_NAMESPACE}
+NAME     NAMESPACE  REVISION  UPDATED                                STATUS    CHART          APP VERSION
+openebs  openebs    1         2024-06-01 12:28:49.358189 +0200 CEST  deployed  openebs-4.0.1  4.0.1
+```
+
+You can uninstall the OpenEBS components if there are any issues or to test a different version:
+
+```bash
+helm uninstall openebs --namespace ${OPENEBS_NAMESPACE}
+```
+
+Create a new StorageClass using the test volume group (for testing only!):
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+allowVolumeExpansion: true
+metadata:
+  name: openebs-lvm-test
+parameters:
+  fsType: xfs
+  storage: lvm
+  vgpattern: vg-test
+provisioner: local.csi.openebs.io
+reclaimPolicy: Retain
+volumeBindingMode: Immediate
+```
+
+Deploy a test ClickHouse installation with a ``volumeClaimTemplates`` entry that uses this StorageClass (create the ``ch-test`` namespace first if it does not exist):
+
+```bash
+kubectl apply -f clickhouse-installation-with-openebs.yaml -n ch-test
+```
+
+Connect to the ClickHouse pod and check the mounted volume name:
+
+```bash
+# df -k
+Filesystem                                                          1K-blocks     Used Available Use% Mounted on
+overlay                                                              73334784 10300660  63034124  15% /
+tmpfs                                                                   65536        0     65536   0% /dev
+/dev/mapper/rhel_vg-root                                             73334784 10300660  63034124  15% /etc/hosts
+shm                                                                     65536        0     65536   0% /dev/shm
+/dev/mapper/vg--test-pvc--d2c6219b--755d--4540--8382--a8959c5f1eb5     983040    41684    941356   5% /var/lib/clickhouse
+tmpfs                                                                15804764       12  15804752   1% /run/secrets/kubernetes.io/serviceaccount
+tmpfs                                                                 7902380        0   7902380   0% /proc/asound
+tmpfs                                                                 7902380        0   7902380   0% /proc/acpi
+tmpfs                                                                 7902380        0   7902380   0% /proc/scsi
+tmpfs                                                                 7902380        0   7902380   0% /sys/firmware
+tmpfs                                                                 7902380        0   7902380   0% /sys/devices/virtual/powercap
+```
+
+You can find the corresponding logical volume on the host with the following LVM command:
+
+```bash
+# lvdisplay
+
+  --- Logical volume ---
+  LV Path                /dev/vg-test/pvc-d2c6219b-755d-4540-8382-a8959c5f1eb5
+  LV Name                pvc-d2c6219b-755d-4540-8382-a8959c5f1eb5
+  VG Name                vg-test
+  LV UUID                KqDQ6f-70gM-fbyN-gePw-iROE-XL5E-6zTn5d
+  LV Write Access        read/write
+  LV Creation host, time openebs-lvm-localpv-node-zfg4x, 2024-10-11 08:52:06 +0200
+  LV Status              available
+  # open                 1
+  LV Size                1.00 GiB
+  Current LE             256
+  Segments               1
+  Allocation             inherit
+  Read ahead sectors     auto
+  - currently set to     8192
+  Block device           253:4
+```
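+
+## Testing the StorageClass directly
+
+This directory also ships ``openebs-lvm-pvc-test.yaml``, which creates a 1Gi PVC named ``pvc-test`` in the ``openebs`` namespace against the ``openebs-lvm-test`` StorageClass. Because the StorageClass uses ``volumeBindingMode: Immediate``, the PVC should reach ``Bound`` right away if a volume group matching ``vgpattern`` exists on a node. A minimal smoke test could look like this:
+
+```bash
+kubectl apply -f openebs-lvm-pvc-test.yaml
+
+# The PVC should become Bound and a matching LVMVolume custom resource should appear
+kubectl get pvc pvc-test -n openebs
+kubectl get lvmvolume -n openebs
+
+# Remove the test claim when done
+kubectl delete -f openebs-lvm-pvc-test.yaml
+```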
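+
+## Cleanup of the test volume group
+
+When you are finished testing, you can remove the test volume group and the loopback device created during setup. This assumes the loop device is ``/dev/loop0`` as in the example above and that all PVCs using the volume group have already been deleted:
+
+```bash
+sudo vgremove vg-test
+sudo pvremove /dev/loop0
+sudo losetup -d /dev/loop0
+rm /tmp/disk.img
+```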
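+
+## Scripted install and cleanup
+
+The manual steps above are also automated by two helper scripts in this directory: ``install-openebs-lvm.sh`` installs the OpenEBS chart, the test StorageClass and the sample ClickHouse installation, and ``delete-openebs-lvm.sh`` removes them again. A typical invocation (the values shown are the script defaults; set ``NO_WAIT=1`` to skip the confirmation pause) might look like this:
+
+```bash
+OPENEBS_NAMESPACE=openebs CLICKHOUSE_NAMESPACE=ch-test ./install-openebs-lvm.sh
+
+OPENEBS_NAMESPACE=openebs CLICKHOUSE_NAMESPACE=ch-test ./delete-openebs-lvm.sh
+```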
diff --git a/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml b/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml
new file mode 100644
index 000000000..d477fa686
--- /dev/null
+++ b/deploy/openebs/lvm/clickhouse-installation-with-openebs.yaml
@@ -0,0 +1,41 @@
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+  name: clickhouse-openebs
+spec:
+  configuration:
+    clusters:
+      - layout:
+          replicasCount: 1
+          shardsCount: 1
+        name: simple
+  defaults:
+    templates:
+      dataVolumeClaimTemplate: openebs
+      podTemplate: clickhouse:24.3
+  templates:
+    podTemplates:
+      - metadata:
+          name: clickhouse:24.3
+        spec:
+          containers:
+            - image: clickhouse/clickhouse-server:24.3
+              name: clickhouse
+              ports:
+                - containerPort: 8123
+                  name: http
+                - containerPort: 9000
+                  name: client
+                - containerPort: 9009
+                  name: interserver
+                - containerPort: 9363
+                  name: metrics
+    volumeClaimTemplates:
+      - name: openebs
+        spec:
+          accessModes:
+            - ReadWriteOnce
+          resources:
+            requests:
+              storage: 1Gi
+          storageClassName: openebs-lvm-test
\ No newline at end of file
diff --git a/deploy/openebs/lvm/delete-openebs-lvm.sh b/deploy/openebs/lvm/delete-openebs-lvm.sh
new file mode 100755
index 000000000..fb8262156
--- /dev/null
+++ b/deploy/openebs/lvm/delete-openebs-lvm.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+OPENEBS_NAMESPACE="${OPENEBS_NAMESPACE:-openebs}"
+CLICKHOUSE_NAMESPACE="${CLICKHOUSE_NAMESPACE:-ch-test}"
+
+echo "Delete test ClickHouse installation"
+kubectl delete --namespace="${CLICKHOUSE_NAMESPACE}" ClickHouseInstallation clickhouse-openebs
+
+echo "Delete OpenEBS namespace ${OPENEBS_NAMESPACE}"
+LVMVOLUME_TO_DELETE=$(kubectl get LVMVolume --namespace "${OPENEBS_NAMESPACE}" | tail -1 | cut -f1 -d ' ')
+kubectl delete LVMVolume "${LVMVOLUME_TO_DELETE}" --namespace "${OPENEBS_NAMESPACE}"
+helm uninstall openebs --namespace "${OPENEBS_NAMESPACE}"
+kubectl delete namespace "${OPENEBS_NAMESPACE}"
diff --git a/deploy/openebs/lvm/install-openebs-lvm.sh b/deploy/openebs/lvm/install-openebs-lvm.sh
new file mode 100755
index 000000000..702507b0f
--- /dev/null
+++ b/deploy/openebs/lvm/install-openebs-lvm.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+function ensure_namespace() {
+    local namespace="${1}"
+    if kubectl get namespace "${namespace}" 1>/dev/null 2>/dev/null; then
+        echo "Namespace '${namespace}' already exists."
+    else
+        echo "No '${namespace}' namespace found. Going to create."
+        kubectl create namespace "${namespace}"
+    fi
+}
+
+
+echo "External value for \$OPENEBS_NAMESPACE=$OPENEBS_NAMESPACE"
+echo "External value for \$OPENEBS_OPERATOR_VERSION=$OPENEBS_OPERATOR_VERSION"
+echo "External value for \$VALIDATE_YAML=$VALIDATE_YAML"
+echo "External value for \$CLICKHOUSE_NAMESPACE=$CLICKHOUSE_NAMESPACE"
+
+OPENEBS_NAMESPACE="${OPENEBS_NAMESPACE:-openebs}"
+OPENEBS_OPERATOR_VERSION="${OPENEBS_OPERATOR_VERSION:-4.1.1}"
+VALIDATE_YAML="${VALIDATE_YAML:-"true"}"
+CLICKHOUSE_NAMESPACE="${CLICKHOUSE_NAMESPACE:-ch-test}"
+
+echo "Setup OpenEBS"
+echo "OPTIONS"
+echo "\$OPENEBS_NAMESPACE=${OPENEBS_NAMESPACE}"
+echo "\$OPENEBS_OPERATOR_VERSION=${OPENEBS_OPERATOR_VERSION}"
+echo "\$VALIDATE_YAML=${VALIDATE_YAML}"
+echo "\$CLICKHOUSE_NAMESPACE=${CLICKHOUSE_NAMESPACE}"
+echo ""
+echo "!!! IMPORTANT !!!"
+echo "If you do not agree with specified options, press ctrl-c now"
+if [[ "" == "${NO_WAIT}" ]]; then
+    sleep 10
+fi
+echo "Apply options now..."
+
+##
+##
+##
+function clean_dir() {
+    DIR="$1"
+
+    echo "##############################"
+    echo "Clean dir $DIR ..."
+    rm -rf "$DIR"
+    echo "...DONE"
+}
+
+##############################
+##                          ##
+## Install openebs.io operator ##
+##                          ##
+##############################
+
+# Prepare a temporary working directory for the installation
+
+TMP_DIR=$(mktemp -d)
+
+# Ensure temp dir is in place
+mkdir -p "${TMP_DIR}"
+
+# Temp dir must not contain any data
+if [[ -n "$(ls -A "${TMP_DIR}")" ]]; then
+    echo "${TMP_DIR} is not empty. Abort"
+    exit 1
+fi
+
+# Temp dir is empty, will clear it upon script termination
+trap 'clean_dir ${TMP_DIR}' SIGHUP SIGINT SIGQUIT SIGFPE SIGALRM SIGTERM
+
+# Continue with installing the helm repo
+helm repo add openebs https://openebs.github.io/openebs
+helm repo update
+
+echo "Setup OpenEBS operator ${OPENEBS_OPERATOR_VERSION} into ${OPENEBS_NAMESPACE} namespace"
+
+# Let's setup all OpenEBS-related stuff into dedicated namespace
+## TODO: need to refactor after next OPENEBS-operator release
+kubectl delete crd volumesnapshotclasses.snapshot.storage.k8s.io
+kubectl delete crd volumesnapshotcontents.snapshot.storage.k8s.io
+kubectl delete crd volumesnapshots.snapshot.storage.k8s.io
+
+# Setup OPENEBS-operator into dedicated namespace via helm
+helm install openebs --namespace "${OPENEBS_NAMESPACE}" openebs/openebs --set engines.replicated.mayastor.enabled=false --set engines.local.zfs.enabled=false --create-namespace --version "${OPENEBS_OPERATOR_VERSION}"
+
+echo -n "Waiting for '${OPENEBS_NAMESPACE}/openebs-lvm-localpv-controller' deployment to start"
+# Check that the openebs-lvm-localpv-controller deployment has all pods ready
+while [[ $(kubectl --namespace="${OPENEBS_NAMESPACE}" get deployments | grep "openebs-lvm-localpv-controller" | grep -c "1/1") == "0" ]]; do
+    printf "."
+    sleep 1
+done
+echo "...DONE"
+
+# Install the test storage class
+kubectl apply -f openebs-lvm-storageclass.yaml -n "${OPENEBS_NAMESPACE}"
+
+# Install a simple ClickHouse instance using OpenEBS
+echo "Setup simple ClickHouse into ${CLICKHOUSE_NAMESPACE} namespace using OpenEBS"
+ensure_namespace "${CLICKHOUSE_NAMESPACE}"
+kubectl apply --validate="${VALIDATE_YAML}" --namespace="${CLICKHOUSE_NAMESPACE}" -f clickhouse-installation-with-openebs.yaml
+
+# Remove the temporary working directory
+clean_dir "${TMP_DIR}"
diff --git a/deploy/openebs/lvm/openebs-lvm-pvc-test.yaml b/deploy/openebs/lvm/openebs-lvm-pvc-test.yaml
new file mode 100644
index 000000000..212c23964
--- /dev/null
+++ b/deploy/openebs/lvm/openebs-lvm-pvc-test.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc-test
+  namespace: openebs
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: openebs-lvm-test
\ No newline at end of file
diff --git a/deploy/openebs/lvm/openebs-lvm-storageclass.yaml b/deploy/openebs/lvm/openebs-lvm-storageclass.yaml
new file mode 100644
index 000000000..4d9d182a1
--- /dev/null
+++ b/deploy/openebs/lvm/openebs-lvm-storageclass.yaml
@@ -0,0 +1,12 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+allowVolumeExpansion: true
+metadata:
+  name: openebs-lvm-test
+parameters:
+  fsType: xfs
+  storage: lvm
+  vgpattern: vg-test
+provisioner: local.csi.openebs.io
+reclaimPolicy: Delete
+volumeBindingMode: Immediate