Merge pull request spegel-org#495 from spegel-org/e2e-reuse
Modify e2e tests to allow reusing the same kind cluster
phillebaba authored May 20, 2024
2 parents 51c8f13 + 8184551 commit 857d8c3
Showing 4 changed files with 94 additions and 44 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -23,6 +23,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- [#487](https://github.com/XenitAB/spegel/pull/487) Move mirror metrics code to mirror handler.
- [#488](https://github.com/XenitAB/spegel/pull/488) Update existing registry errors and add more detail.
- [#490](https://github.com/XenitAB/spegel/pull/490) Close immediate channel after writing to it to close wait group in merge logic.
- [#495](https://github.com/XenitAB/spegel/pull/495) Modify e2e tests to allow reusing the same kind cluster.

### Deprecated

3 changes: 2 additions & 1 deletion Makefile
@@ -2,6 +2,7 @@ TAG = $$(git rev-parse --short HEAD)
IMG ?= ghcr.io/spegel-org/spegel
REF = $(IMG):$(TAG)
CNI ?= iptables
DELETE_E2E_CLUSTER ?= true

lint:
golangci-lint run ./...
@@ -14,7 +15,7 @@ docker-build:
docker build -t ${REF} .

e2e: docker-build
./test/e2e/e2e.sh ${REF} ${CNI}
./test/e2e/e2e.sh ${REF} ${CNI} ${DELETE_E2E_CLUSTER}

tools:
GO111MODULE=on go install github.com/norwoodj/helm-docs/cmd/helm-docs
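For context, a usage sketch of the new variable (the targets are taken from the Makefile above; the values are illustrative):

```sh
# Run the e2e suite but keep the kind cluster around for faster iteration.
make e2e DELETE_E2E_CLUSTER=false

# Subsequent runs detect the existing spegel-e2e cluster, clean it, and reuse it.
make e2e DELETE_E2E_CLUSTER=false

# A run with the default value (true) tears the cluster down afterwards.
make e2e
```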
1 change: 0 additions & 1 deletion test/e2e/conformance-job.yaml
@@ -2,7 +2,6 @@ apiVersion: batch/v1
kind: Job
metadata:
name: conformance
namespace: default
spec:
backoffLimit: 0
template:
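With the hardcoded `namespace: default` removed, the job's namespace is chosen at apply time instead. A minimal sketch mirroring the new invocation in e2e.sh below:

```sh
# The manifest no longer pins a namespace, so the same file can be
# applied into the dedicated conformance namespace.
kubectl apply --namespace conformance -f test/e2e/conformance-job.yaml
```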
133 changes: 91 additions & 42 deletions test/e2e/e2e.sh
@@ -2,34 +2,74 @@ set -ex

IMG=$1
CNI=$2
DELETE_E2E_CLUSTER=$3
SCRIPT_PATH=$(realpath $0)
SCRIPT_DIR=$(dirname $SCRIPT_PATH)

# Create Kind cluster
TMP_DIR=$(mktemp -d)
KIND_NAME=spegel-e2e
export KIND_KUBECONFIG=$TMP_DIR/kind.kubeconfig
echo $KIND_KUBECONFIG
kind create cluster --kubeconfig $KIND_KUBECONFIG --config $SCRIPT_DIR/kind-config-$CNI.yaml

# Write existing configuration to test backup.
HOSTS_TOML='server = "https://docker.io"\n\n[host."https://registry-1.docker.io"]\n capabilities = ["push"]'
docker exec kind-worker2 bash -c "mkdir -p /etc/containerd/certs.d/docker.io; echo -e '$HOSTS_TOML' > /etc/containerd/certs.d/docker.io/hosts.toml"
# Check if kind cluster already exists.
if kind get clusters | grep $KIND_NAME
then
NEW_CLUSTER=false
else
NEW_CLUSTER=true
fi
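An editorial caveat, not part of the commit: `grep $KIND_NAME` matches substrings, so a cluster whose name merely contains `spegel-e2e` would also count as existing. A stricter sketch using whole-line matching:

```sh
# -x matches the whole line and -q suppresses output; the exit code
# alone decides whether the cluster already exists.
if kind get clusters | grep -qx "$KIND_NAME"
then
    NEW_CLUSTER=false
else
    NEW_CLUSTER=true
fi
```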

# Pull images onto a single node which will never run workloads.
docker exec kind-worker ctr -n k8s.io image pull ghcr.io/spegel-org/conformance:75d2816
docker exec kind-worker ctr -n k8s.io image pull docker.io/library/nginx:1.23.0
docker exec kind-worker ctr -n k8s.io image pull docker.io/library/nginx@sha256:b3a676a9145dc005062d5e79b92d90574fb3bf2396f4913dc1732f9065f55c4b
docker exec kind-worker ctr -n k8s.io image pull mcr.microsoft.com/containernetworking/azure-cns@sha256:7944413c630746a35d5596f56093706e8d6a3db0569bec0c8e58323f965f7416
# Either create a new cluster or clean the existing one.
if $NEW_CLUSTER
then
# Create Kind cluster
kind create cluster --kubeconfig $KIND_KUBECONFIG --config $SCRIPT_DIR/kind-config-$CNI.yaml --name $KIND_NAME

# Pull images onto a single node which will never run workloads.
docker exec $KIND_NAME-worker ctr -n k8s.io image pull ghcr.io/spegel-org/conformance:75d2816
docker exec $KIND_NAME-worker ctr -n k8s.io image pull docker.io/library/nginx:1.23.0
docker exec $KIND_NAME-worker ctr -n k8s.io image pull docker.io/library/nginx@sha256:b3a676a9145dc005062d5e79b92d90574fb3bf2396f4913dc1732f9065f55c4b
docker exec $KIND_NAME-worker ctr -n k8s.io image pull mcr.microsoft.com/containernetworking/azure-cns@sha256:7944413c630746a35d5596f56093706e8d6a3db0569bec0c8e58323f965f7416

# Write existing configuration to test backup.
HOSTS_TOML='server = "https://docker.io"\n\n[host."https://registry-1.docker.io"]\n capabilities = ["push"]'
docker exec $KIND_NAME-worker2 bash -c "mkdir -p /etc/containerd/certs.d/docker.io; echo -e '$HOSTS_TOML' > /etc/containerd/certs.d/docker.io/hosts.toml"
else
kind export kubeconfig --kubeconfig $KIND_KUBECONFIG --name $KIND_NAME
kubectl --kubeconfig $KIND_KUBECONFIG --namespace nginx delete deployments --all
kubectl --kubeconfig $KIND_KUBECONFIG --namespace conformance delete jobs --all
helm --kubeconfig $KIND_KUBECONFIG uninstall --ignore-not-found --namespace spegel spegel
# Delete lease due to a bug that would otherwise force us to wait for Spegel to fail once.
kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel delete lease spegel-leader-election

# Delete test images from all except one node
for NODE in control-plane worker2 worker3 worker4
do
NAME=$KIND_NAME-$NODE
docker exec $NAME ctr -n k8s.io image rm docker.io/library/nginx:1.21.0@sha256:2f1cd90e00fe2c991e18272bb35d6a8258eeb27785d121aa4cc1ae4235167cfd
docker exec $NAME ctr -n k8s.io image rm docker.io/library/nginx:1.23.0
docker exec $NAME ctr -n k8s.io image rm docker.io/library/nginx@sha256:b3a676a9145dc005062d5e79b92d90574fb3bf2396f4913dc1732f9065f55c4b
docker exec $NAME ctr -n k8s.io image rm mcr.microsoft.com/containernetworking/azure-cns@sha256:7944413c630746a35d5596f56093706e8d6a3db0569bec0c8e58323f965f7416
done

# Delete Spegel from all nodes
for NODE in control-plane worker worker2 worker3 worker4
do
NAME=$KIND_NAME-$NODE
docker exec $NAME bash -c "ctr -n k8s.io image ls -q | grep ghcr.io/spegel-org/spegel | xargs ctr -n k8s.io image rm"
kubectl --kubeconfig $KIND_KUBECONFIG label nodes $NAME spegel=schedule
done
fi
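The `spegel=schedule` label applied above works together with the chart's `nodeSelector` set further down in this script: labeling a node opts it into the Spegel DaemonSet, and removing the label evicts the pod again. A minimal sketch of the toggle:

```sh
# Opt a node into the DaemonSet (matches nodeSelector.spegel=schedule).
kubectl --kubeconfig $KIND_KUBECONFIG label nodes spegel-e2e-worker4 spegel=schedule

# The trailing dash removes the label, evicting the Spegel pod from the node.
kubectl --kubeconfig $KIND_KUBECONFIG label nodes spegel-e2e-worker4 spegel-
```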

# Deploy Spegel
kind load docker-image ${IMG}
DIGEST=$(docker exec kind-worker crictl inspecti -o 'go-template' --template '{{ index .status.repoDigests 0 }}' ${IMG} | cut -d'@' -f2)
for NODE in kind-control-plane kind-worker kind-worker2 kind-worker3 kind-worker4
kind load docker-image --name $KIND_NAME ${IMG}
DIGEST=$(docker exec $KIND_NAME-worker crictl inspecti -o 'go-template' --template '{{ index .status.repoDigests 0 }}' ${IMG} | cut -d'@' -f2)
for NODE in control-plane worker worker2 worker3 worker4
do
docker exec $NODE ctr -n k8s.io image tag ${IMG} ghcr.io/spegel-org/spegel@${DIGEST}
NAME=$KIND_NAME-$NODE
docker exec $NAME ctr -n k8s.io image tag ${IMG} ghcr.io/spegel-org/spegel@${DIGEST}
done
kubectl --kubeconfig $KIND_KUBECONFIG create namespace spegel
helm --kubeconfig $KIND_KUBECONFIG upgrade --wait --install --namespace="spegel" spegel ./charts/spegel --set "image.pullPolicy=Never" --set "image.digest=${DIGEST}" --set "nodeSelector.spegel=schedule"
helm --kubeconfig $KIND_KUBECONFIG upgrade --create-namespace --wait --install --namespace="spegel" spegel ./charts/spegel --set "image.pullPolicy=Never" --set "image.digest=${DIGEST}" --set "nodeSelector.spegel=schedule"
kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel rollout status daemonset spegel --timeout 60s
POD_COUNT=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get pods --no-headers | wc -l)
if [[ $POD_COUNT != "5" ]]
@@ -39,70 +79,75 @@ then
fi

# Verify that configuration has been backed up.
BACKUP_HOSTS_TOML=$(docker exec kind-worker2 cat /etc/containerd/certs.d/_backup/docker.io/hosts.toml)
BACKUP_HOSTS_TOML=$(docker exec $KIND_NAME-worker2 cat /etc/containerd/certs.d/_backup/docker.io/hosts.toml)
if [ $BACKUP_HOSTS_TOML != $HOSTS_TOML ]
then
echo "Spegel has not properly backed up existing configuration."
exit 1
fi
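An editorial note on the check above: the hosts.toml content contains spaces, so the unquoted `[ $BACKUP_HOSTS_TOML != $HOSTS_TOML ]` is word-split by the shell. A quoted sketch that compares the two values as single strings:

```sh
# Quoting keeps each variable as a single word, so the comparison works
# even though the TOML content contains spaces and newlines.
if [ "$BACKUP_HOSTS_TOML" != "$HOSTS_TOML" ]
then
    echo "Spegel has not properly backed up existing configuration."
    exit 1
fi
```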

# Run conformance tests
kubectl --kubeconfig $KIND_KUBECONFIG apply -f test/e2e/conformance-job.yaml
kubectl --kubeconfig $KIND_KUBECONFIG --namespace default wait --for=condition=complete job/conformance
kubectl --kubeconfig $KIND_KUBECONFIG create namespace conformance --dry-run=client -o yaml | kubectl --kubeconfig $KIND_KUBECONFIG apply -f -
kubectl --kubeconfig $KIND_KUBECONFIG apply --namespace conformance -f test/e2e/conformance-job.yaml
kubectl --kubeconfig $KIND_KUBECONFIG --namespace conformance wait --for=condition=complete job/conformance

# Remove Spegel from the last node to test that the mirror fallback is working.
SPEGEL_WORKER4=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get pods --no-headers -o name --field-selector spec.nodeName=kind-worker4)
kubectl --kubeconfig $KIND_KUBECONFIG label nodes kind-worker4 spegel-
SPEGEL_WORKER4=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get pods --no-headers -o name --field-selector spec.nodeName=$KIND_NAME-worker4)
kubectl --kubeconfig $KIND_KUBECONFIG label nodes $KIND_NAME-worker4 spegel-
kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel wait --for=delete $SPEGEL_WORKER4 --timeout=60s

# Verify that both local and external ports are working
HOST_IP=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get nodes kind-worker -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
HOST_IP=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get nodes $KIND_NAME-worker -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
if ipv6calc --in ipv6addr $HOST_IP; then
HOST_IP="[${HOST_IP}]"
fi
HTTP_CODE=$(docker exec kind-worker curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30020/healthz || true)
HTTP_CODE=$(docker exec $KIND_NAME-worker curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30020/healthz || true)
if [[ $HTTP_CODE != "200" ]]
then
echo "Spegel should be accessible on local port."
exit 1
fi
HTTP_CODE=$(docker exec kind-worker curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30021/healthz || true)
HTTP_CODE=$(docker exec $KIND_NAME-worker curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30021/healthz || true)
if [[ $HTTP_CODE != "200" ]]
then
echo "Spegel should be accessible on external port."
exit 1
fi
HOST_IP=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get nodes kind-worker4 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
HOST_IP=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get nodes $KIND_NAME-worker4 -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
if ipv6calc --in ipv6addr $HOST_IP; then
HOST_IP="[${HOST_IP}]"
fi
HTTP_CODE=$(docker exec kind-worker4 curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30020/healthz || true)
HTTP_CODE=$(docker exec $KIND_NAME-worker4 curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30020/healthz || true)
if [[ $HTTP_CODE != "000" ]]
then
echo "Spegel should not be accessible on local port when Spegel is not present on node."
exit 1
fi
HTTP_CODE=$(docker exec kind-worker4 curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30021/healthz || true)
HTTP_CODE=$(docker exec $KIND_NAME-worker4 curl -s -o /dev/null -w "%{http_code}" http://${HOST_IP}:30021/healthz || true)
if [[ $HTTP_CODE != "200" ]]
then
echo "Spegel should be accessible on external port."
exit 1
fi

# Pull images onto a single node which will never run workloads.
docker exec kind-worker ctr -n k8s.io image pull docker.io/library/nginx:1.21.0@sha256:2f1cd90e00fe2c991e18272bb35d6a8258eeb27785d121aa4cc1ae4235167cfd

# Block internet access by only allowing RFC1918 CIDR
for NODE in kind-control-plane kind-worker kind-worker2 kind-worker3 kind-worker4
do
docker exec $NODE iptables -A OUTPUT -o eth0 -d 10.0.0.0/8 -j ACCEPT
docker exec $NODE iptables -A OUTPUT -o eth0 -d 172.16.0.0/12 -j ACCEPT
docker exec $NODE iptables -A OUTPUT -o eth0 -d 192.168.0.0/16 -j ACCEPT
docker exec $NODE iptables -A OUTPUT -o eth0 -j REJECT
done
if $NEW_CLUSTER
then
# Pull images onto a single node which will never run workloads.
docker exec $KIND_NAME-worker ctr -n k8s.io image pull docker.io/library/nginx:1.21.0@sha256:2f1cd90e00fe2c991e18272bb35d6a8258eeb27785d121aa4cc1ae4235167cfd

# Block internet access by only allowing RFC1918 CIDR
for NODE in control-plane worker worker2 worker3 worker4
do
NAME=$KIND_NAME-$NODE
docker exec $NAME iptables -A OUTPUT -o eth0 -d 10.0.0.0/8 -j ACCEPT
docker exec $NAME iptables -A OUTPUT -o eth0 -d 172.16.0.0/12 -j ACCEPT
docker exec $NAME iptables -A OUTPUT -o eth0 -d 192.168.0.0/16 -j ACCEPT
docker exec $NAME iptables -A OUTPUT -o eth0 -j REJECT
done
fi
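A quick manual check, offered as an editorial sketch rather than part of the commit, that the RFC1918-only rules are in effect: a request to a public registry from inside a node should now fail:

```sh
# Expect 000: the REJECT rule blocks the connection before any HTTP
# response arrives, so image pulls must come from Spegel peers instead.
docker exec spegel-e2e-worker curl -s -o /dev/null -w "%{http_code}" \
    https://registry-1.docker.io/v2/ || true
```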

# Pull test image that does not contain any media types
docker exec kind-worker3 crictl pull mcr.microsoft.com/containernetworking/azure-cns@sha256:7944413c630746a35d5596f56093706e8d6a3db0569bec0c8e58323f965f7416
docker exec $KIND_NAME-worker3 crictl pull mcr.microsoft.com/containernetworking/azure-cns@sha256:7944413c630746a35d5596f56093706e8d6a3db0569bec0c8e58323f965f7416

# Deploy test Nginx pods and verify deployment status
kubectl --kubeconfig $KIND_KUBECONFIG apply -f $SCRIPT_DIR/test-nginx.yaml
@@ -121,7 +166,7 @@ then
fi

# Remove all Spegel Pods and only restart one to verify that running a single instance works
kubectl --kubeconfig $KIND_KUBECONFIG label nodes kind-control-plane kind-worker kind-worker2 spegel-
kubectl --kubeconfig $KIND_KUBECONFIG label nodes $KIND_NAME-control-plane $KIND_NAME-worker $KIND_NAME-worker2 spegel-
kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel delete pods --all
kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel rollout status daemonset spegel --timeout 60s
POD_COUNT=$(kubectl --kubeconfig $KIND_KUBECONFIG --namespace spegel get pods --no-headers | wc -l)
@@ -139,5 +184,9 @@ then
exit 1
fi

# Delete cluster
kind delete cluster
if $DELETE_E2E_CLUSTER
then
# Delete cluster
kind delete cluster --name $KIND_NAME
rm -rf $TMP_DIR
fi
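For completeness, the script's positional interface after this change (the image tag shown is illustrative):

```sh
# Usage: e2e.sh <image-ref> <cni> <delete-cluster>
./test/e2e/e2e.sh ghcr.io/spegel-org/spegel:abc1234 iptables false
```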
