Add webhook #151

Merged 7 commits on Oct 11, 2024
96 changes: 85 additions & 11 deletions .github/workflows/basic-ci.yaml
@@ -25,7 +25,7 @@ jobs:
run: |
make validate
make validate-ci
main_jobs:
job-new-installation:
needs: validation
runs-on:
- self-hosted
@@ -42,21 +42,94 @@ jobs:
- name: "Local Deployment (Harvester+Longhorn+Node-Disk-Manager) for testing"
id: vm_deploy
run: |
rm -rf ndm-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s ndm-vagrant-k3s
pushd ndm-vagrant-k3s
rm -rf ndm-new-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s ndm-new-vagrant-k3s
pushd ndm-new-vagrant-k3s
yq e -i ".cluster_size = 1" settings.yaml
./new-cluster.sh
echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
yq e -i ".longhorn_version = \"1.7.1\"" settings.yaml
./scripts/deploy_longhorn.sh
cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
./deploy_ndm.sh
popd
- name: "Patch Image target"
run: |
./ci/scripts/patch-ttl-repo.sh
echo "NDM override result as below:"
cat ci/charts/ndm-override.yaml
- name: "Deploy NDM"
run: |
pushd ndm-new-vagrant-k3s
cp ../ci/scripts/deploy_ndm_current.sh ./deploy_ndm_current.sh
cp ../ci/charts/ndm-override.yaml ./ndm-override.yaml
./deploy_ndm_current.sh
popd
- name: "Add disk"
run: |
pushd ndm-new-vagrant-k3s
./scripts/attach-disk.sh node1 ndm-new-vagrant-k3s
sleep 30
popd
- name: "Run Basic Test"
id: basic-test
run: |
pushd ndm-new-vagrant-k3s
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
echo Running integration tests
NDM_HOME=`pwd` go test -v ./tests/...
- name: "Get NDM logs"
if: always()
run: |
if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
echo "VM is not deployed, skip getting logs"
exit 0
fi
./ci/scripts/get-debug-info.sh
- name: "Tear Down / Cleanup"
if: always()
run: |
if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
echo "VM is not deployed, skip VM destroy"
exit 0
fi
rm -rf /tmp/hotplug_disks/ndm-new-vagrant-k3s
pushd ndm-new-vagrant-k3s
vagrant destroy -f --parallel
popd

jobs-upgrade:
needs: validation
runs-on:
- self-hosted
- golang
steps:
- name: "Clone and check"
uses: actions/checkout@v3
- name: "Build the Image for the Integration Test"
run: |
BUILD_FOR_CI=true make
./ci/scripts/patch-ttl-repo.sh
echo "NDM override result as below:"
cat ci/charts/ndm-override.yaml
- name: "Local Deployment (Harvester+Longhorn+Node-Disk-Manager) for testing"
id: vm_deploy
run: |
rm -rf ndm-upgrade-vagrant-k3s
git clone https://github.com/bk201/vagrant-k3s ndm-upgrade-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
yq e -i ".cluster_size = 1" settings.yaml
./new-cluster.sh
echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
yq e -i ".longhorn_version = \"1.7.1\"" settings.yaml
./scripts/deploy_longhorn.sh
cp ../ci/scripts/deploy_ndm_chart.sh ./deploy_ndm_chart.sh
./deploy_ndm_chart.sh
popd
- name: "Add disk"
run: |
pushd ndm-vagrant-k3s
./scripts/attach-disk.sh node1
pushd ndm-upgrade-vagrant-k3s
./scripts/attach-disk.sh node1 ndm-upgrade-vagrant-k3s
sleep 30
popd
- name: "Patch Image target (for upgrade)"
@@ -66,14 +139,14 @@
cat ci/charts/ndm-override.yaml
- name: "Upgrade NDM"
run: |
pushd ndm-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
cp ../ci/scripts/upgrade_ndm.sh ./upgrade_ndm.sh
./upgrade_ndm.sh
popd
- name: "Run Basic Test"
id: basic-test
run: |
pushd ndm-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
vagrant ssh-config node1 > ../ssh-config
cp kubeconfig ../kubeconfig
popd
@@ -94,6 +167,7 @@ jobs:
echo "VM is not deployed, skip VM destroy"
exit 0
fi
pushd ndm-vagrant-k3s
rm -rf /tmp/hotplug_disks/ndm-upgrade-vagrant-k3s
pushd ndm-upgrade-vagrant-k3s
vagrant destroy -f --parallel
popd
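
Both jobs guard their always() steps with the same pattern: VM_DEPLOYED is written to $GITHUB_ENV only after new-cluster.sh succeeds (run steps use bash -e), so log collection and teardown are skipped when the VM never came up. A minimal sketch of the pattern (step names illustrative):

    - name: "Deploy"
      run: |
        ./new-cluster.sh
        echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"   # reached only if the cluster came up
    - name: "Tear Down"
      if: always()
      run: |
        if [ "${{ env.VM_DEPLOYED }}" != 'true' ]; then
          exit 0   # nothing was deployed, nothing to destroy
        fi
        vagrant destroy -f --parallel
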
13 changes: 12 additions & 1 deletion .github/workflows/factory.yaml
@@ -11,6 +11,7 @@ on:
env:
repo: "rancher"
controllerImageName: "harvester-node-disk-manager"
webhookImageName: "harvester-node-disk-manager-webhook"

jobs:
dapper-build:
@@ -54,4 +55,14 @@ jobs:
platforms: linux/amd64,linux/arm64
file: package/Dockerfile
push: ${{ inputs.push }}
tags: ${{ env.repo }}/${{ env.controllerImageName }}:${{ inputs.tag }}

- name: Docker Build (Webhook)
uses: docker/build-push-action@v5
with:
provenance: false
context: .
platforms: linux/amd64,linux/arm64
file: package/Dockerfile.webhook
push: ${{ inputs.push }}
tags: ${{ env.repo }}/${{ env.webhookImageName }}:${{ inputs.tag }}
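
The webhook build step mirrors the controller build, swapping in package/Dockerfile.webhook and the new image name. A rough local equivalent (tag value illustrative):

    docker buildx build \
      --platform linux/amd64,linux/arm64 \
      --file package/Dockerfile.webhook \
      --tag rancher/harvester-node-disk-manager-webhook:dev \
      --push .
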
6 changes: 6 additions & 0 deletions ci/charts/ndm-override.yaml
@@ -4,6 +4,12 @@ image:
# Overrides the image tag whose default is the chart appVersion.
tag: ""

webhook:
repository: rancher/harvester-node-disk-manager
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""

autoProvisionFilter: [/dev/sd*]
debug: true

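For reference, the same webhook override could be passed on the helm command line instead of the values file; a hedged equivalent, assuming the chart consumes these keys as written:

    helm install harvester-node-disk-manager ./harvester-node-disk-manager \
      -n harvester-system --create-namespace \
      --set webhook.repository=rancher/harvester-node-disk-manager \
      --set webhook.pullPolicy=IfNotPresent
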
6 changes: 5 additions & 1 deletion ci/scripts/deploy_ndm.sh → ci/scripts/deploy_ndm_chart.sh
@@ -73,10 +73,14 @@ ensure_longhorn_ready

pushd $TOP_DIR

cat >> ndm-override.yaml << 'EOF'
cat >> ndm-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f ndm-override.yaml ]; then
mv ndm-override.yaml.default ndm-override.yaml
fi

$HELM pull harvester-node-disk-manager --repo https://charts.harvesterhci.io --untar
$HELM install -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager ./harvester-node-disk-manager --create-namespace -n harvester-system

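Since the defaults now land in ndm-override.yaml.default and are promoted only when no override exists, a caller can pre-seed its own ndm-override.yaml before invoking the script (hypothetical example):

    cat > ndm-override.yaml << 'EOF'
    image:
      repository: ttl.sh/node-disk-manager-deadbeef
      tag: "1h"
    EOF
    ./deploy_ndm_chart.sh   # keeps the pre-seeded override; the .default file is left unused
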
91 changes: 91 additions & 0 deletions ci/scripts/deploy_ndm_current.sh
@@ -0,0 +1,91 @@
#!/bin/bash -e

TOP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/" &> /dev/null && pwd )"

ensure_command() {
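# Echo 0 when $1 is found on PATH and 1 otherwise; callers read the result via $(...).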
local cmd=$1
if ! which $cmd &> /dev/null; then
echo 1
return
fi
echo 0
}

wait_ndm_ready() {
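# Poll until the DaemonSet's numberReady matches the cluster size read from settings.yaml.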
while [ true ]; do
running_num=$(kubectl get ds harvester-node-disk-manager -n harvester-system -o 'jsonpath={.status.numberReady}')
if [[ $running_num -eq ${cluster_nodes} ]]; then
echo "harvester-node-disk-manager pods are ready!"
break
fi
echo "harvester-node-disk-manager pods are not ready, sleep 10 seconds."
sleep 10
done
}

ensure_longhorn_ready() {
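# Fail fast if longhorn-manager or the instance-manager pods are not already Running.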
# ensure longhorn-manager first
while [ true ]; do
running_num=$(kubectl get ds longhorn-manager -n longhorn-system -o 'jsonpath={.status.numberReady}')
if [[ $running_num -eq ${cluster_nodes} ]]; then
echo "longhorn-manager pods are ready!"
break
fi
echo "check longhorn-manager failure, please deploy longhorn first."
exit 1
done

# ensure instance-manager-e ready
while [ true ]; do
running_num=$(kubectl get pods -n longhorn-system |grep ^instance-manager |grep Running |awk '{print $3}' |wc -l)
if [[ $running_num -eq ${cluster_nodes} ]]; then
echo "instance-manager pods are ready!"
break
fi
echo "check instance-manager failure, please deploy longhorn first."
exit 1
done
}

if [ ! -f $TOP_DIR/kubeconfig ]; then
echo "kubeconfig does not exist. Please create cluster first."
echo "Maybe try new_cluster.sh"
exit 1
fi
echo $TOP_DIR/kubeconfig
export KUBECONFIG=$TOP_DIR/kubeconfig

if [[ $(ensure_command helm) -eq 1 ]]; then
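# helm is missing; fetch a pinned release (v3.9.4) next to this script.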
echo "no helm, try to curl..."
curl -O https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz
tar -zxvf helm-v3.9.4-linux-amd64.tar.gz
HELM=$TOP_DIR/linux-amd64/helm
$HELM version
else
echo "Get helm, version info as below"
HELM=$(which helm)
$HELM version
fi

cluster_nodes=$(yq -e e '.cluster_size' $TOP_DIR/settings.yaml)
echo "cluster nodes: $cluster_nodes"
ensure_longhorn_ready

pushd $TOP_DIR
cat >> ndm-override.yaml.default << 'EOF'
autoProvisionFilter: [/dev/sd*]
EOF

if [ ! -f ndm-override.yaml ]; then
mv ndm-override.yaml.default ndm-override.yaml
fi

cp -r ../deploy/charts/harvester-node-disk-manager harvester-node-disk-manager

target_img=$(yq -e .image.repository ndm-override.yaml)
echo "install target image: ${target_img}"
$HELM install -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager ./harvester-node-disk-manager --create-namespace -n harvester-system

wait_ndm_ready
echo "harvester-node-disk-manager is ready"
popd
2 changes: 1 addition & 1 deletion ci/scripts/get-debug-info.sh
@@ -4,7 +4,7 @@ TARGETNODE="node1"

export KUBECONFIG=kubeconfig

NDMPOD=$(kubectl get pods -n harvester-system --field-selector spec.nodeName=$TARGETNODE |grep ^harvester-node-disk-manager |awk '{print $1}')
NDMPOD=$(kubectl get pods -n harvester-system --field-selector spec.nodeName=$TARGETNODE | grep ^harvester-node-disk-manager |grep -v webhook |awk '{print $1}')

# filter out the redundant Skip log
kubectl logs $NDMPOD -n harvester-system |grep -v Skip
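With the webhook deployed, two pods on the node share the harvester-node-disk-manager prefix, so the added grep -v webhook keeps only the controller pod. Roughly (pod names illustrative):

    $ kubectl get pods -n harvester-system --field-selector spec.nodeName=node1
    harvester-node-disk-manager-xxxxx           1/1   Running
    harvester-node-disk-manager-webhook-yyyyy   1/1   Running
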
5 changes: 4 additions & 1 deletion ci/scripts/patch-ttl-repo.sh
@@ -2,6 +2,9 @@

COMMIT=$(git rev-parse --short HEAD)
IMAGE=ttl.sh/node-disk-manager-${COMMIT}
IMAGE_WEBHOOK=ttl.sh/node-disk-manager-webhook-${COMMIT}

yq e -i ".image.repository = \"${IMAGE}\"" ci/charts/ndm-override.yaml
yq e -i ".image.tag = \"1h\"" ci/charts/ndm-override.yaml
yq e -i ".image.tag = \"1h\"" ci/charts/ndm-override.yaml
yq e -i ".webhook.image.repository = \"${IMAGE_WEBHOOK}\"" ci/charts/ndm-override.yaml
yq e -i ".webhook.image.tag = \"1h\"" ci/charts/ndm-override.yaml
2 changes: 1 addition & 1 deletion ci/scripts/upgrade_ndm.sh
@@ -62,7 +62,7 @@ sleep 30 # wait 30 seconds for ndm respawn pods

wait_ndm_ready
# check image
pod_name=$(kubectl get pods -n harvester-system |grep Running |grep ^harvester-node-disk-manager|head -n1 |awk '{print $1}')
pod_name=$(kubectl get pods -n harvester-system |grep Running |grep -v webhook |grep ^harvester-node-disk-manager|head -n1 |awk '{print $1}')
container_img=$(kubectl get pods ${pod_name} -n harvester-system -o yaml |yq -e .spec.containers[0].image |tr ":" "\n")
yaml_img=$(yq -e .image.repository ndm-override.yaml)
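# grep -q below succeeds when the chart's image.repository substring appears in the running image.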
if grep -q ${yaml_img} <<< ${container_img}; then