diff --git a/.github/workflows/basic-ci.yaml b/.github/workflows/basic-ci.yaml
index 908df020..7661bd81 100644
--- a/.github/workflows/basic-ci.yaml
+++ b/.github/workflows/basic-ci.yaml
@@ -25,7 +25,7 @@ jobs:
         run: |
           make validate
           make validate-ci
-  main_jobs:
+  job-new-installation:
     needs: validation
     runs-on:
       - self-hosted
@@ -42,9 +42,81 @@ jobs:
       - name: "Local Deployment (Harvester+Longhorn+Node-Disk-Manager) for testing"
         id: vm_deploy
         run: |
-          rm -rf ndm-vagrant-k3s
-          git clone https://github.com/bk201/vagrant-k3s ndm-vagrant-k3s
-          pushd ndm-vagrant-k3s
+          rm -rf ndm-new-vagrant-k3s
+          git clone https://github.com/bk201/vagrant-k3s ndm-new-vagrant-k3s
+          pushd ndm-new-vagrant-k3s
+          yq e -i ".cluster_size = 1" settings.yaml
+          ./new-cluster.sh
+          echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
+          yq e -i ".longhorn_version = \"1.7.1\"" settings.yaml
+          ./scripts/deploy_longhorn.sh
+          popd
+      - name: "Patch Image target"
+        run: |
+          ./ci/scripts/patch-ttl-repo.sh
+          echo "NDM override result as below:"
+          cat ci/charts/ndm-override.yaml
+      - name: "Deploy NDM"
+        run: |
+          pushd ndm-new-vagrant-k3s
+          cp ../ci/scripts/deploy_ndm.sh ./deploy_ndm.sh
+          cp ../ci/charts/ndm-override.yaml ./ndm-override.yaml
+          ./deploy_ndm.sh
+          popd
+      - name: "Add disk"
+        run: |
+          pushd ndm-new-vagrant-k3s
+          ./scripts/attach-disk.sh node1 ndm-new-vagrant-k3s
+          sleep 30
+          popd
+      - name: "Run Basic Test"
+        id: basic-test
+        run: |
+          pushd ndm-new-vagrant-k3s
+          vagrant ssh-config node1 > ../ssh-config
+          cp kubeconfig ../kubeconfig
+          popd
+          echo Running integration tests
+          NDM_HOME=`pwd` go test -v ./tests/...
+      - name: "Get NDM logs"
+        #if: always()
+        run: |
+          if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
+            echo "VM is not deployed, skip getting logs"
+            exit 0
+          fi
+          ./ci/scripts/get-debug-info.sh
+      - name: "Tear Down / Cleanup"
+        #if: always()
+        run: |
+          if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
+            echo "VM is not deployed, skip VM destroy"
+            exit 0
+          fi
+          pushd ndm-new-vagrant-k3s
+          vagrant destroy -f --parallel
+          popd
+
+  jobs-upgrade:
+    needs: validation
+    runs-on:
+      - self-hosted
+      - golang
+    steps:
+      - name: "Clone and check"
+        uses: actions/checkout@v3
+      - name: "Build the Image for the Integration Test"
+        run: |
+          BUILD_FOR_CI=true make
+          ./ci/scripts/patch-ttl-repo.sh
+          echo "NDM override result as below:"
+          cat ci/charts/ndm-override.yaml
+      - name: "Local Deployment (Harvester+Longhorn+Node-Disk-Manager) for testing"
+        id: vm_deploy
+        run: |
+          rm -rf ndm-upgrade-vagrant-k3s
+          git clone https://github.com/bk201/vagrant-k3s ndm-upgrade-vagrant-k3s
+          pushd ndm-upgrade-vagrant-k3s
           yq e -i ".cluster_size = 1" settings.yaml
           ./new-cluster.sh
           echo "VM_DEPLOYED=true" >> "$GITHUB_ENV"
@@ -55,8 +127,8 @@ jobs:
           popd
       - name: "Add disk"
         run: |
-          pushd ndm-vagrant-k3s
-          ./scripts/attach-disk.sh node1
+          pushd ndm-upgrade-vagrant-k3s
+          ./scripts/attach-disk.sh node1 ndm-upgrade-vagrant-k3s
           sleep 30
           popd
       - name: "Patch Image target (for upgrade)"
@@ -66,21 +138,21 @@ jobs:
           cat ci/charts/ndm-override.yaml
       - name: "Upgrade NDM"
         run: |
-          pushd ndm-vagrant-k3s
+          pushd ndm-upgrade-vagrant-k3s
           cp ../ci/scripts/upgrade_ndm.sh ./upgrade_ndm.sh
           ./upgrade_ndm.sh
           popd
       - name: "Run Basic Test"
         id: basic-test
         run: |
-          pushd ndm-vagrant-k3s
+          pushd ndm-upgrade-vagrant-k3s
           vagrant ssh-config node1 > ../ssh-config
           cp kubeconfig ../kubeconfig
           popd
           echo Running integration tests
           NDM_HOME=`pwd` go test -v ./tests/...
       - name: "Get NDM logs"
-        if: always()
+        #if: always()
         run: |
           if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
             echo "VM is not deployed, skip getting logs"
@@ -88,12 +160,12 @@ jobs:
           fi
           ./ci/scripts/get-debug-info.sh
       - name: "Tear Down / Cleanup"
-        if: always()
+        #if: always()
         run: |
           if [ ${{ env.VM_DEPLOYED }} != 'true' ]; then
             echo "VM is not deployed, skip VM destroy"
             exit 0
           fi
-          pushd ndm-vagrant-k3s
+          pushd ndm-upgrade-vagrant-k3s
           vagrant destroy -f --parallel
           popd
diff --git a/ci/scripts/deploy_ndm.sh b/ci/scripts/deploy_ndm.sh
index 06739f0c..99bd360b 100755
--- a/ci/scripts/deploy_ndm.sh
+++ b/ci/scripts/deploy_ndm.sh
@@ -73,10 +73,14 @@
 ensure_longhorn_ready
 
 pushd $TOP_DIR
 
-cat >> ndm-override.yaml << 'EOF'
+cat >> ndm-override.yaml.default << 'EOF'
 autoProvisionFilter: [/dev/sd*]
 EOF
 
+if [ ! -f ndm-override.yaml ]; then
+  mv ndm-override.yaml.default ndm-override.yaml
+fi
+
 $HELM pull harvester-node-disk-manager --repo https://charts.harvesterhci.io --untar
 $HELM install -f $TOP_DIR/ndm-override.yaml harvester-node-disk-manager ./harvester-node-disk-manager --create-namespace -n harvester-system
diff --git a/tests/integration/test_1_disk_hotplug_test.go b/tests/integration/test_1_disk_hotplug_test.go
index f5caf50b..d8930697 100644
--- a/tests/integration/test_1_disk_hotplug_test.go
+++ b/tests/integration/test_1_disk_hotplug_test.go
@@ -34,18 +34,18 @@ import (
 */
 
 const (
-	hotplugTargetNodeName  = "ndm-vagrant-k3s_node1"
 	hotplugDiskXMLFileName = "/tmp/hotplug_disks/node1-sda.xml"
 	hotplugTargetDiskName  = "sda"
 )
 
 type HotPlugTestSuite struct {
 	suite.Suite
-	SSHClient      *goph.Client
-	clientSet      *clientset.Clientset
-	targetNodeName string
-	targetDiskName string
-	curBusPath     string // to make sure which path we deployed
+	SSHClient             *goph.Client
+	clientSet             *clientset.Clientset
+	targetNodeName        string
+	targetDiskName        string
+	hotplugTargetNodeName string
+	hotplugTargetBaseDir  string
 }
 
 func (s *HotPlugTestSuite) SetupSuite() {
@@ -84,6 +84,14 @@
 
 	s.clientSet, err = clientset.NewForConfig(config)
 	require.Equal(s.T(), err, nil, "New clientset should not get error")
+
+	cmd := fmt.Sprintf("ls %s |grep vagrant-k3s", os.Getenv("NDM_HOME"))
+	targetDirDomain, _, err := doCommand(cmd)
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error : %v", cmd, err)
+
+	s.hotplugTargetNodeName = fmt.Sprintf("%s_node1", strings.TrimSpace(targetDirDomain))
+	s.hotplugTargetBaseDir = fmt.Sprintf("/tmp/hotplug_disks/%s", strings.TrimSpace(targetDirDomain))
+
 }
 
 func (s *HotPlugTestSuite) AfterTest(_, _ string) {
@@ -117,9 +125,9 @@ func (s *HotPlugTestSuite) Test_0_PreCheckForDiskCount() {
 
 func (s *HotPlugTestSuite) Test_1_HotPlugRemoveDisk() {
 	// remove disk dynamically
-	cmd := fmt.Sprintf("virsh detach-disk %s %s --live", hotplugTargetNodeName, hotplugTargetDiskName)
+	cmd := fmt.Sprintf("virsh detach-disk %s %s --live", s.hotplugTargetNodeName, hotplugTargetDiskName)
 	_, _, err := doCommand(cmd)
-	require.Equal(s.T(), err, nil, "Running command `virsh detach-disk` should not get error")
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)
 
 	// wait for controller handling
 	time.Sleep(5 * time.Second)
@@ -136,9 +144,10 @@ func (s *HotPlugTestSuite) Test_1_HotPlugRemoveDisk() {
 
 func (s *HotPlugTestSuite) Test_2_HotPlugAddDisk() {
 	// remove disk dynamically
-	cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", hotplugTargetNodeName, hotplugDiskXMLFileName)
+	hotplugDiskXMLFileName := fmt.Sprintf("%s/node1-sda.xml", s.hotplugTargetBaseDir)
+	cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", s.hotplugTargetNodeName, hotplugDiskXMLFileName)
 	_, _, err := doCommand(cmd)
-	require.Equal(s.T(), err, nil, "Running command `virsh attach-device` should not get error")
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)
 
 	// wait for controller handling, the device will be changed need more time to wait for the controller handling
 	time.Sleep(30 * time.Second)
@@ -154,15 +163,16 @@ func (s *HotPlugTestSuite) Test_2_HotPlugAddDisk() {
 
 func (s *HotPlugTestSuite) Test_3_AddDuplicatedWWNDsik() {
 	// create another another disk raw file and xml
-	const (
-		originalDeviceRaw   = "/tmp/hotplug_disks/node1-sda.qcow2"
-		duplicatedDeviceXML = "/tmp/hotplug_disks/node1-sdb.xml"
-		duplicatedDeviceRaw = "/tmp/hotplug_disks/node1-sdb.qcow2"
-	)
+
+	originalDeviceRaw := fmt.Sprintf("%s/node1-sda.qcow2", s.hotplugTargetBaseDir)
+	duplicatedDeviceXML := fmt.Sprintf("%s/node1-sdb.xml", s.hotplugTargetBaseDir)
+	duplicatedDeviceRaw := fmt.Sprintf("%s/node1-sdb.qcow2", s.hotplugTargetBaseDir)
+
 	cmdCpyRawFile := fmt.Sprintf("cp %s %s", originalDeviceRaw, duplicatedDeviceRaw)
 	_, _, err := doCommand(cmdCpyRawFile)
-	require.Equal(s.T(), err, nil, "Running command `cp the raw device file` should not get error")
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmdCpyRawFile)
 
+	hotplugDiskXMLFileName := fmt.Sprintf("%s/node1-sda.xml", s.hotplugTargetBaseDir)
 	disk, err := utils.DiskXMLReader(hotplugDiskXMLFileName)
 	require.Equal(s.T(), err, nil, "Read xml file should not get error")
 	disk.Source.File = duplicatedDeviceRaw
@@ -171,9 +181,9 @@
 	err = utils.XMLWriter(duplicatedDeviceXML, disk)
 	require.Equal(s.T(), err, nil, "Write xml file should not get error")
 
-	cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", hotplugTargetNodeName, duplicatedDeviceXML)
+	cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", s.hotplugTargetNodeName, duplicatedDeviceXML)
 	_, _, err = doCommand(cmd)
-	require.Equal(s.T(), err, nil, "Running command `virsh attach-device` should not get error")
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)
 
 	// wait for controller handling
 	time.Sleep(5 * time.Second)
@@ -186,9 +196,9 @@
 	require.Equal(s.T(), 1, len(blockdeviceList.Items), "We should have one disks because duplicated wwn should not added")
 	// cleanup this disk
-	cmd = fmt.Sprintf("virsh detach-disk %s %s --live", hotplugTargetNodeName, "sdb")
+	cmd = fmt.Sprintf("virsh detach-disk %s %s --live", s.hotplugTargetNodeName, "sdb")
 	_, _, err = doCommand(cmd)
-	require.Equal(s.T(), err, nil, "Running command `virsh detach-disk` should not get error")
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)
 
 	// wait for controller handling
 	time.Sleep(5 * time.Second)
 }
@@ -196,9 +206,9 @@
 
 func (s *HotPlugTestSuite) Test_4_RemoveInactiveDisk() {
 	// remove disk dynamically
-	cmd := fmt.Sprintf("virsh detach-disk %s %s --live", hotplugTargetNodeName, hotplugTargetDiskName)
+	cmd := fmt.Sprintf("virsh detach-disk %s %s --live", s.hotplugTargetNodeName, hotplugTargetDiskName)
 	_, _, err := doCommand(cmd)
-	require.Equal(s.T(), err, nil, "Running command `virsh detach-disk` should not get error")
+	require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)
 
 	// wait for controller handling
 	time.Sleep(5 * time.Second)