
Commit

draft lvm ci
Signed-off-by: Vicente Cheng <vicente.cheng@suse.com>
Vicente-Cheng committed Oct 26, 2024
1 parent 14dd63c commit 74fb63e
Showing 2 changed files with 179 additions and 1 deletion.
2 changes: 1 addition & 1 deletion tests/integration/test_1_disk_hotplug_test.go
@@ -233,7 +233,7 @@ func (s *HotPlugTestSuite) Test_4_RemoveInactiveDisk() {
curBlockdevice, err = bdi.Get(context.TODO(), s.targetDiskName, v1.GetOptions{})
require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error before we want to check remove")
require.Equal(s.T(), curBlockdevice.Status.DeviceStatus.FileSystem.MountPoint, "", "Mountpoint should be empty after we remove disk!")
-require.Equal(s.T(), diskv1.ProvisionPhaseUnprovisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Provisioned")
+require.Equal(s.T(), diskv1.ProvisionPhaseUnprovisioned, curBlockdevice.Status.ProvisionPhase, "Block device provisionPhase should be Unprovisioned")
}

func doCommand(cmdString string) (string, string, error) {
178 changes: 178 additions & 0 deletions tests/integration/test_2_lvm_test.go
@@ -0,0 +1,178 @@
package integration

import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"

"github.com/kevinburke/ssh_config"
"github.com/melbahja/goph"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"

diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
clientset "github.com/harvester/node-disk-manager/pkg/generated/clientset/versioned"
"github.com/harvester/node-disk-manager/pkg/utils"
)

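// LVMSuite covers the LVM provisioning flow of node-disk-manager, driving the
// test node over SSH and the cluster through the generated kubeconfig.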
type LVMSuite struct {
suite.Suite
SSHClient *goph.Client
clientSet *clientset.Clientset
targetNodeName string
hotplugTargetNodeName string
hotplugTargetBaseDir string
}

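// SetupSuite resolves the test node from the generated ssh-config, opens an
// SSH connection to it, builds the Kubernetes clientset, and derives the
// libvirt domain name and hotplug disk directory used by the tests.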
func (s *LVMSuite) SetupSuite() {
nodeName := ""
f, err := os.Open(filepath.Join(os.Getenv("NDM_HOME"), "ssh-config"))
require.Equal(s.T(), err, nil, "Open ssh-config should not get error")
cfg, err := ssh_config.Decode(f)
require.Equal(s.T(), err, nil, "Decode ssh-config should not get error")
// consider the wildcard entry, so the length should be 2
require.Equal(s.T(), len(cfg.Hosts), 2, "number of Hosts on SSH-config should be 2")
for _, host := range cfg.Hosts {
if host.String() == "" {
// wildcard, continue
continue
}
nodeName = host.Patterns[0].String()
break
}
require.NotEqual(s.T(), nodeName, "", "nodeName should not be empty.")
s.targetNodeName = nodeName
targetHost, _ := cfg.Get(nodeName, "HostName")
targetUser, _ := cfg.Get(nodeName, "User")
targetPrivateKey, _ := cfg.Get(nodeName, "IdentityFile")
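// ssh-config records the IdentityFile with the path used when the config was
// generated; keep only the part after "node-disk-manager/" and rebase it onto
// NDM_HOME so the private key resolves on this runner.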
splitedResult := strings.Split(targetPrivateKey, "node-disk-manager/")
privateKey := filepath.Join(os.Getenv("NDM_HOME"), splitedResult[len(splitedResult)-1])
// Start new ssh connection with private key.
auth, err := goph.Key(privateKey, "")
require.Equal(s.T(), err, nil, "generate ssh auth key should not get error")

s.SSHClient, err = goph.NewUnknown(targetUser, targetHost, auth)
require.Equal(s.T(), err, nil, "New ssh connection should not get error")

kubeconfig := filepath.Join(os.Getenv("NDM_HOME"), "kubeconfig")
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
require.Equal(s.T(), err, nil, "Generate kubeconfig should not get error")

s.clientSet, err = clientset.NewForConfig(config)
require.Equal(s.T(), err, nil, "New clientset should not get error")

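// Locate the vagrant-k3s working directory so we can derive the libvirt
// domain name and the directory that holds the hotplug disk images.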
cmd := fmt.Sprintf("ls %s |grep vagrant-k3s", os.Getenv("NDM_HOME"))
targetDirDomain, _, err := doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `%s` should not get error : %v", cmd, err)

s.hotplugTargetNodeName = fmt.Sprintf("%s_node1", strings.TrimSpace(targetDirDomain))
s.hotplugTargetBaseDir = fmt.Sprintf("/tmp/hotplug_disks/%s", strings.TrimSpace(targetDirDomain))
}

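// AfterTest closes the SSH connection and waits briefly so the cluster can
// settle before the next test runs.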
func (s *LVMSuite) AfterTest(_, _ string) {
if s.SSHClient != nil {
s.SSHClient.Close()
}
time.Sleep(5 * time.Second)
}

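// TestLVMOperation is the go test entry point for the LVM suite.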
func TestLVMOperation(t *testing.T) {
suite.Run(t, new(LVMSuite))
}

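// Test_0_ProvisionLVMWithMultipleDisk hot-plugs two virtual disks into the
// test node, provisions the first one with the LVM provisioner, and verifies
// that the corresponding LVMVolumeGroup becomes active and contains the device.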
func (s *LVMSuite) Test_0_ProvisionLVMWithMultipleDisk() {
// Create the target disks.
// We do not need to create new qcow2 images because the disk files already exist from test_1_disk_hotplug_test.go.
firstDeviceRaw := fmt.Sprintf("%s/node1-sda.qcow2", s.hotplugTargetBaseDir)
firstDeviceXMLFile := fmt.Sprintf("%s/node1-sda.xml", s.hotplugTargetBaseDir)
secondDeviceRaw := fmt.Sprintf("%s/node1-sdb.qcow2", s.hotplugTargetBaseDir)
secondDeviceXMLFile := fmt.Sprintf("%s/node1-sdb.xml", s.hotplugTargetBaseDir)

disk, err := utils.DiskXMLReader(firstDeviceXMLFile)
require.Equal(s.T(), err, nil, "Read xml file should not get error")
disk.Source.File = firstDeviceRaw
disk.Target.Dev = "sda"
disk.VENDOR = "HAR_DEV1"
err = utils.XMLWriter(firstDeviceXMLFile, disk)
require.Equal(s.T(), err, nil, "Write xml file should not get error")

s.attachDisk(s.hotplugTargetNodeName, firstDeviceXMLFile)

disk, err = utils.DiskXMLReader(secondDeviceXMLFile)
require.Equal(s.T(), err, nil, "Read xml file should not get error")
disk.Source.File = secondDeviceRaw
disk.Target.Dev = "sdb"
disk.VENDOR = "HAR_DEV2"
err = utils.XMLWriter(secondDeviceXMLFile, disk)
require.Equal(s.T(), err, nil, "Write xml file should not get error")

s.attachDisk(s.hotplugTargetNodeName, secondDeviceXMLFile)

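// Both disks are attached; list the block devices that NDM discovered.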
bdi := s.clientSet.HarvesterhciV1beta1().BlockDevices("longhorn-system")
bdList, err := bdi.List(context.TODO(), v1.ListOptions{})
require.Equal(s.T(), err, nil, "Get BlockdevicesList should not get error")
require.NotEqual(s.T(), len(bdList.Items), 0, "BlockdevicesList should not be empty")
require.Equal(s.T(), len(bdList.Items), 2, "BlockdevicesList should only have two devices")

// find the two BDs we added in the previous step
var targetDevFirst, targetDevSecond *diskv1.BlockDevice
for _, blockdevice := range bdList.Items {
if blockdevice.Spec.NodeName != s.targetNodeName {
// only consider the target node
continue
}
bdStatus := blockdevice.Status
if bdStatus.DeviceStatus.Details.Vendor == "HAR_DEV1" {
targetDevFirst = blockdevice.DeepCopy()
} else if bdStatus.DeviceStatus.Details.Vendor == "HAR_DEV2" {
targetDevSecond = blockdevice.DeepCopy()
}
}
require.NotNil(s.T(), targetDevFirst, "targetDevFirst should not be nil")
require.NotNil(s.T(), targetDevSecond, "targetDevSecond should not be nil")

// provision the first disk with the LVM provisioner
targetDevFirst.Spec.Provision = true
targetDevFirst.Spec.Provisioner = &diskv1.ProvisionerInfo{
LVM: &diskv1.LVMProvisionerInfo{
VgName: "test-vg01",
},
}
_, err = bdi.Update(context.TODO(), targetDevFirst, v1.UpdateOptions{})
require.Equal(s.T(), err, nil, "Update BlockDevice should not get error")

// sleep 30 seconds to wait for the controller to handle the update
time.Sleep(30 * time.Second)

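// The controller should have created an LVMVolumeGroup for the provisioned disk.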
lvmClient := s.clientSet.HarvesterhciV1beta1().LVMVolumeGroups("harvester-system")
lvmList, err := lvmClient.List(context.TODO(), v1.ListOptions{})
require.Equal(s.T(), err, nil, "Get LVMVolumeGroups should not get error")
require.NotEqual(s.T(), len(lvmList.Items), 0, "LVMVolumeGroups should not be empty")

var targetLVM *diskv1.LVMVolumeGroup
// find the LVM and check the status
for _, lvm := range lvmList.Items {
if lvm.Spec.NodeName == s.targetNodeName && lvm.Spec.VgName == "test-vg01" {
targetLVM = lvm.DeepCopy()
}
}
require.NotNil(s.T(), targetLVM, "targetLVM should not be nil")
require.Equal(s.T(), targetLVM.Status.Status, diskv1.VGStatusActive, "LVMVolumeGroup should be Active")
_, found := targetLVM.Spec.Devices[targetDevFirst.Name]
require.Equal(s.T(), found, true, "targetDevFirst should be in the LVMVolumeGroup")
}

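// attachDisk hot-plugs the device described by xmlFile into the running
// libvirt domain nodeName and gives the controller time to pick it up.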
func (s *LVMSuite) attachDisk(nodeName, xmlFile string) {
cmd := fmt.Sprintf("virsh attach-device --domain %s --file %s --live", nodeName, xmlFile)
_, _, err := doCommand(cmd)
require.Equal(s.T(), err, nil, "Running command `%s` should not get error", cmd)

// wait for controller handling
time.Sleep(5 * time.Second)
}
