Skip to content

Commit

Permalink
webhook: only add disk to vg if this vg is not used
Browse files Browse the repository at this point in the history
    - simplify the vg management and offload it to the user in v1.4

Signed-off-by: Vicente Cheng <vicente.cheng@suse.com>
  • Loading branch information
Vicente-Cheng committed Oct 16, 2024
1 parent 1d4ac7d commit f70f894
Show file tree
Hide file tree
Showing 4 changed files with 107 additions and 9 deletions.
24 changes: 20 additions & 4 deletions cmd/node-disk-manager-webhook/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@ import (
"github.com/harvester/webhook/pkg/config"
"github.com/harvester/webhook/pkg/server"
"github.com/harvester/webhook/pkg/server/admission"
"github.com/rancher/wrangler/v3/pkg/generated/controllers/core"
ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
ctlstorage "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage"
ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1"
"github.com/rancher/wrangler/v3/pkg/kubeconfig"
"github.com/rancher/wrangler/v3/pkg/signals"
"github.com/rancher/wrangler/v3/pkg/start"
Expand All @@ -23,7 +27,9 @@ import (
const webhookName = "harvester-node-disk-manager-webhook"

// resourceCaches bundles the informer caches the webhook handlers read from:
// block devices plus the StorageClass and PersistentVolume caches needed by
// the LVM volume-group validation.
type resourceCaches struct {
	bdCache           ctldiskv1.BlockDeviceCache
	storageClassCache ctlstoragev1.StorageClassCache
	pvCache           ctlcorev1.PersistentVolumeCache
}

func main() {
Expand Down Expand Up @@ -109,7 +115,7 @@ func runWebhookServer(ctx context.Context, cfg *rest.Config, options *config.Opt
bdMutator,
}

bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache)
bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache, resourceCaches.storageClassCache, resourceCaches.pvCache)
var validators = []admission.Validator{
bdValidator,
}
Expand Down Expand Up @@ -138,9 +144,19 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*resourc
if err != nil {
return nil, err
}
starters = append(starters, disks)
storageFactory, err := ctlstorage.NewFactoryFromConfig(cfg)
if err != nil {
return nil, err
}
coreFactory, err := core.NewFactoryFromConfig(cfg)
if err != nil {
return nil, err
}
starters = append(starters, disks, storageFactory, coreFactory)
resourceCaches := &resourceCaches{
bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(),
bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(),
storageClassCache: storageFactory.Storage().V1().StorageClass().Cache(),
pvCache: coreFactory.Core().V1().PersistentVolume().Cache(),
}

if err := start.All(ctx, threadiness, starters...); err != nil {
Expand Down
6 changes: 6 additions & 0 deletions deploy/charts/harvester-node-disk-manager/templates/rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,12 @@ rules:
- apiGroups: [ "" ]
resources: [ "secrets", "configmaps" ]
verbs: [ "*" ]
- apiGroups: [ "" ]
resources: [ "persistentvolumes" ]
verbs: [ "get", "watch", "list" ]
- apiGroups: [ "storage.k8s.io" ]
resources: [ "storageclasses" ]
verbs: [ "get", "watch", "list" ]
- apiGroups: [ "harvesterhci.io" ]
resources: [ "blockdevices" ]
verbs: [ "*" ]
Expand Down
4 changes: 4 additions & 0 deletions pkg/utils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,10 @@ const (
DiskRemoveTag = "harvester-ndm-disk-remove"
// Harvester Namespace
HarvesterNS = "harvester-system"
// LVMCSIDriver is the LVM CSI driver name
LVMCSIDriver = "lvm.driver.harvesterhci.io"
// LVMTopologyNodeKey is the key of LVM topology node
LVMTopologyNodeKey = "topology.lvm.csi/node"
)

var CmdTimeoutError error
Expand Down
82 changes: 77 additions & 5 deletions pkg/webhook/blockdevice/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,33 +3,48 @@ package blockdevice
import (
werror "github.com/harvester/webhook/pkg/error"
"github.com/harvester/webhook/pkg/server/admission"
ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1"
admissionregv1 "k8s.io/api/admissionregistration/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"

diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
"github.com/harvester/node-disk-manager/pkg/utils"
)

// Validator admits create/update requests for BlockDevice resources.
// It embeds admission.DefaultValidator so unhandled verbs fall through
// to the default (allow) behavior.
type Validator struct {
	admission.DefaultValidator

	BlockdeviceCache  ctldiskv1.BlockDeviceCache
	storageClassCache ctlstoragev1.StorageClassCache
	pvCache           ctlcorev1.PersistentVolumeCache
}

func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache) *Validator {
func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache, storageClassCache ctlstoragev1.StorageClassCache, pvCache ctlcorev1.PersistentVolumeCache) *Validator {
return &Validator{
BlockdeviceCache: blockdeviceCache,
BlockdeviceCache: blockdeviceCache,
storageClassCache: storageClassCache,
pvCache: pvCache,
}
}

// Create validates a new BlockDevice: first the generic provisioner rules,
// then the LVM-specific volume-group check.
func (v *Validator) Create(_ *admission.Request, newObj runtime.Object) error {
	bd := newObj.(*diskv1.BlockDevice)
	if err := v.validateProvisioner(bd); err != nil {
		return err
	}
	return v.validateLVMProvisioner(bd)
}

// Update validates a BlockDevice update with the same checks as Create:
// generic provisioner rules followed by the LVM volume-group check.
func (v *Validator) Update(_ *admission.Request, _, newObj runtime.Object) error {
	newBd := newObj.(*diskv1.BlockDevice)
	if err := v.validateProvisioner(newBd); err != nil {
		return err
	}
	return v.validateLVMProvisioner(newBd)
}

func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error {
Expand All @@ -43,6 +58,52 @@ func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error {
return nil
}

// validateLVMProvisioner checks a blockdevice that uses the LVM provisioner
// and rejects the request when the target volume group is already in use:
// i.e. an LVM StorageClass exists for the same node and vgName, and at least
// one PersistentVolume has been created from that StorageClass.
func (v *Validator) validateLVMProvisioner(bd *diskv1.BlockDevice) error {
	// check again, skip if no LVM provisioner
	if bd.Spec.Provisioner == nil || bd.Spec.Provisioner.LVM == nil {
		return nil
	}
	targetVGName := bd.Spec.Provisioner.LVM.VgName
	// Find the LVM StorageClass (if any) that targets this node and volume group.
	allStorageClasses, err := v.storageClassCache.List(labels.Everything())
	if err != nil {
		return werror.NewBadRequest("Failed to list storage classes")
	}
	targetSC := ""
	for _, sc := range allStorageClasses {
		if sc.Provisioner != utils.LVMCSIDriver {
			continue
		}
		// Only StorageClasses pinned to the same node are relevant.
		scTargetNode := getLVMTopologyNodes(sc)
		if scTargetNode != bd.Spec.NodeName {
			continue
		}
		if sc.Parameters["vgName"] == targetVGName {
			targetSC = sc.Name
			break
		}
	}

	// no related SC found, just return
	if targetSC == "" {
		return nil
	}

	// Reject if any PV was already provisioned from the matching StorageClass.
	pvs, err := v.pvCache.List(labels.Everything())
	if err != nil {
		return werror.NewBadRequest("Failed to list PVs")
	}
	for _, pv := range pvs {
		if pv.Spec.StorageClassName == targetSC {
			return werror.NewBadRequest("There is already a PVC created with the target volume group, we cannot add any blockdevice into it")
		}
	}
	return nil
}

func (v *Validator) Resource() admission.Resource {
return admission.Resource{
Names: []string{"blockdevices"},
Expand All @@ -56,3 +117,14 @@ func (v *Validator) Resource() admission.Resource {
},
}
}

// getLVMTopologyNodes returns the node name the StorageClass is pinned to via
// its allowed-topology match on the LVM topology node key, or "" when the
// StorageClass carries no such constraint.
func getLVMTopologyNodes(sc *storagev1.StorageClass) string {
	for _, topology := range sc.AllowedTopologies {
		for _, matchLabel := range topology.MatchLabelExpressions {
			// Guard len(Values): an expression with a matching key but an
			// empty value list would otherwise panic on Values[0].
			if matchLabel.Key == utils.LVMTopologyNodeKey && len(matchLabel.Values) > 0 {
				return matchLabel.Values[0]
			}
		}
	}
	return ""
}

0 comments on commit f70f894

Please sign in to comment.