From 5d6409789291386b4a6da6cc843f70c330dcb078 Mon Sep 17 00:00:00 2001
From: Vicente Cheng
Date: Wed, 16 Oct 2024 11:52:03 +0800
Subject: [PATCH 1/3] vendor: add storage.k8s.io related packages

Signed-off-by: Vicente Cheng
---
 .../generated/controllers/storage/factory.go  | 72 +++++++++++++++++++
 .../controllers/storage/interface.go          | 43 +++++++++++
 .../controllers/storage/v1/interface.go       | 49 +++++++++++++
 .../controllers/storage/v1/storageclass.go    | 39 ++++++++++
 vendor/modules.txt                            |  2 +
 5 files changed, 205 insertions(+)
 create mode 100644 vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/factory.go
 create mode 100644 vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/interface.go
 create mode 100644 vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/interface.go
 create mode 100644 vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/storageclass.go

diff --git a/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/factory.go b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/factory.go
new file mode 100644
index 00000000..1e75281c
--- /dev/null
+++ b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/factory.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by main. DO NOT EDIT.
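+//
+// A minimal usage sketch, assuming a valid *rest.Config named `cfg` (the
+// variable names are illustrative, not part of the generated API):
+//
+//	factory, err := storage.NewFactoryFromConfig(cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	storageClasses := factory.Storage().V1().StorageClass()
+//	storageClassCache := storageClasses.Cache()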
+
+package storage
+
+import (
+	"github.com/rancher/lasso/pkg/controller"
+	"github.com/rancher/wrangler/v3/pkg/generic"
+	"k8s.io/client-go/rest"
+)
+
+type Factory struct {
+	*generic.Factory
+}
+
+func NewFactoryFromConfigOrDie(config *rest.Config) *Factory {
+	f, err := NewFactoryFromConfig(config)
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
+
+func NewFactoryFromConfig(config *rest.Config) (*Factory, error) {
+	return NewFactoryFromConfigWithOptions(config, nil)
+}
+
+func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) {
+	return NewFactoryFromConfigWithOptions(config, &FactoryOptions{
+		Namespace: namespace,
+	})
+}
+
+type FactoryOptions = generic.FactoryOptions
+
+func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) {
+	f, err := generic.NewFactoryFromConfigWithOptions(config, opts)
+	return &Factory{
+		Factory: f,
+	}, err
+}
+
+func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory {
+	f, err := NewFactoryFromConfigWithOptions(config, opts)
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
+
+func (c *Factory) Storage() Interface {
+	return New(c.ControllerFactory())
+}
+
+func (c *Factory) WithAgent(userAgent string) Interface {
+	return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory()))
+}
diff --git a/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/interface.go b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/interface.go
new file mode 100644
index 00000000..28d59892
--- /dev/null
+++ b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/interface.go
@@ -0,0 +1,43 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by main. DO NOT EDIT.
+
+package storage
+
+import (
+	"github.com/rancher/lasso/pkg/controller"
+	v1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1"
+)
+
+type Interface interface {
+	V1() v1.Interface
+}
+
+type group struct {
+	controllerFactory controller.SharedControllerFactory
+}
+
+// New returns a new Interface.
+func New(controllerFactory controller.SharedControllerFactory) Interface {
+	return &group{
+		controllerFactory: controllerFactory,
+	}
+}
+
+func (g *group) V1() v1.Interface {
+	return v1.New(g.controllerFactory)
+}
diff --git a/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/interface.go b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/interface.go
new file mode 100644
index 00000000..c0d7f5c4
--- /dev/null
+++ b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/interface.go
@@ -0,0 +1,49 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by main. DO NOT EDIT.
+
+package v1
+
+import (
+	"github.com/rancher/lasso/pkg/controller"
+	"github.com/rancher/wrangler/v3/pkg/generic"
+	"github.com/rancher/wrangler/v3/pkg/schemes"
+	v1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+func init() {
+	schemes.Register(v1.AddToScheme)
+}
+
+type Interface interface {
+	StorageClass() StorageClassController
+}
+
+func New(controllerFactory controller.SharedControllerFactory) Interface {
+	return &version{
+		controllerFactory: controllerFactory,
+	}
+}
+
+type version struct {
+	controllerFactory controller.SharedControllerFactory
+}
+
+func (v *version) StorageClass() StorageClassController {
+	return generic.NewNonNamespacedController[*v1.StorageClass, *v1.StorageClassList](schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "StorageClass"}, "storageclasses", v.controllerFactory)
+}
diff --git a/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/storageclass.go b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/storageclass.go
new file mode 100644
index 00000000..7bb71dd2
--- /dev/null
+++ b/vendor/github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1/storageclass.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by main. DO NOT EDIT.
+
+package v1
+
+import (
+	"github.com/rancher/wrangler/v3/pkg/generic"
+	v1 "k8s.io/api/storage/v1"
+)
+
+// StorageClassController interface for managing StorageClass resources.
+type StorageClassController interface {
+	generic.NonNamespacedControllerInterface[*v1.StorageClass, *v1.StorageClassList]
+}
+
+// StorageClassClient interface for managing StorageClass resources in Kubernetes.
+type StorageClassClient interface {
+	generic.NonNamespacedClientInterface[*v1.StorageClass, *v1.StorageClassList]
+}
+
+// StorageClassCache interface for retrieving StorageClass resources in memory.
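+//
+// A hedged lookup sketch (`factory` and the class name are illustrative):
+// the cache is read-only and answers from the informer's local store
+// rather than the API server.
+//
+//	scCache := factory.Storage().V1().StorageClass().Cache()
+//	sc, err := scCache.Get("my-storage-class")
+//	all, err := scCache.List(labels.Everything())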
+type StorageClassCache interface {
+	generic.NonNamespacedCacheInterface[*v1.StorageClass]
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0ef190a4..bc26555f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -215,6 +215,8 @@ github.com/rancher/wrangler/v3/pkg/generated/controllers/core
 github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1
 github.com/rancher/wrangler/v3/pkg/generated/controllers/rbac
 github.com/rancher/wrangler/v3/pkg/generated/controllers/rbac/v1
+github.com/rancher/wrangler/v3/pkg/generated/controllers/storage
+github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1
 github.com/rancher/wrangler/v3/pkg/generic
 github.com/rancher/wrangler/v3/pkg/gvk
 github.com/rancher/wrangler/v3/pkg/kubeconfig

From b41057dbbaffa1b97317cf94081b221c7a6dd12a Mon Sep 17 00:00:00 2001
From: Vicente Cheng
Date: Wed, 16 Oct 2024 11:26:12 +0800
Subject: [PATCH 2/3] webhook: check the VG status when adding/removing a
 blockdevice

- simplify the VG management and offload it to the user in v1.4

Signed-off-by: Vicente Cheng
---
 cmd/node-disk-manager-webhook/main.go |  24 +++-
 .../templates/rbac.yaml               |   6 +
 pkg/utils/utils.go                    |   4 +
 pkg/webhook/blockdevice/validator.go  | 107 +++++++++++++++++-
 4 files changed, 131 insertions(+), 10 deletions(-)

diff --git a/cmd/node-disk-manager-webhook/main.go b/cmd/node-disk-manager-webhook/main.go
index dc5629ca..76d7c7e2 100644
--- a/cmd/node-disk-manager-webhook/main.go
+++ b/cmd/node-disk-manager-webhook/main.go
@@ -8,6 +8,10 @@ import (
 	"github.com/harvester/webhook/pkg/config"
 	"github.com/harvester/webhook/pkg/server"
 	"github.com/harvester/webhook/pkg/server/admission"
+	"github.com/rancher/wrangler/v3/pkg/generated/controllers/core"
+	ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
+	ctlstorage "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage"
+	ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1"
 	"github.com/rancher/wrangler/v3/pkg/kubeconfig"
 	"github.com/rancher/wrangler/v3/pkg/signals"
 	"github.com/rancher/wrangler/v3/pkg/start"
@@ -23,7 +27,9 @@ import (
 const webhookName = "harvester-node-disk-manager-webhook"
 
 type resourceCaches struct {
-	bdCache ctldiskv1.BlockDeviceCache
+	bdCache           ctldiskv1.BlockDeviceCache
+	storageClassCache ctlstoragev1.StorageClassCache
+	pvCache           ctlcorev1.PersistentVolumeCache
 }
 
 func main() {
@@ -109,7 +115,7 @@ func runWebhookServer(ctx context.Context, cfg *rest.Config, options *config.Opt
 		bdMutator,
 	}
 
-	bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache)
+	bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache, resourceCaches.storageClassCache, resourceCaches.pvCache)
 	var validators = []admission.Validator{
 		bdValidator,
 	}
@@ -138,9 +144,19 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*resourc
 	if err != nil {
 		return nil, err
 	}
-	starters = append(starters, disks)
+	storageFactory, err := ctlstorage.NewFactoryFromConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	coreFactory, err := core.NewFactoryFromConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	starters = append(starters, disks, storageFactory, coreFactory)
 	resourceCaches := &resourceCaches{
-		bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(),
+		bdCache:           disks.Harvesterhci().V1beta1().BlockDevice().Cache(),
+		storageClassCache: storageFactory.Storage().V1().StorageClass().Cache(),
+		pvCache:           coreFactory.Core().V1().PersistentVolume().Cache(),
 	}
 
 	if err := start.All(ctx, threadiness, starters...); err != nil {
diff --git a/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml b/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml
index d5f1aea7..dbf38b6a 100644
--- a/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml
+++ b/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml
@@ -50,6 +50,12 @@ rules:
   - apiGroups: [ "" ]
     resources: [ "secrets", "configmaps" ]
     verbs: [ "*" ]
+  - apiGroups: [ "" ]
+    resources: [ "persistentvolumes" ]
+    verbs: [ "get", "watch", "list" ]
+  - apiGroups: [ "storage.k8s.io" ]
+    resources: [ "storageclasses" ]
+    verbs: [ "get", "watch", "list" ]
   - apiGroups: [ "harvesterhci.io" ]
     resources: [ "blockdevices" ]
     verbs: [ "*" ]
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index f11036c3..c2b0c52f 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -19,6 +19,10 @@ const (
 	DiskRemoveTag = "harvester-ndm-disk-remove"
 	// Harvester Namespace
 	HarvesterNS = "harvester-system"
+	// LVMCSIDriver is the LVM CSI driver name
+	LVMCSIDriver = "lvm.driver.harvesterhci.io"
+	// LVMTopologyNodeKey is the node label key used for LVM topology
+	LVMTopologyNodeKey = "topology.lvm.csi/node"
 )
 
 var CmdTimeoutError error
diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go
index 5eb42217..89b41cb6 100644
--- a/pkg/webhook/blockdevice/validator.go
+++ b/pkg/webhook/blockdevice/validator.go
@@ -1,35 +1,54 @@
 package blockdevice
 
 import (
+	"fmt"
+
 	werror "github.com/harvester/webhook/pkg/error"
 	"github.com/harvester/webhook/pkg/server/admission"
+	ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
+	ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1"
+	"github.com/sirupsen/logrus"
 	admissionregv1 "k8s.io/api/admissionregistration/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 
 	diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
 	ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
+	"github.com/harvester/node-disk-manager/pkg/utils"
 )
 
 type Validator struct {
 	admission.DefaultValidator
 
-	BlockdeviceCache ctldiskv1.BlockDeviceCache
+	BlockdeviceCache  ctldiskv1.BlockDeviceCache
+	storageClassCache ctlstoragev1.StorageClassCache
+	pvCache           ctlcorev1.PersistentVolumeCache
 }
 
-func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache) *Validator {
+func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache, storageClassCache ctlstoragev1.StorageClassCache, pvCache ctlcorev1.PersistentVolumeCache) *Validator {
 	return &Validator{
-		BlockdeviceCache: blockdeviceCache,
+		BlockdeviceCache:  blockdeviceCache,
+		storageClassCache: storageClassCache,
+		pvCache:           pvCache,
 	}
 }
 
 func (v *Validator) Create(_ *admission.Request, newObj runtime.Object) error {
 	bd := newObj.(*diskv1.BlockDevice)
-	return v.validateProvisioner(bd)
+	if err := v.validateProvisioner(bd); err != nil {
+		return err
+	}
+	return v.validateLVMProvisioner(nil, bd)
 }
 
-func (v *Validator) Update(_ *admission.Request, _, newObj runtime.Object) error {
+func (v *Validator) Update(_ *admission.Request, oldObj, newObj runtime.Object) error {
 	newBd := newObj.(*diskv1.BlockDevice)
-	return v.validateProvisioner(newBd)
+	oldBd := oldObj.(*diskv1.BlockDevice)
+	if err := v.validateProvisioner(newBd); err != nil {
+		return err
+	}
+	return v.validateLVMProvisioner(oldBd, newBd)
 }
 
 func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error {
@@ -43,6 +62,71 @@ func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error {
 	return nil
 }
 
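+// A hedged sketch of the behavior added below (the VG name and variable
+// names are illustrative): if a StorageClass for utils.LVMCSIDriver pins
+// vgName "vg01" to this device's node and a PV of that class already
+// exists, toggling Spec.Provision on the blockdevice is rejected:
+//
+//	err := v.validateLVMProvisioner(oldBd, newBd)
+//	// err is a BadRequest once a PV already consumes volume group "vg01"
+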
+// validateLVMProvisioner checks a blockdevice that uses the LVM provisioner
+// and blocks the request if any PV was already created within the target
+// volume group
+func (v *Validator) validateLVMProvisioner(oldbd, newbd *diskv1.BlockDevice) error {
+	// check again, skip if no LVM provisioner
+	if newbd.Spec.Provisioner == nil || newbd.Spec.Provisioner.LVM == nil {
+		return nil
+	}
+
+	// Adding case; a new blockdevice with a provisioner set should not happen
+	if oldbd == nil {
+		logrus.Info("Adding a blockdevice with a provisioner set should not happen")
+		return v.validateVGIsAlreadyUsed(newbd)
+	}
+
+	// the provision flag flipped, meaning the disk is being added or removed
+	if oldbd.Spec.Provision != newbd.Spec.Provision {
+		return v.validateVGIsAlreadyUsed(newbd)
+	}
+
+	return nil
+}
+
+func (v *Validator) validateVGIsAlreadyUsed(bd *diskv1.BlockDevice) error {
+	targetVGName := bd.Spec.Provisioner.LVM.VgName
+	// find the storage class that targets this volume group on the same node
+	allStorageClasses, err := v.storageClassCache.List(labels.Everything())
+	if err != nil {
+		return werror.NewBadRequest("Failed to list storage classes")
+	}
+	targetSC := ""
+	for _, sc := range allStorageClasses {
+		if sc.Provisioner != utils.LVMCSIDriver {
+			continue
+		}
+		scTargetNode := getLVMTopologyNodes(sc)
+		if scTargetNode != bd.Spec.NodeName {
+			continue
+		}
+		if sc.Parameters["vgName"] == targetVGName {
+			targetSC = sc.Name
+			break
+		}
+	}
+
+	// no related SC found, just return
+	if targetSC == "" {
+		return nil
+	}
+
+	// check if there is any PV created with the targetSC
+	pvs, err := v.pvCache.List(labels.Everything())
+	if err != nil {
+		return werror.NewBadRequest("Failed to list PVs")
+	}
+	for _, pv := range pvs {
+		if pv.Spec.StorageClassName == targetSC {
+			errStr := fmt.Sprintf("There is already a PV created from the target volume group (%v), so we cannot add or remove the associated blockdevices", targetVGName)
+			return werror.NewBadRequest(errStr)
+		}
+	}
+	return nil
+}
+
 func (v *Validator) Resource() admission.Resource {
 	return admission.Resource{
 		Names:      []string{"blockdevices"},
@@ -56,3 +140,14 @@ func (v *Validator) Resource() admission.Resource {
 		},
 	}
 }
+
+func getLVMTopologyNodes(sc *storagev1.StorageClass) string {
+	for _, topology := range sc.AllowedTopologies {
+		for _, matchLabel := range topology.MatchLabelExpressions {
+			if matchLabel.Key == utils.LVMTopologyNodeKey {
+				return matchLabel.Values[0]
+			}
+		}
+	}
+	return ""
+}

From a4901fc9a1b13e984c6df0a96829d255dc27df62 Mon Sep 17 00:00:00 2001
From: Vicente Cheng
Date: Wed, 16 Oct 2024 15:18:52 +0800
Subject: [PATCH 3/3] webhook: should also consider the old
 `Spec.FileSystem.Provisioned`

- keep `Spec.Provision` and `Spec.FileSystem.Provisioned` in sync

Signed-off-by: Vicente Cheng
---
 pkg/controller/blockdevice/controller.go | 1 -
 pkg/provisioner/longhornv1.go            | 3 +++
 pkg/webhook/blockdevice/mutator.go       | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/pkg/controller/blockdevice/controller.go b/pkg/controller/blockdevice/controller.go
index 8d235e22..2eeb6f54 100644
--- a/pkg/controller/blockdevice/controller.go
+++ b/pkg/controller/blockdevice/controller.go
@@ -218,7 +218,6 @@ func (c *Controller) generateProvisioner(device *diskv1.BlockDevice) (provisione
 	// upgrade case, we need to update some fields
 	if device.Spec.Provisioner == nil && device.Status.ProvisionPhase == diskv1.ProvisionPhaseProvisioned {
 		device.Spec.Provision = true
-		device.Spec.FileSystem.Provisioned = false
 		provisionerLHV1 := &diskv1.LonghornProvisionerInfo{
 			EngineVersion: provisioner.TypeLonghornV1,
 		}
diff --git a/pkg/provisioner/longhornv1.go b/pkg/provisioner/longhornv1.go
index 8dc49fe4..8f966f54 100644
--- a/pkg/provisioner/longhornv1.go
+++ b/pkg/provisioner/longhornv1.go
@@ -97,6 +97,8 @@ func (p *LonghornV1Provisioner) Provision() (bool, error) {
 	}
 
 	if (synced && !diskv1.DiskAddedToNode.IsTrue(p.device)) || provisioned {
+		// mark `filesystem.provisioned` true so that the mutator can work
+		p.device.Spec.FileSystem.Provisioned = true
 		logrus.Debugf("Set blockdevice CRD (%v) to provisioned", p.device)
 		msg := fmt.Sprintf("Added disk %s to longhorn node `%s` as an additional disk", p.device.Name, p.nodeObj.Name)
 		setCondDiskAddedToNodeTrue(p.device, msg, diskv1.ProvisionPhaseProvisioned)
@@ -111,6 +113,7 @@ func (p *LonghornV1Provisioner) UnProvision() (bool, error) {
 
 	// inner functions
 	updateProvisionPhaseUnprovisioned := func() {
+		p.device.Spec.FileSystem.Provisioned = false
 		msg := fmt.Sprintf("Disk not in longhorn node `%s`", p.nodeObj.Name)
 		setCondDiskAddedToNodeFalse(p.device, msg, diskv1.ProvisionPhaseUnprovisioned)
 	}
diff --git a/pkg/webhook/blockdevice/mutator.go b/pkg/webhook/blockdevice/mutator.go
index ceb5a509..4a30cd61 100644
--- a/pkg/webhook/blockdevice/mutator.go
+++ b/pkg/webhook/blockdevice/mutator.go
@@ -62,7 +62,7 @@ func (m *Mutator) Update(req *admission.Request, oldObj, newObj runtime.Object)
 		return patchOps, nil
 	}
 	// disable case: align .spec.filesystem.provisioned with .spec.provision -> false
-	if prevProvision && !newBd.Spec.FileSystem.Provisioned {
+	if prevProvision && !newBd.Spec.FileSystem.Provisioned && oldBd.Spec.FileSystem.Provisioned {
 		if newBd.Spec.Provision {
 			patchProvision := admission.PatchOp{
 				Op: admission.PatchOpReplace,