Skip to content

Commit

Permalink
webhook: add mutator and validator
Browse files Browse the repository at this point in the history
  • Loading branch information
Vicente-Cheng committed Oct 8, 2024
1 parent 9c4ab05 commit 84b6d56
Show file tree
Hide file tree
Showing 3 changed files with 310 additions and 0 deletions.
160 changes: 160 additions & 0 deletions cmd/node-disk-manager-webhook/main.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
package main

import (
"context"
"fmt"
"os"

"github.com/harvester/webhook/pkg/config"
"github.com/harvester/webhook/pkg/server"
"github.com/harvester/webhook/pkg/server/admission"
"github.com/rancher/wrangler/v3/pkg/kubeconfig"
"github.com/rancher/wrangler/v3/pkg/signals"
"github.com/rancher/wrangler/v3/pkg/start"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
"k8s.io/client-go/rest"

ctldisk "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io"
ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
"github.com/harvester/node-disk-manager/pkg/webhook/blockdevice"
)

const webhookName = "harvester-node-disk-manager-webhook"

// resourceCaches bundles the controller caches that are shared with the
// webhook admission handlers.
type resourceCaches struct {
	// bdCache is a read-only cache of BlockDevice resources, handed to the
	// blockdevice mutator and validator.
	bdCache ctldiskv1.BlockDeviceCache
}

// main parses the CLI flags, builds the Kubernetes client configuration and
// runs the admission webhook server until it receives a termination signal.
func main() {
	var options config.Options
	var logLevel string

	flags := []cli.Flag{
		&cli.StringFlag{
			Name:        "loglevel",
			Usage:       "Specify log level",
			EnvVars:     []string{"LOGLEVEL"},
			Value:       "info",
			Destination: &logLevel,
		},
		&cli.IntFlag{
			Name:        "threadiness",
			EnvVars:     []string{"THREADINESS"},
			Usage:       "Specify controller threads",
			Value:       5,
			Destination: &options.Threadiness,
		},
		&cli.IntFlag{
			Name:        "https-port",
			EnvVars:     []string{"WEBHOOK_SERVER_HTTPS_PORT"},
			Usage:       "HTTPS listen port",
			Value:       8443,
			Destination: &options.HTTPSListenPort,
		},
		&cli.StringFlag{
			Name:        "namespace",
			EnvVars:     []string{"NAMESPACE"},
			Destination: &options.Namespace,
			Usage:       "The harvester namespace",
			// NOTE: the original set both Required:true and a default Value,
			// which is contradictory in urfave/cli v2 — a required flag never
			// falls back to its default. Keep the default and drop Required so
			// the documented fallback actually applies.
			Value: "harvester-system",
		},
		&cli.StringFlag{
			Name:        "controller-user",
			EnvVars:     []string{"CONTROLLER_USER_NAME"},
			Destination: &options.ControllerUsername,
			Value:       "system:serviceaccount:harvester-system:harvester-node-disk-manager",
			Usage:       "The harvester-node-disk-manager controller username",
		},
		&cli.StringFlag{
			Name:        "gc-user",
			EnvVars:     []string{"GARBAGE_COLLECTION_USER_NAME"},
			Destination: &options.GarbageCollectionUsername,
			Usage:       "The system username that performs garbage collection",
			Value:       "system:serviceaccount:kube-system:generic-garbage-collector",
		},
	}

	cfg, err := kubeconfig.GetNonInteractiveClientConfig(os.Getenv("KUBECONFIG")).ClientConfig()
	if err != nil {
		logrus.Fatal(err)
	}

	// The context is cancelled on SIGINT/SIGTERM so the server can shut down
	// cleanly.
	ctx := signals.SetupSignalContext()

	app := cli.NewApp()
	app.Flags = flags
	app.Action = func(_ *cli.Context) error {
		setLogLevel(logLevel)
		return runWebhookServer(ctx, cfg, &options)
	}

	if err := app.Run(os.Args); err != nil {
		logrus.Fatalf("run webhook server failed: %v", err)
	}
}

// runWebhookServer builds the resource caches, registers the blockdevice
// mutator and validator on a new webhook server, starts the server and then
// blocks until ctx is cancelled.
func runWebhookServer(ctx context.Context, cfg *rest.Config, options *config.Options) error {
	resourceCaches, err := newCaches(ctx, cfg, options.Threadiness)
	if err != nil {
		return fmt.Errorf("building resource caches: %w", err)
	}

	webhookServer := server.NewWebhookServer(ctx, cfg, webhookName, options)

	bdMutator := blockdevice.NewBlockdeviceMutator(resourceCaches.bdCache)
	mutators := []admission.Mutator{
		bdMutator,
	}

	bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache)
	validators := []admission.Validator{
		bdValidator,
	}

	if err := webhookServer.RegisterMutators(mutators...); err != nil {
		return fmt.Errorf("registering mutators: %w", err)
	}

	if err := webhookServer.RegisterValidators(validators...); err != nil {
		return fmt.Errorf("registering validators: %w", err)
	}

	if err := webhookServer.Start(); err != nil {
		return fmt.Errorf("starting webhook server: %w", err)
	}

	// Start returns once the server is running; block here until the signal
	// context is cancelled.
	<-ctx.Done()
	return nil
}

// newCaches builds the controller factory for harvesterhci.io resources,
// starts its informers and returns the caches the webhook handlers need.
func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*resourceCaches, error) {
	var starters []start.Starter

	disks, err := ctldisk.NewFactoryFromConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("building disk controller factory: %w", err)
	}
	starters = append(starters, disks)

	// Renamed from "resourceCaches" to avoid shadowing the type name.
	caches := &resourceCaches{
		bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(),
	}

	// Start the informers so the caches begin syncing before the webhook
	// serves any admission request.
	if err := start.All(ctx, threadiness, starters...); err != nil {
		return nil, fmt.Errorf("starting informer factories: %w", err)
	}

	return caches, nil
}

// setLogLevel configures the global logrus level from the given string.
// An unparsable value is reported instead of being silently swallowed, and
// falls back to debug (the original behavior) so that a misconfiguration is
// both visible and maximally verbose.
func setLogLevel(level string) {
	ll, err := logrus.ParseLevel(level)
	if err != nil {
		logrus.Warnf("invalid log level %q, falling back to debug: %v", level, err)
		ll = logrus.DebugLevel
	}
	// set global log level
	logrus.SetLevel(ll)
}
92 changes: 92 additions & 0 deletions pkg/webhook/blockdevice/mutator.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
package blockdevice

import (
"github.com/harvester/webhook/pkg/server/admission"
admissionregv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/runtime"

diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
)

// Mutator patches BlockDevice update requests to migrate the deprecated
// spec.fileSystem.provisioned flag into spec.provision.
type Mutator struct {
	admission.DefaultMutator

	// BlockdeviceCache is a read-only cache of BlockDevice resources.
	// NOTE(review): not referenced by any method in this file — confirm it is
	// actually needed before keeping the field.
	BlockdeviceCache ctldiskv1.BlockDeviceCache
}

// NewBlockdeviceMutator returns a Mutator backed by the given BlockDevice
// cache.
func NewBlockdeviceMutator(blockdeviceCache ctldiskv1.BlockDeviceCache) *Mutator {
	m := &Mutator{}
	m.BlockdeviceCache = blockdeviceCache
	return m
}

// Update is used to patch the filesystem.provisioned value to spec.provision
// We move the provision flag from Spec.Filesystem.Provision to Spec.Provision when we introduce the provisioner.
func (m *Mutator) Update(req *admission.Request, oldObj, newObj runtime.Object) (admission.Patch, error) {
if req.IsFromController() {
return nil, nil
}
var patchOps admission.Patch
oldBd := oldObj.(*diskv1.BlockDevice)
newBd := newObj.(*diskv1.BlockDevice)

if newBd.Spec.FileSystem != nil && !newBd.Spec.FileSystem.Provisioned && !newBd.Spec.Provision {
return nil, nil
}

prevProvision := oldBd.Status.ProvisionPhase == diskv1.ProvisionPhaseProvisioned
// That case means the spec.filesystem.provisioned is deprecated, so keep the provision value
if !prevProvision && newBd.Spec.Provision {
return nil, nil
}

if !prevProvision && newBd.Spec.FileSystem.Provisioned {
patchAddNewProvision := admission.PatchOp{
Op: admission.PatchOpReplace,
Path: "/spec/provision",
Value: true,
}
patchOps = append(patchOps, patchAddNewProvision)
if newBd.Spec.Provisioner == nil {
provisionerVal := &diskv1.LonghornProvisionerInfo{
EngineVersion: "LonghornV1",
}
patchAddNewProvisioner := admission.PatchOp{
Op: admission.PatchOpAdd,
Path: "/spec/provisioner",
Value: provisionerVal,
}
patchOps = append(patchOps, patchAddNewProvisioner)
}
return patchOps, nil
}
// means we need to disable, align the .spec.filesystem.provisioned with .spec.provision -> false
if prevProvision && !newBd.Spec.FileSystem.Provisioned {
if newBd.Spec.Provision {
patchProvision := admission.PatchOp{
Op: admission.PatchOpReplace,
Path: "/spec/provision",
Value: false,
}
patchOps = append(patchOps, patchProvision)
}
return patchOps, nil
}

return patchOps, nil
}

// Resource declares the admission registration for this mutator: blockdevice
// updates in the harvesterhci.io group.
func (m *Mutator) Resource() admission.Resource {
	return admission.Resource{
		Names:      []string{"blockdevices"},
		Scope:      admissionregv1.AllScopes,
		APIGroup:   diskv1.SchemeGroupVersion.Group,
		APIVersion: diskv1.SchemeGroupVersion.Version,
		ObjectType: &diskv1.BlockDevice{},
		OperationTypes: []admissionregv1.OperationType{
			// we do not care about Create because the bd is created by the controller
			admissionregv1.Update,
		},
	}
}
58 changes: 58 additions & 0 deletions pkg/webhook/blockdevice/validator.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
package blockdevice

import (
werror "github.com/harvester/webhook/pkg/error"
"github.com/harvester/webhook/pkg/server/admission"
admissionregv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/runtime"

diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1"
ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1"
)

// Validator rejects BlockDevice create/update requests that declare an
// invalid provisioner configuration.
type Validator struct {
	admission.DefaultValidator

	// BlockdeviceCache is a read-only cache of BlockDevice resources.
	// NOTE(review): not referenced by any method in this file — confirm it is
	// actually needed before keeping the field.
	BlockdeviceCache ctldiskv1.BlockDeviceCache
}

// NewBlockdeviceValidator returns a Validator that uses the supplied
// BlockDevice cache.
func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache) *Validator {
	v := Validator{BlockdeviceCache: blockdeviceCache}
	return &v
}

// Create validates the provisioner configuration of a new BlockDevice.
func (v *Validator) Create(_ *admission.Request, newObj runtime.Object) error {
	return v.validateProvisioner(newObj.(*diskv1.BlockDevice))
}

// Update validates the provisioner configuration of an updated BlockDevice.
func (v *Validator) Update(_ *admission.Request, _, newObj runtime.Object) error {
	return v.validateProvisioner(newObj.(*diskv1.BlockDevice))
}

// validateProvisioner rejects a BlockDevice that declares more than one
// provisioner; an absent provisioner is always valid.
func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error {
	p := bd.Spec.Provisioner
	if p == nil {
		return nil
	}
	if p.LVM == nil || p.Longhorn == nil {
		return nil
	}
	return werror.NewBadRequest("Blockdevice should not have multiple provisioners")
}

// Resource declares the admission registration for this validator:
// blockdevice creates and updates in the harvesterhci.io group.
func (v *Validator) Resource() admission.Resource {
	return admission.Resource{
		Names:      []string{"blockdevices"},
		Scope:      admissionregv1.AllScopes,
		APIGroup:   diskv1.SchemeGroupVersion.Group,
		APIVersion: diskv1.SchemeGroupVersion.Version,
		ObjectType: &diskv1.BlockDevice{},
		OperationTypes: []admissionregv1.OperationType{
			admissionregv1.Create,
			admissionregv1.Update,
		},
	}
}

0 comments on commit 84b6d56

Please sign in to comment.