diff --git a/config/crds/migration.openshift.io_directvolumemigrations.yaml b/config/crds/migration.openshift.io_directvolumemigrations.yaml
index 9343d8b5a..5ba2137a4 100644
--- a/config/crds/migration.openshift.io_directvolumemigrations.yaml
+++ b/config/crds/migration.openshift.io_directvolumemigrations.yaml
@@ -177,6 +177,10 @@ spec:
               description: TargetStorageClass storage class of the migrated PVC
                 in the target cluster
               type: string
+            targetVolumeMode:
+              description: TargetVolumeMode volume mode of the migrated PVC
+                in the target cluster
+              type: string
             uid:
               description: |-
                 UID of the referent.
diff --git a/pkg/apis/migration/v1alpha1/directvolumemigration_types.go b/pkg/apis/migration/v1alpha1/directvolumemigration_types.go
index 4bf7115a6..7fb5f0be7 100644
--- a/pkg/apis/migration/v1alpha1/directvolumemigration_types.go
+++ b/pkg/apis/migration/v1alpha1/directvolumemigration_types.go
@@ -41,6 +41,8 @@ type PVCToMigrate struct {
 	TargetStorageClass string `json:"targetStorageClass"`
 	// TargetAccessModes access modes of the migrated PVC in the target cluster
 	TargetAccessModes []kapi.PersistentVolumeAccessMode `json:"targetAccessModes"`
+	// TargetVolumeMode volume mode of the migrated PVC in the target cluster
+	TargetVolumeMode *kapi.PersistentVolumeMode `json:"targetVolumeMode,omitempty"`
 	// TargetNamespace namespace of the migrated PVC in the target cluster
 	TargetNamespace string `json:"targetNamespace,omitempty"`
 	// TargetName name of the migrated PVC in the target cluster
diff --git a/pkg/controller/directvolumemigration/descriptions.go b/pkg/controller/directvolumemigration/descriptions.go
index d6fd73a7a..9e1b565a7 100644
--- a/pkg/controller/directvolumemigration/descriptions.go
+++ b/pkg/controller/directvolumemigration/descriptions.go
@@ -25,7 +25,6 @@ var phaseDescriptions = map[string]string{
 	CreateDestinationNamespaces:  "Creating target namespaces",
 	DestinationNamespacesCreated: "Checking if the target namespaces have been created.",
 	CreateDestinationPVCs:        "Creating PVCs in the target namespaces",
-	DestinationPVCsCreated:       "Checking whether the created PVCs are bound",
 	CreateRsyncRoute:             "Creating one route for each namespace for Rsync on the target cluster",
 	CreateRsyncConfig:            "Creating a config map and secrets on both the source and target clusters for Rsync configuration",
 	CreateStunnelConfig:          "Creating a config map and secrets for Stunnel to connect to Rsync on the source and target clusters",
diff --git a/pkg/controller/directvolumemigration/pvcs.go b/pkg/controller/directvolumemigration/pvcs.go
index f3f43d07c..b6e21bb46 100644
--- a/pkg/controller/directvolumemigration/pvcs.go
+++ b/pkg/controller/directvolumemigration/pvcs.go
@@ -6,6 +6,7 @@ import (
 
 	liberr "github.com/konveyor/controller/pkg/error"
 	migapi "github.com/konveyor/mig-controller/pkg/apis/migration/v1alpha1"
+	"github.com/konveyor/mig-controller/pkg/compat"
 	"github.com/konveyor/mig-controller/pkg/settings"
 	corev1 "k8s.io/api/core/v1"
 	k8serror "k8s.io/apimachinery/pkg/api/errors"
@@ -40,124 +41,158 @@ func (t *Task) createDestinationPVCs() error {
 	if migration != nil {
 		migrationUID = string(migration.UID)
 	}
+	namespaceVMMap := make(map[string]map[string]string)
 	for _, pvc := range t.Owner.Spec.PersistentVolumeClaims {
-		// Get pvc definition from source cluster
-		srcPVC := corev1.PersistentVolumeClaim{}
-		key := types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}
-		err = srcClient.Get(context.TODO(), key, &srcPVC)
-		if err != nil {
-			return err
+		if _, ok := namespaceVMMap[pvc.Namespace]; !ok {
+			vmMap, err := getVolumeNameToVmMap(srcClient, pvc.Namespace)
+			if err != nil {
+				return err
+			}
+			namespaceVMMap[pvc.Namespace] = vmMap
 		}
-
-		plan := t.PlanResources.MigPlan
-		matchingMigPlanPV := t.findMatchingPV(plan, pvc.Name, pvc.Namespace)
-		pvcRequestedCapacity := srcPVC.Spec.Resources.Requests[corev1.ResourceStorage]
-
-		newSpec := srcPVC.Spec
-		newSpec.StorageClassName = &pvc.TargetStorageClass
-		newSpec.AccessModes = pvc.TargetAccessModes
-		newSpec.VolumeName = ""
-		// Remove DataSource and DataSourceRef from PVC spec so any populators or sources are not
-		// copied over to the destination PVC
-		newSpec.DataSource = nil
-		newSpec.DataSourceRef = nil
-
-		// Adjusting destination PVC storage size request
-		// max(requested capacity on source, capacity reported in migplan, proposed capacity in migplan)
-		if matchingMigPlanPV != nil && settings.Settings.DvmOpts.EnablePVResizing {
-			maxCapacity := pvcRequestedCapacity
-			// update maxCapacity if matching PV's capacity is greater than current maxCapacity
-			if matchingMigPlanPV.Capacity.Cmp(maxCapacity) > 0 {
-				maxCapacity = matchingMigPlanPV.Capacity
+		if _, ok := namespaceVMMap[pvc.Namespace][pvc.Name]; ok {
+			// A VM uses this PVC; create a blank DataVolume instead of a plain PVC
+			if err := t.createDestinationDV(srcClient, destClient, pvc, migrationUID); err != nil {
+				return err
 			}
-
-			// update maxcapacity if matching PV's proposed capacity is greater than current maxCapacity
-			if matchingMigPlanPV.ProposedCapacity.Cmp(maxCapacity) > 0 {
-				maxCapacity = matchingMigPlanPV.ProposedCapacity
+		} else {
+			if err := t.createDestinationPVC(srcClient, destClient, pvc, migrationUID); err != nil {
+				return err
 			}
-			newSpec.Resources.Requests[corev1.ResourceStorage] = maxCapacity
 		}
+	}
+	return nil
+}
 
-		//Add src labels and rollback labels
-		pvcLabels := srcPVC.Labels
-		if pvcLabels == nil {
-			pvcLabels = make(map[string]string)
-		}
-		// Merge DVM correlation labels into PVC labels for debug view
-		corrLabels := t.Owner.GetCorrelationLabels()
-		for k, v := range corrLabels {
-			pvcLabels[k] = v
-		}
+func (t *Task) createDestinationDV(srcClient, destClient compat.Client, pvc migapi.PVCToMigrate, migrationUID string) error {
+	destPVC, err := t.createDestinationPVCDefinition(srcClient, pvc, migrationUID)
+	if err != nil {
+		return err
+	}
+	destPVC.Spec.VolumeMode = pvc.TargetVolumeMode
+	return createBlankDataVolumeFromPVC(destClient, destPVC)
+}
 
-		if migrationUID != "" && t.PlanResources != nil && t.PlanResources.MigPlan != nil {
-			pvcLabels[migapi.MigMigrationLabel] = migrationUID
-			pvcLabels[migapi.MigPlanLabel] = string(t.PlanResources.MigPlan.UID)
-		} else if t.Owner.UID != "" {
-			pvcLabels[MigratedByDirectVolumeMigration] = string(t.Owner.UID)
+func (t *Task) createDestinationPVC(srcClient, destClient compat.Client, pvc migapi.PVCToMigrate, migrationUID string) error {
+	destPVC, err := t.createDestinationPVCDefinition(srcClient, pvc, migrationUID)
+	if err != nil {
+		return err
+	}
+	destPVCCheck := corev1.PersistentVolumeClaim{}
+	err = destClient.Get(context.TODO(), types.NamespacedName{
+		Namespace: destPVC.Namespace,
+		Name:      destPVC.Name,
+	}, &destPVCCheck)
+	if k8serror.IsNotFound(err) {
+		err = destClient.Create(context.TODO(), destPVC)
+		if err != nil {
+			return err
 		}
+	} else if err == nil {
+		t.Log.Info("PVC already exists on destination", "namespace", destPVC.Namespace, "name", destPVC.Name)
+	} else {
+		return err
+	}
+	return nil
+}
 
-		destNs := pvc.Namespace
-		if pvc.TargetNamespace != "" {
-			destNs = pvc.TargetNamespace
-		}
-		destName := pvc.Name
-		if pvc.TargetName != "" {
-			destName = pvc.TargetName
-		}
+func (t *Task) createDestinationPVCDefinition(srcClient compat.Client, pvc migapi.PVCToMigrate, migrationUID string) (*corev1.PersistentVolumeClaim, error) {
+	// Get pvc definition from source cluster
+	srcPVC := corev1.PersistentVolumeClaim{}
+	key := types.NamespacedName{Name: pvc.Name, Namespace: pvc.Namespace}
+	err := srcClient.Get(context.TODO(), key, &srcPVC)
+	if err != nil {
+		return nil, err
+	}
 
-		annotations := map[string]string{}
-		// If a kubevirt disk PVC, copy annotations to destination PVC
-		if srcPVC.Annotations != nil && srcPVC.Annotations["cdi.kubevirt.io/storage.contentType"] == "kubevirt" {
-			annotations = srcPVC.Annotations
-			// Ensure that when we create a matching DataVolume, it will adopt this PVC
-			annotations["cdi.kubevirt.io/storage.populatedFor"] = destName
-			// Remove annotations indicating the PVC is bound or provisioned
-			delete(annotations, "pv.kubernetes.io/bind-completed")
-			delete(annotations, "volume.beta.kubernetes.io/storage-provisioner")
-			delete(annotations, "pv.kubernetes.io/bound-by-controller")
-			delete(annotations, "volume.kubernetes.io/storage-provisioner")
+	plan := t.PlanResources.MigPlan
+	matchingMigPlanPV := t.findMatchingPV(plan, pvc.Name, pvc.Namespace)
+	pvcRequestedCapacity := srcPVC.Spec.Resources.Requests[corev1.ResourceStorage]
+
+	newSpec := srcPVC.Spec
+	newSpec.StorageClassName = &pvc.TargetStorageClass
+	newSpec.AccessModes = pvc.TargetAccessModes
+	newSpec.VolumeName = ""
+	// Remove DataSource and DataSourceRef from PVC spec so any populators or sources are not
+	// copied over to the destination PVC
+	newSpec.DataSource = nil
+	newSpec.DataSourceRef = nil
+
+	// Adjusting destination PVC storage size request
+	// max(requested capacity on source, capacity reported in migplan, proposed capacity in migplan)
+	if matchingMigPlanPV != nil && settings.Settings.DvmOpts.EnablePVResizing {
+		maxCapacity := pvcRequestedCapacity
+		// update maxCapacity if matching PV's capacity is greater than current maxCapacity
+		if matchingMigPlanPV.Capacity.Cmp(maxCapacity) > 0 {
+			maxCapacity = matchingMigPlanPV.Capacity
 		}
-		// Create pvc on destination with same metadata + spec
-		destPVC := corev1.PersistentVolumeClaim{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:        destName,
-				Namespace:   destNs,
-				Labels:      pvcLabels,
-				Annotations: annotations,
-			},
-			Spec: newSpec,
-		}
-		t.Log.Info("Creating PVC on destination MigCluster",
-			"persistentVolumeClaim", path.Join(pvc.Namespace, pvc.Name),
-			"destPersistentVolumeClaim", path.Join(destNs, pvc.Name),
-			"pvcStorageClassName", destPVC.Spec.StorageClassName,
-			"pvcAccessModes", destPVC.Spec.AccessModes,
-			"pvcRequests", destPVC.Spec.Resources.Requests,
-			"pvcDataSource", destPVC.Spec.DataSource,
-			"pvcDataSourceRef", destPVC.Spec.DataSourceRef)
-		destPVCCheck := corev1.PersistentVolumeClaim{}
-		err = destClient.Get(context.TODO(), types.NamespacedName{
-			Namespace: destNs,
-			Name:      destName,
-		}, &destPVCCheck)
-		if k8serror.IsNotFound(err) {
-			err = destClient.Create(context.TODO(), &destPVC)
-			if err != nil {
-				return err
-			}
-		} else if err == nil {
-			t.Log.Info("PVC already exists on destination", "namespace", pvc.Namespace, "name", pvc.Name)
-		} else {
-			return err
+		// update maxCapacity if matching PV's proposed capacity is greater than current maxCapacity
+		if matchingMigPlanPV.ProposedCapacity.Cmp(maxCapacity) > 0 {
+			maxCapacity = matchingMigPlanPV.ProposedCapacity
 		}
+		newSpec.Resources.Requests[corev1.ResourceStorage] = maxCapacity
 	}
-	return nil
-}
 
-func (t *Task) getDestinationPVCs() error {
-	// Ensure PVCs are bound and not in pending state
-	return nil
+	// Add src labels and rollback labels
+	pvcLabels := srcPVC.Labels
+	if pvcLabels == nil {
+		pvcLabels = make(map[string]string)
+	}
+	// Merge DVM correlation labels into PVC labels for debug view
+	corrLabels := t.Owner.GetCorrelationLabels()
+	for k, v := range corrLabels {
+		pvcLabels[k] = v
+	}
+
+	if migrationUID != "" && t.PlanResources != nil && t.PlanResources.MigPlan != nil {
+		pvcLabels[migapi.MigMigrationLabel] = migrationUID
+		pvcLabels[migapi.MigPlanLabel] = string(t.PlanResources.MigPlan.UID)
+	} else if t.Owner.UID != "" {
+		pvcLabels[MigratedByDirectVolumeMigration] = string(t.Owner.UID)
+	}
+
+	destNs := pvc.Namespace
+	if pvc.TargetNamespace != "" {
+		destNs = pvc.TargetNamespace
+	}
+	destName := pvc.Name
+	if pvc.TargetName != "" {
+		destName = pvc.TargetName
+	}
+
+	annotations := map[string]string{}
+	// If a kubevirt disk PVC, copy annotations to destination PVC
+	if srcPVC.Annotations != nil && srcPVC.Annotations["cdi.kubevirt.io/storage.contentType"] == "kubevirt" {
+		annotations = srcPVC.Annotations
+		// Ensure that when we create a matching DataVolume, it will adopt this PVC
+		annotations["cdi.kubevirt.io/storage.populatedFor"] = destName
+		// Remove annotations indicating the PVC is bound or provisioned
+		delete(annotations, "pv.kubernetes.io/bind-completed")
+		delete(annotations, "volume.beta.kubernetes.io/storage-provisioner")
+		delete(annotations, "pv.kubernetes.io/bound-by-controller")
+		delete(annotations, "volume.kubernetes.io/storage-provisioner")
+	}
+
+	// Create pvc on destination with same metadata + spec
+	destPVC := corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        destName,
+			Namespace:   destNs,
+			Labels:      pvcLabels,
+			Annotations: annotations,
+		},
+		Spec: newSpec,
+	}
+	t.Log.Info("Creating PVC on destination MigCluster",
+		"persistentVolumeClaim", path.Join(pvc.Namespace, pvc.Name),
+		"destPersistentVolumeClaim", path.Join(destNs, destName),
+		"pvcStorageClassName", destPVC.Spec.StorageClassName,
+		"pvcAccessModes", destPVC.Spec.AccessModes,
+		"pvcRequests", destPVC.Spec.Resources.Requests,
+		"pvcDataSource", destPVC.Spec.DataSource,
+		"pvcDataSourceRef", destPVC.Spec.DataSourceRef)
+	return &destPVC, nil
 }
 
 func (t *Task) findMatchingPV(plan *migapi.MigPlan, pvcName string, pvcNamespace string) *migapi.PV {
diff --git a/pkg/controller/directvolumemigration/task.go b/pkg/controller/directvolumemigration/task.go
index fa9537a72..a6c394d5b 100644
--- a/pkg/controller/directvolumemigration/task.go
+++ b/pkg/controller/directvolumemigration/task.go
@@ -34,7 +34,6 @@ const (
 	CreateDestinationNamespaces  = "CreateDestinationNamespaces"
 	DestinationNamespacesCreated = "DestinationNamespacesCreated"
 	CreateDestinationPVCs        = "CreateDestinationPVCs"
-	DestinationPVCsCreated       = "DestinationPVCsCreated"
 	CreateStunnelConfig          = "CreateStunnelConfig"
 	CreateRsyncConfig            = "CreateRsyncConfig"
 	CreateRsyncRoute             = "CreateRsyncRoute"
@@ -127,7 +126,6 @@ var VolumeMigration = Itinerary{
 	{phase: CreateDestinationNamespaces},
 	{phase: DestinationNamespacesCreated},
 	{phase: CreateDestinationPVCs},
-	{phase: DestinationPVCsCreated},
 	{phase: DeleteStaleVirtualMachineInstanceMigrations},
 	{phase: CreateRsyncRoute},
 	{phase: EnsureRsyncRouteAdmitted},
@@ -322,16 +320,6 @@ func (t *Task) Run(ctx context.Context) error {
 		if err = t.next(); err != nil {
 			return liberr.Wrap(err)
 		}
-	case DestinationPVCsCreated:
-		// Get the PVCs on the destination and confirm they are bound
-		err := t.getDestinationPVCs()
-		if err != nil {
-			return liberr.Wrap(err)
-		}
-		t.Requeue = NoReQ
-		if err = t.next(); err != nil {
-			return liberr.Wrap(err)
-		}
 	case CreateRsyncRoute:
 		err := t.ensureRsyncEndpoints()
 		if err != nil {
diff --git a/pkg/controller/directvolumemigration/vm.go b/pkg/controller/directvolumemigration/vm.go
index 0803b48af..74f335516 100644
--- a/pkg/controller/directvolumemigration/vm.go
+++ b/pkg/controller/directvolumemigration/vm.go
@@ -198,6 +198,26 @@ func getVMNamesInNamespace(client k8sclient.Client, namespace string) (map[strin
 	return vms, nil
 }
 
+func getVolumeNameToVmMap(client k8sclient.Client, namespace string) (map[string]string, error) {
+	volumeVmMap := make(map[string]string)
+	vmList := virtv1.VirtualMachineList{}
+	err := client.List(context.TODO(), &vmList, k8sclient.InNamespace(namespace))
+	if err != nil {
+		return nil, err
+	}
+	for _, vm := range vmList.Items {
+		for _, volume := range vm.Spec.Template.Spec.Volumes {
+			if volume.PersistentVolumeClaim != nil {
+				volumeVmMap[volume.PersistentVolumeClaim.ClaimName] = vm.Name
+			}
+			if volume.DataVolume != nil {
+				volumeVmMap[volume.DataVolume.Name] = vm.Name
+			}
+		}
+	}
+	return volumeVmMap, nil
+}
+
 func (t *Task) storageLiveMigrateVM(vmName, namespace string, volumes *vmVolumes) error {
 	sourceClient := t.sourceClient
 	if t.Owner.IsRollback() {
@@ -365,7 +385,7 @@ func updateVM(client k8sclient.Client, vm *virtv1.VirtualMachine, sourceVolumes,
 		}
 		if volume.DataVolume != nil && volume.DataVolume.Name == sourceVolumes[i] {
 			log.V(5).Info("Updating DataVolume", "source", sourceVolumes[i], "target", targetVolumes[i])
-			if err := CreateNewDataVolume(client, sourceVolumes[i], targetVolumes[i], vmCopy.Namespace, log); err != nil {
+			if err := CreateNewAdoptionDataVolume(client, sourceVolumes[i], targetVolumes[i], vmCopy.Namespace, log); err != nil {
 				return err
 			}
 			vmCopy.Spec.Template.Spec.Volumes[j].DataVolume.Name = targetVolumes[i]
@@ -383,7 +403,53 @@ func updateVM(client k8sclient.Client, vm *virtv1.VirtualMachine, sourceVolumes,
 	return nil
 }
 
-func CreateNewDataVolume(client k8sclient.Client, sourceDvName, targetDvName, ns string, log logr.Logger) error {
+func createBlankDataVolumeFromPVC(client k8sclient.Client, targetPvc *corev1.PersistentVolumeClaim) error {
+	dv := &cdiv1.DataVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        targetPvc.Name,
+			Namespace:   targetPvc.Namespace,
+			Labels:      targetPvc.Labels,
+			Annotations: targetPvc.Annotations,
+		},
+		Spec: cdiv1.DataVolumeSpec{
+			Source: &cdiv1.DataVolumeSource{
+				Blank: &cdiv1.DataVolumeBlankImage{},
+			},
+			Storage: &cdiv1.StorageSpec{
+				Resources: corev1.ResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceStorage: targetPvc.Spec.Resources.Requests[corev1.ResourceStorage],
+					},
+				},
+				StorageClassName: targetPvc.Spec.StorageClassName,
+			},
+		},
+	}
+	if targetPvc.Spec.VolumeMode != nil && (*targetPvc.Spec.VolumeMode == corev1.PersistentVolumeBlock || *targetPvc.Spec.VolumeMode == corev1.PersistentVolumeFilesystem) {
+		dv.Spec.Storage.VolumeMode = targetPvc.Spec.VolumeMode
+	} else if targetPvc.Spec.VolumeMode != nil && *targetPvc.Spec.VolumeMode == "auto" {
+		dv.Spec.Storage.VolumeMode = nil
+	}
+	for _, accessMode := range targetPvc.Spec.AccessModes {
+		if accessMode == corev1.ReadWriteOnce || accessMode == corev1.ReadWriteMany {
+			dv.Spec.Storage.AccessModes = append(dv.Spec.Storage.AccessModes, accessMode)
+		}
+	}
+	if dv.Annotations == nil {
+		dv.Annotations = make(map[string]string)
+	}
+	dv.Annotations["cdi.kubevirt.io/allowClaimAdoption"] = "true"
+
+	if err := client.Create(context.TODO(), dv); err != nil {
+		if k8serrors.IsAlreadyExists(err) {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+func CreateNewAdoptionDataVolume(client k8sclient.Client, sourceDvName, targetDvName, ns string, log logr.Logger) error {
 	log.V(3).Info("Create new adopting datavolume from source datavolume", "namespace", ns, "source name", sourceDvName, "target name", targetDvName)
 	originalDv := &cdiv1.DataVolume{}
 	if err := client.Get(context.TODO(), k8sclient.ObjectKey{Namespace: ns, Name: sourceDvName}, originalDv); err != nil {
diff --git a/pkg/controller/directvolumemigration/vm_test.go b/pkg/controller/directvolumemigration/vm_test.go
index e3144ec7c..fd068126d 100644
--- a/pkg/controller/directvolumemigration/vm_test.go
+++ b/pkg/controller/directvolumemigration/vm_test.go
@@ -849,7 +849,7 @@ func TestCreateNewDataVolume(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			err := CreateNewDataVolume(tt.client, tt.sourceDv.Name, targetDv, testNamespace, log.WithName(tt.name))
+			err := CreateNewAdoptionDataVolume(tt.client, tt.sourceDv.Name, targetDv, testNamespace, log.WithName(tt.name))
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 				t.FailNow()
diff --git a/pkg/controller/migmigration/dvm.go b/pkg/controller/migmigration/dvm.go
index 7cbc1aca0..cb4b520fc 100644
--- a/pkg/controller/migmigration/dvm.go
+++ b/pkg/controller/migmigration/dvm.go
@@ -35,7 +35,7 @@ func (t *Task) createDirectVolumeMigration(migType *migapi.DirectVolumeMigration
 		dvm.Spec.MigrationType = migType
 	}
 	t.Log.Info("Creating DirectVolumeMigration on host cluster",
-		"directVolumeMigration", path.Join(dvm.Namespace, dvm.Name))
+		"directVolumeMigration", path.Join(dvm.Namespace, dvm.Name), "dvm", dvm)
 	err = t.Client.Create(context.TODO(), dvm)
 	return err
 }
@@ -247,6 +247,7 @@ func (t *Task) getDirectVolumeClaimList() []migapi.PVCToMigrate {
 			continue
 		}
 		accessModes := pv.PVC.AccessModes
+		volumeMode := pv.PVC.VolumeMode
 		// if the user overrides access modes, set up the destination PVC with user-defined
 		// access mode
 		if pv.Selection.AccessMode != "" {
@@ -259,6 +260,7 @@
 			},
 			TargetStorageClass: pv.Selection.StorageClass,
 			TargetAccessModes:  accessModes,
+			TargetVolumeMode:   &volumeMode,
 			TargetNamespace:    nsMapping[pv.PVC.Namespace],
 			TargetName:         pv.PVC.GetTargetName(),
 			Verify:             pv.Selection.Verify,
diff --git a/pkg/controller/migmigration/storage.go b/pkg/controller/migmigration/storage.go
index 330cdc2b7..fcb4bd358 100644
--- a/pkg/controller/migmigration/storage.go
+++ b/pkg/controller/migmigration/storage.go
@@ -801,7 +801,7 @@ func updateDataVolumeRef(client k8sclient.Client, dv *virtv1.DataVolumeSource, n
 
 	if destinationDVName, exists := mapping.Get(ns, originalName); exists {
 		dv.Name = destinationDVName
-		err := dvmc.CreateNewDataVolume(client, originalDv.Name, destinationDVName, ns, log)
+		err := dvmc.CreateNewAdoptionDataVolume(client, originalDv.Name, destinationDVName, ns, log)
 		if err != nil && !errors.IsAlreadyExists(err) {
 			log.Error(err, "failed creating DataVolume", "namespace", ns, "name", destinationDVName)
 			return true, err
diff --git a/pkg/controller/migplan/pvlist.go b/pkg/controller/migplan/pvlist.go
index fb50d4ed9..13ae47cf0 100644
--- a/pkg/controller/migplan/pvlist.go
+++ b/pkg/controller/migplan/pvlist.go
@@ -98,6 +98,15 @@ func (r *ReconcileMigPlan) updatePvs(ctx context.Context, plan *migapi.MigPlan)
 	plan.Status.DestStorageClasses = destStorageClasses
 	plan.Spec.BeginPvStaging()
+	// Build PV map.
+	pvMap, err := r.getPvMap(srcClient, plan)
+	if err != nil {
+		return liberr.Wrap(err)
+	}
+	claims, err := r.getClaims(srcClient, plan)
+	if err != nil {
+		return liberr.Wrap(err)
+	}
 	if plan.IsResourceExcluded("persistentvolumeclaims") {
 		log.Info("PV Discovery: 'persistentvolumeclaims' found in MigPlan "+
 			"Status.ExcludedResources, ending PV discovery",
@@ -113,15 +122,6 @@ func (r *ReconcileMigPlan) updatePvs(ctx context.Context, plan *migapi.MigPlan)
 		plan.Spec.PersistentVolumes.EndPvStaging()
 		return nil
 	}
-	// Build PV map.
-	pvMap, err := r.getPvMap(srcClient, plan)
-	if err != nil {
-		return liberr.Wrap(err)
-	}
-	claims, err := r.getClaims(srcClient, plan)
-	if err != nil {
-		return liberr.Wrap(err)
-	}
 	for _, claim := range claims {
 		key := k8sclient.ObjectKey{
 			Namespace: claim.Namespace,
@@ -412,9 +412,16 @@ func (r *ReconcileMigPlan) getClaims(client compat.Client, plan *migapi.MigPlan)
 			continue
 		}
 
+		pv := plan.Spec.FindPv(migapi.PV{Name: pvc.Spec.VolumeName})
 		volumeMode := core.PersistentVolumeFilesystem
-		if pvc.Spec.VolumeMode != nil {
-			volumeMode = *pvc.Spec.VolumeMode
+		accessModes := pvc.Spec.AccessModes
+		if pv == nil {
+			if pvc.Spec.VolumeMode != nil {
+				volumeMode = *pvc.Spec.VolumeMode
+			}
+		} else {
+			volumeMode = pv.PVC.VolumeMode
+			accessModes = pv.PVC.AccessModes
 		}
 		claims = append(
 			claims, migapi.PVC{
@@ -423,7 +430,7 @@ func (r *ReconcileMigPlan) getClaims(client compat.Client, plan *migapi.MigPlan)
 				Name:      pvc.Name,
 				Namespace: pvc.Namespace,
 			}, podList, plan),
-			AccessModes:  pvc.Spec.AccessModes,
+			AccessModes:  accessModes,
 			VolumeMode:   volumeMode,
 			HasReference: pvcInPodVolumes(pvc, podList),
 		})
diff --git a/pkg/controller/migplan/validation.go b/pkg/controller/migplan/validation.go
index 0b8a48d41..06d8c97a1 100644
--- a/pkg/controller/migplan/validation.go
+++ b/pkg/controller/migplan/validation.go
@@ -150,10 +150,11 @@ const (
 	VolumesUpdateStrategy  = "VolumesUpdateStrategy"
 	VolumeMigrationConfig  = "VolumeMigration"
 	VMLiveUpdateFeatures   = "VMLiveUpdateFeatures"
+	storageProfile         = "auto"
 )
 
 // Valid AccessMode values
-var validAccessModes = []kapi.PersistentVolumeAccessMode{kapi.ReadWriteOnce, kapi.ReadOnlyMany, kapi.ReadWriteMany}
+var validAccessModes = []kapi.PersistentVolumeAccessMode{kapi.ReadWriteOnce, kapi.ReadOnlyMany, kapi.ReadWriteMany, storageProfile}
 
 // Validate the plan resource.
 func (r ReconcileMigPlan) validate(ctx context.Context, plan *migapi.MigPlan) error {
@@ -1782,7 +1783,7 @@ func containsAccessMode(modeList []kapi.PersistentVolumeAccessMode, accessMode k
 			return true
 		}
 	}
-	return false
+	return accessMode == storageProfile
 }
 
 // NFS validation
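
Usage sketch (illustrative, not part of the patch): with this change, each entry in a
DirectVolumeMigration's spec.persistentVolumeClaims can carry a targetVolumeMode for the
destination. The names and values below are assumptions for illustration; per the CRD
change above the field is a plain string. Note that the code applies TargetVolumeMode only
on the DataVolume path taken for VM-owned PVCs; plain PVCs keep the volume mode copied
from the source spec, and a value of "auto" leaves the DataVolume's mode unset so the CDI
storage profile decides.

  spec:
    persistentVolumeClaims:
      - name: vm-disk
        namespace: source-ns
        targetStorageClass: ceph-rbd-block
        targetAccessModes:
          - ReadWriteMany
        targetVolumeMode: Block
        targetNamespace: target-ns
        targetName: vm-disk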