Revert shared disks in release-2.7 #1363

Merged (2 commits) on Feb 18, 2025
4 changes: 0 additions & 4 deletions operator/config/crd/bases/forklift.konveyor.io_plans.yaml
@@ -161,10 +161,6 @@ spec:
- network
- storage
type: object
migrateSharedDisks:
default: true
description: Determines if the plan should migrate shared disks.
type: boolean
preserveClusterCpuModel:
description: Preserve the CPU model and flags the VM runs with in
its oVirt cluster.
9 changes: 2 additions & 7 deletions pkg/apis/forklift/v1beta1/plan.go
@@ -54,9 +54,6 @@ type PlanSpec struct {
// Defaults to 'virtio'.
// +optional
DiskBus cnv.DiskBus `json:"diskBus,omitempty"`
// Determines if the plan should migrate shared disks.
// +kubebuilder:default:=true
MigrateSharedDisks bool `json:"migrateSharedDisks,omitempty"`
}

// Find a planned VM.
@@ -106,7 +103,7 @@ type Plan struct {
// just use virt-v2v directly to convert the vm while copying data over. In other
// cases, we use CDI to transfer disks to the destination cluster and then use
// virt-v2v-in-place to convert these disks after cutover.
func (p *Plan) ShouldUseV2vForTransfer() (bool, error) {
func (p *Plan) VSphereColdLocal() (bool, error) {
source := p.Referenced.Provider.Source
if source == nil {
return false, liberr.New("Cannot analyze plan, source provider is missing.")
@@ -118,9 +115,7 @@ func (p *Plan) ShouldUseV2vForTransfer() (bool, error) {

switch source.Type() {
case VSphere:
// The virt-v2v transfers all disks attached to the VM. If we want to skip the shared disks so we don't transfer
// them multiple times, we need to manage the transfer using KubeVirt CDI DataVolumes and v2v-in-place.
return !p.Spec.Warm && destination.IsHost() && p.Spec.MigrateSharedDisks, nil
return !p.Spec.Warm && destination.IsHost(), nil
case Ova:
return true, nil
default:
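The comment kept above the renamed method spells out the decision it encodes: when it returns true, virt-v2v copies and converts the VM in a single pass; otherwise CDI DataVolumes transfer the disks and virt-v2v-in-place converts them after cutover. A minimal sketch of how the call sites later in this diff branch on it; `planLike` and the two `run*` functions are illustrative placeholders, not forklift APIs:

```go
// Sketch only: the branching pattern used at the VSphereColdLocal call sites.
type planLike interface {
	VSphereColdLocal() (bool, error)
}

func selectTransferPath(p planLike) error {
	coldLocal, err := p.VSphereColdLocal()
	if err != nil {
		return err
	}
	if coldLocal {
		// Cold migration with a local destination: virt-v2v copies the
		// disks and converts the guest in one step.
		return runVirtV2v(p)
	}
	// Otherwise CDI DataVolumes move the disks and virt-v2v-in-place
	// converts them after cutover.
	return runCdiTransfer(p)
}

func runVirtV2v(planLike) error     { return nil } // placeholder
func runCdiTransfer(planLike) error { return nil } // placeholder
```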
113 changes: 1 addition & 112 deletions pkg/controller/plan/adapter/vsphere/builder.go
@@ -33,7 +33,6 @@ import (
"github.com/vmware/govmomi/vim25/types"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
k8slabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/utils/ptr"
cnv "kubevirt.io/api/core/v1"
cdi "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
@@ -83,10 +82,6 @@ const (
AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile"
)

const (
Shareable = "shareable"
)

// Map of vmware guest ids to osinfo ids.
var osMap = map[string]string{
"centos64Guest": "centos5.11",
@@ -199,9 +194,6 @@ func (r *Builder) PodEnvironment(vmRef ref.Ref, sourceSecret *core.Secret) (env
err = liberr.Wrap(err, "vm", vmRef.String())
return
}
if !r.Context.Plan.Spec.MigrateSharedDisks {
r.removeSharedDisks(vm)
}

macsToIps := ""
if r.Plan.Spec.PreserveStaticIPs {
@@ -407,9 +399,6 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
err = liberr.Wrap(err, "vm", vmRef.String())
return
}
if !r.Context.Plan.Spec.MigrateSharedDisks {
r.removeSharedDisks(vm)
}

url := r.Source.Provider.Spec.URL
thumbprint := r.Source.Provider.Status.Fingerprint
@@ -446,7 +435,7 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
if disk.Datastore.ID == ds.ID {
storageClass := mapped.Destination.StorageClass
var dvSource cdi.DataVolumeSource
coldLocal, vErr := r.Context.Plan.ShouldUseV2vForTransfer()
coldLocal, vErr := r.Context.Plan.VSphereColdLocal()
if vErr != nil {
err = vErr
return
@@ -495,9 +484,6 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
dv.ObjectMeta.Annotations = make(map[string]string)
}
dv.ObjectMeta.Annotations[planbase.AnnDiskSource] = r.baseVolume(disk.File)
if disk.Shared {
dv.ObjectMeta.Labels[Shareable] = "true"
}
dvs = append(dvs, *dv)
}
}
@@ -506,64 +492,6 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
return
}

// Return all shareable PVCs
func (r *Builder) ShareablePVCs() (pvcs []*core.PersistentVolumeClaim, err error) {
pvcsList := &core.PersistentVolumeClaimList{}
err = r.Destination.Client.List(
context.TODO(),
pvcsList,
&client.ListOptions{
LabelSelector: k8slabels.SelectorFromSet(map[string]string{
Shareable: "true",
}),
},
)
if err != nil {
err = liberr.Wrap(err)
return
}
pvcs = make([]*core.PersistentVolumeClaim, len(pvcsList.Items))
for i, pvc := range pvcsList.Items {
// loopvar
copyPvc := pvc
pvcs[i] = &copyPvc
}

return
}

// Return PersistentVolumeClaims associated with a VM.
func (r *Builder) getSharedPVCs(vm *model.VM) (pvcs []*core.PersistentVolumeClaim, err error) {
allPvcs, err := r.ShareablePVCs()
if err != nil {
return pvcs, err
}
for _, disk := range vm.Disks {
if !disk.Shared {
continue
}
pvc := r.getDisksPvc(disk, allPvcs)
if pvc != nil {
pvcs = append(pvcs, pvc)
r.Log.Info("Found shared PVC disk", "disk", disk.File)
} else {
// No PVC found, skipping disk
r.Log.Info("No PVC found to the shared disk, the disk will not be created", "disk", disk.File)
}
}
return pvcs, nil
}

// Return the PersistentVolumeClaim matching a disk.
func (r *Builder) getDisksPvc(disk vsphere.Disk, pvcs []*core.PersistentVolumeClaim) *core.PersistentVolumeClaim {
for _, pvc := range pvcs {
if pvc.Annotations[planbase.AnnDiskSource] == r.baseVolume(disk.File) {
return pvc
}
}
return nil
}

// Create the destination Kubevirt VM.
func (r *Builder) VirtualMachine(vmRef ref.Ref, object *cnv.VirtualMachineSpec, persistentVolumeClaims []*core.PersistentVolumeClaim, usesInstanceType bool) (err error) {
vm := &model.VM{}
@@ -594,29 +522,6 @@ func (r *Builder) VirtualMachine(vmRef ref.Ref, object *cnv.VirtualMachineSpec,
vmRef.String()))
return
}
if !r.Context.Plan.Spec.MigrateSharedDisks {
sharedPVCs, err := r.getSharedPVCs(vm)
if err != nil {
return err
}
var temp []vsphere.Disk
for _, disk := range vm.Disks {
if !disk.Shared {
temp = append(temp, disk)
continue
}
pvc := r.getDisksPvc(disk, sharedPVCs)
if pvc != nil {
temp = append(temp, disk)
persistentVolumeClaims = append(persistentVolumeClaims, pvc)
r.Log.Info("Found shared PVC disk", "disk", disk.File)
} else {
// No PVC found, skipping disk
r.Log.Info("No PVC found to the shared disk, the disk will not be created", "disk", disk.File)
}
}
vm.Disks = temp
}

var conflicts []string
conflicts, err = r.macConflicts(vm)
@@ -771,16 +676,6 @@ func (r *Builder) mapFirmware(vm *model.VM, object *cnv.VirtualMachineSpec) {
object.Template.Spec.Domain.Firmware = firmware
}

func (r *Builder) removeSharedDisks(vm *model.VM) {
var disks []vsphere.Disk
for _, disk := range vm.Disks {
if !disk.Shared {
disks = append(disks, disk)
}
}
vm.Disks = disks
}

func (r *Builder) mapDisks(vm *model.VM, vmRef ref.Ref, persistentVolumeClaims []*core.PersistentVolumeClaim, object *cnv.VirtualMachineSpec) {
var kVolumes []cnv.Volume
var kDisks []cnv.Disk
@@ -834,9 +729,6 @@ func (r *Builder) mapDisks(vm *model.VM, vmRef ref.Ref, persistentVolumeClaims [
},
},
}
if disk.Shared {
kubevirtDisk.Cache = cnv.CacheNone
}
// For multiboot VMs, if the selected boot device is the current disk,
// set it as the first in the boot order.
if bootDisk == i+1 {
@@ -869,9 +761,6 @@ func (r *Builder) Tasks(vmRef ref.Ref) (list []*plan.Task, err error) {
err = liberr.Wrap(err, "vm", vmRef.String())
return
}
if !r.Context.Plan.Spec.MigrateSharedDisks {
r.removeSharedDisks(vm)
}
for _, disk := range vm.Disks {
mB := disk.Capacity / 0x100000
list = append(
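For orientation, the shared-disk plumbing deleted from this file fit together as follows: DataVolumes built for shared disks were labeled `shareable=true`, `ShareablePVCs` listed PVCs by that label, and `getDisksPvc` paired a PVC with a VM disk through the `planbase.AnnDiskSource` annotation that `DataVolumes` still sets above. A condensed, self-contained sketch of that lookup, with the label and annotation keys passed in rather than taken from the package constants:

```go
package sketch

import (
	"context"

	core "k8s.io/api/core/v1"
	k8slabels "k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listShareablePVCs condenses the removed ShareablePVCs helper: list every
// PVC on the destination cluster that carries the shareable label.
func listShareablePVCs(ctx context.Context, c client.Client, labelKey string) ([]core.PersistentVolumeClaim, error) {
	list := &core.PersistentVolumeClaimList{}
	err := c.List(ctx, list, &client.ListOptions{
		LabelSelector: k8slabels.SelectorFromSet(map[string]string{labelKey: "true"}),
	})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}

// pvcForDisk condenses the removed getDisksPvc helper: match a disk's base
// volume path against the disk-source annotation on each candidate PVC, or
// return nil when no PVC exists for that disk.
func pvcForDisk(annKey, diskSource string, pvcs []core.PersistentVolumeClaim) *core.PersistentVolumeClaim {
	for i := range pvcs {
		if pvcs[i].Annotations[annKey] == diskSource {
			return &pvcs[i]
		}
	}
	return nil
}
```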
2 changes: 1 addition & 1 deletion pkg/controller/plan/adapter/vsphere/client.go
@@ -331,7 +331,7 @@ func (r *Client) getTaskById(vmRef ref.Ref, taskId string, hosts util.HostsFunc)
}

func (r *Client) getClient(vm *model.VM, hosts util.HostsFunc) (client *vim25.Client, err error) {
if coldLocal, vErr := r.Plan.ShouldUseV2vForTransfer(); vErr == nil && coldLocal {
if coldLocal, vErr := r.Plan.VSphereColdLocal(); vErr == nil && coldLocal {
// when virt-v2v runs the migration, forklift-controller should interact only
// with the component that serves the SDK endpoint of the provider
client = r.client.Client
20 changes: 1 addition & 19 deletions pkg/controller/plan/kubevirt.go
@@ -1708,7 +1708,7 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
nonRoot := true
allowPrivilageEscalation := false
// virt-v2v image
coldLocal, vErr := r.Context.Plan.ShouldUseV2vForTransfer()
coldLocal, vErr := r.Context.Plan.VSphereColdLocal()
if vErr != nil {
err = vErr
return
@@ -1900,15 +1900,6 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi

for i, v := range vmVolumes {
pvc := pvcsByName[v.PersistentVolumeClaim.ClaimName]
if pvc == nil {
r.Log.V(1).Info(
"Failed to find the PVC to the Volume for the pod volume mount",
"volume",
v.Name,
"pvc",
v.PersistentVolumeClaim.ClaimName)
continue
}
vol := core.Volume{
Name: pvc.Name,
VolumeSource: core.VolumeSource{
@@ -2090,15 +2081,6 @@ func (r *KubeVirt) libvirtDomain(vmCr *VirtualMachine, pvcs []*core.PersistentVo
diskSource := libvirtxml.DomainDiskSource{}

pvc := pvcsByName[vol.PersistentVolumeClaim.ClaimName]
if pvc == nil {
r.Log.V(1).Info(
"Failed to find the PVC to the Volume for the libvirt domain",
"volume",
vol.Name,
"pvc",
vol.PersistentVolumeClaim.ClaimName)
continue
}
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == core.PersistentVolumeBlock {
diskSource.Block = &libvirtxml.DomainDiskSourceBlock{
Dev: fmt.Sprintf("/dev/block%v", i),
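Both hunks above drop a nil guard, so after the revert `podVolumeMounts` and `libvirtDomain` assume every VM volume's claim name resolves through `pvcsByName`. A self-contained sketch of that index-and-lookup pattern; the map construction is implied by the surrounding code rather than shown in the hunks:

```go
package sketch

import (
	core "k8s.io/api/core/v1"
	cnv "kubevirt.io/api/core/v1"
)

// pvcsForVolumes indexes PVCs by name and resolves each VM volume to its
// claim, mirroring the lookup in podVolumeMounts and libvirtDomain. With the
// guard removed, a missing claim simply yields a nil entry here.
func pvcsForVolumes(vmVolumes []cnv.Volume, pvcs []*core.PersistentVolumeClaim) []*core.PersistentVolumeClaim {
	pvcsByName := make(map[string]*core.PersistentVolumeClaim, len(pvcs))
	for _, pvc := range pvcs {
		pvcsByName[pvc.Name] = pvc
	}
	resolved := make([]*core.PersistentVolumeClaim, 0, len(vmVolumes))
	for _, v := range vmVolumes {
		// v.PersistentVolumeClaim is assumed non-nil, as in the forklift code.
		resolved = append(resolved, pvcsByName[v.PersistentVolumeClaim.ClaimName])
	}
	return resolved
}
```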
4 changes: 2 additions & 2 deletions pkg/controller/plan/migration.go
@@ -1825,7 +1825,7 @@ func (r *Migration) updateConversionProgress(vm *plan.VMStatus, step *plan.Step)
break
}

coldLocal, err := r.Context.Plan.ShouldUseV2vForTransfer()
coldLocal, err := r.Context.Plan.VSphereColdLocal()
switch {
case err != nil:
return liberr.Wrap(err)
@@ -2020,7 +2020,7 @@ type Predicate struct {

// Evaluate predicate flags.
func (r *Predicate) Evaluate(flag libitr.Flag) (allowed bool, err error) {
coldLocal, vErr := r.context.Plan.ShouldUseV2vForTransfer()
coldLocal, vErr := r.context.Plan.VSphereColdLocal()
if vErr != nil {
err = vErr
return
2 changes: 1 addition & 1 deletion pkg/controller/plan/scheduler/vsphere/scheduler.go
@@ -197,7 +197,7 @@ func (r *Scheduler) buildPending() (err error) {
}

func (r *Scheduler) cost(vm *model.VM, vmStatus *plan.VMStatus) int {
coldLocal, _ := r.Plan.ShouldUseV2vForTransfer()
coldLocal, _ := r.Plan.VSphereColdLocal()
if coldLocal {
switch vmStatus.Phase {
case CreateVM, PostHook, Completed: