Gilad Chaplik has uploaded a new change for review.

Change subject: core: prevent maintenance of a host with hard affinity
......................................................................

core: prevent maintenance of a host with hard affinity

Putting a host into maintenance can break the affinity rules of its running VMs.
The user should be aware of this and handle it manually:
soften the affinity rule or migrate the VMs.

Change-Id: Idd93ddca562bf739b2cd423e907be1b6da861735
Bug-Url: https://bugzilla.redhat.com/1084794
Signed-off-by: Gilad Chaplik <gchap...@redhat.com>
---
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/MaintenanceNumberOfVdssCommand.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
M backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
M backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDao.java
M backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDaoImpl.java
M backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
M backend/manager/modules/dal/src/test/java/org/ovirt/engine/core/dao/AffinityGroupDaoTest.java
M frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
M frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
M packaging/dbscripts/affinity_groups_sp.sql
10 files changed, 101 insertions(+), 11 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-engine refs/changes/01/34501/1
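
The core of the change is the new canDoAction check in MaintenanceNumberOfVdssCommand. As a minimal, self-contained sketch of the rule it enforces (plain Java with hypothetical placeholder types, not the engine's VM/AffinityGroup entities): maintenance is blocked only when a positive enforcing affinity group has two or more of its member VMs running on the host being moved to maintenance.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;

// Hypothetical stand-in for the engine's AffinityGroup entity; illustration only.
class Group {
    final String name;
    final Set<UUID> memberVmIds;

    Group(String name, Set<UUID> memberVmIds) {
        this.name = name;
        this.memberVmIds = memberVmIds;
    }
}

public class MaintenanceAffinityCheckSketch {

    // Names of positive enforcing groups that would be broken by putting the host
    // into maintenance, i.e. groups with at least two member VMs running on it.
    static List<String> blockingGroups(List<Group> positiveEnforcingGroups, Set<UUID> runningVmIdsOnHost) {
        List<String> blocking = new ArrayList<>();
        // A lone VM can be migrated on its own, so fewer than two running VMs is trivially fine.
        if (runningVmIdsOnHost.size() < 2) {
            return blocking;
        }
        for (Group group : positiveEnforcingGroups) {
            long runningMembers = group.memberVmIds.stream()
                    .filter(runningVmIdsOnHost::contains)
                    .count();
            if (runningMembers > 1) {
                blocking.add(group.name);
            }
        }
        return blocking;
    }

    public static void main(String[] args) {
        UUID vm1 = UUID.randomUUID(), vm2 = UUID.randomUUID(), vm3 = UUID.randomUUID();
        Group dbTier = new Group("db-tier", new HashSet<>(Arrays.asList(vm1, vm2)));
        // vm1 and vm2 belong to the same enforcing group and both run on the host,
        // so maintenance should be blocked and "db-tier" reported.
        System.out.println(blockingGroups(Arrays.asList(dbTier), new HashSet<>(Arrays.asList(vm1, vm2, vm3))));
    }
}

The patch itself works against AffinityGroup.getEntityIds() and the host's running VMs, and reports the blocking groups through the new VDS_CANNOT_MAINTENANCE_VDS_HAS_AFFINITY_VMS message.
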

diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/MaintenanceNumberOfVdssCommand.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/MaintenanceNumberOfVdssCommand.java
index 9ce78cd..8114b1f 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/MaintenanceNumberOfVdssCommand.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/MaintenanceNumberOfVdssCommand.java
@@ -1,7 +1,5 @@
 package org.ovirt.engine.core.bll;
 
-import org.ovirt.engine.core.bll.context.CommandContext;
-
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -12,6 +10,7 @@
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
+import org.ovirt.engine.core.bll.context.CommandContext;
 import org.ovirt.engine.core.bll.job.ExecutionHandler;
 import org.ovirt.engine.core.bll.network.cluster.NetworkClusterHelper;
 import org.ovirt.engine.core.bll.utils.PermissionSubject;
@@ -31,11 +30,13 @@
 import org.ovirt.engine.core.common.businessentities.network.Network;
 import org.ovirt.engine.core.common.errors.VdcBllMessages;
 import org.ovirt.engine.core.common.locks.LockingGroup;
+import org.ovirt.engine.core.common.scheduling.AffinityGroup;
 import org.ovirt.engine.core.common.utils.Pair;
 import org.ovirt.engine.core.common.vdscommands.SetVdsStatusVDSCommandParameters;
 import org.ovirt.engine.core.common.vdscommands.VDSCommandType;
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.dal.dbbroker.DbFacade;
+import org.ovirt.engine.core.utils.ReplacementUtils;
 import org.ovirt.engine.core.vdsbroker.vdsbroker.CancelMigrationVDSParameters;
 
 @InternalCommandAttribute
@@ -246,6 +247,8 @@
                                 getAsyncTaskDao().getAsyncTaskIdsByStoragePoolId(vds.getStoragePoolId()).size() > 0) {
                             addCanDoActionMessage(VdcBllMessages.VDS_CANNOT_MAINTENANCE_SPM_WITH_RUNNING_TASKS);
                             result = false;
+                        } else {
+                            result = handlePositiveEnforcingAffinityGroup(vdsId, vms);
                         }
                     }
                 }
@@ -313,6 +316,45 @@
         return result;
     }
 
+    // Currently we cannot guarantee that a VM with positive enforcing affinity will migrate
+    // to the same target host; the user has to resolve this manually before putting the host into maintenance.
+    private boolean handlePositiveEnforcingAffinityGroup(Guid vdsId, List<VM> runningVms) {
+        // less than 2 VMs in host means no positive affinity to worry about
+        if (runningVms.size() > 1) {
+            List<AffinityGroup> affinityGroups =
+                    getDbFacade().getAffinityGroupDao()
+                            .getPositiveEnforcingAffinityGroupsByRunningVmsOnVdsId(vdsId);
+            if (!affinityGroups.isEmpty()) {
+                List<Object> items = new ArrayList<>();
+                for (AffinityGroup affinityGroup : affinityGroups) {
+                    // affinity group has less than 2 vms (trivial)
+                    if (affinityGroup.getEntityIds().size() < 2) {
+                        continue;
+                    }
+                    int count = 0; // counter for running VMs in affinity group
+                    for (VM vm : runningVms) {
+                        if (affinityGroup.getEntityIds().contains(vm.getId())) {
+                            count++;
+                        }
+                    }
+                    if (count > 1) {
+                        items.add(String.format("%1$s (%2$s)",
+                                affinityGroup.getName(),
+                                StringUtils.join(affinityGroup.getEntityNames(), ", ")));
+                    }
+                }
+                if (!items.isEmpty()) {
+                    addCanDoActionMessage(VdcBllMessages.VDS_CANNOT_MAINTENANCE_VDS_HAS_AFFINITY_VMS);
+                    getReturnValue().getCanDoActionMessages()
+                            .addAll(ReplacementUtils.replaceWith("AFFINITY_GROUPS_VMS", items));
+                    return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
     private void addSharedLockEntry(VDS vds) {
         if (sharedLockMap == null) {
             sharedLockMap = new HashMap<String, Pair<String, String>>();
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
index 79f7952..5b2851e 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
@@ -9,7 +9,6 @@
 
 import org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl;
 import org.ovirt.engine.core.common.businessentities.VDS;
-import org.ovirt.engine.core.common.businessentities.VDSStatus;
 import org.ovirt.engine.core.common.businessentities.VM;
 import org.ovirt.engine.core.common.errors.VdcBllMessages;
 import org.ovirt.engine.core.common.scheduling.AffinityGroup;
@@ -71,19 +70,12 @@
         for (VM iter : getVmDao().getAllRunningByCluster(vm.getVdsGroupId())) {
             runningVMsMap.put(iter.getId(), iter);
         }
-        Map<Guid, VDS> hostMap = new HashMap<>();
-        for (VDS host : hosts) {
-            hostMap.put(host.getId(), host);
-        }
 
         Set<Guid> acceptableHosts = new HashSet<>();
         // Group all hosts for VMs with positive affinity
         for (Guid id : allVmIdsPositive) {
             VM runVm = runningVMsMap.get(id);
-            if (runVm != null && runVm.getRunOnVds() != null && hostMap.get(runVm.getRunOnVds()) != null
-                    // when a host preparing for maintenance, we should ignore the positive affinity (without that we
-                    // can't migrate).
-                    && hostMap.get(runVm.getRunOnVds()).getStatus() != VDSStatus.PreparingForMaintenance) {
+            if (runVm != null && runVm.getRunOnVds() != null) {
                 acceptableHosts.add(runVm.getRunOnVds());
             }
         }
@@ -97,6 +89,11 @@
             }
         }
 
+        Map<Guid, VDS> hostMap = new HashMap<>();
+        for (VDS host : hosts) {
+            hostMap.put(host.getId(), host);
+        }
+
         // Compute the intersection of hosts with positive and negative affinity and report that
         // contradicting rules to the log
         unacceptableHosts.retainAll(acceptableHosts);
diff --git a/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java b/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
index 5180a42..ccb1cb6 100644
--- a/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
+++ b/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
@@ -307,6 +307,7 @@
     VDS_CANNOT_MAINTENANCE_SPM_WITH_RUNNING_TASKS(ErrorType.CONFLICT),
     VDS_CANNOT_MAINTENANCE_SPM_CONTENDING(ErrorType.CONFLICT),
     VDS_CANNOT_MAINTENANCE_VDS_IS_IN_MAINTENANCE(ErrorType.CONFLICT),
+    VDS_CANNOT_MAINTENANCE_VDS_HAS_AFFINITY_VMS(ErrorType.CONFLICT),
     VDS_NO_UUID(ErrorType.CONFLICT),
     VDS_ALREADY_UP(ErrorType.CONFLICT),
     VDS_NON_RESPONSIVE(ErrorType.CONFLICT),
diff --git a/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDao.java b/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDao.java
index b10a636..a4d8213 100644
--- a/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDao.java
+++ b/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDao.java
@@ -37,4 +37,11 @@
      * @param vmId
      */
     void removeVmFromAffinityGroups(Guid vmId);
+
+    /**
+     * Get all positive enforcing affinity groups containing VMs running on a given host
+     *
+     * @param vdsId
+     */
+    List<AffinityGroup> getPositiveEnforcingAffinityGroupsByRunningVmsOnVdsId(Guid vdsId);
 }
diff --git a/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDaoImpl.java b/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDaoImpl.java
index 58b237e..ac6c7de 100644
--- a/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDaoImpl.java
+++ b/backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/scheduling/AffinityGroupDaoImpl.java
@@ -61,6 +61,13 @@
     }
 
     @Override
+    public List<AffinityGroup> getPositiveEnforcingAffinityGroupsByRunningVmsOnVdsId(Guid vdsId) {
+        return getCallsHandler().executeReadList("getPositiveEnforcingAffinityGroupsByRunningVmsOnVdsId",
+                createEntityRowMapper(),
+                getCustomMapSqlParameterSource().addValue("vds_id", vdsId));
+    }
+
+    @Override
     protected MapSqlParameterSource createFullParametersMapper(AffinityGroup entity) {
         return createIdParameterMapper(entity.getId())
                 .addValue("name", entity.getName())
diff --git a/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties b/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
index 277778d..fe92870 100644
--- a/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
+++ b/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
@@ -74,6 +74,7 @@
               To do so, verify that the node is really down by right clicking on the host and confirm that the node was shutdown manually.
 VDS_CANNOT_MAINTENANCE_VDS_IS_NOT_OPERATIONAL=Cannot switch Host to Maintenance mode, Host is not operational.
 VDS_CANNOT_MAINTENANCE_VDS_IS_IN_MAINTENANCE=Cannot switch Host to Maintenance mode. Host is already in Maintenance mode.
+VDS_CANNOT_MAINTENANCE_VDS_HAS_AFFINITY_VMS=Cannot switch Host(s) to Maintenance mode.\nThe following Enforcing Affinity Group(s) have running VMs and their affinity rules may be broken:\n${AFFINITY_GROUPS_VMS}\nPlease migrate the VMs manually, or change the Affinity Group's Enforcing setting to false.
 VDS_CANNOT_MAINTENANCE_SPM_WITH_RUNNING_TASKS=Cannot switch Host to Maintenance mode. Host has asynchronous running tasks,\n\
        wait for operation to complete and retry.
 VDS_CANNOT_MAINTENANCE_SPM_CONTENDING=Cannot switch Host to Maintenance mode. Host is contending for Storage Pool Manager,\n\
diff --git a/backend/manager/modules/dal/src/test/java/org/ovirt/engine/core/dao/AffinityGroupDaoTest.java b/backend/manager/modules/dal/src/test/java/org/ovirt/engine/core/dao/AffinityGroupDaoTest.java
index c089e84..d4cd929 100644
--- a/backend/manager/modules/dal/src/test/java/org/ovirt/engine/core/dao/AffinityGroupDaoTest.java
+++ b/backend/manager/modules/dal/src/test/java/org/ovirt/engine/core/dao/AffinityGroupDaoTest.java
@@ -142,4 +142,22 @@
         dao.removeVmFromAffinityGroups(FixturesTool.VM_RHEL5_POOL_50);
         assertTrue(dao.getAllAffinityGroupsByVmId(FixturesTool.VM_RHEL5_POOL_50).isEmpty());
     }
+
+    @Test
+    public void testEmptyGetAffinityGroupByVdsId() {
+        getAffinityGroupByVdsIdHelper(Guid.Empty, 0);
+    }
+
+    @Test
+    public void testGetAffinityGroupByVdsId() {
+        getAffinityGroupByVdsIdHelper(FixturesTool.VDS_RHEL6_NFS_SPM, 1);
+    }
+
+    private void getAffinityGroupByVdsIdHelper(Guid vdsId, int count) {
+        List<AffinityGroup> affinityGroups =
+                dao.getPositiveEnforcingAffinityGroupsByRunningVmsOnVdsId(vdsId);
+
+        assertNotNull(affinityGroups);
+        assertEquals(count, affinityGroups.size());
+    }
 }
diff --git a/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java b/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
index 358762e..53f70ec 100644
--- a/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
+++ b/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
@@ -175,6 +175,9 @@
     @DefaultStringValue("Cannot switch Host to Maintenance mode. Host is already in Maintenance mode.")
     String VDS_CANNOT_MAINTENANCE_VDS_IS_IN_MAINTENANCE();
 
+    @DefaultStringValue("Cannot switch Host(s) to Maintenance mode.\nThe following Enforcing Affinity Group(s) have running VMs and their affinity rules may be broken:\n${AFFINITY_GROUPS_VMS}\nPlease migrate the VMs manually, or change the Affinity Group's Enforcing setting to false.")
+    String VDS_CANNOT_MAINTENANCE_VDS_HAS_AFFINITY_VMS();
+
     @DefaultStringValue("Host CPU type is not supported in this cluster compatibility version or is not supported at all.")
     String CPU_TYPE_UNSUPPORTED_IN_THIS_CLUSTER_VERSION();
 
diff --git a/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties b/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
index ba416b2..59860e4 100644
--- a/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
+++ b/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
@@ -74,6 +74,7 @@
               To do so, verify that the node is really down by right clicking on the host and confirm that the node was shutdown manually.
 VDS_CANNOT_MAINTENANCE_VDS_IS_NOT_OPERATIONAL=Cannot switch Host to Maintenance mode, Host is not operational.
 VDS_CANNOT_MAINTENANCE_VDS_IS_IN_MAINTENANCE=Cannot switch Host to Maintenance mode. Host is already in Maintenance mode.
+VDS_CANNOT_MAINTENANCE_VDS_HAS_AFFINITY_VMS=Cannot switch Host(s) to Maintenance mode.\nThe following Enforcing Affinity Group(s) have running VMs and their affinity rules may be broken:\n${AFFINITY_GROUPS_VMS}\nPlease migrate the VMs manually, or change the Affinity Group's Enforcing setting to false.
 VDS_CANNOT_MAINTENANCE_SPM_WITH_RUNNING_TASKS=Cannot switch Host to Maintenance mode. Host has asynchronous running tasks,\n\
        wait for operation to complete and retry.
 VDS_CANNOT_MAINTENANCE_SPM_CONTENDING=Cannot switch Host to Maintenance mode. Host is contending for Storage Pool Manager,\n\
diff --git a/packaging/dbscripts/affinity_groups_sp.sql b/packaging/dbscripts/affinity_groups_sp.sql
index 34eb8e9..0bd3b8c 100644
--- a/packaging/dbscripts/affinity_groups_sp.sql
+++ b/packaging/dbscripts/affinity_groups_sp.sql
@@ -132,3 +132,16 @@
     WHERE vm_id = v_vm_id;
 END; $procedure$
 LANGUAGE plpgsql;
+
+-- Get all positive enforcing Affinity Groups which contain VMs running on a given host id
+Create or replace FUNCTION getPositiveEnforcingAffinityGroupsByRunningVmsOnVdsId(v_vds_id UUID) RETURNS SETOF affinity_groups_view STABLE
+AS $procedure$
+BEGIN
+    RETURN QUERY
+    SELECT DISTINCT affinity_groups_view.* FROM affinity_groups_view
+    INNER JOIN affinity_group_members ON id = affinity_group_members.affinity_group_id
+    INNER JOIN vm_dynamic ON affinity_group_members.vm_id = vm_dynamic.vm_guid AND vm_dynamic.run_on_vds = v_vds_id
+    WHERE positive AND enforcing;
+END; $procedure$
+LANGUAGE plpgsql;
+
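
For reference, the text shown to the administrator combines the new AppErrors message with the per-group items built in handlePositiveEnforcingAffinityGroup ("<group name> (<vm names>)"). The snippet below is only a rough preview of that output under simplified assumptions; the engine itself resolves ${AFFINITY_GROUPS_VMS} through ReplacementUtils and the frontend message infrastructure, not a plain String.replace.

import java.util.Arrays;
import java.util.List;

public class AffinityMessagePreviewSketch {

    // The new AppErrors value, with its literal \n escapes expanded to real newlines.
    private static final String TEMPLATE =
            "Cannot switch Host(s) to Maintenance mode.\n"
                    + "The following Enforcing Affinity Group(s) have running VMs and their affinity rules may be broken:\n"
                    + "${AFFINITY_GROUPS_VMS}\n"
                    + "Please migrate the VMs manually, or change the Affinity Group's Enforcing setting to false.";

    public static void main(String[] args) {
        // Items are formatted by the command as "<group name> (<vm names>)".
        List<String> items = Arrays.asList("db-tier (vm-a, vm-b)", "web-tier (vm-c, vm-d)");
        // Plain substitution for preview purposes only; the engine resolves the
        // placeholder through its replacement utilities, not String.replace.
        String preview = TEMPLATE.replace("${AFFINITY_GROUPS_VMS}", String.join("\n", items));
        System.out.println(preview);
    }
}
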


-- 
To view, visit http://gerrit.ovirt.org/34501
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Idd93ddca562bf739b2cd423e907be1b6da861735
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-engine
Gerrit-Branch: ovirt-engine-3.5
Gerrit-Owner: Gilad Chaplik <gchap...@redhat.com>
_______________________________________________
Engine-patches mailing list
Engine-patches@ovirt.org
http://lists.ovirt.org/mailman/listinfo/engine-patches
