Gilad Chaplik has uploaded a new change for review.

Change subject: core: support NUMA in oVirt Scheduler
......................................................................

core: support NUMA in oVirt Scheduler

Filter out hosts that cannot accommodate memory of pinned
virtual (guest) NUMA nodes within host's physical NUMA node.

Change-Id: I2b41e5ac9d633953dec7bfab5621ace8bb1b13cd
Bug-Url: https://bugzilla.redhat.com/1069303
Signed-off-by: Gilad Chaplik <gchap...@redhat.com>
---
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
M 
backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
M backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
M 
frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
M 
frontend/webadmin/modules/userportal-gwtp/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
M 
frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
6 files changed, 78 insertions(+), 0 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-engine refs/changes/89/33189/1

diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
index 208c6b7..5ede7ef 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
@@ -1,18 +1,25 @@
 package org.ovirt.engine.core.bll.scheduling.policyunits;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
 import org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl;
+import org.ovirt.engine.core.common.businessentities.NumaTuneMode;
 import org.ovirt.engine.core.common.businessentities.VDS;
 import org.ovirt.engine.core.common.businessentities.VM;
 import org.ovirt.engine.core.common.businessentities.VMStatus;
+import org.ovirt.engine.core.common.businessentities.VdsNumaNode;
+import org.ovirt.engine.core.common.businessentities.VmNumaNode;
 import org.ovirt.engine.core.common.config.Config;
 import org.ovirt.engine.core.common.config.ConfigValues;
 import org.ovirt.engine.core.common.errors.VdcBllMessages;
 import org.ovirt.engine.core.common.scheduling.PerHostMessages;
 import org.ovirt.engine.core.common.scheduling.PolicyUnit;
+import org.ovirt.engine.core.common.utils.Pair;
+import org.ovirt.engine.core.compat.Guid;
+import org.ovirt.engine.core.dal.dbbroker.DbFacade;
 
 public class MemoryPolicyUnit extends PolicyUnitImpl {
 
@@ -27,6 +34,7 @@
         if (vm.getStatus() == VMStatus.Paused) {
             return hosts;
         }
+        List<VmNumaNode> vmNumaNodes = 
DbFacade.getInstance().getVmNumaNodeDAO().getAllVmNumaNodeByVmId(vm.getId());
         for (VDS vds : hosts) {
             if (!isVMSwapValueLegal(vds)) {
                 log.debugFormat("host '{0}' swap value is illegal", 
vds.getName());
@@ -38,11 +46,67 @@
                 messages.addMessage(vds.getId(), 
VdcBllMessages.VAR__DETAIL__NOT_ENOUGH_MEMORY.toString());
                 continue;
             }
+            // In case one of VM's virtual NUMA nodes (vNode) is pinned to 
physical NUMA nodes (pNode),
+            // host will be excluded ('filter out') when:
+            // * memory tune is strict (vNode memory cannot be spread across 
several pNodes' memory)
+            // [and]
+            // * host supports NUMA configuration
+            // * there isn't enough memory for pinned vNode in pNode
+            if (vm.getNumaTuneMode() == NumaTuneMode.STRICT && 
isVmNumaPinned(vmNumaNodes)
+                    && (!vds.isNumaSupport() || !canVmNumaPinnedToVds(vm, 
vmNumaNodes, vds))) {
+                log.debugFormat("host '{0}' cannot accommodate memory of VM's 
pinned virtual NUMA nodes within host's physical NUMA nodes",
+                        vds.getName());
+                messages.addMessage(vds.getId(), 
VdcBllMessages.VAR__DETAIL__NOT_MEMORY_PINNED_NUMA.toString());
+                continue;
+            }
             list.add(vds);
         }
         return list;
     }
 
+    private boolean canVmNumaPinnedToVds(VM vm, List<VmNumaNode> nodes, VDS 
vds) {
+        List<VdsNumaNode> pNodes = 
DbFacade.getInstance().getVdsNumaNodeDAO().getAllVdsNumaNodeByVdsId(vds.getId());
+        if (pNodes == null || pNodes.isEmpty()) {
+            return false;
+        }
+        Map<Integer, VdsNumaNode> indexMap = toMap(pNodes);
+        for (VmNumaNode vNode : nodes) {
+            for (Pair<Guid, Pair<Boolean, Integer>> pair : 
vNode.getVdsNumaNodeList()) {
+                if (pair.getSecond() != null && pair.getSecond().getFirst()) {
+                    if (vNode.getMemTotal() > 
indexMap.get(pair.getSecond().getSecond())
+                            .getNumaNodeStatistics()
+                            .getMemFree()) {
+                        return false;
+                    }
+                }
+            }
+        }
+        return true;
+    }
+
+    private Map<Integer, VdsNumaNode> toMap(List<VdsNumaNode> pNodes) {
+        Map<Integer, VdsNumaNode> map = new HashMap<>();
+        for (VdsNumaNode pNode : pNodes) {
+            map.put(pNode.getIndex(), pNode);
+        }
+        return map;
+    }
+
+    private boolean isVmNumaPinned(List<VmNumaNode> nodes) {
+        if (nodes == null || nodes.isEmpty()) {
+            return false;
+        }
+        // iterate through the nodes, and see if there's at least one pinned 
node.
+        for (VmNumaNode vmNumaNode : nodes) {
+            for (Pair<Guid, Pair<Boolean, Integer>> pair : 
vmNumaNode.getVdsNumaNodeList()) {
+                if (pair.getSecond() != null && pair.getSecond().getFirst()) {
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
     /**
      * Determines whether [is VM swap value legal] [the specified VDS].
      * @param host
diff --git 
a/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
 
b/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
index 5082c88..d3ee139 100644
--- 
a/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
+++ 
b/backend/manager/modules/common/src/main/java/org/ovirt/engine/core/common/errors/VdcBllMessages.java
@@ -1003,7 +1003,9 @@
     VAR__DETAIL__LOW_CPU_LEVEL,
     VAR__DETAIL__SWAP_VALUE_ILLEGAL,
     VAR__DETAIL__NOT_ENOUGH_MEMORY,
+    VAR__DETAIL__NOT_MEMORY_PINNED_NUMA,
     VAR__DETAIL__NOT_ENOUGH_CORES,
+    VAR__DETAIL__NUMA_PINNING_FAILED,
     SCHEDULING_NO_HOSTS,
     SCHEDULING_HOST_FILTERED_REASON,
     SCHEDULING_HOST_FILTERED_REASON_WITH_DETAIL,
diff --git 
a/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties 
b/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
index 0faa2b2..274176c 100644
--- 
a/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
+++ 
b/backend/manager/modules/dal/src/main/resources/bundles/AppErrors.properties
@@ -1212,7 +1212,9 @@
 VAR__DETAIL__LOW_CPU_LEVEL=$detailMessage its CPU level ${hostCPULevel} is 
lower than the VM requires ${vmCPULevel}
 VAR__DETAIL__SWAP_VALUE_ILLEGAL=$detailMessage its swap value was illegal
 VAR__DETAIL__NOT_ENOUGH_MEMORY=$detailMessage it has insufficient free memory 
to run the VM
+VAR__DETAIL__NOT_MEMORY_PINNED_NUMA=$detailMessage cannot accommodate memory 
of VM's pinned virtual NUMA nodes within host's physical NUMA nodes.
 VAR__DETAIL__NOT_ENOUGH_CORES=$detailMessage it does not have enough cores to 
run the VM
+VAR__DETAIL__NUMA_PINNING_FAILED=$detailMessage it has insufficient NUMA node 
free memory to run the VM
 SCHEDULING_NO_HOSTS=There are no hosts to use. Check that the cluster contains 
at least one host in Up state.
 VAR__FILTERTYPE__EXTERNAL=$filterType external
 VAR__FILTERTYPE__INTERNAL=$filterType internal
diff --git 
a/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
 
b/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
index a82b8e3..5cd5dd8 100644
--- 
a/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
+++ 
b/frontend/webadmin/modules/frontend/src/main/java/org/ovirt/engine/ui/frontend/AppErrors.java
@@ -3231,9 +3231,15 @@
     @DefaultStringValue("$detailMessage it has insufficient free memory to run 
the VM")
     String VAR__DETAIL__NOT_ENOUGH_MEMORY();
 
+    @DefaultStringValue("$detailMessage cannot accommodate memory of VM's 
pinned virtual NUMA nodes within host's physical NUMA nodes.")
+    String VAR__DETAIL__NOT_MEMORY_PINNED_NUMA();
+
     @DefaultStringValue("$detailMessage it has insufficient CPU cores to run 
the VM")
     String VAR__DETAIL__NOT_ENOUGH_CORES();
 
+    @DefaultStringValue("$detailMessage it has insufficient NUMA node free 
memory to run the VM")
+    String VAR__DETAIL__NUMA_PINNING_FAILED();
+
     @DefaultStringValue("There are no hosts to use. Check that the cluster 
contains at least one host in Up state.")
     String SCHEDULING_NO_HOSTS();
 
diff --git 
a/frontend/webadmin/modules/userportal-gwtp/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
 
b/frontend/webadmin/modules/userportal-gwtp/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
index 1bbb919..b999f3e 100644
--- 
a/frontend/webadmin/modules/userportal-gwtp/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
+++ 
b/frontend/webadmin/modules/userportal-gwtp/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
@@ -1012,7 +1012,9 @@
 VAR__DETAIL__LOW_CPU_LEVEL=$detailMessage its CPU level ${hostCPULevel} is 
lower than the VM requires ${vmCPULevel}
 VAR__DETAIL__SWAP_VALUE_ILLEGAL=$detailMessage its swap value was illegal
 VAR__DETAIL__NOT_ENOUGH_MEMORY=$detailMessage it has insufficient free memory 
to run the VM
+VAR__DETAIL__NOT_MEMORY_PINNED_NUMA=$detailMessage cannot accommodate memory 
of VM's pinned virtual NUMA nodes within host's physical NUMA nodes.
 VAR__DETAIL__NOT_ENOUGH_CORES=$detailMessage it does not have enough cores to 
run the VM
+VAR__DETAIL__NUMA_PINNING_FAILED=$detailMessage it has insufficient NUMA node 
free memory to run the VM
 SCHEDULING_NO_HOSTS=There are no hosts to use. Check that the cluster contains 
at least one host in Up state.
 VAR__FILTERTYPE__EXTERNAL=$filterType external
 VAR__FILTERTYPE__INTERNAL=$filterType internal
diff --git 
a/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
 
b/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
index 12bb7e7..07c5c18 100644
--- 
a/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
+++ 
b/frontend/webadmin/modules/webadmin/src/main/resources/org/ovirt/engine/ui/frontend/AppErrors.properties
@@ -1174,7 +1174,9 @@
 VAR__DETAIL__LOW_CPU_LEVEL=$detailMessage its CPU level ${hostCPULevel} is 
lower than the VM requires ${vmCPULevel}
 VAR__DETAIL__SWAP_VALUE_ILLEGAL=$detailMessage its swap value was illegal
 VAR__DETAIL__NOT_ENOUGH_MEMORY=$detailMessage it has insufficient free memory 
to run the VM
+VAR__DETAIL__NOT_MEMORY_PINNED_NUMA=$detailMessage cannot accommodate memory 
of VM's pinned virtual NUMA nodes within host's physical NUMA nodes.
 VAR__DETAIL__NOT_ENOUGH_CORES=$detailMessage it does not have enough cores to 
run the VM
+VAR__DETAIL__NUMA_PINNING_FAILED=$detailMessage it has insufficient NUMA node 
free memory to run the VM
 SCHEDULING_NO_HOSTS=There are no hosts to use. Check that the cluster contains 
at least one host in Up state.
 VAR__FILTERTYPE__EXTERNAL=$filterType external
 VAR__FILTERTYPE__INTERNAL=$filterType internal


-- 
To view, visit http://gerrit.ovirt.org/33189
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I2b41e5ac9d633953dec7bfab5621ace8bb1b13cd
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-engine
Gerrit-Branch: ovirt-engine-3.5
Gerrit-Owner: Gilad Chaplik <gchap...@redhat.com>
_______________________________________________
Engine-patches mailing list
Engine-patches@ovirt.org
http://lists.ovirt.org/mailman/listinfo/engine-patches

Reply via email to