Martin Peřina has uploaded a new change for review.

Change subject: core: Replace oVirt logger with slf4j in bll/scheduling
......................................................................

core: Replace oVirt logger with slf4j in bll/scheduling

Replaces oVirt logger with slf4j in bll/scheduling subpackages.

Stacktraces are logged only to debug for exceptions with clear custom
error message.

Change-Id: I82070b252197458422792c49b2135fcb9b9b1430
Bug-Url: https://bugzilla.redhat.com/1109871
Signed-off-by: Martin Perina <mper...@redhat.com>
---
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/HaReservationHandling.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/PolicyUnitImpl.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SchedulingManager.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SlaValidator.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerImpl.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerObjectBuilder.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryResult.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryThread.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CPUPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CpuLevelFilterPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenDistributionBalancePolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenGuestDistributionBalancePolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationBalancePolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationWeightPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HostedEngineHAClusterFilterPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MigrationPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/NetworkPolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/PowerSavingBalancePolicyUnit.java
M 
backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
20 files changed, 164 insertions(+), 130 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-engine refs/changes/59/34359/1

diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/HaReservationHandling.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/HaReservationHandling.java
index 50350d3..7c65e83 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/HaReservationHandling.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/HaReservationHandling.java
@@ -15,15 +15,15 @@
 import org.ovirt.engine.core.dal.dbbroker.DbFacade;
 import org.ovirt.engine.core.utils.linq.LinqUtils;
 import org.ovirt.engine.core.utils.linq.Predicate;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A helper class for the scheduling mechanism for checking the HA Reservation 
status of a Cluster
  */
 public class HaReservationHandling {
 
-    private static final Log log = 
LogFactory.getLog(HaReservationHandling.class);
+    private static final Logger log = 
LoggerFactory.getLogger(HaReservationHandling.class);
     /**
      * @param cluster
      *            - Cluster to check
@@ -41,7 +41,7 @@
         }
         // HA Reservation is not possible with less than 2 hosts
         if (hosts.size() < 2) {
-            log.debugFormat("Cluster: {0} failed HA reservation check because 
there is only one host in the cluster",
+            log.debug("Cluster '{}' failed HA reservation check because there 
is only one host in the cluster",
                     cluster.getName());
             failedHosts.addAll(hosts);
             return false;
@@ -66,8 +66,9 @@
             }
         }
 
-        log.infoFormat("HA reservation status for cluster {0} is {1}", 
cluster.getName(), failedHosts.isEmpty() ? "OK"
-                : "Failed");
+        log.info("HA reservation status for cluster '{}' is '{}'",
+                cluster.getName(),
+                failedHosts.isEmpty() ? "OK" : "Failed");
         return failedHosts.isEmpty();
     }
 
@@ -90,7 +91,7 @@
                         vm.getUsageCpuPercent() * vm.getNumOfCpus()
                                 / SlaValidator.getEffectiveCpuCores(host, 
cluster.getCountThreadsAsCores());
             }
-            log.debugFormat("VM {0}. CPU usage:{1}%, RAM usage:{2}MB", 
vm.getName(), curVmCpuPercent, curVmMemSize);
+            log.debug("VM '{}'. CPU usage: {}%, RAM usage: {}MB", 
vm.getName(), curVmCpuPercent, curVmMemSize);
 
             boolean foundForCurVm = false;
             for (Pair<Guid, Pair<Integer, Integer>> hostData : 
hostsUnutilizedResources) {
@@ -134,7 +135,7 @@
             }
 
             if (!foundForCurVm) {
-                log.infoFormat("Did not found a replacement host for VM:{0}", 
vm.getName());
+                log.info("Did not find a replacement host for VM '{}'", 
vm.getName());
                 return false;
             }
 
@@ -188,7 +189,7 @@
 
         List<VM> vms = 
DbFacade.getInstance().getVmDao().getAllForVdsGroup(clusterId);
         if (vms == null || vms.isEmpty()) {
-            log.debugFormat("No VMs available for this cluster with id {0}", 
clusterId);
+            log.debug("No VMs available for this cluster with id '{}'", 
clusterId);
             // return empty map
             return Collections.EMPTY_MAP;
         }
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/PolicyUnitImpl.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/PolicyUnitImpl.java
index aa827df..ee9376a 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/PolicyUnitImpl.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/PolicyUnitImpl.java
@@ -34,10 +34,12 @@
 import org.ovirt.engine.core.common.scheduling.PolicyUnitType;
 import org.ovirt.engine.core.common.utils.Pair;
 import org.ovirt.engine.core.compat.Guid;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(PolicyUnitImpl.class);
+
     public static final int MaxSchedulerWeight = Config.<Integer> 
getValue(ConfigValues.MaxSchedulerWeight);;
 
     public static PolicyUnitImpl getPolicyUnitImpl(PolicyUnit policyUnit) {
@@ -111,7 +113,6 @@
         throw new NotImplementedException("policyUnit: " + 
policyUnit.getName());
     }
 
-    protected static final Log log = LogFactory.getLog(PolicyUnitImpl.class);
     private final PolicyUnit policyUnit;
     protected VdsFreeMemoryChecker memoryChecker;
 
@@ -120,12 +121,12 @@
     }
 
     public List<VDS> filter(List<VDS> hosts, VM vm, Map<String, String> 
parameters, PerHostMessages messages) {
-        log.error("policy unit:" + getPolicyUnit().getName() + "filter is not 
implemented");
+        log.error("Policy unit '{}' filter is not implemented", 
getPolicyUnit().getName());
         return hosts;
     }
 
     public List<Pair<Guid, Integer>> score(List<VDS> hosts, VM vm, Map<String, 
String> parameters) {
-        log.error("policy unit:" + getPolicyUnit().getName() + "function is 
not implemented");
+        log.error("Policy unit '{}' function is not implemented", 
getPolicyUnit().getName());
         List<Pair<Guid, Integer>> pairs = new ArrayList<Pair<Guid, Integer>>();
         for (VDS vds : hosts) {
             pairs.add(new Pair<Guid, Integer>(vds.getId(), 1));
@@ -137,7 +138,7 @@
             List<VDS> hosts,
             Map<String, String> parameters,
             ArrayList<String> messages) {
-        log.error("policy unit:" + getPolicyUnit().getName() + "balance is not 
implemented");
+        log.error("Policy unit '{}' balance is not implemented", 
getPolicyUnit().getName());
         return null;
     }
 
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SchedulingManager.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SchedulingManager.java
index 6b4d481..72396e6 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SchedulingManager.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SchedulingManager.java
@@ -45,13 +45,13 @@
 import org.ovirt.engine.core.dao.VdsGroupDAO;
 import org.ovirt.engine.core.dao.scheduling.ClusterPolicyDao;
 import org.ovirt.engine.core.dao.scheduling.PolicyUnitDao;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
 import org.ovirt.engine.core.utils.timer.OnTimerMethodAnnotation;
 import org.ovirt.engine.core.utils.timer.SchedulerUtilQuartzImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SchedulingManager {
-    private static final Log log = LogFactory.getLog(SchedulingManager.class);
+    private static final Logger log = 
LoggerFactory.getLogger(SchedulingManager.class);
     /**
      * singleton
      */
@@ -256,7 +256,7 @@
             String correlationId) {
         clusterLockMap.putIfAbsent(cluster.getId(), new Semaphore(1));
         try {
-            log.debugFormat("scheduling started, correlation Id: {0}", 
correlationId);
+            log.debug("Scheduling started, correlation Id: {}", correlationId);
             checkAllowOverbooking(cluster);
             clusterLockMap.get(cluster.getId()).acquire();
             List<VDS> vdsList = getVdsDAO()
@@ -301,7 +301,7 @@
                 clusterLockMap.get(cluster.getId()).drainPermits();
                 clusterLockMap.get(cluster.getId()).release();
             }
-            log.debugFormat("Scheduling ended, correlation Id: {0}", 
correlationId);
+            log.debug("Scheduling ended, correlation Id: {}", correlationId);
         }
     }
 
@@ -351,7 +351,7 @@
                 && Config.<Boolean> 
getValue(ConfigValues.SchedulerAllowOverBooking)
                 && clusterLockMap.get(cluster.getId()).getQueueLength() >=
                 Config.<Integer> 
getValue(ConfigValues.SchedulerOverBookingThreshold)) {
-            log.infoFormat("scheduler: cluster ({0}) lock is skipped (cluster 
is allowed to overbook)",
+            log.info("Scheduler: cluster '{}' lock is skipped (cluster is 
allowed to overbook)",
                     cluster.getName());
             // release pending threads (requests) and current one (+1)
             clusterLockMap.get(cluster.getId())
@@ -377,7 +377,7 @@
                         && 
clusterLockMap.get(cluster.getId()).getQueueLength() >
                         threshold;
         if (crossedThreshold) {
-            log.infoFormat("Scheduler: skipping whighing hosts in cluster {0}, 
since there are more than {1} parallel requests",
+            log.info("Scheduler: skipping weighing hosts in cluster '{}', 
since there are more than '{}' parallel requests",
                     cluster.getName(),
                     threshold);
         }
@@ -537,17 +537,12 @@
                                   String correlationId) {
         for (VDS host: oldList) {
             if (!newSet.contains(host.getId())) {
-                String reason =
-                        String.format("Candidate host %s (%s) was filtered out 
by %s filter %s",
-                                host.getName(),
-                                host.getId().toString(),
-                                actionName.name(),
-                                filterName);
                 result.addReason(host.getId(), host.getName(), actionName, 
filterName);
-                if (!StringUtils.isEmpty(correlationId)) {
-                    reason = String.format("%s (correlation id: %s)", reason, 
correlationId);
-                }
-                log.info(reason);
+                log.info("Candidate host '{}' ('{}') was filtered out by '{}' 
filter '{}'",
+                        host.getName(),
+                        host.getId(),
+                        actionName.name(),
+                        filterName);
             }
         }
     }
@@ -822,7 +817,7 @@
 
                         logable.addCustomValue("Hosts", failedHostsStr);
                         AlertDirector.Alert(logable, 
AuditLogType.CLUSTER_ALERT_HA_RESERVATION);
-                        log.infoFormat("Cluster: {0} fail to pass HA 
reservation check.", cluster.getName());
+                        log.info("Cluster '{}' fails to pass HA reservation 
check.", cluster.getName());
                     }
 
                     boolean clusterHaStatusFromPreviousCycle =
@@ -848,7 +843,7 @@
 
     @OnTimerMethodAnnotation("performLoadBalancing")
     public void performLoadBalancing() {
-        log.debugFormat("Load Balancer timer entered.");
+        log.debug("Load Balancer timer entered.");
         List<VDSGroup> clusters = 
DbFacade.getInstance().getVdsGroupDao().getAll();
         for (VDSGroup cluster : clusters) {
             ClusterPolicy policy = policyMap.get(cluster.getClusterPolicyId());
@@ -895,7 +890,7 @@
         List<String> list = new ArrayList<String>();
         final PolicyUnitImpl policyUnitImpl = policyUnits.get(policyUnitId);
         if (policyUnitImpl == null) {
-            log.warnFormat("Trying to find usages of non-existing policy unit 
%s", policyUnitId.toString());
+            log.warn("Trying to find usages of non-existing policy unit '{}'", 
policyUnitId);
             return null;
         }
 
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SlaValidator.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SlaValidator.java
index bb88a37..cec8330 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SlaValidator.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/SlaValidator.java
@@ -4,11 +4,11 @@
 import org.ovirt.engine.core.common.businessentities.VDSGroup;
 import org.ovirt.engine.core.common.businessentities.VM;
 import org.ovirt.engine.core.dal.dbbroker.DbFacade;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SlaValidator {
-    private static final Log log = LogFactory.getLog(SlaValidator.class);
+    private static final Logger log = 
LoggerFactory.getLogger(SlaValidator.class);
 
     private static final SlaValidator instance = new SlaValidator();
 
@@ -23,17 +23,15 @@
                     curVds.getMemCommited() + curVds.getPendingVmemSize() + 
curVds.getGuestOverhead() + curVds
                             .getReservedMem() + vm.getMinAllocatedMem();
             double vdsMemLimit = curVds.getMaxVdsMemoryOverCommit() * 
curVds.getPhysicalMemMb() / 100.0;
-            if (log.isDebugEnabled()) {
-                log.debugFormat("hasMemoryToRunVM: host {0} pending vmem size 
is : {1} MB",
-                        curVds.getName(),
-                        curVds.getPendingVmemSize());
-                log.debugFormat("Host Mem Conmmitted: {0}, Host Reserved Mem: 
{1}, Host Guest Overhead {2}, VM Min Allocated Mem {3}",
-                        curVds.getMemCommited(),
-                        curVds.getReservedMem(),
-                        curVds.getGuestOverhead(),
-                        vm.getMinAllocatedMem());
-                log.debugFormat("{0} <= ???  {1}", vdsCurrentMem, vdsMemLimit);
-            }
+            log.debug("hasMemoryToRunVM: host '{}' pending vmem size is : {} 
MB",
+                    curVds.getName(),
+                    curVds.getPendingVmemSize());
+            log.debug("Host Mem Committed: {}, Host Reserved Mem: {}, Host 
Guest Overhead {}, VM Min Allocated Mem {}",
+                    curVds.getMemCommited(),
+                    curVds.getReservedMem(),
+                    curVds.getGuestOverhead(),
+                    vm.getMinAllocatedMem());
+            log.debug("{} <= ???  {}", vdsCurrentMem, vdsMemLimit);
             retVal = (vdsCurrentMem <= vdsMemLimit);
         }
         return retVal;
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerImpl.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerImpl.java
index e4d3ab1..310a401 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerImpl.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerImpl.java
@@ -15,8 +15,8 @@
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector;
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogableBase;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ExternalSchedulerBrokerImpl implements ExternalSchedulerBroker {
 
@@ -27,7 +27,7 @@
 
     private static Object[] EMPTY = new Object[] {};
 
-    private final static Log log = 
LogFactory.getLog(ExternalSchedulerBrokerImpl.class);
+    private final static Logger log = 
LoggerFactory.getLogger(ExternalSchedulerBrokerImpl.class);
 
     private XmlRpcClientConfigImpl config = null;
 
@@ -40,7 +40,8 @@
         try {
             config.setServerURL(new URL(extSchedUrl));
         } catch (MalformedURLException e) {
-            log.error("External scheduler got bad url", e);
+            log.error("External scheduler got bad url: {}", e.getMessage());
+            log.debug("Exception", e);
         }
     }
 
@@ -53,7 +54,8 @@
             return parseDiscoverResults(result);
 
         } catch (XmlRpcException e) {
-            log.error("Could not communicate with the external scheduler while 
discovering", e);
+            log.error("Error communicating with the external scheduler while 
discovering: {}", e.getMessage());
+            log.debug("Exception", e);
             return null;
         }
     }
@@ -83,7 +85,8 @@
             return 
ExternalSchedulerBrokerObjectBuilder.getFilteringResult(xmlRpcStruct).getHosts();
 
         } catch (XmlRpcException e) {
-            log.error("Could not communicate with the external scheduler while 
filtering", e);
+            log.error("Error communicating with the external scheduler while 
filtering: {}", e.getMessage());
+            log.debug("Exception", e);
             auditLogFailedToConnect();
             return hostIDs;
         }
@@ -131,7 +134,9 @@
             return 
ExternalSchedulerBrokerObjectBuilder.getScoreResult(result).getHosts();
 
         } catch (XmlRpcException e) {
-            log.error("Could not communicate with the external scheduler while 
running weight modules", e);
+            log.error("Error communicating with the external scheduler while 
running weight modules: {}",
+                    e.getMessage());
+            log.debug("Exception", e);
             auditLogFailedToConnect();
             return null;
         }
@@ -175,7 +180,8 @@
             return 
ExternalSchedulerBrokerObjectBuilder.getBalanceResults(result).getResult();
 
         } catch (XmlRpcException e) {
-            log.error("Could not communicate with the external scheduler while 
balancing", e);
+            log.error("Error communicating with the external scheduler while 
balancing: {}", e.getMessage());
+            log.debug("Exception", e);
             auditLogFailedToConnect();
             return null;
         }
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerObjectBuilder.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerObjectBuilder.java
index ba648b1..9ad2827 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerObjectBuilder.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerBrokerObjectBuilder.java
@@ -1,17 +1,17 @@
 package org.ovirt.engine.core.bll.scheduling.external;
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.ovirt.engine.core.common.AuditLogType;
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector;
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogableBase;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
-
-import java.util.HashMap;
-import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ExternalSchedulerBrokerObjectBuilder {
-    private final static Log log = 
LogFactory.getLog(ExternalSchedulerBrokerObjectBuilder.class);
+    private final static Logger log = 
LoggerFactory.getLogger(ExternalSchedulerBrokerObjectBuilder.class);
     private final static int RESULT_OK = 0;
 
     private static void auditLogPluginError(String pluginName, String 
errorMessage) {
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryResult.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryResult.java
index 6158091..a1623f9 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryResult.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryResult.java
@@ -7,12 +7,12 @@
 
 import org.apache.commons.lang.StringUtils;
 import 
org.ovirt.engine.core.common.utils.customprop.SimpleCustomPropertiesUtil;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 public class ExternalSchedulerDiscoveryResult {
-    private static final Log log = 
LogFactory.getLog(ExternalSchedulerDiscoveryResult.class);
+    private static final Logger log = 
LoggerFactory.getLogger(ExternalSchedulerDiscoveryResult.class);
     private static final String FILTERS = "filters";
     private static final String SCORES = "scores";
     private static final String BALANCE = "balance";
@@ -64,7 +64,8 @@
         }
         return true;
         } catch (Exception e) {
-            log.error("External scheduler error, exception why parsing 
discovery results", e);
+            log.error("External scheduler error, exception while parsing 
discovery results: {}", e.getMessage());
+            log.debug("Exception", e);
             return false;
         }
     }
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryThread.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryThread.java
index bc8728d..4df2fc3 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryThread.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/external/ExternalSchedulerDiscoveryThread.java
@@ -16,12 +16,12 @@
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector;
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogableBase;
 import org.ovirt.engine.core.dao.scheduling.PolicyUnitDao;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ExternalSchedulerDiscoveryThread extends Thread {
 
-    private final static Log log = 
LogFactory.getLog(ExternalSchedulerDiscoveryThread.class);
+    private final static Logger log = 
LoggerFactory.getLogger(ExternalSchedulerDiscoveryThread.class);
 
     @Override
     public void run() {
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CPUPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CPUPolicyUnit.java
index ee61641..28f0254 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CPUPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CPUPolicyUnit.java
@@ -11,8 +11,11 @@
 import org.ovirt.engine.core.common.errors.VdcBllMessages;
 import org.ovirt.engine.core.common.scheduling.PerHostMessages;
 import org.ovirt.engine.core.common.scheduling.PolicyUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CPUPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(CPUPolicyUnit.class);
 
     public CPUPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
@@ -25,7 +28,7 @@
             Integer cores = 
SlaValidator.getInstance().getEffectiveCpuCores(vds);
             if (cores != null && vm.getNumOfCpus() > cores) {
                 messages.addMessage(vds.getId(), 
VdcBllMessages.VAR__DETAIL__NOT_ENOUGH_CORES.toString());
-                log.debugFormat("host {0} has less cores ({1}) than vm cores 
({2})",
+                log.debug("Host '{}' has less cores ({}) than vm cores ({})",
                         vds.getName(),
                         cores,
                         vm.getNumOfCpus());
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CpuLevelFilterPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CpuLevelFilterPolicyUnit.java
index aaa9d2d..c2f345b 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CpuLevelFilterPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/CpuLevelFilterPolicyUnit.java
@@ -1,5 +1,9 @@
 package org.ovirt.engine.core.bll.scheduling.policyunits;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.commons.lang.StringUtils;
 import org.ovirt.engine.core.bll.CpuFlagsManagerHandler;
 import org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl;
@@ -9,12 +13,12 @@
 import org.ovirt.engine.core.common.errors.VdcBllMessages;
 import org.ovirt.engine.core.common.scheduling.PerHostMessages;
 import org.ovirt.engine.core.common.scheduling.PolicyUnit;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class CpuLevelFilterPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(CpuLevelFilterPolicyUnit.class);
+
     public CpuLevelFilterPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
     }
@@ -32,12 +36,12 @@
                     int compareResult = 
CpuFlagsManagerHandler.compareCpuLevels(vm.getCpuName(), hostCpuName, 
vm.getVdsGroupCompatibilityVersion());
                     if (compareResult <= 0) {
                         hostsToRunOn.add(host);
-                        log.debugFormat("Host {0} wasn't filtered out as it 
has a CPU level ({1}) which is higher or equal than the CPU level the VM was 
run with ({2})",
+                        log.debug("Host '{}' wasn't filtered out as it has a 
CPU level ({}) which is higher or equal than the CPU level the VM was run with 
({})",
                                 host.getName(),
                                 hostCpuName,
                                 vm.getCpuName());
                     } else {
-                        log.debugFormat("Host {0} was filtered out as it has a 
CPU level ({1}) which is lower than the CPU level the VM was run with ({2})",
+                        log.debug("Host '{}' was filtered out as it has a CPU 
level ({}) which is lower than the CPU level the VM was run with ({})",
                                 host.getName(),
                                 hostCpuName,
                                 vm.getCpuName());
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenDistributionBalancePolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenDistributionBalancePolicyUnit.java
index 3ec29f6..92bfe78 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenDistributionBalancePolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenDistributionBalancePolicyUnit.java
@@ -26,8 +26,11 @@
 import org.ovirt.engine.core.dao.VmDAO;
 import org.ovirt.engine.core.utils.linq.LinqUtils;
 import org.ovirt.engine.core.utils.linq.Predicate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class EvenDistributionBalancePolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(EvenDistributionBalancePolicyUnit.class);
 
     private static final String HIGH_UTILIZATION = "HighUtilization";
 
@@ -42,7 +45,7 @@
             ArrayList<String> messages) {
         if (hosts == null || hosts.size() < 2) {
             int hostCount = hosts == null ? 0 : hosts.size();
-            log.debugFormat("No balancing for cluster {0}, contains only {1} 
host(s)", cluster.getName(), hostCount);
+            log.debug("No balancing for cluster '{}', contains only {} 
host(s)", cluster.getName(), hostCount);
             return null;
         }
         // get vds that over committed for the time defined
@@ -54,7 +57,7 @@
 
         // if there aren't any overutilized hosts, then there is nothing to 
balance...
         if (overUtilizedHosts == null || overUtilizedHosts.size() == 0) {
-            log.debugFormat("There is no over-utilized host in cluster '{0}'", 
cluster.getName());
+            log.debug("There is no over-utilized host in cluster '{}'", 
cluster.getName());
             return null;
         }
 
@@ -63,7 +66,7 @@
 
         //if no host has a spare power, then there is nothing we can do to 
balance it..
         if (underUtilizedHosts == null || underUtilizedHosts.size() == 0) {
-            log.warnFormat("All hosts are over-utilized, can't balance the 
cluster '{0}'", cluster.getName());
+            log.warn("All hosts are over-utilized, can't balance the cluster 
'{}'", cluster.getName());
             return null;
         }
         VDS randomHost = overUtilizedHosts.get(new 
Random().nextInt(overUtilizedHosts.size()));
@@ -114,7 +117,7 @@
             log.info("VdsLoadBalancer: vm selection - no vm without pending 
found.");
             result = Collections.min(vms, new VmCpuUsageComparator());
         } else {
-            log.infoFormat("VdsLoadBalancer: vm selection - selected vm: {0}, 
cpu: {1}.", result.getName(),
+            log.info("VdsLoadBalancer: vm selection - selected vm: '{}', cpu: 
{}.", result.getName(),
                     result.getUsageCpuPercent());
         }
         return result;
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenGuestDistributionBalancePolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenGuestDistributionBalancePolicyUnit.java
index 9ff1639..b6523c2 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenGuestDistributionBalancePolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/EvenGuestDistributionBalancePolicyUnit.java
@@ -1,5 +1,8 @@
 package org.ovirt.engine.core.bll.scheduling.policyunits;
 
+import java.util.List;
+import java.util.Map;
+
 import org.apache.commons.lang.math.NumberUtils;
 import org.ovirt.engine.core.common.businessentities.VDS;
 import org.ovirt.engine.core.common.businessentities.VDSGroup;
@@ -8,18 +11,15 @@
 import org.ovirt.engine.core.common.scheduling.PolicyUnit;
 import org.ovirt.engine.core.utils.linq.LinqUtils;
 import org.ovirt.engine.core.utils.linq.Predicate;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
-
-import java.util.List;
-import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class EvenGuestDistributionBalancePolicyUnit extends 
EvenDistributionBalancePolicyUnit {
 
     private final int spmVmGraceDefault;
     private final int migrationThresholdDefault;
     private final int highVmCountDefault;
-    protected static final Log log = 
LogFactory.getLog(EvenGuestDistributionBalancePolicyUnit.class);
+    protected static final Logger log = 
LoggerFactory.getLogger(EvenGuestDistributionBalancePolicyUnit.class);
 
     public EvenGuestDistributionBalancePolicyUnit (PolicyUnit policyUnit) {
         super(policyUnit);
@@ -65,7 +65,7 @@
         final VDS worstVDS = getWorstVDS(relevantHosts, parameters);
         final int worstVdsOccupiedVmSlots = getOccupiedVmSlots(worstVDS, 
parameters);
         if (worstVdsOccupiedVmSlots < highVmCountUtilization) {
-            log.infoFormat("There is no host with more than {0} running 
guests, no balancing is needed",
+            log.info("There is no host with more than {} running guests, no 
balancing is needed",
                     highVmCountUtilization);
             return null;
         }
@@ -99,7 +99,7 @@
         });
 
         if (underUtilizedHosts.size() == 0) {
-            log.warnFormat("There is no host with less than {0} running 
guests",
+            log.warn("There is no host with less than {} running guests",
                     worstVdsOccupiedVmSlots - migrationThreshold);
         }
 
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationBalancePolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationBalancePolicyUnit.java
index 0302e8c..a42e6b3 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationBalancePolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationBalancePolicyUnit.java
@@ -20,8 +20,8 @@
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.utils.linq.LinqUtils;
 import org.ovirt.engine.core.utils.linq.Predicate;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This balancing policy, is for use in cases the user selected HA Reservation 
for its Cluster. The basic methodology
@@ -31,7 +31,7 @@
  */
 public class HaReservationBalancePolicyUnit extends PolicyUnitImpl {
 
-    private static final Log log = 
LogFactory.getLog(HaReservationBalancePolicyUnit.class);
+    private static final Logger log = 
LoggerFactory.getLogger(HaReservationBalancePolicyUnit.class);
 
     private static final int DEFAULT_OVER_UTILIZATION_VALUE = 200;
     private static final long serialVersionUID = 4926515666890804243L;
@@ -46,13 +46,13 @@
             Map<String, String> parameters,
             ArrayList<String> messages) {
 
-        log.debugFormat("Started HA reservation balancing method for cluster: 
{0}", cluster.getName());
+        log.debug("Started HA reservation balancing method for cluster '{}'", 
cluster.getName());
         if (!cluster.supportsHaReservation()) {
             return null;
         }
         if (hosts == null || hosts.size() < 2) {
             int hostCount = hosts == null ? 0 : hosts.size();
-            log.debugFormat("No balancing for cluster {0}, contains only {1} 
host(s)", cluster.getName(), hostCount);
+            log.debug("No balancing for cluster '{}', contains only {} 
host(s)", cluster.getName(), hostCount);
             return null;
         }
 
@@ -71,22 +71,22 @@
             overUtilizationParam = Config.<Integer> 
getValue(ConfigValues.OverUtilizationForHaReservation);
         }
 
-        log.debugFormat("optimalHaDistribution value:{0}", 
optimalHaDistribution);
+        log.debug("optimalHaDistribution value: {}", optimalHaDistribution);
 
         int overUtilizationThreshold = (int) Math.ceil(optimalHaDistribution * 
(overUtilizationParam / 100.0));
-        log.debugFormat("overUtilizationThreshold value: {0}", 
overUtilizationThreshold);
+        log.debug("overUtilizationThreshold value: {}", 
overUtilizationThreshold);
 
         List<VDS> overUtilizedHosts =
                 getHostUtilizedByCondition(hosts, hostId2HaVmMapping, 
overUtilizationThreshold, Condition.MORE_THAN);
         if (overUtilizedHosts.isEmpty()) {
-            log.debugFormat("No over utilized hosts for cluster: {0}", 
cluster.getName());
+            log.debug("No over utilized hosts for cluster '{}'", 
cluster.getName());
             return null;
         }
 
         List<VDS> underUtilizedHosts =
                 getHostUtilizedByCondition(hosts, hostId2HaVmMapping, 
overUtilizationParam, Condition.LESS_THAN);
         if (underUtilizedHosts.size() == 0) {
-            log.debugFormat("No under utilized hosts for cluster: {0}", 
cluster.getName());
+            log.debug("No under utilized hosts for cluster '{}'", 
cluster.getName());
             return null;
         }
 
@@ -95,13 +95,13 @@
 
         List<VM> migrableVmsOnRandomHost = 
getMigrableVmsRunningOnVds(randomHost.getId(), hostId2HaVmMapping);
         if (migrableVmsOnRandomHost.isEmpty()) {
-            log.debugFormat("No migratable hosts were found for cluster: {0} 
", cluster.getName());
+            log.debug("No migratable hosts were found for cluster '{}'", 
cluster.getName());
             return null;
         }
 
         // Get random vm to migrate
         VM vm = migrableVmsOnRandomHost.get(new 
Random().nextInt(migrableVmsOnRandomHost.size()));
-        log.infoFormat("VM to be migrated:{0}", vm.getName());
+        log.info("VM to be migrated '{}'", vm.getName());
 
         List<Guid> underUtilizedHostsKeys = new ArrayList<Guid>();
         for (VDS vds : underUtilizedHosts) {
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationWeightPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationWeightPolicyUnit.java
index f589824..581168a 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationWeightPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HaReservationWeightPolicyUnit.java
@@ -16,12 +16,12 @@
 import org.ovirt.engine.core.common.utils.Pair;
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.dal.dbbroker.DbFacade;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class HaReservationWeightPolicyUnit extends PolicyUnitImpl {
 
-    private static final Log log = 
LogFactory.getLog(HaReservationWeightPolicyUnit.class);
+    private static final Logger log = 
LoggerFactory.getLogger(HaReservationWeightPolicyUnit.class);
 
     private static final int RATIO_FACTOR = 100;
     private static final int DEFAULT_SCORE = 0;
@@ -83,7 +83,7 @@
 
                 scores.add(new Pair<Guid, Integer>(host.getId(), haCount));
 
-                log.infoFormat("Score for host:{0} is {1}", host.getName(), 
haCount);
+                log.info("Score for host '{}' is {}", host.getName(), haCount);
             }
 
         }
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HostedEngineHAClusterFilterPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HostedEngineHAClusterFilterPolicyUnit.java
index 14481c5..b329513 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HostedEngineHAClusterFilterPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/HostedEngineHAClusterFilterPolicyUnit.java
@@ -1,17 +1,21 @@
 package org.ovirt.engine.core.bll.scheduling.policyunits;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
 import org.ovirt.engine.core.bll.scheduling.PolicyUnitImpl;
 import org.ovirt.engine.core.common.businessentities.VDS;
 import org.ovirt.engine.core.common.businessentities.VM;
 import org.ovirt.engine.core.common.errors.VdcBllMessages;
 import org.ovirt.engine.core.common.scheduling.PerHostMessages;
 import org.ovirt.engine.core.common.scheduling.PolicyUnit;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class HostedEngineHAClusterFilterPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(HostedEngineHAClusterFilterPolicyUnit.class);
+
     public HostedEngineHAClusterFilterPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
     }
@@ -27,11 +31,12 @@
                 int haScore = host.getHighlyAvailableScore();
                 if (haScore > 0) {
                     hostsToRunOn.add(host);
-                    log.debugFormat("Host {0} wasn't filtered out as it has a 
score of {1}",
+                    log.debug("Host '{}' wasn't filtered out as it has a score 
of {}",
                             host.getName(),
                             haScore);
                 } else {
-                    log.debugFormat("Host {0} was filtered out as it doesn't 
have a positive score (the score is {1})", host.getName(), haScore);
+                    log.debug("Host '{}' was filtered out as it doesn't have a 
positive score (the score is {})",
+                            host.getName(), haScore);
                     messages.addMessage(host.getId(), 
VdcBllMessages.VAR__DETAIL__NOT_HE_HOST.name());
                 }
             }
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
index 5ede7ef..844ef18 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MemoryPolicyUnit.java
@@ -20,8 +20,11 @@
 import org.ovirt.engine.core.common.utils.Pair;
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.dal.dbbroker.DbFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class MemoryPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(MemoryPolicyUnit.class);
 
     public MemoryPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
@@ -37,12 +40,12 @@
         List<VmNumaNode> vmNumaNodes = 
DbFacade.getInstance().getVmNumaNodeDAO().getAllVmNumaNodeByVmId(vm.getId());
         for (VDS vds : hosts) {
             if (!isVMSwapValueLegal(vds)) {
-                log.debugFormat("host '{0}' swap value is illegal", 
vds.getName());
+                log.debug("Host '{}' swap value is illegal", vds.getName());
                 messages.addMessage(vds.getId(), 
VdcBllMessages.VAR__DETAIL__SWAP_VALUE_ILLEGAL.toString());
                 continue;
             }
             if (!memoryChecker.evaluate(vds, vm)) {
-                log.debugFormat("host '{0}' has insufficient memory to run the 
VM", vds.getName());
+                log.debug("Host '{}' has insufficient memory to run the VM", 
vds.getName());
                 messages.addMessage(vds.getId(), 
VdcBllMessages.VAR__DETAIL__NOT_ENOUGH_MEMORY.toString());
                 continue;
             }
@@ -54,7 +57,7 @@
             // * there isn't enough memory for pinned vNode in pNode
             if (vm.getNumaTuneMode() == NumaTuneMode.STRICT && 
isVmNumaPinned(vmNumaNodes)
                     && (!vds.isNumaSupport() || !canVmNumaPinnedToVds(vm, 
vmNumaNodes, vds))) {
-                log.debugFormat("host '{0}' cannot accommodate memory of VM's 
pinned virtual NUMA nodes within host's physical NUMA nodes",
+                log.debug("Host '{}' cannot accommodate memory of VM's pinned 
virtual NUMA nodes within host's physical NUMA nodes",
                         vds.getName());
                 messages.addMessage(vds.getId(), 
VdcBllMessages.VAR__DETAIL__NOT_MEMORY_PINNED_NUMA.toString());
                 continue;
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MigrationPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MigrationPolicyUnit.java
index 1b184a4..001d123 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MigrationPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/MigrationPolicyUnit.java
@@ -8,8 +8,11 @@
 import org.ovirt.engine.core.common.businessentities.VM;
 import org.ovirt.engine.core.common.scheduling.PerHostMessages;
 import org.ovirt.engine.core.common.scheduling.PolicyUnit;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class MigrationPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(MigrationPolicyUnit.class);
 
     public MigrationPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
@@ -20,7 +23,7 @@
         if (vm.getRunOnVds() != null) {
             for (VDS host : hosts) {
                 if (host.getId().equals(vm.getRunOnVds())) {
-                    log.debugFormat("Vm {0} run on host {1}, filtering host", 
vm.getName(), host.getName());
+                    log.debug("Vm '{}' run on host '{}', filtering host", 
vm.getName(), host.getName());
                     hosts.remove(host);
                     break;
                 }
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/NetworkPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/NetworkPolicyUnit.java
index a9c5391..276cc57 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/NetworkPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/NetworkPolicyUnit.java
@@ -26,8 +26,12 @@
 import org.ovirt.engine.core.dao.network.NetworkDao;
 import org.ovirt.engine.core.dao.network.VmNetworkInterfaceDao;
 import org.ovirt.engine.core.utils.NetworkUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class NetworkPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(NetworkPolicyUnit.class);
+
     public NetworkPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
     }
@@ -132,7 +136,7 @@
                 }
                 StringBuilder sbBuilder = new StringBuilder();
                 
sbBuilder.append(Entities.vmInterfacesByNetworkName(vmNICs).keySet());
-                log.debugFormat("host {0} is missing networks required by VM 
nics {1}",
+                log.debug("Host '{}' is missing networks required by VM nics 
'{}'",
                         vds.getName(),
                         sbBuilder.toString());
                 return new 
ValidationResult(VdcBllMessages.ACTION_TYPE_FAILED_VDS_VM_NETWORKS);
@@ -184,12 +188,12 @@
 
         // Check if display network attached to host and has a proper boot 
protocol
         if (displayNic == null) {
-            log.debugFormat("host {0} is missing the cluster's display 
network", host.getName());
+            log.debug("Host '{}' is missing the cluster's display network", 
host.getName());
             return new 
ValidationResult(VdcBllMessages.ACTION_TYPE_FAILED_MISSING_DISPLAY_NETWORK);
         }
 
         if (displayNic.getBootProtocol() == NetworkBootProtocol.NONE) {
-            log.debugFormat("Host {0} has the display network {1} configured 
with improper boot protocol on interface {2}.",
+            log.debug("Host '{}' has the display network '{}' configured with 
improper boot protocol on interface '{}'.",
                     host.getName(),
                     displayNetwork.getName(),
                     displayNic.getName());
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/PowerSavingBalancePolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/PowerSavingBalancePolicyUnit.java
index 297bf9d..ec461af 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/PowerSavingBalancePolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/PowerSavingBalancePolicyUnit.java
@@ -31,8 +31,11 @@
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogableBase;
 import org.ovirt.engine.core.utils.linq.LinqUtils;
 import org.ovirt.engine.core.utils.linq.Predicate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class PowerSavingBalancePolicyUnit extends 
EvenDistributionBalancePolicyUnit {
+    private static final Logger log = 
LoggerFactory.getLogger(PowerSavingBalancePolicyUnit.class);
 
     public PowerSavingBalancePolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
@@ -119,9 +122,9 @@
         }
         else {
             /* Should not ever happen... */
-            log.errorFormat("Unknown host power management transition {0} -> 
{1}",
-                    currentStatus.toString(),
-                    targetStatus.toString());
+            log.error("Unknown host power management transition '{}' -> '{}'",
+                    currentStatus,
+                    targetStatus);
         }
     }
 
@@ -183,7 +186,7 @@
 
         /* Automatic power management is disabled */
         if (!enableAutoPM.booleanValue()) {
-            log.infoFormat("Automatic power management is disabled for cluster 
{0}.", cluster.getName());
+            log.info("Automatic power management is disabled for cluster 
'{}'.", cluster.getName());
             return null;
         }
 
@@ -191,7 +194,7 @@
         if (requiredReserve > emptyHosts.size()
                 && pmDownHosts.isEmpty()
                 && pmMaintenanceHosts.isEmpty()) {
-            log.infoFormat("Cluster {0} does not have enough spare hosts, but 
no additional host is available.",
+            log.info("Cluster '{}' does not have enough spare hosts, but no 
additional host is available.",
                     cluster.getName());
             return null;
         }
@@ -201,7 +204,7 @@
          */
         else if (requiredReserve < emptyHosts.size()
                 && pmMaintenanceHosts.size() > 1) {
-            log.infoFormat("Cluster {0} does have enough spare hosts, shutting 
one host down.", cluster.getName());
+            log.info("Cluster '{}' does have enough spare hosts, shutting one 
host down.", cluster.getName());
             return new Pair<>(pmMaintenanceHosts.get(0), VDSStatus.Down);
         }
 
@@ -218,7 +221,7 @@
             });
 
             if (hostsWithAutoPM.isEmpty()) {
-                log.infoFormat("Cluster {0} does have too many spare hosts, 
but none can be put to maintenance.",
+                log.info("Cluster '{}' does have too many spare hosts, but 
none can be put to maintenance.",
                         cluster.getName());
                 return null;
             } else {
@@ -231,7 +234,7 @@
          */
         else if (requiredReserve == emptyHosts.size()
                 && pmMaintenanceHosts.isEmpty() == false) {
-            log.infoFormat("Cluster {0} does have enough spare hosts, shutting 
one host down.", cluster.getName());
+            log.info("Cluster '{}' does have enough spare hosts, shutting one 
host down.", cluster.getName());
             return new Pair<>(pmMaintenanceHosts.get(0), VDSStatus.Down);
         }
 
@@ -240,7 +243,7 @@
          */
         else if (requiredReserve > emptyHosts.size()
                 && pmMaintenanceHosts.isEmpty() == false) {
-            log.infoFormat("Cluster {0} does not have enough spare hosts, 
reactivating one.", cluster.getName());
+            log.info("Cluster '{}' does not have enough spare hosts, 
reactivating one.", cluster.getName());
             return new Pair<>(pmMaintenanceHosts.get(0), VDSStatus.Up);
         }
 
@@ -249,7 +252,7 @@
          */
         else if (requiredReserve > emptyHosts.size()
                 && pmMaintenanceHosts.isEmpty()) {
-            log.infoFormat("Cluster {0} does not have enough spare hosts, 
trying to start one up.", cluster.getName());
+            log.info("Cluster '{}' does not have enough spare hosts, trying to 
start one up.", cluster.getName());
             return new Pair<>(pmDownHosts.get(0), VDSStatus.Up);
         }
 
diff --git 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
index 79f7952..f49fcf2 100644
--- 
a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
+++ 
b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/scheduling/policyunits/VmAffinityFilterPolicyUnit.java
@@ -20,8 +20,12 @@
 import org.ovirt.engine.core.dao.VdsStaticDAO;
 import org.ovirt.engine.core.dao.VmDAO;
 import org.ovirt.engine.core.dao.scheduling.AffinityGroupDao;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class VmAffinityFilterPolicyUnit extends PolicyUnitImpl {
+    private static final Logger log = 
LoggerFactory.getLogger(VmAffinityFilterPolicyUnit.class);
+
     public VmAffinityFilterPolicyUnit(PolicyUnit policyUnit) {
         super(policyUnit);
     }
@@ -101,8 +105,8 @@
         // contradicting rules to the log
         unacceptableHosts.retainAll(acceptableHosts);
         for (Guid id: unacceptableHosts) {
-            log.warnFormat("Host {1} ({2}) belongs to both positive and 
negative affinity list" +
-                    " while scheduling VM {3} ({4})",
+            log.warn("Host '{}' ({}) belongs to both positive and negative 
affinity list" +
+                    " while scheduling VM '{}' ({})",
                     hostMap.get(id).getName(), id.toString(),
                     vm.getName(), vm.getId());
         }
@@ -112,7 +116,7 @@
             acceptableHosts.addAll(hostMap.keySet());
         }
         else if (acceptableHosts.size() > 1) {
-            log.warnFormat("Invalid affinity situation was detected while 
scheduling VM {1} ({2})." +
+            log.warn("Invalid affinity situation was detected while scheduling 
VM '{}' ({})." +
                     " VMs belonging to the same affinity groups are running on 
more than one host.",
                     vm.getName(), vm.getId());
         }


-- 
To view, visit http://gerrit.ovirt.org/34359
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I82070b252197458422792c49b2135fcb9b9b1430
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-engine
Gerrit-Branch: master
Gerrit-Owner: Martin Peřina <mper...@redhat.com>
_______________________________________________
Engine-patches mailing list
Engine-patches@ovirt.org
http://lists.ovirt.org/mailman/listinfo/engine-patches

Reply via email to