Martin Peřina has uploaded a new change for review.

Change subject: core: Replace oVirt logger with slf4j in bll/gluster
......................................................................

core: Replace oVirt logger with slf4j in bll/gluster

Replaces the oVirt logging wrapper (Log/LogFactory) with slf4j loggers throughout
the bll/gluster package.
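
The conversion follows the same pattern in every class; a representative
before/after sketch (taken from GlusterHookSyncJob, with positional {0}/{1}
placeholders becoming slf4j {} placeholders):

    // Before: oVirt logging wrapper
    import org.ovirt.engine.core.utils.log.Log;
    import org.ovirt.engine.core.utils.log.LogFactory;

    private static final Log log = LogFactory.getLog(GlusterHookSyncJob.class);
    log.debugFormat("Syncing hooks for cluster {0}", cluster.getName());

    // After: slf4j
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    private static final Logger log = LoggerFactory.getLogger(GlusterHookSyncJob.class);
    log.debug("Syncing hooks for cluster {}", cluster.getName());

Where an exception was previously passed as the last argument of the
*Format() methods, it is now logged through a separate
log.error("Exception", e) call.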

Change-Id: I6f5419168ce6b26c110839dcd85cac995e5aa5db
Bug-Url: https://bugzilla.redhat.com/1109871
Signed-off-by: Martin Perina <mper...@redhat.com>
---
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterHookSyncJob.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterJobsManager.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterServiceSyncJob.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterSyncJob.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterTasksSyncJob.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTaskUtils.java
M backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTasksService.java
7 files changed, 100 insertions(+), 91 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-engine refs/changes/23/34323/1

diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterHookSyncJob.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterHookSyncJob.java
index 1d8ccdf..d16512b 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterHookSyncJob.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterHookSyncJob.java
@@ -26,13 +26,13 @@
 import org.ovirt.engine.core.common.vdscommands.VdsIdVDSCommandParametersBase;
 import org.ovirt.engine.core.common.vdscommands.gluster.GlusterHookVDSParameters;
 import org.ovirt.engine.core.compat.Guid;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
 import org.ovirt.engine.core.utils.threadpool.ThreadPoolUtil;
 import org.ovirt.engine.core.utils.timer.OnTimerMethodAnnotation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GlusterHookSyncJob extends GlusterJob {
-    private static final Log log = LogFactory.getLog(GlusterHookSyncJob.class);
+    private static final Logger log = LoggerFactory.getLogger(GlusterHookSyncJob.class);
 
     private static final GlusterHookSyncJob instance = new GlusterHookSyncJob();
 
@@ -60,7 +60,7 @@
             return;
         }
 
-        log.debugFormat("Syncing hooks for cluster {0}", cluster.getName());
+        log.debug("Syncing hooks for cluster {}", cluster.getName());
         List<VDS> upServers = getClusterUtils().getAllUpServers(cluster.getId());
 
         if (upServers == null || upServers.isEmpty()) {
@@ -119,7 +119,7 @@
                 upServers.add(server);
 
                 if (!pairResult.getSecond().getSucceeded()) {
-                    log.infoFormat("Failed to get list of hooks from server {0} with error {1} ", server,
+                    log.info("Failed to get list of hooks from server '{}' with error: {}", server,
                             pairResult.getSecond().getVdsError().getMessage());
                     logUtil.logServerMessage(server, AuditLogType.GLUSTER_HOOK_LIST_FAILED);
                     continue;
@@ -157,7 +157,7 @@
                             } else {
                                 if (!(serverHook.getChecksum().equals(fetchedHook.getChecksum()) && serverHook.getContentType().equals(fetchedHook.getContentType())
                                         && serverHook.getStatus().equals(fetchedHook.getStatus()))) {
-                                    log.infoFormat("Updating existing server hook {0} in server {1} ", key, server);
+                                    log.info("Updating existing server hook '{}' in server '{}' ", key, server);
                                     serverHook.setChecksum(fetchedHook.getChecksum());
                                     serverHook.setContentType(fetchedHook.getContentType());
                                     serverHook.setStatus(fetchedHook.getStatus());
@@ -172,7 +172,7 @@
                             newHook = fetchedHook;
                             newHook.setClusterId(clusterId);
                             newHook.setId(Guid.newGuid());
-                            log.infoFormat("Detected new hook {0} in server {1}, adding to engine hooks", key, server);
+                            log.info("Detected new hook '{}' in server '{}', adding to engine hooks", key, server);
                             logMessage(clusterId, key, AuditLogType.GLUSTER_HOOK_DETECTED_NEW);
 
                             updateContentTasksList(contentTasksList, newHook, server);
@@ -241,7 +241,7 @@
         for (Pair<GlusterHookEntity, VDSReturnValue> pairResult: pairResults) {
             final GlusterHookEntity hook = pairResult.getFirst();
             if (!pairResult.getSecond().getSucceeded()) {
-                log.infoFormat("Failed to get content of hook {0} with error {1} ", hook.getHookKey(),
+                log.info("Failed to get content of hook '{}' with error: {}", hook.getHookKey(),
                         pairResult.getSecond().getVdsError().getMessage());
                 logMessage(hook.getClusterId(), hook.getHookKey(), AuditLogType.GLUSTER_HOOK_GETCONTENT_FAILED);
                 continue;
@@ -289,7 +289,8 @@
             // Check if aggregated conflict status is different from existing hook
             Integer oldConflictStatus = existingHookConflictMap.get(hook.getHookKey());
             if (!(hook.getConflictStatus().equals(oldConflictStatus))) {
-                log.debugFormat("Conflict change detected for hook {0} in cluster {1} ", hook.getHookKey(), hook.getClusterId());
+                log.debug("Conflict change detected for hook '{}' in cluster '{}' ",
+                        hook.getHookKey(), hook.getClusterId());
                 logMessage(hook.getClusterId(), hook.getHookKey(), AuditLogType.GLUSTER_HOOK_CONFLICT_DETECTED);
                 getHooksDao().updateGlusterHookConflictStatus(hook.getId(), hook.getConflictStatus());
             }
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterJobsManager.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterJobsManager.java
index c0f6a27..cbc25fd 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterJobsManager.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterJobsManager.java
@@ -5,14 +5,14 @@
 import org.ovirt.engine.core.common.config.Config;
 import org.ovirt.engine.core.common.config.ConfigValues;
 import org.ovirt.engine.core.common.mode.ApplicationMode;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
 import org.ovirt.engine.core.utils.timer.SchedulerUtil;
 import org.ovirt.engine.core.utils.timer.SchedulerUtilQuartzImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GlusterJobsManager {
 
-    private static final Log log = LogFactory.getLog(GlusterJobsManager.class);
+    private static final Logger log = LoggerFactory.getLogger(GlusterJobsManager.class);
 
     public static void init() {
         if (!glusterModeSupported()) {
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterServiceSyncJob.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterServiceSyncJob.java
index 3ec5b63..68abd79 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterServiceSyncJob.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterServiceSyncJob.java
@@ -24,15 +24,15 @@
 import org.ovirt.engine.core.common.vdscommands.gluster.GlusterServicesListVDSParameters;
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.compat.TransactionScopeOption;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
 import org.ovirt.engine.core.utils.threadpool.ThreadPoolUtil;
 import org.ovirt.engine.core.utils.timer.OnTimerMethodAnnotation;
 import org.ovirt.engine.core.utils.transaction.TransactionMethod;
 import org.ovirt.engine.core.utils.transaction.TransactionSupport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GlusterServiceSyncJob extends GlusterJob {
-    private static final Log log = LogFactory.getLog(GlusterServiceSyncJob.class);
+    private static final Logger log = LoggerFactory.getLogger(GlusterServiceSyncJob.class);
     private static final GlusterServiceSyncJob instance = new GlusterServiceSyncJob();
     private final Map<String, GlusterService> serviceNameMap = new HashMap<String, GlusterService>();
 
@@ -72,9 +72,8 @@
                         }
                     }
                 } catch (Exception e) {
-                    log.errorFormat("Error while refreshing service statuses of cluster {0}!",
-                            cluster.getName(),
-                            e);
+                    log.error("Error while refreshing service statuses of cluster '{}'!", cluster.getName());
+                    log.error("Exception", e);
                 }
             }
         }
@@ -223,7 +222,7 @@
                 VDSReturnValue returnValue = runVdsCommand(VDSCommandType.GlusterServicesList,
                         new GlusterServicesListVDSParameters(server.getId(), getServiceNameMap().keySet()));
                 if (!returnValue.getSucceeded()) {
-                    log.errorFormat("Couldn't fetch services statuses from server {0}, error: {1}! " +
+                    log.error("Couldn't fetch services statuses from server '{}', error: {}! " +
                             "Updating statuses of all services on this server to UNKNOWN.",
                             server.getHostName(),
                             returnValue.getVdsError().getMessage());
@@ -240,7 +239,7 @@
                         final GlusterServiceStatus oldStatus = existingService.getStatus();
                         final GlusterServiceStatus newStatus = fetchedService.getStatus();
                         if (oldStatus != newStatus) {
-                            log.infoFormat("Status of service {0} on server {1} changed from {2} to {3}. Updating in engine now.",
+                            log.info("Status of service '{}' on server '{}' changed from '{}' to '{}'. Updating in engine now.",
                                     fetchedService.getServiceName(),
                                     server.getHostName(),
                                     oldStatus.name(),
@@ -299,7 +298,7 @@
     private void insertServerService(VDS server, final GlusterServerService fetchedService) {
         fetchedService.setId(Guid.newGuid());
         getGlusterServerServiceDao().save(fetchedService);
-        log.infoFormat("Service {0} was not mapped to server {1}. Mapped it now.",
+        log.info("Service '{}' was not mapped to server '{}'. Mapped it now.",
                 fetchedService.getServiceName(),
                 server.getHostName());
         logUtil.logAuditMessage(server.getVdsGroupId(),
@@ -319,7 +318,7 @@
         final GlusterServiceStatus oldStatus = clusterService.getStatus();
         clusterService.setStatus(newStatus);
         getGlusterClusterServiceDao().update(clusterService);
-        log.infoFormat("Status of service type {0} changed on cluster {1} from {2} to {3}.",
+        log.info("Status of service type '{}' changed on cluster '{}' from '{}' to '{}'.",
                 clusterService.getServiceType().name(),
                 clusterService.getClusterId(),
                 oldStatus,
@@ -348,7 +347,7 @@
 
         getGlusterClusterServiceDao().save(clusterService);
 
-        log.infoFormat("Service type {0} not mapped to cluster {1}. Mapped it now.",
+        log.info("Service type '{}' not mapped to cluster '{}'. Mapped it now.",
                 serviceType,
                 cluster.getName());
         logUtil.logAuditMessage(clusterService.getClusterId(),
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterSyncJob.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterSyncJob.java
index 3408dde..d147fd0 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterSyncJob.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterSyncJob.java
@@ -45,11 +45,11 @@
 import org.ovirt.engine.core.compat.TransactionScopeOption;
 import org.ovirt.engine.core.dao.gluster.GlusterDBUtils;
 import org.ovirt.engine.core.utils.lock.EngineLock;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
 import org.ovirt.engine.core.utils.timer.OnTimerMethodAnnotation;
 import org.ovirt.engine.core.utils.transaction.TransactionMethod;
 import org.ovirt.engine.core.utils.transaction.TransactionSupport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is responsible for keeping the Gluster related data of engine in sync with the actual data retrieved from
@@ -57,7 +57,7 @@
  * engine as well.
  */
 public class GlusterSyncJob extends GlusterJob {
-    private final Log log = LogFactory.getLog(GlusterSyncJob.class);
+    private final Logger log = LoggerFactory.getLogger(GlusterSyncJob.class);
     private static final GlusterSyncJob instance = new GlusterSyncJob();
 
     private GlusterSyncJob() {
@@ -86,21 +86,22 @@
                 try {
                     refreshClusterData(cluster);
                 } catch (Exception e) {
-                    log.errorFormat("Error while refreshing Gluster lightweight data of cluster {0}!",
+                    log.error("Error while refreshing Gluster lightweight data of cluster '{}': {}",
                             cluster.getName(),
-                            e);
+                            e.getMessage());
+                    log.error("Exception", e);
                 }
             }
         }
     }
 
     private void refreshClusterData(VDSGroup cluster) {
-        log.debugFormat("Refreshing Gluster lightweight Data for cluster {0}", cluster.getName());
+        log.debug("Refreshing Gluster lightweight Data for cluster '{}'", cluster.getName());
 
         List<VDS> existingServers = getVdsDao().getAllForVdsGroup(cluster.getId());
         VDS upServer = getClusterUtils().getUpServer(cluster.getId());
         if (upServer == null) {
-            log.debugFormat("No server UP in cluster {0}. Can't refresh it's data at this point.", cluster.getName());
+            log.debug("No server UP in cluster '{}'. Can't refresh it's data at this point.", cluster.getName());
             return;
         }
 
@@ -123,14 +124,14 @@
             // If the cluster supports virt service as well, we should not be removing any servers from it, even if they
             // have been removed from the Gluster cluster using the Gluster cli, as they could potentially be used for
             // running VMs
-            log.debugFormat("As cluster {0} supports virt service as well, it's servers will not be synced with glusterfs",
+            log.debug("As cluster '{}' supports virt service as well, it's servers will not be synced with glusterfs",
                     cluster.getName());
             return;
         }
 
         acquireLock(cluster.getId());
 
-        log.debugFormat("Refreshing Gluster Server data for cluster {0} using server {1} ",
+        log.debug("Refreshing Gluster Server data for cluster '{}' using server '{}'",
                 cluster.getName(),
                 upServer.getName());
         try {
@@ -139,14 +140,16 @@
                 syncServers(existingServers, fetchedServers);
             }
         } catch(Exception e) {
-            log.errorFormat("Error while refreshing server data for cluster {0} from database!", cluster.getName(), e);
+            log.error("Error while refreshing server data for cluster '{}' from database: {}",
+                    cluster.getName(), e.getMessage());
+            log.error("Exception", e);
         } finally {
             releaseLock(cluster.getId());
         }
     }
 
     private void syncServers(List<VDS> existingServers, List<GlusterServerInfo> fetchedServers) {
-        log.debugFormat("Existing servers list returned {0} comparing with fetched servers {1)", existingServers, fetchedServers);
+        log.debug("Existing servers list returned '{}' comparing with fetched servers '{}'", existingServers, fetchedServers);
 
         boolean serverRemoved = false;
         for (VDS server : existingServers) {
@@ -154,7 +157,7 @@
             if (isRemovableStatus(server.getStatus())) {
                 GlusterServerInfo glusterServer = findGlusterServer(server, fetchedServers);
                 if (glusterServer == null) {
-                    log.infoFormat("Server {0} has been removed directly using the gluster CLI. Removing it from engine as well.",
+                    log.info("Server '{}' has been removed directly using the gluster CLI. Removing it from engine as well.",
                             server.getName());
                     logUtil.logServerMessage(server, AuditLogType.GLUSTER_SERVER_REMOVED_FROM_CLI);
                     try (EngineLock lock = getGlusterUtil().acquireGlusterLockWait(server.getId())) {
@@ -163,7 +166,9 @@
                         runVdsCommand(VDSCommandType.RemoveVds, new RemoveVdsVDSCommandParameters(server.getId()));
                         serverRemoved = true;
                     } catch (Exception e) {
-                        log.errorFormat("Error while removing server {0} from database!", server.getName(), e);
+                        log.error("Error while removing server '{}' from database: {}",
+                                server.getName(), e.getMessage());
+                        log.error("Exception", e);
                     }
                 }
                 else if (server.getStatus() == VDSStatus.Up && glusterServer.getStatus() == PeerStatus.DISCONNECTED) {
@@ -178,8 +183,8 @@
             }
         }
         if (serverRemoved) {
-            log.infoFormat("Servers detached using gluster CLI  is removed from engine after inspecting the Gluster servers list returned {0} "
-                    + "- comparing with db servers {1}",
+            log.info("Servers detached using gluster CLI is removed from engine after inspecting the Gluster servers"
+                    + " list returned '{}' - comparing with db servers '{}'",
                     fetchedServers, existingServers);
         }
     }
@@ -260,13 +265,13 @@
         List<GlusterServerInfo> fetchedServers = fetchServers(upServer, tempServers);
 
         if (fetchedServers == null) {
-            log.errorFormat("gluster peer status command failed on all servers of the cluster {0}."
+            log.error("gluster peer status command failed on all servers of the cluster '{}'."
                     + "Can't refresh it's data at this point.", cluster.getName());
             return null;
         }
 
         if (fetchedServers.size() == 1 && existingServers.size() > 2) {
-            log.infoFormat("Gluster servers list fetched from server {0} has only one server", upServer.getName());
+            log.info("Gluster servers list fetched from server '{}' has only one server", upServer.getName());
             // It's possible that the server we are using to get list of servers itself has been removed from the
             // cluster, and hence is returning a single server (itself)
             GlusterServerInfo server = fetchedServers.iterator().next();
@@ -275,7 +280,7 @@
                 tempServers.remove(upServer);
                 upServer = getNewUpServer(tempServers, upServer);
                 if (upServer == null) {
-                    log.warnFormat("The only UP server in cluster {0} seems to have been removed from it using gluster CLI. "
+                    log.warn("The only UP server in cluster '{}' seems to have been removed from it using gluster CLI. "
                             + "Can't refresh it's data at this point.",
                             cluster.getName());
                     return null;
@@ -283,7 +288,7 @@
 
                 fetchedServers = fetchServers(upServer, tempServers);
                 if (fetchedServers == null) {
-                    log.warnFormat("The only UP server in cluster {0} (or the only one on which gluster peer status "
+                    log.warn("The only UP server in cluster '{}' (or the only one on which gluster peer status "
                             + "command is working) seems to have been removed from it using gluster CLI. "
                             + "Can't refresh it's data at this point.", cluster.getName());
                     return null;
@@ -315,10 +320,10 @@
     private List<GlusterServerInfo> fetchServers(VDS upServer, List<VDS> existingServers) {
         List<GlusterServerInfo> fetchedServers = null;
         while (fetchedServers == null && !existingServers.isEmpty()) {
-            log.debugFormat("Fetching gluster servers list from server {0}", upServer.getName());
+            log.debug("Fetching gluster servers list from server '{}'", upServer.getName());
             fetchedServers = fetchServers(upServer);
             if (fetchedServers == null) {
-                log.infoFormat("Gluster servers list failed in server {0} moving it to NonOperational",
+                log.info("Gluster servers list failed in server '{}' moving it to NonOperational",
                         upServer.getName());
                 logUtil.logServerMessage(upServer, AuditLogType.GLUSTER_SERVERS_LIST_FAILED);
                 // Couldn't fetch servers from the up server. Mark it as non-operational
@@ -362,7 +367,7 @@
             // Pass a copy of the existing servers as the fetchVolumes method can potentially remove elements from it
             Map<Guid, GlusterVolumeEntity> volumesMap = fetchVolumes(upServer, new ArrayList<VDS>(existingServers));
             if (volumesMap == null) {
-                log.errorFormat("gluster volume info command failed on all servers of the cluster {0}."
+                log.error("gluster volume info command failed on all servers of the cluster '{}'."
                         + "Can't refresh it's data at this point.", cluster.getName());
                 return;
             }
@@ -415,7 +420,7 @@
         for (GlusterVolumeEntity volume : getVolumeDao().getByClusterId(clusterId)) {
             if (!volumesMap.containsKey(volume.getId())) {
                 idsToRemove.add(volume.getId());
-                log.debugFormat("Volume {0} has been removed directly using the gluster CLI. Removing it from engine as well.",
+                log.debug("Volume '{}' has been removed directly using the gluster CLI. Removing it from engine as well.",
                         volume.getName());
                 logUtil.logVolumeMessage(volume, AuditLogType.GLUSTER_VOLUME_DELETED_FROM_CLI);
             }
@@ -425,7 +430,7 @@
             try {
                 getVolumeDao().removeAll(idsToRemove);
             } catch (Exception e) {
-                log.errorFormat("Error while removing volumes from database!", e);
+                log.error("Error while removing volumes from database!", e);
             }
         }
     }
@@ -433,22 +438,24 @@
     private void updateExistingAndNewVolumes(Guid clusterId, Map<Guid, GlusterVolumeEntity> volumesMap) {
         for (Entry<Guid, GlusterVolumeEntity> entry : volumesMap.entrySet()) {
             GlusterVolumeEntity volume = entry.getValue();
-            log.debugFormat("Analyzing volume {0}", volume.getName());
+            log.debug("Analyzing volume '{}'", volume.getName());
 
             GlusterVolumeEntity existingVolume = getVolumeDao().getById(entry.getKey());
             if (existingVolume == null) {
                 try {
                     createVolume(volume);
                 } catch (Exception e) {
-                    log.errorFormat("Could not save volume {0} in database!", volume.getName(), e);
+                    log.error("Could not save volume {} in database!", volume.getName());
+                    log.error("Exception", e);
                 }
             } else {
                 try {
-                    log.debugFormat("Volume {0} exists in engine. Checking if it needs to be updated.",
+                    log.debug("Volume '{}' exists in engine. Checking if it needs to be updated.",
                             existingVolume.getName());
                     updateVolume(existingVolume, volume);
                 } catch (Exception e) {
-                    log.errorFormat("Error while updating Volume {0}!", volume.getName(), e);
+                    log.error("Error while updating volume '{}'!", volume.getName());
+                    log.error("Exception", e);
                 }
             }
         }
@@ -461,19 +468,19 @@
      */
     private void createVolume(final GlusterVolumeEntity volume) {
         if (volume.getBricks() == null) {
-            log.warnFormat("Bricks of volume {0} were not fetched. " +
+            log.warn("Bricks of volume '{}' were not fetched. " +
                     "Hence will not add it to engine at this point.", volume.getName());
             return;
         }
 
         for (GlusterBrickEntity brick : volume.getBricks()) {
             if (brick == null) {
-                log.warnFormat("Volume {0} contains a apparently corrupt brick(s). " +
+                log.warn("Volume '{}' contains a apparently corrupt brick(s). " +
                         "Hence will not add it to engine at this point.",
                         volume.getName());
                 return;
             } else if (brick.getServerId() == null) {
-                log.warnFormat("Volume {0} contains brick(s) from unknown hosts. " +
+                log.warn("Volume '{}' contains brick(s) from unknown hosts. " +
                         "Hence will not add it to engine at this point.",
                         volume.getName());
                 return;
@@ -486,7 +493,7 @@
         }
 
         logUtil.logVolumeMessage(volume, AuditLogType.GLUSTER_VOLUME_CREATED_FROM_CLI);
-        log.debugFormat("Volume {0} has been created directly using the gluster CLI. Creating it in engine as well.",
+        log.debug("Volume '{}' has been created directly using the gluster CLI. Creating it in engine as well.",
                 volume.getName());
         getVolumeDao().save(volume);
     }
@@ -509,7 +516,7 @@
         Collection<TransportType> addedTransportTypes =
                 ListUtils.getAddedElements(existingTransportTypes, fetchedTransportTypes);
         if (!addedTransportTypes.isEmpty()) {
-            log.infoFormat("Adding transport type(s) {0} to volume {1}",
+            log.info("Adding transport type(s) '{}' to volume '{}'",
                     addedTransportTypes,
                     existingVolume.getName());
             getVolumeDao().addTransportTypes(existingVolume.getId(), addedTransportTypes);
@@ -518,7 +525,7 @@
         Collection<TransportType> removedTransportTypes =
                 ListUtils.getAddedElements(fetchedTransportTypes, existingTransportTypes);
         if (!removedTransportTypes.isEmpty()) {
-            log.infoFormat("Removing transport type(s) {0} from volume {1}",
+            log.info("Removing transport type(s) '{}' from volume '{}'",
                     removedTransportTypes,
                     existingVolume.getName());
             getVolumeDao().removeTransportTypes(existingVolume.getId(), removedTransportTypes);
@@ -528,7 +535,7 @@
     private void updateBricks(GlusterVolumeEntity existingVolume, GlusterVolumeEntity fetchedVolume) {
         List<GlusterBrickEntity> fetchedBricks = fetchedVolume.getBricks();
         if (fetchedBricks == null) {
-            log.warnFormat("Bricks of volume {0} were not fetched. " +
+            log.warn("Bricks of volume '{}' were not fetched. " +
                     "Hence will not try to update them in engine at this point.",
                     fetchedVolume.getName());
             return;
@@ -544,7 +551,7 @@
         for (final GlusterBrickEntity existingBrick : existingVolume.getBricks()) {
             if (!GlusterCoreUtil.containsBrick(fetchedBricks, existingBrick)) {
                 idsToRemove.add(existingBrick.getId());
-                log.infoFormat("Detected brick {0} removed from Volume {1}. Removing it from engine DB as well.",
+                log.info("Detected brick '{}' removed from volume '{}'. Removing it from engine DB as well.",
                         existingBrick.getQualifiedName(),
                         existingVolume.getName());
                 logUtil.logAuditMessage(existingVolume.getClusterId(), existingVolume, null,
@@ -560,7 +567,7 @@
             try {
                 getBrickDao().removeAll(idsToRemove);
             } catch (Exception e) {
-                log.errorFormat("Error while removing bricks from database!", e);
+                log.error("Error while removing bricks from database!", e);
             }
         }
     }
@@ -573,7 +580,7 @@
                 // server id could be null if the new brick resides on a server that is not yet added in the engine
                 // adding such servers to engine required manual approval by user, and hence can't be automated.
                 if (fetchedBrick.getServerId() != null) {
-                    log.infoFormat("New brick {0} added to volume {1} from gluster CLI. Updating engine DB accordingly.",
+                    log.info("New brick '{}' added to volume '{}' from gluster CLI. Updating engine DB accordingly.",
                             fetchedBrick.getQualifiedName(),
                             existingVolume.getName());
                     fetchedBrick.setStatus(existingVolume.isOnline() ? GlusterStatus.UP : GlusterStatus.DOWN);
@@ -589,7 +596,7 @@
             } else {
                 // brick found. update it if required. Only property that could be different is the brick order
                 if (!Objects.equals(existingBrick.getBrickOrder(), fetchedBrick.getBrickOrder())) {
-                    log.infoFormat("Brick order for brick {0} changed from {1} to {2} because of direct CLI operations. Updating engine DB accordingly.",
+                    log.info("Brick order for brick '{}' changed from '{}' to '{}' because of direct CLI operations. Updating engine DB accordingly.",
                             existingBrick.getQualifiedName(),
                             existingBrick.getBrickOrder(),
                             fetchedBrick.getBrickOrder());
@@ -614,7 +621,7 @@
         for (final GlusterVolumeOptionEntity existingOption : existingOptions) {
             if (fetchedVolume.getOption(existingOption.getKey()) == null) {
                 idsToRemove.add(existingOption.getId());
-                log.infoFormat("Detected option {0} reset on volume {1}. Removing it from engine DB as well.",
+                log.info("Detected option '{}' reset on volume '{}'. Removing it from engine DB as well.",
                         existingOption.getKey(),
                         fetchedVolume.getName());
                 // The option "group" gets implicitly replaced with a set of options defined in the group file
@@ -635,7 +642,8 @@
             try {
                 getOptionDao().removeAll(idsToRemove);
             } catch (Exception e) {
-                log.errorFormat("Error while removing options of volume {0} from database!", fetchedVolume.getName(), e);
+                log.error("Error while removing options of volume '{}' from database!", fetchedVolume.getName());
+                log.error("Exception", e);
             }
         }
     }
@@ -699,7 +707,7 @@
                             put(GlusterConstants.OPTION_VALUE, entity.getValue());
                         }
                     });
-            log.infoFormat("New option {0}={1} set on volume {2} from gluster CLI. Updating engine DB accordingly.",
+            log.info("New option '{}'='{}' set on volume '{}' from gluster CLI. Updating engine DB accordingly.",
                     entity.getKey(),
                     entity.getValue(),
                     volume.getName());
@@ -718,7 +726,7 @@
                             put(GlusterConstants.OPTION_NEW_VALUE, entity.getValue());
                         }
                     });
-            log.infoFormat("Detected change in value of option {0} of volume {1} from {2} to {3}. Updating engine DB accordingly.",
+            log.info("Detected change in value of option '{}' of volume '{}' from '{}' to '{}'. Updating engine DB accordingly.",
                     volume.getOption(entity.getKey()),
                     volume.getName(),
                     volume.getOption(entity.getKey()).getValue(),
@@ -756,7 +764,7 @@
         }
 
         if (changed) {
-            log.infoFormat("Updating volume {0} with fetched properties.", existingVolume.getName());
+            log.info("Updating volume '{}' with fetched properties.", existingVolume.getName());
             getVolumeDao().updateGlusterVolume(existingVolume);
             logUtil.logVolumeMessage(existingVolume, AuditLogType.GLUSTER_VOLUME_PROPERTIES_CHANGED_FROM_CLI);
         }
@@ -785,9 +793,9 @@
                 try {
                     refreshClusterHeavyWeightData(cluster);
                 } catch (Exception e) {
-                    log.errorFormat("Error while refreshing Gluster heavyweight data of cluster {0}!",
-                            cluster.getName(),
-                            e);
+                    log.error("Error while refreshing Gluster heavyweight data of cluster '{}'!",
+                            cluster.getName());
+                    log.error("Exception", e);
                 }
             }
         }
@@ -796,12 +804,12 @@
     private void refreshClusterHeavyWeightData(VDSGroup cluster) {
         VDS upServer = getClusterUtils().getRandomUpServer(cluster.getId());
         if (upServer == null) {
-            log.debugFormat("No server UP in cluster {0}. Can't refresh it's data at this point.", cluster.getName());
+            log.debug("No server UP in cluster '{}'. Can't refresh it's data at this point.", cluster.getName());
             return;
         }
 
         for (GlusterVolumeEntity volume : getVolumeDao().getByClusterId(cluster.getId())) {
-            log.debugFormat("Refreshing brick statuses for volume {0} of cluster {1}",
+            log.debug("Refreshing brick statuses for volume '{}' of cluster '{}'",
                     volume.getName(),
                     cluster.getName());
             // brick statuses can be fetched only for started volumes
@@ -810,10 +818,10 @@
                 try {
                     refreshVolumeDetails(upServer, volume);
                 } catch (Exception e) {
-                    log.errorFormat("Error while refreshing brick statuses for volume {0} of cluster {1}",
+                    log.error("Error while refreshing brick statuses for volume '{}' of cluster '{}'",
                             volume.getName(),
-                            cluster.getName(),
-                            e);
+                            cluster.getName());
+                    log.error("Exception", e);
                 } finally {
                     releaseLock(cluster.getId());
                 }
@@ -873,7 +881,7 @@
     }
 
     private void logBrickStatusChange(GlusterVolumeEntity volume, final GlusterBrickEntity brick, final GlusterStatus fetchedStatus) {
-        log.debugFormat("Detected that status of brick {0} in volume {1} changed from {2} to {3}",
+        log.debug("Detected that status of brick '{}' in volume '{}' changed from '{}' to '{}'",
                 brick.getQualifiedName(), volume.getName(), brick.getStatus(), fetchedStatus);
         logUtil.logAuditMessage(volume.getClusterId(), volume, null,
                 AuditLogType.GLUSTER_BRICK_STATUS_CHANGED,
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterTasksSyncJob.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterTasksSyncJob.java
index 54547ad..2877a14 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterTasksSyncJob.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/GlusterTasksSyncJob.java
@@ -43,15 +43,15 @@
 import org.ovirt.engine.core.dal.dbbroker.auditloghandling.gluster.GlusterAuditLogUtil;
 import org.ovirt.engine.core.dal.job.ExecutionMessageDirector;
 import org.ovirt.engine.core.dao.gluster.GlusterDBUtils;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
 import org.ovirt.engine.core.utils.threadpool.ThreadPoolUtil;
 import org.ovirt.engine.core.utils.timer.OnTimerMethodAnnotation;
 import org.ovirt.engine.core.utils.transaction.TransactionMethod;
 import org.ovirt.engine.core.utils.transaction.TransactionSupport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GlusterTasksSyncJob extends GlusterJob  {
-    private static final Log log = LogFactory.getLog(GlusterTasksSyncJob.class);
+    private static final Logger log = LoggerFactory.getLogger(GlusterTasksSyncJob.class);
 
     private static GlusterTasksSyncJob instance = new GlusterTasksSyncJob();
 
@@ -155,7 +155,8 @@
         GlusterVolumeEntity vol = getVolumeDao().getByName(cluster.getId(), volumeName);
 
         if (vol == null) {
-            log.infoFormat("Volume {0} does not exist yet for task detected from CLI {1}, not adding to engine", volumeName, task);
+            log.info("Volume '{}' does not exist yet for task detected from CLI '{}', not adding to engine",
+                    volumeName, task);
             return;
         }
 
@@ -237,7 +238,7 @@
                         GlusterBrickEntity brickEntity = new GlusterBrickEntity();
                         VdsStatic server = GlusterDBUtils.getInstance().getServer(cluster.getId(), hostnameOrIp);
                         if (server == null) {
-                            log.warnFormat("Could not find server {0} in cluster {1}", hostnameOrIp, cluster.getId());
+                            log.warn("Could not find server '{}' in cluster '{}'", hostnameOrIp, cluster.getId());
                         } else {
                             brickEntity.setServerId(server.getId());
                             brickEntity.setBrickDirectory(brickDir);
@@ -251,7 +252,7 @@
             }
             logTaskStartedFromCLI(cluster, task, vol);
         } catch (Exception e) {
-            log.error(e);
+            log.error("Exception", e);
             // Release the lock only if there is any exception,
             // otherwise the lock will be released once the task is completed
@@ -333,7 +334,7 @@
         //if task is in DB but not in running task list
         final Set<Guid> tasksNotRunning = new HashSet<Guid>(taskListInDB);
         tasksNotRunning.removeAll(allRunningTasksInCluster);
-        log.debugFormat("tasks to be cleaned up in db {0}", tasksNotRunning);
+        log.debug("Tasks to be cleaned up in db '{}'", tasksNotRunning);
 
         for (Guid taskId: tasksNotRunning) {
             GlusterVolumeEntity vol= getVolumeDao().getVolumeByGlusterTask(taskId);
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTaskUtils.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTaskUtils.java
index 8e3de2b..8dd3e23 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTaskUtils.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTaskUtils.java
@@ -38,8 +38,8 @@
 import org.ovirt.engine.core.utils.lock.EngineLock;
 import org.ovirt.engine.core.utils.lock.LockManager;
 import org.ovirt.engine.core.utils.lock.LockManagerFactory;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GlusterTaskUtils {
     private static GlusterTaskUtils instance;
@@ -58,7 +58,7 @@
         taskTypeAuditMsg.put(GlusterTaskType.REMOVE_BRICK, AuditLogType.GLUSTER_VOLUME_MIGRATE_BRICK_DATA_FINISHED);
     }
 
-    private static final Log log = LogFactory.getLog(GlusterTasksSyncJob.class);
+    private static final Logger log = LoggerFactory.getLogger(GlusterTasksSyncJob.class);
 
     private GlusterTaskUtils() {
     }
@@ -119,7 +119,7 @@
             releaseLock(vol.getId());
 
         } else {
-            log.debugFormat("Did not find a volume associated with task {0}", taskId);
+            log.debug("Did not find a volume associated with task '{}'", taskId);
         }
     }
 
diff --git a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTasksService.java b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTasksService.java
index f3c9d71..7af80e7 100644
--- a/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTasksService.java
+++ b/backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/gluster/tasks/GlusterTasksService.java
@@ -17,11 +17,11 @@
 import org.ovirt.engine.core.common.vdscommands.VdsIdVDSCommandParametersBase;
 import org.ovirt.engine.core.compat.Guid;
 import org.ovirt.engine.core.dal.dbbroker.DbFacade;
-import org.ovirt.engine.core.utils.log.Log;
-import org.ovirt.engine.core.utils.log.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GlusterTasksService {
-    private static final Log log = LogFactory.getLog(GlusterTasksService.class);
+    private static final Logger log = LoggerFactory.getLogger(GlusterTasksService.class);
 
     public Map<Guid, GlusterAsyncTask> getTaskListForCluster(Guid id) {
         VDS upServer = ClusterUtils.getInstance().getRandomUpServer(id);
@@ -39,7 +39,7 @@
             }
             return tasksMap;
         } else {
-            log.error(returnValue.getVdsError());
+            log.error("Error: {}", returnValue.getVdsError());
             throw new VdcBLLException(VdcBllErrors.GlusterVolumeStatusAllFailedException, returnValue.getVdsError().getMessage());
         }
     }


-- 
To view, visit http://gerrit.ovirt.org/34323
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I6f5419168ce6b26c110839dcd85cac995e5aa5db
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-engine
Gerrit-Branch: master
Gerrit-Owner: Martin Peřina <mper...@redhat.com>
