Noam Slomianko has uploaded a new change for review.

Change subject: external scheduler: improve logging
......................................................................

external scheduler: improve logging

- create unified format to logging
- add request identifier for easier understanding
of flows in the log
- add logging at the start and end of every request

Signed-off-by: Noam Slomianko <nslom...@redhat.com>
Change-Id: I2afa5c5c25df3f015cc9a511c0e36185d2bdde12
---
M src/ovirtscheduler/request_handler.py
M src/ovirtscheduler/request_handler_test.py
M src/ovirtscheduler/runner.py
M src/ovirtscheduler/utils.py
4 files changed, 143 insertions(+), 53 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/ovirt-scheduler-proxy 
refs/changes/89/18789/1

diff --git a/src/ovirtscheduler/request_handler.py 
b/src/ovirtscheduler/request_handler.py
index 48ca764..7807808 100644
--- a/src/ovirtscheduler/request_handler.py
+++ b/src/ovirtscheduler/request_handler.py
@@ -23,6 +23,7 @@
 from runner import PythonMethodRunner
 import utils
 import logging
+import uuid
 
 
 class RequestHandler(object):
@@ -54,6 +55,7 @@
         Safely load the user modules in another process
         and find what they implement
         """
+        message_builder = utils.log_message_builder('loadModules', 'Main')
         loaderRunners = []
         module_files = []
         for fileName in os.listdir(self._pluginDir):
@@ -65,29 +67,35 @@
                 utils.LOADER_MODULE,
                 utils.LOADER_MODULE,
                 utils.LOADER_FUNC,
-                (self._pluginDir, module))
+                (self._pluginDir, module),
+                'Main')
             loaderRunners.append(runner)
             module_files.append(module)
 
-        self._logger.info("loadModules::Trying to load the following "
-                          "files: %s",
-                          str(module_files))
+        self._logger.info(
+            message_builder.build(
+                "Trying to load the following files: %s" % str(module_files)))
         for runner in loaderRunners:
             runner.start()
 
         if utils.waitOnGroup(loaderRunners):
-            self._logger.warning("loadModules::Waiting on loading "
-                                 "modules timed out")
+            self._logger.warning(
+                message_builder.build(
+                    "Waiting on loading modules timed out"))
 
         for runner in loaderRunners:
-            self._logger.debug("loadModules::script: %s", str(runner._script))
-            self._logger.info("loadModules::registering: %s",
-                              str(runner.getResults()))
+            self._logger.debug(
+                message_builder.build(
+                    "script: %s" % str(runner._script)))
+            self._logger.info(
+                message_builder.build(
+                    "registering: %s" % str(runner.getResults())))
 
             if runner.getErrors():
-                self._logger.error("loadModules::External module "
-                                   "failed with error - %s ",
-                                   str(runner.getErrors()))
+                self._logger.error(
+                    message_builder.build(
+                        "External module failed with error - %s " %
+                        str(runner.getErrors())))
 
             if runner.getResults() is None:
                 continue
@@ -100,23 +108,40 @@
                     (description, custom_properties_map)
                 self._class_to_module_map[className] = moduleName
 
-        self._logger.info("loadModules::registering::loaded- "
-                          "filters:" + str(self._filters) +
-                          "    scores:" + str(self._scores) +
-                          "    balancers:" + str(self._balancers))
+        self._logger.info(
+            message_builder.build(
+                "registering::loaded- " +
+                "filters:" + str(self._filters) +
+                "    scores:" + str(self._scores) +
+                "    balancers:" + str(self._balancers)))
 
     def discover(self):
-        #temporary?
+        request_id = str(uuid.uuid1())
+        message_builder = utils.log_message_builder('discover', request_id)
+
+        self._logger.info(
+            message_builder.build('got request'))
+        self._logger.info(
+            message_builder.build('returning: %s' % str({
+                "filters": self._filters,
+                "scores": self._scores,
+                "balance": self._balancers})))
         return {
             "filters": self._filters,
             "scores": self._scores,
             "balance": self._balancers}
 
-    def aggregate_filter_results(self, filterRunners):
+    def aggregate_filter_results(self, filterRunners, request_id):
+        message_builder = utils.log_message_builder(
+            'aggregate_filter_results',
+            request_id)
+
         resultSet = set()
         for runner in filterRunners:
             if runner.getResults() is None:
-                self._logger.warn('No results from %s', runner._script)
+                self._logger.warn(
+                    message_builder.build(
+                        'No results from %s' % runner._script))
                 continue
             hosts = set(runner.getResults())
             if not resultSet:
@@ -126,17 +151,21 @@
         return list(resultSet)
 
     def run_filters(self, filters, hostIDs, vmID, properties_map):
-        #Intersects the results from the filters
+        request_id = str(uuid.uuid1())
+        message_builder = utils.log_message_builder('run_filters', request_id)
+
         #run each filter in a process for robustness
-        self._logger.info("run_filters:: got filters request: %s",
-                          str(filters))
+        self._logger.info(
+            message_builder.build(
+                "got request: %s" % str(filters)))
         avail_f, missing_f = utils.partition(filters,
                                              lambda f: f in self._filters)
 
         # report missing filters
         for f in missing_f:
-            self._logger.warning("run_filters::Filter requested but "
-                                 "was not found: %s", f)
+            self._logger.warning(
+                message_builder.build(
+                    "Filter requested but was not found: %s" % f))
 
         # Prepare a generator "list" of runners
         filterRunners = [
@@ -145,27 +174,42 @@
                 self._class_to_module_map[f],
                 f,
                 utils.FILTER,
-                (hostIDs, vmID, properties_map))
+                (hostIDs, vmID, properties_map),
+                request_id)
             for f in avail_f
         ]
 
         for runner in filterRunners:
             runner.start()
 
-        self._logger.debug("run_filters::Waiting for filters to finish")
+        self._logger.debug(
+            message_builder.build(
+                "Waiting for filters to finish"))
         #TODO add timeout config
         if utils.waitOnGroup(filterRunners):
-            self._logger.warning("run_filters::Waiting on filters timed out")
+            self._logger.warning(
+                message_builder.build(
+                    "Waiting on filters timed out"))
 
-        self._logger.debug("run_filters::Aggregating results")
-        return self.aggregate_filter_results(filterRunners)
+        self._logger.debug(
+            message_builder.build(
+                "Aggregating results"))
+        results = self.aggregate_filter_results(filterRunners, request_id)
+        self._logger.info(
+            message_builder.build('returning: %s' % str(results)))
+        return results
 
     #accumalate the results
-    def aggregate_score_results(self, scoreRunners):
+    def aggregate_score_results(self, scoreRunners, request_id):
+        message_builder = utils.log_message_builder('aggregate_score_results',
+                                                    request_id)
+
         results = {}
         for runner, weight in scoreRunners:
             hostScores = runner.getResults()
             if hostScores is None:
+                self._logger.warn(
+                    message_builder.build(
+                        'No results from %s' % runner._script))
                 continue
             for host, score in hostScores:
                 results.setdefault(host, 0)
@@ -178,9 +222,13 @@
                            hostIDs,
                            vmID,
                            properties_map):
+        request_id = str(uuid.uuid1())
+        message_builder = utils.log_message_builder('run_cost_functions',
+                                                    request_id)
+
         #run each filter in a process for robustness
-        self._logger.info("run_cost_functions:: got scoring request: %s",
-                          str(cost_functions))
+        self._logger.info(
+            message_builder.build(
+                "got request: %s" % str(cost_functions)))
 
         # Get the list of known and unknown score functions
         available_cost_f, missing_cost_f = \
@@ -188,8 +236,9 @@
 
         # Report the unknown functions
         for name, weight in missing_cost_f:
-                self._logger.warning("run_cost_functions::Score function"
-                                     " requested but was not found: %s", name)
+                self._logger.warning(
+                    message_builder.build(
+                        "Score function requested but was not found: %s"
+                        % name))
 
         # Prepare a generator "list" with runners and weights
         scoreRunners = [
@@ -198,35 +247,59 @@
                 self._class_to_module_map[name],
                 name,
                 utils.SCORE,
-                (hostIDs, vmID, properties_map)), weight)
+                (hostIDs, vmID, properties_map),
+                request_id), weight)
             for name, weight in available_cost_f
         ]
 
         for runner, _weight in scoreRunners:
             runner.start()
 
-        self._logger.debug("run_cost_functions::Waiting for scoring to finish")
+        self._logger.debug(
+            message_builder.build(
+                "Waiting for scoring to finish"))
         if utils.waitOnGroup([runner for runner, _weight in scoreRunners]):
-            self._logger.warning("run_cost_functions::Waiting on score"
-                                 " functions timed out")
+            self._logger.warning(
+                message_builder.build(
+                    "Waiting on score functions timed out"))
 
-        self._logger.debug("run_cost_functions::Aggregating results")
-        return self.aggregate_score_results(scoreRunners)
+        self._logger.debug(
+            message_builder.build(
+                "Aggregating results"))
+        results = self.aggregate_score_results(scoreRunners, request_id)
+        self._logger.info(
+            message_builder.build('returning: %s' % str(results)))
+        return results
 
     def run_load_balancing(self, balance, hostIDs, properties_map):
+        request_id = str(uuid.uuid1())
+        message_builder = utils.log_message_builder('run_load_balancing',
+                                                    request_id)
+
+        self._logger.info(
+            message_builder.build(
+                "got request: %s" % balance))
+
         if balance not in self._balancers:
-            self._logger.warning("run_load_balancing::Load balance requested"
-                                 " but was not found: %s", balance)
+            self._logger.warning(
+                message_builder.build(
+                    "Load balance requested but was not found: %s" % balance))
             return
 
         runner = PythonMethodRunner(self._pluginDir,
                                     self._class_to_module_map[balance],
                                     balance,
                                     utils.BALANCE,
-                                    (hostIDs, properties_map))
+                                    (hostIDs, properties_map),
+                                    request_id)
 
         runner.start()
-        self._logger.debug("run_load_balancing::Waiting for balance to finish")
+        self._logger.debug(
+            message_builder.build(
+                'Waiting for balance to finish'))
+
         runner.join(30)
-        self._logger.debug("run_load_balancing::returning results")
+
+        self._logger.info(
+            message_builder.build('returning: %s' % str(runner.getResults())))
+
         return runner.getResults()
diff --git a/src/ovirtscheduler/request_handler_test.py 
b/src/ovirtscheduler/request_handler_test.py
index 9755a3f..7651520 100644
--- a/src/ovirtscheduler/request_handler_test.py
+++ b/src/ovirtscheduler/request_handler_test.py
@@ -55,7 +55,7 @@
         executor = RequestHandler(os.path.join(os.getcwd(), 'plugins'),
                                   os.path.join(os.getcwd(), 'src'))
         filterRunners = []
-        assert executor.aggregate_filter_results(filterRunners) is not None
+        assert executor.aggregate_filter_results(filterRunners, '') is not None
 
     def test_aggregate_filter_results_singleNone(self):
         """
@@ -73,4 +73,4 @@
                 return None
 
         filterRunners = [NoneResultRunner()]
-        assert executor.aggregate_filter_results(filterRunners) is not None
+        assert executor.aggregate_filter_results(filterRunners, '') is not None
diff --git a/src/ovirtscheduler/runner.py b/src/ovirtscheduler/runner.py
index 38c3514..63382de 100755
--- a/src/ovirtscheduler/runner.py
+++ b/src/ovirtscheduler/runner.py
@@ -25,7 +25,7 @@
 
 
 class PythonMethodRunner(Thread):
-    def __init__(self, path, module, cls, method, args):
+    def __init__(self, path, module, cls, method, args, request_id=''):
         super(PythonMethodRunner, self).__init__(group=None)
         self._logger = logging.getLogger()
         self._path = path
@@ -33,18 +33,23 @@
         self._error = None
         self._process = None
         self._script = self.createScript(module, cls, method, args)
+        self.request_id = request_id
 
     def run(self):
         try:
-            self._logger.debug('running %s in %s', self._script, self._path)
+            message_builder = utils.log_message_builder('PythonMethodRunner',
+                                                        self.request_id)
+
+            self._logger.debug(
+                message_builder.build(
+                    'running %s in %s' % (self._script, self._path)))
             self._process = utils.createProcess(self._script, self._path)
             (result, error) = self._process.communicate()
             try:
                 self._result = literal_eval(result)
             except Exception as ex:
                 if not error:
-                    self._error = "PythonMethodRunner::" \
-                                  "Unable to parse result: %s" \
+                    self._error = "Unable to parse result: %s" \
                                   " got error : %s " % (result, ex)
             if error:
                 self._error = error
@@ -52,8 +57,9 @@
             self._error = ex
 
         if(self._error):
-            self._logger.error("PythonMethodRunner: script %s"
-                               + " got error %s", self._script, self._error)
+            self._logger.error(
+                message_builder.build(
+                    "script %s got error %s" % (self._script, self._error)))
 
     def getResults(self):
         return self._result
diff --git a/src/ovirtscheduler/utils.py b/src/ovirtscheduler/utils.py
index c60a44d..de04913 100644
--- a/src/ovirtscheduler/utils.py
+++ b/src/ovirtscheduler/utils.py
@@ -105,3 +105,14 @@
     # append returns None and we want to return the list, that is why there
     # is the or statement
     return reduce(lambda x, y: x[not pred(y)].append(y) or x, data, ([], []))
+
+
+class log_message_builder(object):
+
+    def __init__(self, method, request_id):
+        self.method = method
+        self.request_id = request_id
+
+    def build(self, message):
+        return '[Request:%s][Method:%s] - %s' % \
+               (self.request_id, self.method, message)


-- 
To view, visit http://gerrit.ovirt.org/18789
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I2afa5c5c25df3f015cc9a511c0e36185d2bdde12
Gerrit-PatchSet: 1
Gerrit-Project: ovirt-scheduler-proxy
Gerrit-Branch: master
Gerrit-Owner: Noam Slomianko <nslom...@redhat.com>
_______________________________________________
Engine-patches mailing list
Engine-patches@ovirt.org
http://lists.ovirt.org/mailman/listinfo/engine-patches

Reply via email to