David Caro has uploaded a new change for review.

Change subject: Added workspace cleanup job
......................................................................

Added workspace cleanup job

Removes, from all the slaves (even offline ones), every workspace that is
not currently in use by any job.

It requires a newer version of Jenkins Job Builder.

Change-Id: I97ad249a432129ca0f8b176d9fa8dbe6b1bfb5b5
Signed-off-by: David Caro <dcaro...@redhat.com>
---
A jobs/confs/groovy-scripts/cleanup_workspaces.groovy
A jobs/confs/yaml/jobs/system/system_cleanup-workspaces.yaml
2 files changed, 120 insertions(+), 0 deletions(-)


  git pull ssh://gerrit.ovirt.org:29418/jenkins refs/changes/86/34686/1
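
A quick local sanity check for the new job definition is to render it with
Jenkins Job Builder's test mode (a hedged sketch: it assumes jobs/confs works
as a standalone JJB configuration and that the installed JJB is new enough to
resolve the !include-raw tag relative to that directory):

  cd jobs/confs && jenkins-jobs test yaml/jobs/system/system_cleanup-workspaces.yaml

This only prints the generated job XML to stdout; it does not talk to any
Jenkins master.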

diff --git a/jobs/confs/groovy-scripts/cleanup_workspaces.groovy b/jobs/confs/groovy-scripts/cleanup_workspaces.groovy
new file mode 100644
index 0000000..1c7dad0
--- /dev/null
+++ b/jobs/confs/groovy-scripts/cleanup_workspaces.groovy
@@ -0,0 +1,112 @@
+import hudson.model.*;
+import hudson.util.*;
+import jenkins.model.*;
+import hudson.FilePath.FileCallable;
+import hudson.slaves.OfflineCause;
+import hudson.node_monitors.*;
+
+
+curJobURL = build.getEnvironment(listener)['BUILD_URL']
+
+for (node in Jenkins.instance.nodes) {
+  computer = node.toComputer()
+  // Skip disconnected nodes
+  if (computer == null || computer.getChannel() == null) continue
+
+  rootPath = node.getRootPath()
+
+  size = DiskSpaceMonitor.DESCRIPTOR.get(computer).size
+  roundedSize = size / (1024 * 1024 * 1024) as int
+
+  println("\n======")
+  println("INFO::node: ${node.getDisplayName()}, free space: ${roundedSize}GB")
+  if (roundedSize < 10) {
+
+    // If the slave is already offline, don't change it but if not, set
+    // it offline to avoid new jobs from getting in
+    wasOnline = computer.isOnline()
+    if (wasOnline) {
+      reason = new hudson.slaves.OfflineCause.ByCLI(
+        "workspace cleanup (${curJobURL})")
+      computer.setTemporarilyOffline(true, reason)
+      computer.waitUntilOffline()
+    } else {
+      println("INFO::Node already offline.")
+    }
+
+    // get the list of currently used workspaces (to avoid deleting them)
+    lockedWorkspaces = []
+    executors = computer.getExecutors()
+    if (executors != null) {
+      for (executor in executors) {
+        try {
+          curWorkspace = executor.getCurrentWorkspace()
+          if (curWorkspace != null) {
+            lockedWorkspaces.add(curWorkspace)
+          }
+        } catch (all) {  // ignore: only thrown when the executor is idle (see below)
+        }
+      }
+    }
+    // one-off executors also have workspaces
+    oneOffExecutors = computer.getOneOffExecutors()
+    if (oneOffExecutors != null) {
+      for (executor in oneOffExecutors) {
+        try {
+          curWorkspace = executor.getCurrentWorkspace()
+          if (curWorkspace != null) {
+            lockedWorkspaces.add(curWorkspace)
+          }
+        } catch (all) {
+          // Sometimes this throws a NullPointerException, but we don't
+          // care, as that only happens when the executor is not running
+          // anything, so just ignore it
+        }
+      }
+    }
+
+    baseWorkspace = rootPath.child('workspace')
+    for (jobWorkspace in baseWorkspace.listDirectories()) {
+      pathAsString = jobWorkspace.getRemote()
+      // not sure if this ever happens
+      if (!jobWorkspace.exists()) {
+        continue
+      }
+      if (jobWorkspace in lockedWorkspaces) {
+        println("INFO::" + jobWorkspace + "::SKIPPING:: Currently in use")
+      } else {
+        println("INFO::${jobWorkspace}:: Wiping out")
+        try {
+          jobWorkspace.deleteRecursive()
+        } catch (IOException e) {
+          println("WARN::${jobWorkspace}:: It seems we can't remove it as the default user... sudoing!")
+          if (pathAsString != '/') {
+            launcher = node.createLauncher(listener)
+            // By the docs it seems the launcher has to be decorated for the node
+            launcher = launcher.decorateFor(node)
+            cmd = "/usr/bin/sudo rm -Rf '${pathAsString}'"
+            procStarter = launcher.launch()
+            procStarter = procStarter.cmdAsSingleString(cmd).readStdout()
+            proc = procStarter.start()
+            res = proc.join()
+            if (res != 0) {
+              println("ERROR::${jobWorkspace}:: cmd: ${cmd}")
+              println("ERROR::${jobWorkspace}:: rc: ${res}")
+              println("ERROR::${jobWorkspace}:: out: ${proc.getStdout().text}")
+              throw new Error("Failed to run sudo")
+            }
+          }
+        }
+        println("INFO::" + jobWorkspace + ":: Deleted")
+      }
+    }
+
+    // take it back online if it was online before so it can keep
+    // running new jobs
+    if (wasOnline) {
+      computer.setTemporarilyOffline(false, null)
+      computer.waitUntilOnline()
+    }
+  }
+}
+println("======")
diff --git a/jobs/confs/yaml/jobs/system/system_cleanup-workspaces.yaml b/jobs/confs/yaml/jobs/system/system_cleanup-workspaces.yaml
new file mode 100644
index 0000000..579a19f
--- /dev/null
+++ b/jobs/confs/yaml/jobs/system/system_cleanup-workspaces.yaml
@@ -0,0 +1,8 @@
+- job:
+    name: system_cleanup-workspaces
+    node: master
+    triggers:
+        - timed: "30 * * * *"
+    builders:
+        - groovy:
+            command: !include-raw groovy-scripts/cleanup_workspaces.groovy


-- 
To view, visit http://gerrit.ovirt.org/34686
To unsubscribe, visit http://gerrit.ovirt.org/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: I97ad249a432129ca0f8b176d9fa8dbe6b1bfb5b5
Gerrit-PatchSet: 1
Gerrit-Project: jenkins
Gerrit-Branch: master
Gerrit-Owner: David Caro <dcaro...@redhat.com>