[vlma-devel] commit: Don't let the std{out,err} reader go into an infinite loop when it has nothing to read. ( Adrien Grand )

git version control git at videolan.org
Tue Mar 10 02:22:58 CET 2009


vlma | branch: master | Adrien Grand <jpountz at videolan.org> | Sun Mar  8 01:25:38 2009 +0100| [2068bd2f06e89a47d675b6235f52449a9fa80837] | committer: Adrien Grand 

Don't let the std{out,err} reader go into an infinite loop when it has nothing to read.

> http://git.videolan.org/gitweb.cgi/vlma.git/?a=commit;h=2068bd2f06e89a47d675b6235f52449a9fa80837
---

 vlma-watchdog/src/vlc.py |   38 ++++++++++++++++++++++++++------------
 1 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/vlma-watchdog/src/vlc.py b/vlma-watchdog/src/vlc.py
index a106f20..8f6122c 100644
--- a/vlma-watchdog/src/vlc.py
+++ b/vlma-watchdog/src/vlc.py
@@ -119,12 +119,12 @@ class LogReader(threading.Thread):
   def run(self):
     while True:
       self.__lock.acquire()
-      ready = not self.__input is None
       line = ""
-      if ready:
+      if not self.__input is None:
         line = self.__input.readline().rstrip('\n')
       self.__lock.release()
-      if not ready:
+      if line == "":
+        time.sleep(1)
         continue
       self.__output_lock.acquire()
       self.__output.append(line)
@@ -140,6 +140,7 @@ class VLC:
   def __init__(self):
     self.logger = logging.getLogger("VLC")
     self.runner = None
+    self.__lock = threading.RLock()
     self.__log_queue = []
     self.__log_queue_lock = threading.RLock()
     self.__stdout_reader = LogReader(conf.LOG_TAIL_SIZE, None, self.__log_queue, self.__log_queue_lock)
@@ -148,6 +149,7 @@ class VLC:
     self.__stderr_reader.start()
 
   def start(self):
+    self.__lock.acquire()
     if not self.runner is None and self.runner.isAlive():
       logger.error("Cannot start VLC, another instance is still running.")
       logger.info("Please stop VLC first")
@@ -155,18 +157,30 @@ class VLC:
       self.runner = VLCRunner(self.__log_queue, self.__log_queue_lock,
                               self.__stdout_reader, self.__stderr_reader)
       self.runner.start()
+    self.__lock.release()
 
   def stop(self):
+    self.__lock.acquire()
     self.runner.stop()
+    self.__lock.release()
 
   def getUptime(self):
-    return self.runner.getUptime()
+    self.__lock.acquire()
+    uptime = self.runner.getUptime()
+    self.__lock.release()
+    return uptime
 
   def getVersion(self):
-    return self.runner.getVersion()
+    self.__lock.acquire()
+    version = self.runner.getVersion()
+    self.__lock.release()
+    return version
 
   def getLogTail(self, lines=50):
-    return self.runner.getLogTail(lines)
+    self.__lock.acquire()
+    logtail = self.runner.getLogTail(lines)
+    self.__lock.release()
+    return logtail
 
 # Former version, file-based
 #
@@ -209,24 +223,24 @@ class Monitor(threading.Thread):
     while(True):
       time.sleep(1)
       cpuLoad = self.getCpuLoad()
-      if(cpuLoad >= conf.CPU_LOAD_THRESHOLD):
+      if cpuLoad >= conf.CPU_LOAD_THRESHOLD:
         logger.warn("CPU load is %f, VLC restart triggered", cpuLoad)
         self.vlc.stop()
-        # Because load won't go down in one millisecond
+        # Because the load won't go down in one millisecond
         logger.info("Waiting for the load to decrease")
-        while(True):
+        while True:
           time.sleep(5)
-          if(self.getCpuLoad() < conf.CPU_LOAD_THRESHOLD):
+          if self.getCpuLoad() < conf.CPU_LOAD_THRESHOLD:
             break
         self.vlc.start()
         continue
       vlcCpu = self.getVlcCpu()
-      if(vlcCpu >= conf.VLC_CPU_THRESHOLD):
+      if vlcCpu >= conf.VLC_CPU_THRESHOLD:
         logger.warn("CPU usage of VLC is %f, VLC restart triggered", vlcCpu)
         self.vlc.restart()
         continue
       vlcMem = self.getVlcMem()
-      if(vlcMem >= conf.VLC_MEMORY_THRESHOLD):
+      if vlcMem >= conf.VLC_MEMORY_THRESHOLD:
         logger.warn("Memory usage is %f, VLC restart triggered", vlcMem)
         self.vlc.restart()
         continue



More information about the vlma-devel mailing list