This avoids a race condition where multiple devices raising interrupts
simultaneously on the same IRQ could cause the masking to fail.
---
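Not part of the patch, just an illustration of the race described above: a
minimal userspace sketch, assuming two cpus enter __disable_irq() for the same
irq at about the same time.  The ndisabled/fake_mask_irq() names are made up
for the sketch; splhigh() is left out because it only blocks interrupts on the
local cpu and does not serialize the two callers.

#include <pthread.h>
#include <stdio.h>

static int ndisabled;   /* old code: plain ++ under splhigh(), no atomics   */
static int masked;      /* stands in for the interrupt controller mask bit  */

static void fake_mask_irq(void) { masked = 1; }

static void *old_disable_irq(void *arg)
{
  (void) arg;
  /* Old logic: bump the counter, mask only when it reads 1.  If both
   * callers increment before either one tests the counter, both read 2
   * and the irq line is never actually masked.  */
  ndisabled++;
  if (ndisabled == 1)
    fake_mask_irq();
  return NULL;
}

int main(void)
{
  pthread_t a, b;
  pthread_create(&a, NULL, old_disable_irq, NULL);
  pthread_create(&b, NULL, old_disable_irq, NULL);
  pthread_join(a, NULL);
  pthread_join(b, NULL);
  /* With an unlucky interleaving this prints masked=0 even though the
   * irq was "disabled" twice.  */
  printf("ndisabled=%d masked=%d\n", ndisabled, masked);
  return 0;
}
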
 i386/i386/irq.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/i386/i386/irq.c b/i386/i386/irq.c
index 91318f67..d91b3856 100644
--- a/i386/i386/irq.c
+++ b/i386/i386/irq.c
@@ -41,11 +41,19 @@ __disable_irq (irq_t irq_nr)
 {
   assert (irq_nr < NINTR);
 
+  /* The spl wrapping prevents the local cpu from being
+   * interrupted while it updates the counter.  */
   spl_t s = splhigh();
-  ndisabled_irq[irq_nr]++;
+
+  /* Masking the irq unconditionally prevents other
+   * cpus from being interrupted on this irq.  */
+  mask_irq (irq_nr);
+
+  /* Now atomically increment the nesting counter.  */
+  __atomic_add_fetch(&ndisabled_irq[irq_nr], 1, __ATOMIC_RELAXED);
+
   assert (ndisabled_irq[irq_nr] > 0);
-  if (ndisabled_irq[irq_nr] == 1)
-    mask_irq (irq_nr);
+
   splx(s);
 }
 
@@ -54,11 +62,17 @@ __enable_irq (irq_t irq_nr)
 {
   assert (irq_nr < NINTR);
 
+  /* The spl wrapping prevents the local cpu from being
+   * interrupted while it updates the counter.  */
   spl_t s = splhigh();
-  assert (ndisabled_irq[irq_nr] > 0);
-  ndisabled_irq[irq_nr]--;
-  if (ndisabled_irq[irq_nr] == 0)
+
+  /* The irq is not guaranteed to still be masked at this point:
+   * another cpu may be going through disable/enable on the same irq
+   * almost simultaneously.  So it is only safe to unmask when the
+   * counter atomically drops to zero in this very check.  */
+  if (__atomic_add_fetch(&ndisabled_irq[irq_nr], -1, __ATOMIC_RELAXED) == 0)
     unmask_irq (irq_nr);
+
   splx(s);
 }
 
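Also not part of the patch: a minimal userspace sketch of the nesting
semantics __enable_irq() now relies on, using the same __atomic_add_fetch
builtins as the patch; the fake_mask_irq()/fake_unmask_irq() helpers are
made up for illustration and splhigh()/splx() are omitted.

#include <stdio.h>

static int ndisabled;
static int masked;

static void fake_mask_irq(void)   { masked = 1; }
static void fake_unmask_irq(void) { masked = 0; }

static void disable(void)
{
  fake_mask_irq();                                      /* mask unconditionally   */
  __atomic_add_fetch(&ndisabled, 1, __ATOMIC_RELAXED);  /* then count the nesting */
}

static void enable(void)
{
  /* Unmask only when the last outstanding disable has been undone.  */
  if (__atomic_add_fetch(&ndisabled, -1, __ATOMIC_RELAXED) == 0)
    fake_unmask_irq();
}

int main(void)
{
  disable();                    /* first driver disables the shared irq   */
  disable();                    /* second driver disables it as well      */
  enable();                     /* still masked: one disable outstanding  */
  printf("after first enable:  masked=%d\n", masked);
  enable();                     /* last enable actually unmasks           */
  printf("after second enable: masked=%d\n", masked);
  return 0;
}
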
-- 
2.45.2