tcp_memory_pressure is read without holding any lock,
and its value can be changed by other cpus.
Use READ_ONCE() to annotate these lockless reads.
The write side is already using atomic ops.
Fixes: b8da51ebb1aa ("tcp: introduce tcp_under_memory_pressure()")
Signed-off-by: Eric Dumazet <[email protected]>
---
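Note: below is a minimal userspace sketch of the pattern this patch
annotates, for readers new to READ_ONCE(). The READ_ONCE() stand-in
and the GCC __atomic builtins are simplified assumptions standing in
for the kernel's READ_ONCE() (include/linux/compiler.h) and for
cmpxchg()/xchg(); it is an illustration, not the kernel code.

#include <stdio.h>

/* Simplified stand-in: forces a single, non-torn load that the
 * compiler may not refetch or elide like a plain access. */
#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

static unsigned long tcp_memory_pressure;

static void enter_pressure(unsigned long now)
{
	unsigned long expected = 0;

	if (READ_ONCE(tcp_memory_pressure))	/* annotated lockless read */
		return;
	/* Write side is atomic, like the kernel's
	 * cmpxchg(&tcp_memory_pressure, 0, now). */
	__atomic_compare_exchange_n(&tcp_memory_pressure, &expected, now,
				    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

static void leave_pressure(void)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))	/* annotated lockless read */
		return;
	/* Like the kernel's xchg(&tcp_memory_pressure, 0). */
	val = __atomic_exchange_n(&tcp_memory_pressure, 0, __ATOMIC_SEQ_CST);
	if (val)
		printf("entered pressure at %lu\n", val);
}

int main(void)
{
	enter_pressure(12345);
	leave_pressure();
	return 0;
}

The reads need the annotation even though the writes are already
atomic: READ_ONCE() tells the compiler (and KCSAN) that the racy read
is intentional, so the load cannot be torn, refetched or elided.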
 include/net/tcp.h | 2 +-
 net/ipv4/tcp.c    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c9a3f9688223b231e5a8c90c2494ad85f3d63cfc..88e63d64c698229379a953101a8aab2bca55ed1a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;
 
-	return tcp_memory_pressure;
+	return READ_ONCE(tcp_memory_pressure);
 }
 /*
  * The next routines deal with comparing 32 bit unsigned ints
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f98a1882e537dca0102e829cb349be50302d83ab..888c92b63f5a6dc4b935cca7c979c1e559126d44 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
 {
 	unsigned long val;
 
-	if (tcp_memory_pressure)
+	if (READ_ONCE(tcp_memory_pressure))
 		return;
 	val = jiffies;
 
@@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
 {
 	unsigned long val;
 
-	if (!tcp_memory_pressure)
+	if (!READ_ONCE(tcp_memory_pressure))
		return;
 	val = xchg(&tcp_memory_pressure, 0);
 	if (val)
--
2.23.0.581.g78d2f28ef7-goog