From: Moni Shoua <mo...@mellanox.com>

If the page-fault handler spans multiple MRs, the access mask needs to
be reset before handling each MR; otherwise write access will be
granted to mapped pages instead of read-only access.

Cc: <sta...@vger.kernel.org> # 3.19
Fixes: 7bdf65d411c1 ("IB/mlx5: Handle page faults")
Reported-by: Jerome Glisse <jgli...@redhat.com>
Signed-off-by: Moni Shoua <mo...@mellanox.com>
Signed-off-by: Leon Romanovsky <leo...@mellanox.com>
---
 drivers/infiniband/hw/mlx5/odp.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
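
For reference, a minimal standalone sketch of the looping pattern behind
the bug (hypothetical names, not the mlx5 code itself): the handler
revisits a label once per MR, so a mask initialized at declaration time
carries the write bit from a writable MR into the next, read-only one.

/*
 * Minimal sketch of the bug pattern; names are made up and this is
 * not the driver code.  The fault handler loops back to a label once
 * per MR, so state initialized at declaration survives into later
 * iterations.
 */
#include <stdio.h>

#define READ_ALLOWED	(1 << 0)
#define WRITE_ALLOWED	(1 << 1)

struct fake_mr {
	int writable;
};

static void handle_fault(struct fake_mr *mr, int nr_mrs)
{
	unsigned long access_mask = READ_ALLOWED;	/* BUG: set only once */
	int i = 0;

next_mr:
	/* The fix moves "access_mask = READ_ALLOWED;" here, so every
	 * MR starts from read-only again.
	 */
	if (mr[i].writable)
		access_mask |= WRITE_ALLOWED;

	printf("MR %d mapped with mask 0x%lx\n", i, access_mask);

	if (++i < nr_mrs)
		goto next_mr;	/* stale WRITE_ALLOWED leaks into MR i */
}

int main(void)
{
	/* first MR writable, second read-only */
	struct fake_mr mrs[] = { { .writable = 1 }, { .writable = 0 } };

	handle_fault(mrs, 2);	/* MR 1 wrongly keeps mask 0x3 */
	return 0;
}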

diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index ec8381d1e0f2..95dccac1f844 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -585,7 +585,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
        bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
-       u64 access_mask = ODP_READ_ALLOWED_BIT;
+       u64 access_mask;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
        size_t size;
@@ -607,6 +607,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
        page_shift = mr->umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+       access_mask = ODP_READ_ALLOWED_BIT;
 
        if (prefetch && !downgrade && !mr->umem->writable) {
                /* prefetch with write-access must
-- 
2.20.1
