On Mon, 24 Aug 2020, Alex Shi wrote:

> From: Hugh Dickins <[email protected]>
> 
> Use the relock function to replace the open-coded relocking, and try
> to save a few lock/unlock cycles.
> 
> Signed-off-by: Hugh Dickins <[email protected]>
> Signed-off-by: Alex Shi <[email protected]>
> Reviewed-by: Alexander Duyck <[email protected]>

NAK. Who wrote this rubbish? Oh, did I? Maybe something you extracted
from my tarball. No, we don't need any of this now, as explained when
going through 20/32.

> Cc: Andrew Morton <[email protected]>
> Cc: Tejun Heo <[email protected]>
> Cc: Andrey Ryabinin <[email protected]>
> Cc: Jann Horn <[email protected]>
> Cc: Mel Gorman <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Matthew Wilcox <[email protected]>
> Cc: Hugh Dickins <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> ---
>  mm/vmscan.c | 17 ++++++-----------
>  1 file changed, 6 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 2c94790d4cb1..04ef94190530 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1848,15 +1848,15 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>       enum lru_list lru;
>  
>       while (!list_empty(list)) {
> -             struct lruvec *new_lruvec = NULL;
> -
>               page = lru_to_page(list);
>               VM_BUG_ON_PAGE(PageLRU(page), page);
>               list_del(&page->lru);
>               if (unlikely(!page_evictable(page))) {
> -                     spin_unlock_irq(&lruvec->lru_lock);
> +                     if (lruvec) {
> +                             spin_unlock_irq(&lruvec->lru_lock);
> +                             lruvec = NULL;
> +                     }
>                       putback_lru_page(page);
> -                     spin_lock_irq(&lruvec->lru_lock);
>                       continue;
>               }
>  
> @@ -1871,12 +1871,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>                *     list_add(&page->lru,)
>                *                                        list_add(&page->lru,)
>                */
> -             new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
> -             if (new_lruvec != lruvec) {
> -                     if (lruvec)
> -                             spin_unlock_irq(&lruvec->lru_lock);
> -                     lruvec = lock_page_lruvec_irq(page);
> -             }
> +             lruvec = relock_page_lruvec_irq(page, lruvec);
>               SetPageLRU(page);
>  
>               if (unlikely(put_page_testzero(page))) {
> @@ -1885,8 +1880,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
>  
>                       if (unlikely(PageCompound(page))) {
>                               spin_unlock_irq(&lruvec->lru_lock);
> +                             lruvec = NULL;
>                               destroy_compound_page(page);
> -                             spin_lock_irq(&lruvec->lru_lock);
>                       } else
>                               list_add(&page->lru, &pages_to_free);
>  
> -- 
> 1.8.3.1
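
For reference, relock_page_lruvec_irq() can be read as the open-coded
pattern this patch deletes, folded into one helper. A minimal sketch,
assuming only the semantics visible in the removed lines above (not
necessarily the exact helper from the series):

	/*
	 * Sketch: keep @locked_lruvec if it is already the lruvec that
	 * @page belongs to; otherwise drop it and take the page's own
	 * lruvec lock. Mirrors the mem_cgroup_page_lruvec() /
	 * lock_page_lruvec_irq() sequence deleted in the first hunk.
	 */
	static struct lruvec *relock_page_lruvec_irq(struct page *page,
					struct lruvec *locked_lruvec)
	{
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (lruvec != locked_lruvec) {
			if (locked_lruvec)
				spin_unlock_irq(&locked_lruvec->lru_lock);
			/* looks the lruvec up again and locks it */
			lruvec = lock_page_lruvec_irq(page);
		}
		return lruvec;
	}

The caller must still drop the returned lruvec's lock (or pass it back
in on the next iteration), exactly as move_pages_to_lru() does here.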
