> -----Original Message-----
> From: Mika Penttilä <[email protected]>
> Subject: Re: [PATCH v5 3/7] fsdax: Add dax_iomap_cow_copy() for
> dax_iomap_zero
> 
> Hi,
> 
> On 11.5.2021 6.09, Shiyang Ruan wrote:
> > Punching a hole in a reflinked file needs dax_copy_edge() too.
> > Otherwise, data in the non-aligned area will not be correct.  So, add
> > the srcmap to dax_iomap_zero() and replace memset() with dax_copy_edge().
> >
> > Signed-off-by: Shiyang Ruan <[email protected]>
> > Reviewed-by: Ritesh Harjani <[email protected]>
> > ---
> >   fs/dax.c               | 25 +++++++++++++++----------
> >   fs/iomap/buffered-io.c |  2 +-
> >   include/linux/dax.h    |  3 ++-
> >   3 files changed, 18 insertions(+), 12 deletions(-)
> >
> > diff --git a/fs/dax.c b/fs/dax.c
> > index ef0e564e7904..ee9d28a79bfb 100644
> > --- a/fs/dax.c
> > +++ b/fs/dax.c
> > @@ -1186,7 +1186,8 @@ static vm_fault_t dax_pmd_load_hole(struct
> xa_state *xas, struct vm_fault *vmf,
> >   }
> >   #endif /* CONFIG_FS_DAX_PMD */
> >
> > -s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
> > +s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap,
> > +           struct iomap *srcmap)
> >   {
> >     sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
> >     pgoff_t pgoff;
> > @@ -1208,19 +1209,23 @@ s64 dax_iomap_zero(loff_t pos, u64 length,
> > struct iomap *iomap)
> >
> >     if (page_aligned)
> >             rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
> > -   else
> > +   else {
> >             rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
> > -   if (rc < 0) {
> > -           dax_read_unlock(id);
> > -           return rc;
> > -   }
> > -
> > -   if (!page_aligned) {
> > -           memset(kaddr + offset, 0, size);
> > +           if (rc < 0)
> > +                   goto out;
> > +           if (iomap->addr != srcmap->addr) {
> > +                   rc = dax_iomap_cow_copy(offset, size, PAGE_SIZE, srcmap,
> > +                                           kaddr);
> 
> The 'offset' above is the offset within the page; I think
> dax_iomap_cow_copy() expects the absolute position ('pos') instead.

You are right.  I should pass 'pos' here.  Thanks for pointing this out.


--
Thanks,
Ruan Shiyang.

> 
> > +                   if (rc < 0)
> > +                           goto out;
> > +           } else
> > +                   memset(kaddr + offset, 0, size);
> >             dax_flush(iomap->dax_dev, kaddr + offset, size);
> >     }
> > +
> > +out:
> >     dax_read_unlock(id);
> > -   return size;
> > +   return rc < 0 ? rc : size;
> >   }
> >
> >   static loff_t
> > diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index
> > f2cd2034a87b..2734955ea67f 100644
> > --- a/fs/iomap/buffered-io.c
> > +++ b/fs/iomap/buffered-io.c
> > @@ -933,7 +933,7 @@ static loff_t iomap_zero_range_actor(struct inode
> *inode, loff_t pos,
> >             s64 bytes;
> >
> >             if (IS_DAX(inode))
> > -                   bytes = dax_iomap_zero(pos, length, iomap);
> > +                   bytes = dax_iomap_zero(pos, length, iomap, srcmap);
> >             else
> >                     bytes = iomap_zero(inode, pos, length, iomap, srcmap);
> >             if (bytes < 0)
> > diff --git a/include/linux/dax.h b/include/linux/dax.h index
> > b52f084aa643..3275e01ed33d 100644
> > --- a/include/linux/dax.h
> > +++ b/include/linux/dax.h
> > @@ -237,7 +237,8 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault
> *vmf,
> >   int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t
> index);
> >   int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
> >                                   pgoff_t index);
> > -s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
> > +s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap,
> > +           struct iomap *srcmap);
> >   static inline bool dax_mapping(struct address_space *mapping)
> >   {
> >     return mapping->host && IS_DAX(mapping->host);

_______________________________________________
Linux-nvdimm mailing list -- [email protected]
To unsubscribe send an email to [email protected]

Reply via email to