The inode cache can be altered and queried by multiple threads of
execution, even before the introduction of delayed write support for
NAND. This patch adds a new lock to prevent simultaneous modification
of the cache.
---
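Note for reviewers (not part of the patch): below is a minimal,
self-contained sketch of the locking pattern the change introduces, i.e.
every lookup and insertion on the singly linked i_cache_next list happens
under one recursive mutex, so a lookup that misses can fall through to an
insertion without dropping the lock. A POSIX recursive mutex stands in for
rtems_recursive_mutex, and the struct and function names are simplified
stand-ins, not the real JFFS2 types.

#include <pthread.h>
#include <stddef.h>

struct cached_inode {
	unsigned int i_ino;
	struct cached_inode *i_cache_next;
};

struct inode_cache {
	pthread_mutex_t lock;        /* plays the role of s_cache_mutex */
	struct cached_inode *head;   /* plays the role of the s_root list */
};

static void inode_cache_init(struct inode_cache *c)
{
	pthread_mutexattr_t attr;

	/* Recursive type mirrors rtems_recursive_mutex semantics */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&c->lock, &attr);
	pthread_mutexattr_destroy(&attr);
	c->head = NULL;
}

/*
 * Lookup and insert run under the same lock, mirroring how jffs2_iget()
 * now holds s_cache_mutex across both ilookup() and new_inode().
 */
static struct cached_inode *inode_cache_lookup_or_insert(
	struct inode_cache *c, struct cached_inode *fresh)
{
	struct cached_inode *it;

	pthread_mutex_lock(&c->lock);
	for (it = c->head; it != NULL; it = it->i_cache_next) {
		if (it->i_ino == fresh->i_ino) {
			pthread_mutex_unlock(&c->lock);
			return it;              /* hit: reuse cached inode */
		}
	}
	fresh->i_cache_next = c->head;          /* miss: insert at head */
	c->head = fresh;
	pthread_mutex_unlock(&c->lock);
	return fresh;
}

The same pattern covers eviction in jffs2_iput(): unlinking an entry from
the list and clearing it happen while the mutex is held, so concurrent
lookups never observe a half-removed node.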
 cpukit/libfs/src/jffs2/src/fs-rtems.c | 24 +++++++++++++++++++++++-
 cpukit/libfs/src/jffs2/src/os-rtems.h |  2 ++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/cpukit/libfs/src/jffs2/src/fs-rtems.c b/cpukit/libfs/src/jffs2/src/fs-rtems.c
index 8e8dfe8b0c..6a14bfa9b3 100644
--- a/cpukit/libfs/src/jffs2/src/fs-rtems.c
+++ b/cpukit/libfs/src/jffs2/src/fs-rtems.c
@@ -369,6 +369,7 @@ static void rtems_jffs2_free_fs_info(rtems_jffs2_fs_info *fs_info, bool do_mount
        rtems_jffs2_flash_control_destroy(fs_info->sb.s_flash_control);
        rtems_jffs2_compressor_control_destroy(fs_info->sb.s_compressor_control);
        rtems_recursive_mutex_destroy(&sb->s_mutex);
+       rtems_recursive_mutex_destroy(&sb->s_cache_mutex);
        free(fs_info);
 }
 
@@ -1071,7 +1072,9 @@ static void rtems_jffs2_fsunmount(rtems_filesystem_mount_table_entry_t *mt_entry
 
        jffs2_sum_exit(c);
 
+       rtems_recursive_mutex_lock(&fs_info->sb.s_cache_mutex);
        icache_evict(root_i, NULL);
+       rtems_recursive_mutex_unlock(&fs_info->sb.s_cache_mutex);
        assert(root_i->i_cache_next == NULL);
        assert(root_i->i_count == 1);
        jffs2_iput(root_i);
@@ -1388,6 +1391,7 @@ int rtems_jffs2_initialize(
                spin_lock_init(&c->inocache_lock);
                c->mtd = NULL;
                rtems_recursive_mutex_init(&sb->s_mutex, RTEMS_FILESYSTEM_TYPE_JFFS2);
+               rtems_recursive_mutex_init(&sb->s_cache_mutex, RTEMS_FILESYSTEM_TYPE_JFFS2);
        }
 
        /* Start task for delayed work if it hasn't already been started */
@@ -1497,6 +1501,7 @@ int rtems_jffs2_initialize(
 //
 //==========================================================================
 
+// The s_cache_mutex must be held while calling this function
 static struct _inode *new_inode(struct super_block *sb)
 {
 
@@ -1537,6 +1542,7 @@ static struct _inode *new_inode(struct super_block *sb)
        return inode;
 }
 
+// The s_cache_mutex must be held while calling this function
 static struct _inode *ilookup(struct super_block *sb, cyg_uint32 ino)
 {
        struct _inode *inode = NULL;
@@ -1565,12 +1571,20 @@ struct _inode *jffs2_iget(struct super_block *sb, cyg_uint32 ino)
 
        D2(printf("jffs2_iget\n"));
 
+       /*
+        * ilookup and new_inode need to be locked together since they're
+        * related manipulations of the inode cache
+        */
+       rtems_recursive_mutex_lock(&sb->s_cache_mutex);
        inode = ilookup(sb, ino);
-       if (inode)
+       if (inode) {
+               rtems_recursive_mutex_unlock(&sb->s_cache_mutex);
                return inode;
+       }
 
        // Not cached, so malloc it
        inode = new_inode(sb);
+       rtems_recursive_mutex_unlock(&sb->s_cache_mutex);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1612,6 +1626,7 @@ void jffs2_iput(struct _inode *i)
                struct _inode *parent;
 
                // Remove from the icache linked list and free immediately
+               rtems_recursive_mutex_lock(&i->i_sb->s_cache_mutex);
                if (i->i_cache_prev)
                        i->i_cache_prev->i_cache_next = i->i_cache_next;
                if (i->i_cache_next)
@@ -1619,6 +1634,7 @@ void jffs2_iput(struct _inode *i)
 
                parent = i->i_parent;
                jffs2_clear_inode(i);
+               rtems_recursive_mutex_unlock(&i->i_sb->s_cache_mutex);
                memset(i, 0x5a, sizeof(*i));
                free(i);
 
@@ -1630,7 +1646,9 @@ void jffs2_iput(struct _inode *i)
        } else {
                // Evict some _other_ inode with i_count zero, leaving
                // this latest one in the cache for a while 
+               rtems_recursive_mutex_lock(&i->i_sb->s_cache_mutex);
                icache_evict(i->i_sb->s_root, i);
+               rtems_recursive_mutex_unlock(&i->i_sb->s_cache_mutex);
        }
 }
 
@@ -1679,7 +1697,9 @@ struct _inode *jffs2_new_inode (struct _inode *dir_i, int mode, struct jffs2_raw
 
        c = JFFS2_SB_INFO(sb);
        
+       rtems_recursive_mutex_lock(&sb->s_cache_mutex);
        inode = new_inode(sb);
+       rtems_recursive_mutex_unlock(&sb->s_cache_mutex);
        
        if (!inode)
                return ERR_PTR(-ENOMEM);
@@ -1784,7 +1804,9 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
                   holding the alloc_sem, and jffs2_do_unlink() would also
                   need that while decrementing nlink on any inode.
                */
+               rtems_recursive_mutex_lock(&(OFNI_BS_2SFFJ(c))->s_cache_mutex);
                inode = ilookup(OFNI_BS_2SFFJ(c), inum);
+               rtems_recursive_mutex_unlock(&(OFNI_BS_2SFFJ(c))->s_cache_mutex);
                if (!inode) {
                        jffs2_dbg(1, "ilookup() failed for ino #%u; inode is 
probably deleted.\n",
                                  inum);
diff --git a/cpukit/libfs/src/jffs2/src/os-rtems.h b/cpukit/libfs/src/jffs2/src/os-rtems.h
index 63841a5e50..a53d909b4c 100644
--- a/cpukit/libfs/src/jffs2/src/os-rtems.h
+++ b/cpukit/libfs/src/jffs2/src/os-rtems.h
@@ -100,6 +100,8 @@ struct _inode {
 
 struct super_block {
        struct jffs2_sb_info    jffs2_sb;
+       /* Protects the s_root inode cache */
+       rtems_recursive_mutex   s_cache_mutex;
        struct _inode *         s_root;
        rtems_jffs2_flash_control       *s_flash_control;
        rtems_jffs2_compressor_control  *s_compressor_control;
-- 
2.39.2
