Diffstat
-rw-r--r--  cpukit/libfs/src/jffs2/include/linux/mutex.h      | 31
-rw-r--r--  cpukit/libfs/src/jffs2/include/linux/rwsem.h      | 16
-rw-r--r--  cpukit/libfs/src/jffs2/include/linux/workqueue.h  |  8
-rw-r--r--  cpukit/libfs/src/jffs2/src/fs-rtems.c             | 99
-rw-r--r--  cpukit/libfs/src/jffs2/src/jffs2_fs_sb.h          |  3
-rw-r--r--  cpukit/libfs/src/jffs2/src/os-rtems.h             |  4
-rw-r--r--  cpukit/libfs/src/jffs2/src/scan.c                 | 24
7 files changed, 131 insertions(+), 54 deletions(-)
diff --git a/cpukit/libfs/src/jffs2/include/linux/mutex.h b/cpukit/libfs/src/jffs2/include/linux/mutex.h
index be8709f125..cc82a3f17a 100644
--- a/cpukit/libfs/src/jffs2/include/linux/mutex.h
+++ b/cpukit/libfs/src/jffs2/include/linux/mutex.h
@@ -1,19 +1,30 @@
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H
-#include <rtems/thread.h>
-
-struct mutex { rtems_mutex r_m; };
+struct mutex { };
#define DEFINE_MUTEX(m) struct mutex m
-#define mutex_init(m) rtems_mutex_init(&(m)->r_m, "JFFS2 Mutex");
-
-#define mutex_lock(m) rtems_mutex_lock(&(m)->r_m);
-
-#define mutex_lock_interruptible(m) ({ mutex_lock(m); 0; })
-
-#define mutex_unlock(m) rtems_mutex_unlock(&(m)->r_m);
+static inline void mutex_init(struct mutex *m)
+{
+ (void) m;
+}
+
+static inline void mutex_lock(struct mutex *m)
+{
+ (void) m;
+}
+
+static inline int mutex_lock_interruptible(struct mutex *m)
+{
+ (void) m;
+ return 0;
+}
+
+static inline void mutex_unlock(struct mutex *m)
+{
+ (void) m;
+}
#define mutex_is_locked(m) 1
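
The stubbed mutex API above turns every lock operation into an empty inline function: on RTEMS, JFFS2 is serialized by the per-superblock lock taken in the glue code (rtems_jffs2_do_lock/rtems_jffs2_do_unlock in fs-rtems.c), so the Linux-style mutexes no longer need to do anything themselves. A minimal sketch of what a call site reduces to under these stubs; example_mutex and example_critical_section are illustrative names, not part of the tree:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mutex);

    static void example_critical_section(void)
    {
        mutex_lock(&example_mutex);   /* empty inline function: no-op */
        /* ... JFFS2 work; the real serialization comes from the
         * superblock lock held by the RTEMS glue code ... */
        mutex_unlock(&example_mutex); /* also a no-op */
    }
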
diff --git a/cpukit/libfs/src/jffs2/include/linux/rwsem.h b/cpukit/libfs/src/jffs2/include/linux/rwsem.h
index 9db6d45ad2..e59e1cede3 100644
--- a/cpukit/libfs/src/jffs2/include/linux/rwsem.h
+++ b/cpukit/libfs/src/jffs2/include/linux/rwsem.h
@@ -1,20 +1,16 @@
#ifndef __LINUX_RWSEM_H__
#define __LINUX_RWSEM_H__
-#include <pthread.h>
+struct rw_semaphore {};
-struct rw_semaphore {
- pthread_rwlock_t lock;
-};
+#define init_rwsem(rwsem)
-#define init_rwsem(rwsem) pthread_rwlock_init(&(rwsem)->lock, NULL)
+#define down_read(rwsem)
-#define down_read(rwsem) pthread_rwlock_rdlock(&(rwsem)->lock)
+#define down_write(rwsem)
-#define down_write(rwsem) pthread_rwlock_wrlock(&(rwsem)->lock)
+#define up_read(rwsem)
-#define up_read(rwsem) pthread_rwlock_unlock(&(rwsem)->lock)
-
-#define up_write(rwsem) pthread_rwlock_unlock(&(rwsem)->lock)
+#define up_write(rwsem)
#endif /* __LINUX_RWSEM_H__ */
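
The rwsem stubs follow the same pattern, but as function-like macros that expand to nothing, so the argument is discarded without ever being evaluated. A short sketch under that assumption (example_rwsem and example_read_side are hypothetical names):

    #include <linux/rwsem.h>

    static struct rw_semaphore example_rwsem;

    static void example_read_side(void)
    {
        /* Expands to just ";": the argument is dropped unevaluated,
         * so call sites must not hide side effects inside it. */
        down_read(&example_rwsem);
        /* ... read-side work, serialized by the superblock lock ... */
        up_read(&example_rwsem);
    }
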
diff --git a/cpukit/libfs/src/jffs2/include/linux/workqueue.h b/cpukit/libfs/src/jffs2/include/linux/workqueue.h
index 45a2942bfc..9811c7cd3e 100644
--- a/cpukit/libfs/src/jffs2/include/linux/workqueue.h
+++ b/cpukit/libfs/src/jffs2/include/linux/workqueue.h
@@ -2,6 +2,7 @@
#define __LINUX_WORKQUEUE_H__
#include <rtems/chain.h>
+#include <linux/mutex.h>
struct work_struct { rtems_chain_node node; };
@@ -11,7 +12,6 @@ struct work_struct { rtems_chain_node node; };
})
#define INIT_DELAYED_WORK(delayed_work, delayed_workqueue_callback) ({ \
- _Chain_Initialize_node(&(delayed_work)->work.node); \
(delayed_work)->callback = delayed_workqueue_callback; \
})
@@ -20,8 +20,12 @@ struct work_struct { rtems_chain_node node; };
typedef void (*work_callback_t)(struct work_struct *work);
struct delayed_work {
struct work_struct work;
- uint64_t execution_time;
+ struct mutex dw_mutex;
+ volatile bool pending;
+ volatile uint64_t execution_time;
work_callback_t callback;
+ /* Superblock provided for locking */
+ struct super_block *sb;
};
#define to_delayed_work(work) RTEMS_CONTAINER_OF(work, struct delayed_work, work)
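
With the chain-node initialization moved out of INIT_DELAYED_WORK (it now happens once, when the item is put on the chain at mount time), a delayed work item carries its own mutex, a pending flag, a deadline, and a back pointer to the superblock it must lock. A hedged sketch of the intended setup; example_flush and example_setup are illustrative names, while the helpers named in the comments come from fs-rtems.c below:

    #include <linux/workqueue.h>

    static void example_flush(struct work_struct *work)
    {
        struct delayed_work *dwork = to_delayed_work(work);
        (void) dwork; /* e.g. flush the write buffer */
    }

    static void example_setup(struct delayed_work *dwork,
                              struct super_block *sb)
    {
        dwork->sb = sb;                          /* lock target for the callback */
        INIT_DELAYED_WORK(dwork, example_flush); /* now only sets the callback */
        /* add_delayed_work_to_chain() initializes dw_mutex, pending and the
         * chain node, then appends the item; a later call to
         * jffs2_queue_delayed_work(dwork, delay_ms) just marks it pending
         * and stamps execution_time. */
    }
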
diff --git a/cpukit/libfs/src/jffs2/src/fs-rtems.c b/cpukit/libfs/src/jffs2/src/fs-rtems.c
index 59d03effe6..029cba6618 100644
--- a/cpukit/libfs/src/jffs2/src/fs-rtems.c
+++ b/cpukit/libfs/src/jffs2/src/fs-rtems.c
@@ -579,6 +579,9 @@ static int rtems_jffs2_ioctl(
break;
case RTEMS_JFFS2_FORCE_GARBAGE_COLLECTION:
eno = -jffs2_garbage_collect_pass(&inode->i_sb->jffs2_sb);
+ if (!eno) {
+ eno = -jffs2_flush_wbuf_pad(&inode->i_sb->jffs2_sb);
+ }
break;
default:
eno = EINVAL;
@@ -1066,6 +1069,7 @@ static void rtems_jffs2_fsunmount(rtems_filesystem_mount_table_entry_t *mt_entry
/* Flush any pending writes */
if (!sb_rdonly(&fs_info->sb)) {
jffs2_flush_wbuf_gc(c, 0);
+ jffs2_flush_wbuf_pad(c);
}
#endif
@@ -1241,39 +1245,41 @@ rtems_chain_control delayed_work_chain;
/* Lock for protecting the delayed work chain */
struct mutex delayed_work_mutex;
-void jffs2_queue_delayed_work(struct delayed_work *work, int delay_ms)
+/*
+ * All delayed work structs are initialized and added to the chain during FS
+ * init. Must be called with no locks held.
+ */
+static void add_delayed_work_to_chain(struct delayed_work *work)
{
+ /* Initialize delayed work */
+ mutex_init(&work->dw_mutex);
+ work->pending = false;
+ _Chain_Initialize_node(&work->work.node);
+ work->callback = NULL;
+
mutex_lock(&delayed_work_mutex);
- if (rtems_chain_is_node_off_chain(&work->work.node)) {
- work->execution_time = rtems_clock_get_uptime_nanoseconds() + delay_ms*1000000;
- rtems_chain_append(&delayed_work_chain, &work->work.node);
- }
+ rtems_chain_append_unprotected(&delayed_work_chain, &work->work.node);
mutex_unlock(&delayed_work_mutex);
}
-static void jffs2_remove_delayed_work(struct delayed_work *dwork)
+void jffs2_queue_delayed_work(struct delayed_work *work, int delay_ms)
{
- struct delayed_work* work;
- rtems_chain_node* node;
-
- mutex_lock(&delayed_work_mutex);
- if (rtems_chain_is_node_off_chain(&dwork->work.node)) {
- mutex_unlock(&delayed_work_mutex);
- return;
+ mutex_lock(&work->dw_mutex);
+ if (!work->pending) {
+ work->execution_time = rtems_clock_get_uptime_nanoseconds();
+ work->execution_time += delay_ms*1000000;
+ work->pending = true;
}
+ mutex_unlock(&work->dw_mutex);
+}
- node = rtems_chain_first(&delayed_work_chain);
- while (!rtems_chain_is_tail(&delayed_work_chain, node)) {
- work = (struct delayed_work*) node;
- rtems_chain_node* next_node = rtems_chain_next(node);
- if (work == dwork) {
- rtems_chain_extract(node);
- mutex_unlock(&delayed_work_mutex);
- return;
- }
- node = next_node;
- }
+/* Clean up during FS unmount */
+static void jffs2_remove_delayed_work(struct delayed_work *dwork)
+{
+ mutex_lock(&delayed_work_mutex);
+ rtems_chain_extract_unprotected(&dwork->work.node);
mutex_unlock(&delayed_work_mutex);
+ /* Don't run pending delayed work; this will happen during unmount */
}
static void process_delayed_work(void)
@@ -1291,15 +1297,27 @@ static void process_delayed_work(void)
node = rtems_chain_first(&delayed_work_chain);
while (!rtems_chain_is_tail(&delayed_work_chain, node)) {
work = (struct delayed_work*) node;
- rtems_chain_node* next_node = rtems_chain_next(node);
- if (rtems_clock_get_uptime_nanoseconds() >= work->execution_time) {
- rtems_chain_extract(node);
- work->callback(&work->work);
+ node = rtems_chain_next(node);
+
+ if (!work->pending) {
+ continue;
+ }
+
+ if (rtems_clock_get_uptime_nanoseconds() < work->execution_time) {
+ continue;
}
- node = next_node;
+
+ mutex_lock(&work->dw_mutex);
+ work->pending = false;
+ mutex_unlock(&work->dw_mutex);
+
+ rtems_jffs2_do_lock(work->sb);
+ work->callback(&work->work);
+ rtems_jffs2_do_unlock(work->sb);
}
mutex_unlock(&delayed_work_mutex);
}
+
/* Task for processing delayed work */
static rtems_task delayed_work_task(
rtems_task_argument unused
@@ -1308,7 +1326,7 @@ static rtems_task delayed_work_task(
(void)unused;
while (1) {
process_delayed_work();
- sleep(1);
+ usleep(1);
}
}
@@ -1369,6 +1387,12 @@ int rtems_jffs2_initialize(
if (err == 0) {
sb = &fs_info->sb;
c = JFFS2_SB_INFO(sb);
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ c->wbuf_dwork.sb = sb;
+ add_delayed_work_to_chain(&c->wbuf_dwork);
+#endif
+ spin_lock_init(&c->erase_completion_lock);
+ spin_lock_init(&c->inocache_lock);
c->mtd = NULL;
rtems_recursive_mutex_init(&sb->s_mutex, RTEMS_FILESYSTEM_TYPE_JFFS2);
}
@@ -1455,6 +1479,9 @@ int rtems_jffs2_initialize(
return 0;
} else {
if (fs_info != NULL) {
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ jffs2_remove_delayed_work(&c->wbuf_dwork);
+#endif
free(c->mtd);
c->mtd = NULL;
rtems_jffs2_free_fs_info(fs_info, do_mount_fs_was_successful);
@@ -1503,6 +1530,8 @@ static struct _inode *new_inode(struct super_block *sb)
inode->i_cache_next = NULL; // Newest inode, about to be cached
+ mutex_init(&JFFS2_INODE_INFO(inode)->sem);
+
// Add to the icache
for (cached_inode = sb->s_root; cached_inode != NULL;
cached_inode = cached_inode->i_cache_next) {
@@ -1619,8 +1648,14 @@ void jffs2_iput(struct _inode *i)
static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
{
- memset(f, 0, sizeof(*f));
- mutex_init(&f->sem);
+ /* These must be set manually to preserve other members */
+ f->highest_version = 0;
+ f->fragtree = RB_ROOT;
+ f->metadata = NULL;
+ f->dents = NULL;
+ f->target = NULL;
+ f->flags = 0;
+ f->usercompr = 0;
}
static void jffs2_clear_inode (struct _inode *inode)
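
Taken together, the fs-rtems.c changes split the locking three ways: delayed_work_mutex only guards chain membership (mount and unmount time), each item's dw_mutex plus the volatile pending flag guard queueing, and the callback itself runs under the same per-superblock lock as ordinary filesystem operations. A condensed restatement of the per-item logic in process_delayed_work(), with the chain walk elided; process_one is an illustrative name, not a function in the tree:

    static void process_one(struct delayed_work *work)
    {
        if (!work->pending ||
            rtems_clock_get_uptime_nanoseconds() < work->execution_time) {
            return; /* not queued, or not due yet */
        }

        mutex_lock(&work->dw_mutex);
        work->pending = false;          /* consume the request */
        mutex_unlock(&work->dw_mutex);

        rtems_jffs2_do_lock(work->sb);  /* same lock as regular FS ops */
        work->callback(&work->work);
        rtems_jffs2_do_unlock(work->sb);
    }
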
diff --git a/cpukit/libfs/src/jffs2/src/jffs2_fs_sb.h b/cpukit/libfs/src/jffs2/src/jffs2_fs_sb.h
index 4ee43a6f30..7960f92f85 100644
--- a/cpukit/libfs/src/jffs2/src/jffs2_fs_sb.h
+++ b/cpukit/libfs/src/jffs2/src/jffs2_fs_sb.h
@@ -75,6 +75,9 @@ struct jffs2_sb_info {
uint32_t bad_size;
uint32_t sector_size;
uint32_t unchecked_size;
+#ifdef __rtems__
+ uint32_t obsolete_size;
+#endif
uint32_t nr_free_blocks;
uint32_t nr_erasing_blocks;
diff --git a/cpukit/libfs/src/jffs2/src/os-rtems.h b/cpukit/libfs/src/jffs2/src/os-rtems.h
index 63841a5e50..4bc6f5df13 100644
--- a/cpukit/libfs/src/jffs2/src/os-rtems.h
+++ b/cpukit/libfs/src/jffs2/src/os-rtems.h
@@ -100,6 +100,10 @@ struct _inode {
struct super_block {
struct jffs2_sb_info jffs2_sb;
+ /*
+ * If granular locking is ever enabled for JFFS2, the inode cache
+ * (s_root) needs to be protected due to NAND delayed writes.
+ */
struct _inode * s_root;
rtems_jffs2_flash_control *s_flash_control;
rtems_jffs2_compressor_control *s_compressor_control;
diff --git a/cpukit/libfs/src/jffs2/src/scan.c b/cpukit/libfs/src/jffs2/src/scan.c
index 10663feb1f..8ac4a40414 100644
--- a/cpukit/libfs/src/jffs2/src/scan.c
+++ b/cpukit/libfs/src/jffs2/src/scan.c
@@ -264,14 +264,32 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
}
#endif
if (c->nr_erasing_blocks) {
+#ifdef __rtems__
+ if (c->obsolete_size != c->dirty_size) {
+#endif
if (!c->used_size && !c->unchecked_size &&
((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
empty_blocks, bad_blocks, c->nr_blocks);
+#ifdef __rtems__
+ pr_notice("nr_erasing_blocks %d, used 0x%x, dirty 0x%x, wasted 0x%x, free 0x%x, erasing 0x%x, bad 0x%x, obsolete 0x%x, unchecked 0x%x\n",
+ c->nr_erasing_blocks,
+ c->used_size,
+ c->dirty_size,
+ c->wasted_size,
+ c->free_size,
+ c->erasing_size,
+ c->bad_size,
+ c->obsolete_size,
+ c->unchecked_size);
+#endif
ret = -EIO;
goto out;
}
+#ifdef __rtems__
+ }
+#endif
spin_lock(&c->erase_completion_lock);
jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
@@ -646,6 +664,9 @@ scan_more:
sizeof(struct jffs2_unknown_node),
jeb->offset, c->sector_size, ofs,
sizeof(*node));
+#ifdef __rtems__
+ c->obsolete_size += (jeb->offset + c->sector_size - ofs);
+#endif
if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
return err;
break;
@@ -796,6 +817,9 @@ scan_more:
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
return err;
ofs += PAD(je32_to_cpu(node->totlen));
+#ifdef __rtems__
+ c->obsolete_size += PAD(je32_to_cpu(node->totlen));
+#endif
continue;
}