author    Chris Johns <chrisj@rtems.org>  2008-07-29 02:21:15 +0000
committer Chris Johns <chrisj@rtems.org>  2008-07-29 02:21:15 +0000
commit    3899a5379f4bfa067e73b4612a547a308a6634ec (patch)
tree      f837533ace01e934da6089cdca360822d85cc662 /cpukit/libblock
parent    2008-07-27 Ralf Corsépius <ralf.corsepius@rtems.org> (diff)
2008-07-29 Chris Johns <chrisj@rtems.org>
  * libblock/Makefile.am: Removed src/show_bdbuf.c.
  * libblock/src/show_bdbuf.c: Removed.
  * libblock/include/rtems/bdbuf.h, cpukit/libblock/src/bdbuf.c: Rewritten the bdbuf code. Removed the pre-emption disable and score access, fixed many bugs and increased performance.
  * libblock/include/rtems/blkdev.h: Added the RTEMS_BLKDEV_CAPABILITIES block device request. Cleaned up comments. Added block and user fields to the sg buffer request. Move to the rtems_* namespace.
  * libblock/include/rtems/diskdevs.h, cpukit/libblock/src/diskdevs.c: Move to the rtems_* namespace. Add a capabilities field for drivers. Change rtems_disk_lookup to rtems_disk_obtain to match the release call. You do not lookup and release a disk, you obtain and release a disk.
  * libblock/include/rtems/ide_part_table.h, libblock/include/rtems/ramdisk.h, libblock/src/ide_part_table.c: Move to the rtems_* namespace.
  * libblock/include/rtems/nvdisk.h: Formatting change.
  * libblock/src/blkdev.c: Move to the rtems_* namespace. Change rtems_disk_lookup to rtems_disk_obtain.
  * libblock/src/flashdisk.c: Move to the rtems_* namespace. Use the new support for the block number in the scatter/gather request struct. This allows non-contiguous buffer requests for those drivers that can support them, increasing performance.
  * libblock/src/nvdisk.c: Move to the rtems_* namespace. Removed warnings. Added better error checking. Fixed some comments.
  * libblock/src/ramdisk.c: Move to the rtems_* namespace. Added some trace functions to help debug the upper layers. Use the new support for the block number in the scatter/gather request struct. This allows non-contiguous buffer requests for those drivers that can support them, increasing performance.
  * libfs/src/dosfs/fat.c, libfs/src/dosfs/fat.h: Use the new chains API. Removed a temporary hack and changed set_errno_and_return_minus_one to rtems_set_errno_and_return_minus_one. Moved fat_buf_access out of the header and stopped it being inlined. Updated to the libblock changes.
  * libfs/src/dosfs/fat_fat_operations.c, libfs/src/dosfs/fat_file.c, libfs/src/dosfs/msdos_create.c, libfs/src/dosfs/msdos_dir.c, libfs/src/dosfs/msdos_eval.c, libfs/src/dosfs/msdos_file.c, libfs/src/dosfs/msdos_format.c, libfs/src/dosfs/msdos_free.c, libfs/src/dosfs/msdos_initsupp.c, libfs/src/dosfs/msdos_misc.c, libfs/src/dosfs/msdos_mknod.c: Use the new chains API. Removed a temporary hack and changed set_errno_and_return_minus_one to rtems_set_errno_and_return_minus_one. Updated to the libblock changes.
  * libmisc/Makefile.am: Added the new ls and rm command files.
  * libmisc/shell/cmp-ls.c, libmisc/shell/extern-ls.h, libmisc/shell/filemode.c, libmisc/shell/print-ls.c, libmisc/shell/pwcache.c, libmisc/shell/utils-ls.c, libmisc/shell/vis.c, shell/vis.h: New.
  * libmisc/shell/extern-cp.h, libmisc/shell/main_cp.c, libmisc/shell/utils-cp.c: Fixed the usage call bug.
  * libmisc/shell/main_blksync.c: Updated to the new block IO ioctl command.
  * libmisc/shell/main_ls.c, libmisc/shell/main_rm.c: Updated to the BSD commands with more features.
  * score/src/coremutex.c: Fix the strict order mutex code.
  * libmisc/shell/shell.c: Change the shell task's mode to be timeslice and no ASR.
  * sapi/include/confdefs.h: Change ata_driver_task_priority to rtems_ata_driver_task_priority. Add the new BD buf cache parameters with defaults.
  * score/src/interr.c: Do not return if the CPU halt call returns.
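The rtems_disk_lookup to rtems_disk_obtain rename is the user visible change most callers will notice. A minimal sketch of the new obtain/release pairing, based only on the declarations in the diskdevs.h diff below; the helper name and the printf output are illustrative and not part of the commit:

    #include <stdio.h>
    #include <rtems.h>
    #include <rtems/diskdevs.h>

    /* Hypothetical helper: report the geometry of a registered disk. */
    static rtems_status_code
    example_show_disk (dev_t dev)
    {
      /* Obtain (not "lookup") the disk; this bumps the use counter so the
         disk cannot be deleted while the descriptor is held. */
      rtems_disk_device *dd = rtems_disk_obtain (dev);

      if (dd == NULL)
        return RTEMS_INVALID_NUMBER;

      printf ("disk %s: block size %d, %d blocks\n",
              dd->name, dd->block_size, dd->size);

      /* Every obtain must be balanced by a release. */
      return rtems_disk_release (dd);
    }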
Diffstat (limited to 'cpukit/libblock')
-rw-r--r--  cpukit/libblock/Makefile.am                        2
-rw-r--r--  cpukit/libblock/include/rtems/bdbuf.h            250
-rw-r--r--  cpukit/libblock/include/rtems/blkdev.h           113
-rw-r--r--  cpukit/libblock/include/rtems/diskdevs.h          71
-rw-r--r--  cpukit/libblock/include/rtems/ide_part_table.h    33
-rw-r--r--  cpukit/libblock/include/rtems/nvdisk.h             2
-rw-r--r--  cpukit/libblock/include/rtems/ramdisk.h          112
-rw-r--r--  cpukit/libblock/src/bdbuf.c                     3567
-rw-r--r--  cpukit/libblock/src/blkdev.c                      34
-rw-r--r--  cpukit/libblock/src/diskdevs.c                    77
-rw-r--r--  cpukit/libblock/src/flashdisk.c                   34
-rw-r--r--  cpukit/libblock/src/ide_part_table.c              64
-rw-r--r--  cpukit/libblock/src/nvdisk.c                     122
-rw-r--r--  cpukit/libblock/src/ramdisk.c                     72
14 files changed, 2601 insertions, 1952 deletions
diff --git a/cpukit/libblock/Makefile.am b/cpukit/libblock/Makefile.am
index f48a5d977c..6da4ead942 100644
--- a/cpukit/libblock/Makefile.am
+++ b/cpukit/libblock/Makefile.am
@@ -8,7 +8,7 @@ include $(top_srcdir)/automake/compile.am
if !UNIX
noinst_LIBRARIES = libblock.a
libblock_a_SOURCES = src/bdbuf.c src/blkdev.c src/diskdevs.c src/flashdisk.c \
- src/ramdisk.c src/ide_part_table.c src/show_bdbuf.c src/nvdisk.c \
+ src/ramdisk.c src/ide_part_table.c src/nvdisk.c \
src/nvdisk-sram.c \
include/rtems/bdbuf.h include/rtems/blkdev.h \
include/rtems/diskdevs.h include/rtems/flashdisk.h \
diff --git a/cpukit/libblock/include/rtems/bdbuf.h b/cpukit/libblock/include/rtems/bdbuf.h
index f4308963dd..991da1fdaa 100644
--- a/cpukit/libblock/include/rtems/bdbuf.h
+++ b/cpukit/libblock/include/rtems/bdbuf.h
@@ -25,116 +25,143 @@ extern "C" {
#include "rtems/blkdev.h"
#include "rtems/diskdevs.h"
-
-/*
- * To manage buffers we using Buffer Descriptors.
- * To speed-up buffer lookup descriptors are organized in AVL-Tree.
- * The fields 'dev' and 'block' are search key.
+/**
+ * State of a buffer in the cache.
*/
+typedef enum
+{
+ RTEMS_BDBUF_STATE_EMPTY = 0, /* Not in use. */
+ RTEMS_BDBUF_STATE_READ_AHEAD = 1, /* Holds read ahead data only */
+ RTEMS_BDBUF_STATE_CACHED = 2, /* In the cache and available */
+ RTEMS_BDBUF_STATE_ACCESS = 3, /* The user has the buffer */
+ RTEMS_BDBUF_STATE_MODIFIED = 4, /* In the cache but modified */
+ RTEMS_BDBUF_STATE_ACCESS_MODIFIED = 5, /* With the user but modified */
+ RTEMS_BDBUF_STATE_SYNC = 6, /* Requested to be sync'ed */
+ RTEMS_BDBUF_STATE_TRANSFER = 7 /* Being transferred to or from disk */
+} rtems_bdbuf_buf_state;
-/* Buffer descriptors
- * Descriptors organized in AVL-tree to speedup buffer lookup.
- * dev and block fields are search key in AVL-tree.
- * Modified buffers, free buffers and used buffers linked in 'mod', 'free' and
- * 'lru' chains appropriately.
+/**
+ * To manage buffers we use buffer descriptors (BD). A BD holds a buffer plus
+ * a range of other information related to managing the buffer in the cache. To
+ * speed up buffer lookup the descriptors are organised in an AVL tree. The
+ * fields 'dev' and 'block' are the search keys.
*/
+typedef struct rtems_bdbuf_buffer
+{
+ rtems_chain_node link; /* Link in the BD onto a number of lists. */
-typedef struct bdbuf_buffer {
- Chain_Node link; /* Link in the lru, mod or free chains */
-
- struct bdbuf_avl_node {
- signed char cache; /* Cache */
+ struct rtems_bdbuf_avl_node
+ {
+ signed char cache; /* Cache */
+ struct rtems_bdbuf_buffer* left; /* Left Child */
+ struct rtems_bdbuf_buffer* right; /* Right Child */
+ signed char bal; /* The balance of the sub-tree */
+ } avl;
- struct bdbuf_buffer* left; /* Left Child */
- struct bdbuf_buffer* right; /* Right Child */
+ dev_t dev; /* device number */
+ rtems_blkdev_bnum block; /* block number on the device */
- signed char bal; /* The balance of the sub-tree */
- } avl;
+ unsigned char* buffer; /* Pointer to the buffer memory area */
+ int error; /* If not 0 indicate an error value (errno)
+ * which can be used by user later */
- dev_t dev; /* device number */
- blkdev_bnum block; /* block number on the device */
+ volatile rtems_bdbuf_buf_state state; /* State of the buffer. */
- unsigned char *buffer; /* Pointer to the buffer memory area */
- rtems_status_code status; /* Last I/O operation completion status */
- int error; /* If status != RTEMS_SUCCESSFUL, this field contains
- errno value which can be used by user later */
- boolean modified:1; /* =1 if buffer was modified */
- boolean in_progress:1; /* =1 if exchange with disk is in progress;
- need to wait on semaphore */
- boolean actual:1; /* Buffer contains actual data */
- int use_count; /* Usage counter; incremented when somebody use
- this buffer; decremented when buffer released
- without modification or when buffer is flushed
- by swapout task */
+ volatile uint32_t waiters; /* The number of threads waiting on this
+ * buffer. */
+ rtems_bdpool_id pool; /* Identifier of buffer pool to which this buffer
+ belongs */
- rtems_bdpool_id pool; /* Identifier of buffer pool to which this buffer
- belongs */
- CORE_mutex_Control transfer_sema;
- /* Transfer operation semaphore */
-} bdbuf_buffer;
+ volatile uint32_t hold_timer; /* Timer to indicate how long a buffer
+ * has been held in the cache modified. */
+} rtems_bdbuf_buffer;
+/**
+ * Groups of blocks with the same size are collected in a pool. Note that
+ * several buffer groups with the same size can exist.
+ */
+typedef struct rtems_bdbuf_pool
+{
+ int blksize; /* The size of the blocks (in bytes) */
+ int nblks; /* Number of blocks in this pool */
+
+ uint32_t flags; /* Configuration flags */
+
+ rtems_id lock; /* The pool lock. Lock this data and
+ * all BDs. */
+ rtems_id sync_lock; /* Sync calls lock writes. */
+ boolean sync_active; /* True if a sync is active. */
+ rtems_id sync_requester; /* The sync requester. */
+ dev_t sync_device; /* The device to sync */
+
+ rtems_bdbuf_buffer* tree; /* Buffer descriptor lookup AVL tree
+ * root */
+ rtems_chain_control ready; /* Free buffers list (or read-ahead) */
+ rtems_chain_control lru; /* Last recently used list */
+ rtems_chain_control modified; /* Modified buffers list */
+ rtems_chain_control sync; /* Buffers to sync list */
+
+ rtems_id access; /* Obtain if waiting for a buffer in the
+ * ACCESS state. */
+ volatile uint32_t access_waiters; /* Count of access blockers. */
+ rtems_id transfer; /* Obtain if waiting for a buffer in the
+ * TRANSFER state. */
+ volatile uint32_t transfer_waiters; /* Count of transfer blockers. */
+ rtems_id waiting; /* Obtain if waiting for a buffer and
+ * none are available. */
+ volatile uint32_t wait_waiters; /* Count of waiting blockers. */
+
+ rtems_bdbuf_buffer* bds; /* Pointer to table of buffer descriptors
+ * allocated for this buffer pool. */
+ void* buffers; /* The buffer's memory. */
+} rtems_bdbuf_pool;
-/*
- * the following data structures are internal to the bdbuf layer,
- * but it is convenient to have them visible from the outside for inspection
+/**
+ * Configuration structure describes block configuration (size, amount, memory
+ * location) for buffering layer pool.
*/
-/*
- * The groups of the blocks with the same size are collected in the
- * bd_pool. Note that a several of the buffer's groups with the
- * same size can exists.
+typedef struct rtems_bdbuf_pool_config {
+ int size; /* Size of block */
+ int num; /* Number of blocks of appropriate size */
+ unsigned char* mem_area; /* Pointer to the blocks location or NULL, in this
+ * case memory for blocks will be allocated by
+ * Buffering Layer with the help of RTEMS partition
+ * manager */
+} rtems_bdbuf_pool_config;
+
+/**
+ * External references provided by the user for each pool in the system.
*/
-typedef struct bdbuf_pool
-{
- bdbuf_buffer *tree; /* Buffer descriptor lookup AVL tree root */
-
- Chain_Control free; /* Free buffers list */
- Chain_Control lru; /* Last recently used list */
-
- int blksize; /* The size of the blocks (in bytes) */
- int nblks; /* Number of blocks in this pool */
- rtems_id bufget_sema; /* Buffer obtain counting semaphore */
- void *mallocd_bufs; /* Pointer to the malloc'd buffer memory,
- or NULL, if buffer memory provided in
- buffer configuration */
- bdbuf_buffer *bdbufs; /* Pointer to table of buffer descriptors
- allocated for this buffer pool. */
-} bdbuf_pool;
-
-/* Buffering layer context definition */
-struct bdbuf_context {
- bdbuf_pool *pool; /* Table of buffer pools */
- int npools; /* Number of entries in pool table */
-
- Chain_Control mod; /* Modified buffers list */
- rtems_id flush_sema; /* Buffer flush semaphore; counting
- semaphore; incremented when buffer
- flushed to the disk; decremented when
- buffer modified */
- rtems_id swapout_task; /* Swapout task ID */
-};
- /*
- * the context of the buffering layer, visible for inspection
- */
-extern struct bdbuf_context rtems_bdbuf_ctx;
-
-/* bdbuf_config structure describes block configuration (size,
- * amount, memory location) for buffering layer
+extern rtems_bdbuf_pool_config rtems_bdbuf_pool_configuration[];
+extern int rtems_bdbuf_pool_configuration_size;
+
+/**
+ * Buffering configuration definition. See confdefs.h for support on using this
+ * structure.
*/
typedef struct rtems_bdbuf_config {
- int size; /* Size of block */
- int num; /* Number of blocks of appropriate size */
- unsigned char *mem_area;
- /* Pointer to the blocks location or NULL, in this
- case memory for blocks will be allocated by
- Buffering Layer with the help of RTEMS partition
- manager */
+ int max_read_ahead_blocks; /**< Number of blocks to read ahead. */
+ int max_write_blocks; /**< Number of blocks to write at once. */
+ rtems_task_priority swapout_priority; /**< Priority of the swap out task. */
+ uint32_t swapout_period; /**< Period at which swapout checks buffer timers. */
+ uint32_t swap_block_hold; /**< Period a modified buffer is held. */
} rtems_bdbuf_config;
-extern rtems_bdbuf_config rtems_bdbuf_configuration[];
-extern int rtems_bdbuf_configuration_size;
+/**
+ * External reference to the configuration. The configuration is provided by
+ * the user.
+ */
+extern rtems_bdbuf_config rtems_bdbuf_configuration;
-#define SWAPOUT_TASK_DEFAULT_PRIORITY 15
-extern rtems_task_priority swapout_task_priority;
+/**
+ * The max_read_ahead_blocks value is altered if there are fewer buffers
+ * than this defined max. This stops thrashing in the cache.
+ */
+#define RTEMS_BDBUF_MAX_READ_AHEAD_BLOCKS_DEFAULT 32
+#define RTEMS_BDBUF_MAX_WRITE_BLOCKS_DEFAULT 16
+#define RTEMS_BDBUF_SWAPOUT_TASK_PRIORITY_DEFAULT 15
+#define RTEMS_BDBUF_SWAPOUT_TASK_SWAP_PERIOD_DEFAULT 250 /* milli-seconds */
+#define RTEMS_BDBUF_SWAPOUT_TASK_BLOCK_HOLD_DEFAULT 1000 /* milli-seconds */
/* rtems_bdbuf_init --
* Prepare buffering layer to work - initialize buffer descritors
@@ -143,17 +170,12 @@ extern rtems_task_priority swapout_task_priority;
* amount requested. After initialization all blocks is placed into
* free elements lists.
*
- * PARAMETERS:
- * conf_table - pointer to the buffers configuration table
- * size - number of entries in configuration table
- *
* RETURNS:
* RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
* or error code if error is occured)
*/
rtems_status_code
-rtems_bdbuf_init(rtems_bdbuf_config *conf_table, int size);
-
+rtems_bdbuf_init ();
/* rtems_bdbuf_get --
* Obtain block buffer. If specified block already cached (i.e. there's
@@ -177,8 +199,8 @@ rtems_bdbuf_init(rtems_bdbuf_config *conf_table, int size);
* SIDE EFFECTS:
* bufget_sema semaphore obtained by this primitive.
*/
-rtems_status_code
-rtems_bdbuf_get(dev_t device, blkdev_bnum block, bdbuf_buffer **bdb_ptr);
+ rtems_status_code
+ rtems_bdbuf_get(dev_t device, rtems_blkdev_bnum block, rtems_bdbuf_buffer** bd);
/* rtems_bdbuf_read --
* (Similar to the rtems_bdbuf_get, except reading data from media)
@@ -201,8 +223,8 @@ rtems_bdbuf_get(dev_t device, blkdev_bnum block, bdbuf_buffer **bdb_ptr);
* SIDE EFFECTS:
* bufget_sema and transfer_sema semaphores obtained by this primitive.
*/
-rtems_status_code
-rtems_bdbuf_read(dev_t device, blkdev_bnum block, bdbuf_buffer **bdb_ptr);
+ rtems_status_code
+ rtems_bdbuf_read(dev_t device, rtems_blkdev_bnum block, rtems_bdbuf_buffer** bd);
/* rtems_bdbuf_release --
* Release buffer allocated before. This primitive decrease the
@@ -223,8 +245,8 @@ rtems_bdbuf_read(dev_t device, blkdev_bnum block, bdbuf_buffer **bdb_ptr);
* SIDE EFFECTS:
* flush_sema and bufget_sema semaphores may be released by this primitive.
*/
-rtems_status_code
-rtems_bdbuf_release(bdbuf_buffer *bd_buf);
+ rtems_status_code
+ rtems_bdbuf_release(rtems_bdbuf_buffer* bd);
/* rtems_bdbuf_release_modified --
* Release buffer allocated before, assuming that it is _modified_ by
@@ -244,8 +266,8 @@ rtems_bdbuf_release(bdbuf_buffer *bd_buf);
* SIDE EFFECTS:
* flush_sema semaphore may be released by this primitive.
*/
-rtems_status_code
-rtems_bdbuf_release_modified(bdbuf_buffer *bd_buf);
+ rtems_status_code
+ rtems_bdbuf_release_modified(rtems_bdbuf_buffer* bd);
/* rtems_bdbuf_sync --
* Wait until specified buffer synchronized with disk. Invoked on exchanges
@@ -265,8 +287,8 @@ rtems_bdbuf_release_modified(bdbuf_buffer *bd_buf);
* SIDE EFFECTS:
* Primitive may be blocked on transfer_sema semaphore.
*/
-rtems_status_code
-rtems_bdbuf_sync(bdbuf_buffer *bd_buf);
+ rtems_status_code
+ rtems_bdbuf_sync(rtems_bdbuf_buffer* bd);
/* rtems_bdbuf_syncdev --
* Synchronize with disk all buffers containing the blocks belonging to
@@ -279,8 +301,8 @@ rtems_bdbuf_sync(bdbuf_buffer *bd_buf);
* RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
* or error code if error is occured)
*/
-rtems_status_code
-rtems_bdbuf_syncdev(dev_t dev);
+ rtems_status_code
+ rtems_bdbuf_syncdev(dev_t dev);
/* rtems_bdbuf_find_pool --
* Find first appropriate buffer pool. This primitive returns the index
@@ -297,8 +319,8 @@ rtems_bdbuf_syncdev(dev_t dev);
* of 2), RTEMS_NOT_DEFINED if buffer pool for this or greater block size
* is not configured.
*/
-rtems_status_code
-rtems_bdbuf_find_pool(int block_size, rtems_bdpool_id *pool);
+ rtems_status_code
+ rtems_bdbuf_find_pool(int block_size, rtems_bdpool_id *pool);
/* rtems_bdbuf_get_pool_info --
* Obtain characteristics of buffer pool with specified number.
@@ -316,8 +338,8 @@ rtems_bdbuf_find_pool(int block_size, rtems_bdpool_id *pool);
* NOTE:
* Buffer pools enumerated contiguously starting from 0.
*/
-rtems_status_code
-rtems_bdbuf_get_pool_info(rtems_bdpool_id pool, int *block_size, int *blocks);
+ rtems_status_code
+ rtems_bdbuf_get_pool_info(rtems_bdpool_id pool, int *block_size, int *blocks);
#ifdef __cplusplus
}
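Because rtems_bdbuf_init() now takes no arguments, the buffer pools and cache tuning are supplied through the externals declared in this header. A minimal sketch of what an application (or confdefs.h) might provide; the pool sizes and counts are arbitrary placeholders:

    #include <rtems/bdbuf.h>

    /* Two pools: 512 and 1024 byte blocks. With mem_area set to NULL the
       buffer memory is allocated by the buffering layer itself. */
    rtems_bdbuf_pool_config rtems_bdbuf_pool_configuration[] = {
      { 512,  64, NULL },
      { 1024, 32, NULL }
    };

    int rtems_bdbuf_pool_configuration_size =
      sizeof (rtems_bdbuf_pool_configuration) /
      sizeof (rtems_bdbuf_pool_configuration[0]);

    /* Cache tuning, here simply using the documented defaults. */
    rtems_bdbuf_config rtems_bdbuf_configuration = {
      RTEMS_BDBUF_MAX_READ_AHEAD_BLOCKS_DEFAULT,
      RTEMS_BDBUF_MAX_WRITE_BLOCKS_DEFAULT,
      RTEMS_BDBUF_SWAPOUT_TASK_PRIORITY_DEFAULT,
      RTEMS_BDBUF_SWAPOUT_TASK_SWAP_PERIOD_DEFAULT,
      RTEMS_BDBUF_SWAPOUT_TASK_BLOCK_HOLD_DEFAULT
    };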
diff --git a/cpukit/libblock/include/rtems/blkdev.h b/cpukit/libblock/include/rtems/blkdev.h
index fadbdce9c7..03b639c73c 100644
--- a/cpukit/libblock/include/rtems/blkdev.h
+++ b/cpukit/libblock/include/rtems/blkdev.h
@@ -20,67 +20,92 @@
extern "C" {
#endif
-/* Interface with device drivers
- * Block device looks, initialized and behaves like traditional RTEMS device
- * driver. Heart of the block device driver is in BIOREQUEST ioctl. This call
- * puts I/O request to the block device queue, in priority order, for
- * asynchronous processing. When driver executes request, req_done
- * function invoked, so callee knows about it. Look for details below.
+/*
+ * Interface with device drivers. A block device looks, initialises and behaves
+ * like a traditional RTEMS device driver. The heart of the block device driver
+ * is the BIOREQUEST ioctl. This call puts an I/O request onto the block device
+ * queue, in priority order, for asynchronous processing. When the driver
+ * completes a request the req_done function is invoked so the caller knows
+ * about it. See the details below.
*/
-
-/* Block device block number datatype */
-typedef uint32_t blkdev_bnum;
+/*
+ * Block device block number datatype
+ */
+typedef uint32_t rtems_blkdev_bnum;
/* Block device request type */
-typedef enum blkdev_request_op {
- BLKDEV_REQ_READ, /* Read operation */
- BLKDEV_REQ_WRITE /* Write operation */
-} blkdev_request_op;
+typedef enum rtems_blkdev_request_op {
+ RTEMS_BLKDEV_REQ_READ, /* Read operation */
+ RTEMS_BLKDEV_REQ_WRITE, /* Write operation */
+ RTEMS_BLKDEV_CAPABILITIES /* Capabilities request */
+} rtems_blkdev_request_op;
+
+/**
+ * Only ATA multi-sector buffer requests are supported. This option
+ * means the cache will only supply multiple buffers that are
+ * in order so the ATA multi-sector command can be used. This is a
+ * hack to work around the current ATA driver.
+ */
+#define RTEMS_BLKDEV_CAP_MULTISECTOR_CONT (1 << 0)
-/* Type for block device request done callback function.
+/*
+ * @typedef rtems_blkdev_request_cb
*
- * PARAMETERS:
- * arg - argument supplied in blkdev_request
- * status - rtems status code for this operation
- * errno - errno value to be passed to the user when
+ * Type for block device request done callback function.
+ *
+ * @param arg Argument supplied in blkdev_request
+ * @param status RTEMS status code for this operation
+ * @param errno errno value to be passed to the user when
* status != RTEMS_SUCCESSFUL
*/
-typedef void (* blkdev_request_cb)(void *arg,
- rtems_status_code status,
- int error);
+typedef void (* rtems_blkdev_request_cb)(void *arg,
+ rtems_status_code status,
+ int error);
-/* blkdev_sg_buffer
+/**
+ * @struct rtems_blkdev_sg_buffer
* Block device scatter/gather buffer structure
*/
-typedef struct blkdev_sg_buffer {
+typedef struct rtems_blkdev_sg_buffer {
+ uint32_t block; /* The block number */
uint32_t length; /* Buffer length */
- void *buffer; /* Buffer pointer */
-} blkdev_sg_buffer;
+ void *buffer; /* Buffer pointer */
+ void *user; /* User pointer */
+} rtems_blkdev_sg_buffer;
/* blkdev_request (Block Device Request) structure is
* used to read/write a number of blocks from/to device.
*/
-typedef struct blkdev_request {
- blkdev_request_op req; /* Block device operation (read or write) */
- blkdev_request_cb req_done; /* Callback function */
- void *done_arg; /* Argument to be passed to callback function*/
- rtems_status_code status; /* Last I/O operation completion status */
- int error; /* If status != RTEMS_SUCCESSFUL, this field
- * contains error code
- */
- blkdev_bnum start; /* Start block number */
- uint32_t count; /* Number of blocks to be exchanged */
- uint32_t bufnum; /* Number of buffers provided */
-
- blkdev_sg_buffer bufs[0];/* List of scatter/gather buffers */
-} blkdev_request;
+typedef struct rtems_blkdev_request {
+ /* Block device operation (read or write) */
+ rtems_blkdev_request_op req;
+ /* Callback function */
+ rtems_blkdev_request_cb req_done;
+ /* Argument to be passed to callback function*/
+ void *done_arg;
+ /* Last I/O operation completion status */
+ rtems_status_code status;
+ /* If status != RTEMS_SUCCESSFUL, this field contains error code */
+ int error;
+ /* Start block number */
+ rtems_blkdev_bnum start;
+ /* Number of blocks to be exchanged */
+ uint32_t count;
+ /* Number of buffers provided */
+ uint32_t bufnum;
+
+ /* The task requesting the IO operation. */
+ rtems_id io_task;
+
+ /* List of scatter/gather buffers */
+ rtems_blkdev_sg_buffer bufs[0];
+} rtems_blkdev_request;
/* Block device IOCTL request codes */
-#define BLKIO_REQUEST _IOWR('B', 1, blkdev_request)
-#define BLKIO_GETBLKSIZE _IO('B', 2)
-#define BLKIO_GETSIZE _IO('B', 3)
-#define BLKIO_SYNCDEV _IO('B', 4)
+#define RTEMS_BLKIO_REQUEST _IOWR('B', 1, rtems_blkdev_request)
+#define RTEMS_BLKIO_GETBLKSIZE _IO('B', 2)
+#define RTEMS_BLKIO_GETSIZE _IO('B', 3)
+#define RTEMS_BLKIO_SYNCDEV _IO('B', 4)
/* Device driver interface conventions suppose that driver may
* contain initialize/open/close/read/write/ioctl entry points. These
@@ -90,7 +115,7 @@ typedef struct blkdev_request {
* all block devices and appropriate ioctl handlers.
*/
-#define GENERIC_BLOCK_DEVICE_DRIVER_ENTRIES \
+#define RTEMS_GENERIC_BLOCK_DEVICE_DRIVER_ENTRIES \
rtems_blkdev_generic_open, rtems_blkdev_generic_close, \
rtems_blkdev_generic_read, rtems_blkdev_generic_write, \
rtems_blkdev_generic_ioctl
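The new block field in every scatter/gather buffer is what allows the cache to hand a driver blocks that are not contiguous on the device. A hedged sketch of building such a request and passing it to a disk's ioctl handler; example_write_two_blocks and example_done are hypothetical names, and the wait for the completion callback is deliberately left out:

    #include <rtems.h>
    #include <rtems/blkdev.h>
    #include <rtems/diskdevs.h>

    /* A request with room for two scatter/gather buffers. */
    typedef struct
    {
      rtems_blkdev_request   req;
      rtems_blkdev_sg_buffer sg[2];
    } example_blkdev_request2;

    static void
    example_done (void *arg, rtems_status_code status, int error)
    {
      /* A real caller would record the status here and wake the task
         named in io_task. */
      (void) arg; (void) status; (void) error;
    }

    static int
    example_write_two_blocks (rtems_disk_device *dd,
                              rtems_blkdev_bnum  first,
                              rtems_blkdev_bnum  second,
                              void              *buf0,
                              void              *buf1)
    {
      example_blkdev_request2 xfer;

      xfer.req.req      = RTEMS_BLKDEV_REQ_WRITE;
      xfer.req.req_done = example_done;
      xfer.req.done_arg = NULL;
      xfer.req.start    = first;
      xfer.req.count    = 2;
      xfer.req.bufnum   = 2;
      xfer.req.io_task  = rtems_task_self ();

      /* Each buffer carries its own block number, so the two blocks do
         not have to be contiguous on the device. */
      xfer.req.bufs[0].block  = first;
      xfer.req.bufs[0].length = dd->block_size;
      xfer.req.bufs[0].buffer = buf0;
      xfer.req.bufs[1].block  = second;
      xfer.req.bufs[1].length = dd->block_size;
      xfer.req.bufs[1].buffer = buf1;

      /* The request is queued for asynchronous processing; a real caller
         must keep 'xfer' alive and wait for example_done to run before
         returning. That synchronisation is omitted here. */
      return dd->ioctl (dd->dev, RTEMS_BLKIO_REQUEST, &xfer.req);
    }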
diff --git a/cpukit/libblock/include/rtems/diskdevs.h b/cpukit/libblock/include/rtems/diskdevs.h
index 6542d74775..3517ddede2 100644
--- a/cpukit/libblock/include/rtems/diskdevs.h
+++ b/cpukit/libblock/include/rtems/diskdevs.h
@@ -21,44 +21,47 @@ extern "C" {
#include <rtems/libio.h>
#include <stdlib.h>
-#include "rtems/blkdev.h"
-
/* Buffer pool identifier */
typedef int rtems_bdpool_id;
+#include "rtems/blkdev.h"
+
+/* Driver capabilities. */
+
/* Block device ioctl handler */
-typedef int (* block_device_ioctl) (dev_t dev, uint32_t req, void *argp);
+typedef int (* rtems_block_device_ioctl) (dev_t dev, uint32_t req, void *argp);
-/* disk_device: Entry of this type created for every disk device (both for
- * logical and physical disks).
+/* rtems_disk_device: Entry of this type created for every disk device
+ * (both for logical and physical disks).
* Array of arrays of pointers to disk_device structures maintained. First
* table indexed by major number and second table indexed by minor number.
* Such data organization allow quick lookup using data structure of
* moderated size.
*/
-typedef struct disk_device {
- dev_t dev; /* Device ID (major + minor) */
- struct disk_device *phys_dev; /* Physical device ID (the same
- as dev if this entry specifies
- the physical device) */
- char *name; /* Disk device name */
- int uses; /* Use counter. Device couldn't be
- removed if it is in use. */
- int start; /* Starting block number (0 for
- physical devices, block offset
- on the related physical device
- for logical device) */
- int size; /* Size of physical or logical disk
- in disk blocks */
- int block_size; /* Size of device block (minimum
- transfer unit) in bytes
- (must be power of 2) */
- int block_size_log2; /* log2 of block_size */
- rtems_bdpool_id pool; /* Buffer pool assigned to this
- device */
- block_device_ioctl ioctl; /* ioctl handler for this block
- device */
-} disk_device;
+typedef struct rtems_disk_device {
+ dev_t dev; /* Device ID (major + minor) */
+ struct rtems_disk_device *phys_dev; /* Physical device ID (the same
+ as dev if this entry specifies
+ the physical device) */
+ uint32_t capabilities; /* Driver capabilities. */
+ char *name; /* Disk device name */
+ int uses; /* Use counter. Device couldn't be
+ removed if it is in use. */
+ int start; /* Starting block number (0 for
+ physical devices, block offset
+ on the related physical device
+ for logical device) */
+ int size; /* Size of physical or logical disk
+ in disk blocks */
+ int block_size; /* Size of device block (minimum
+ transfer unit) in bytes
+ (must be power of 2) */
+ int block_size_log2; /* log2 of block_size */
+ rtems_bdpool_id pool; /* Buffer pool assigned to this
+ device */
+ rtems_block_device_ioctl ioctl; /* ioctl handler for this block
+ device */
+} rtems_disk_device;
/* rtems_disk_create_phys --
* Create physical disk entry. This function usually invoked from
@@ -84,7 +87,7 @@ typedef struct disk_device {
*/
rtems_status_code
rtems_disk_create_phys(dev_t dev, int block_size, int disk_size,
- block_device_ioctl handler,
+ rtems_block_device_ioctl handler,
const char *name);
/* rtems_disk_create_log --
@@ -131,7 +134,7 @@ rtems_disk_create_log(dev_t dev, dev_t phys, int start, int size, char *name);
rtems_status_code
rtems_disk_delete(dev_t dev);
-/* rtems_disk_lookup --
+/* rtems_disk_obtain --
* Find block device descriptor by its device identifier. This function
* increment usage counter to 1. User should release disk_device structure
* by invoking rtems_disk_release primitive.
@@ -143,8 +146,8 @@ rtems_disk_delete(dev_t dev);
* pointer to the block device descriptor, or NULL if no such device
* exists.
*/
-disk_device *
-rtems_disk_lookup(dev_t dev);
+rtems_disk_device *
+rtems_disk_obtain(dev_t dev);
/* rtems_disk_release --
* Release disk_device structure (decrement usage counter to 1).
@@ -159,7 +162,7 @@ rtems_disk_lookup(dev_t dev);
* It should be implemented as inline function.
*/
rtems_status_code
-rtems_disk_release(disk_device *dd);
+rtems_disk_release(rtems_disk_device *dd);
/* rtems_disk_next --
* Disk device enumerator. Looking for device having device number larger
@@ -172,7 +175,7 @@ rtems_disk_release(disk_device *dd);
* RETURNS:
* Pointer to the disk descriptor for next disk device, or NULL if all
* devices enumerated. */
-disk_device *
+rtems_disk_device *
rtems_disk_next(dev_t dev);
/* rtems_diskio_initialize --
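A short sketch of registering a physical disk with the renamed handler type; the block size, block count, device name and the EINVAL fallback are assumptions for illustration, and rtems_diskio_initialize() is assumed to have been called already:

    #include <errno.h>
    #include <rtems.h>
    #include <rtems/diskdevs.h>
    #include <rtems/blkdev.h>

    /* Hypothetical driver ioctl handler. */
    static int
    example_disk_ioctl (dev_t dev, uint32_t req, void *argp)
    {
      switch (req)
      {
        case RTEMS_BLKIO_REQUEST:
          /* Queue the rtems_blkdev_request pointed to by argp and start
             the transfer; details depend on the hardware. */
          return 0;
        default:
          errno = EINVAL;
          return -1;
      }
    }

    static rtems_status_code
    example_register_disk (dev_t dev)
    {
      /* 512 byte blocks and 1024 blocks are placeholders. */
      return rtems_disk_create_phys (dev, 512, 1024,
                                     example_disk_ioctl, "/dev/exdisk");
    }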
diff --git a/cpukit/libblock/include/rtems/ide_part_table.h b/cpukit/libblock/include/rtems/ide_part_table.h
index abbbb121c4..052f445f2f 100644
--- a/cpukit/libblock/include/rtems/ide_part_table.h
+++ b/cpukit/libblock/include/rtems/ide_part_table.h
@@ -77,16 +77,18 @@
* sector_data_t --
* corresponds to the sector on the device
*/
-typedef struct sector_data_s
+typedef struct rtems_sector_data_s
{
uint32_t sector_num; /* sector number on the device */
uint8_t data[0]; /* raw sector data */
-} sector_data_t;
+} rtems_sector_data_t;
/*
* Enum partition types
* see list at http://ata-atapi.com/hiwtab.htm
+ *
+ * @todo Should these have RTEMS before them.
*/
enum {
EMPTY_PARTITION = 0x00,
@@ -108,29 +110,32 @@ enum {
/* Forward declaration */
-struct disk_desc_s;
+struct rtems_disk_desc_s;
/*
* part_desc_t --
* contains all neccessary information about partition
*/
-typedef struct part_desc_s {
+typedef struct rtems_part_desc_s {
uint8_t bootable; /* is the partition active */
uint8_t sys_type; /* type of partition */
uint8_t log_id; /* logical number of partition */
- uint32_t start; /* first partition sector, in absolute numeration */
+ uint32_t start; /* first partition sector, in absolute
+ * numeration */
uint32_t size; /* size in sectors */
uint32_t end; /* last partition sector, end = start + size - 1 */
- struct disk_desc_s *disk_desc; /* descriptor of disk, partition contains in */
- struct part_desc_s *ext_part; /* extended partition containing this one */
+ struct rtems_disk_desc_s *disk_desc; /* descriptor of disk, partition
+ * contains in */
+ struct rtems_part_desc_s *ext_part; /* extended partition containing this
+ * one */
/* partitions, containing in this one */
- struct part_desc_s *sub_part[RTEMS_IDE_PARTITION_MAX_SUB_PARTITION_NUMBER];
-} part_desc_t;
+ struct rtems_part_desc_s *sub_part[RTEMS_IDE_PARTITION_MAX_SUB_PARTITION_NUMBER];
+} rtems_part_desc_t;
-typedef struct disk_desc_s {
+typedef struct rtems_disk_desc_s {
dev_t dev; /* device number */
/* device name in /dev filesystem */
@@ -142,8 +147,8 @@ typedef struct disk_desc_s {
int last_log_id; /* used for logical disks enumerating */
/* primary partition descriptors */
- part_desc_t *partitions[RTEMS_IDE_PARTITION_MAX_PARTITION_NUMBER];
-} disk_desc_t;
+ rtems_part_desc_t *partitions[RTEMS_IDE_PARTITION_MAX_PARTITION_NUMBER];
+} rtems_disk_desc_t;
#ifdef __cplusplus
extern "C" {
@@ -160,7 +165,7 @@ extern "C" {
* N/A
*/
void
-rtems_ide_part_table_free(disk_desc_t *disk_desc);
+rtems_ide_part_table_free(rtems_disk_desc_t *disk_desc);
/*
@@ -176,7 +181,7 @@ rtems_ide_part_table_free(disk_desc_t *disk_desc);
* RTEMS_SUCCESSFUL if success, or -1 and corresponding errno else
*/
rtems_status_code
-rtems_ide_part_table_get(const char *dev_name, disk_desc_t *disk_desc);
+rtems_ide_part_table_get(const char *dev_name, rtems_disk_desc_t *disk_desc);
/*
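A sketch of reading a partition table with the renamed types; the device name is a placeholder and it is assumed that rtems_ide_part_table_free() releases only the partition descriptors, not the caller supplied rtems_disk_desc_t itself:

    #include <stdio.h>
    #include <rtems.h>
    #include <rtems/ide_part_table.h>

    static void
    example_show_partitions (void)
    {
      rtems_disk_desc_t disk;
      int               i;

      if (rtems_ide_part_table_get ("/dev/hda", &disk) != RTEMS_SUCCESSFUL)
        return;

      for (i = 0; i < RTEMS_IDE_PARTITION_MAX_PARTITION_NUMBER; i++)
      {
        rtems_part_desc_t *part = disk.partitions[i];
        if (part != NULL)
          printf ("partition %d: start %u, size %u sectors\n", i,
                  (unsigned) part->start, (unsigned) part->size);
      }

      rtems_ide_part_table_free (&disk);
    }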
diff --git a/cpukit/libblock/include/rtems/nvdisk.h b/cpukit/libblock/include/rtems/nvdisk.h
index 5e2a0dd6af..24b62f665e 100644
--- a/cpukit/libblock/include/rtems/nvdisk.h
+++ b/cpukit/libblock/include/rtems/nvdisk.h
@@ -164,7 +164,7 @@ typedef struct rtems_nvdisk_config
const rtems_nvdisk_device_desc* devices; /**< The device descriptions. */
uint32_t flags; /**< Set of flags to control
driver. */
- uint32_t info_level; /**< Default info level. */
+ uint32_t info_level; /**< Default info level. */
} rtems_nvdisk_config;
/*
diff --git a/cpukit/libblock/include/rtems/ramdisk.h b/cpukit/libblock/include/rtems/ramdisk.h
index 64d9bad5e7..8fcc442d32 100644
--- a/cpukit/libblock/include/rtems/ramdisk.h
+++ b/cpukit/libblock/include/rtems/ramdisk.h
@@ -1,56 +1,56 @@
-/**
- * @file rtems/ramdisk.h
- * RAM disk block device implementation
- */
-
-/*
- * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia
- * Author: Victor V. Vengerov <vvv@oktet.ru>
- *
- * @(#) $Id$
- */
-
-#ifndef _RTEMS_RAMDISK_H
-#define _RTEMS_RAMDISK_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rtems.h>
-
-#include "rtems/blkdev.h"
-
-/* RAM disk configuration table entry */
-typedef struct rtems_ramdisk_config {
- int block_size; /* RAM disk block size */
- int block_num; /* Number of blocks on this RAM disk */
- void *location; /* RAM disk permanent location (out of RTEMS controlled
- memory), or NULL if RAM disk memory should be
- allocated dynamically */
-} rtems_ramdisk_config;
-
-/* If application want to use RAM disk, it should specify configuration of
- * available RAM disks.
- * The following is definitions for RAM disk configuration table
- */
-extern rtems_ramdisk_config rtems_ramdisk_configuration[];
-extern size_t rtems_ramdisk_configuration_size;
-
-/* ramdisk_initialize --
- * RAM disk driver initialization entry point.
- */
-rtems_device_driver
-ramdisk_initialize(
- rtems_device_major_number major,
- rtems_device_minor_number minor,
- void *arg);
-
-#define RAMDISK_DRIVER_TABLE_ENTRY \
- { ramdisk_initialize, GENERIC_BLOCK_DEVICE_DRIVER_ENTRIES }
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
+/**
+ * @file rtems/ramdisk.h
+ * RAM disk block device implementation
+ */
+
+/*
+ * Copyright (C) 2001 OKTET Ltd., St.-Petersburg, Russia
+ * Author: Victor V. Vengerov <vvv@oktet.ru>
+ *
+ * @(#) $Id$
+ */
+
+#ifndef _RTEMS_RAMDISK_H
+#define _RTEMS_RAMDISK_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rtems.h>
+
+#include "rtems/blkdev.h"
+
+/* RAM disk configuration table entry */
+typedef struct rtems_ramdisk_config {
+ int block_size; /* RAM disk block size */
+ int block_num; /* Number of blocks on this RAM disk */
+ void *location; /* RAM disk permanent location (out of RTEMS controlled
+ memory), or NULL if RAM disk memory should be
+ allocated dynamically */
+} rtems_ramdisk_config;
+
+/* If an application wants to use a RAM disk, it should specify the
+ * configuration of the available RAM disks.
+ * The following are the definitions for the RAM disk configuration table.
+ */
+extern rtems_ramdisk_config rtems_ramdisk_configuration[];
+extern size_t rtems_ramdisk_configuration_size;
+
+/* ramdisk_initialize --
+ * RAM disk driver initialization entry point.
+ */
+rtems_device_driver
+ramdisk_initialize(
+ rtems_device_major_number major,
+ rtems_device_minor_number minor,
+ void *arg);
+
+#define RAMDISK_DRIVER_TABLE_ENTRY \
+ { ramdisk_initialize, RTEMS_GENERIC_BLOCK_DEVICE_DRIVER_ENTRIES }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
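For completeness, a minimal sketch of the RAM disk configuration table this header expects the application to provide; the block size and count are placeholders:

    #include <rtems/ramdisk.h>

    /* One RAM disk with 512 byte blocks; with location set to NULL the
       driver allocates the memory itself. */
    rtems_ramdisk_config rtems_ramdisk_configuration[] = {
      { 512, 1024, NULL }
    };

    size_t rtems_ramdisk_configuration_size =
      sizeof (rtems_ramdisk_configuration) /
      sizeof (rtems_ramdisk_configuration[0]);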
diff --git a/cpukit/libblock/src/bdbuf.c b/cpukit/libblock/src/bdbuf.c
index 17c710a668..07669d0c9e 100644
--- a/cpukit/libblock/src/bdbuf.c
+++ b/cpukit/libblock/src/bdbuf.c
@@ -10,657 +10,912 @@
* @(#) bdbuf.c,v 1.14 2004/04/17 08:15:17 ralf Exp
*/
+/**
+ * @file
+ *
+ * The Buffer Descriptor Buffer code implements a cache between the disk
+ * devices and file systems. The code provides read ahead and write queuing
+ * to the drivers and fast cache look-up using an AVL tree.
+ *
+ * The buffers are held in pools based on size. Each pool has buffers and the
+ * buffers follow this state machine:
+ *
+ * read
+ * +-------------------------------+
+ * | v
+ * +-----------+ read complete +------------+
+ * | | write complete | |---------+
+ * | EMPTY | +---------| TRANSFER | |
+ * | | | | |<--+ |
+ * +-----------+ | +------------+ | |
+ * ^ | get v swap | |
+ * | | +-----------+ modified +------------+ |
+ * | +--->| |---------->| | |
+ * | | ACCESSED |<----------| MODIFIED | |
+ * | +----| |<--+ get | | |
+ * | | +-----------+ | +------------+ |
+ * | | | |
+ * | | release get | |
+ * | | | |
+ * | | +-----------+ | |
+ * expire | +--->| |---+ read complete |
+ * | | CACHED | write complete |
+ * +--------| |<-------------------------+
+ * +-----------+
+ *
+ * Empty buffers are added to the empty list and removed from this queue when a
+ * caller wishes to access a buffer. This is referred to as getting a buffer in
+ * the code and the event get in the state diagram. The buffer is assigned to a
+ * block and inserted into the AVL tree. If the block is to be read by the user
+ * and is not in the cache (empty) it is transferred from the disk into memory.
+ * If no empty buffers exist the buffer is expired from the cache. Currently the
+ * least recently used block is expired first. A block being accessed is given
+ * to the file system layer and is not accessible to another requester until
+ * released back to the cache. If the user has modified the block it is
+ * transferred to disk then placed on the LRU list.
+ *
+ * The pool has the following lists of buffers:
+ *
+ *  empty         - Empty buffers created when the pool is initialised.
+ *  modified      - Buffers waiting to be written to disk.
+ *  cached_lru    - Accessed buffers released in least recently used order.
+ *  cached_unused - Read ahead buffers that have not been accessed.
+ *
+ */
+
+/**
+ * Set to 1 to enable debug tracing.
+ */
+#define RTEMS_BDBUF_TRACE 0
+
#if HAVE_CONFIG_H
#include "config.h"
#endif
-#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
#include <rtems.h>
+#include <rtems/error.h>
#include <limits.h>
#include <errno.h>
#include <assert.h>
-#include "rtems/bdbuf.h"
-
-/* Fatal errors: */
-#define BLKDEV_FATAL_ERROR(n) (((uint32_t)'B' << 24) | ((uint32_t)(n) & (uint32_t)0x00FFFFFF))
-#define BLKDEV_FATAL_BDBUF_CONSISTENCY BLKDEV_FATAL_ERROR(1)
-#define BLKDEV_FATAL_BDBUF_SWAPOUT BLKDEV_FATAL_ERROR(2)
-
-
-#define SWAPOUT_TASK_STACK_SIZE (RTEMS_MINIMUM_STACK_SIZE * 2)
-
-#define READ_MULTIPLE
-
-#if defined(READ_MULTIPLE)
-#define READ_AHEAD_MAX_BLK_CNT 32
-typedef struct {
- blkdev_request req;
- blkdev_sg_buffer sg[READ_AHEAD_MAX_BLK_CNT];
-} blkdev_request_read_ahead;
-/*
- * list of bd_bufs related to one transfer request
- */
-typedef struct {
- int cnt;
- bdbuf_buffer *bd_bufs[READ_AHEAD_MAX_BLK_CNT];
-} read_ahead_bd_buf_group;
+#if RTEMS_BDBUF_TRACE
+#include <stdio.h>
#endif
-typedef struct {
- blkdev_request *req;
- bdbuf_buffer **write_store;
-} write_tfer_done_arg_t;
-
-static rtems_task bdbuf_swapout_task(rtems_task_argument unused);
+#include "rtems/bdbuf.h"
-static rtems_status_code bdbuf_release(bdbuf_buffer *bd_buf);
-/*
- * maximum number of blocks that might be chained together to one
- * write driver call
+/**
+ * The BD buffer context.
*/
-#define SWAP_OUT_MAX_BLK_CNT 32
-/*#define SWAP_OUT_MAX_BLK_CNT 1 */
-/*
- * XXX: this is a global buffer. It is used in the swapout task
- * and currently will be reused only after it is no longer in use
- *
+/* Buffering layer context definition */
+typedef struct rtems_bdbuf_context {
+ rtems_bdbuf_pool* pool; /* Table of buffer pools */
+ int npools; /* Number of entries in pool table */
+ rtems_id swapout; /* Swapout task ID */
+ boolean swapout_enabled;
+} rtems_bdbuf_context;
+
+/**
+ * Fatal errors
*/
-static bdbuf_buffer *bd_buf_write_store[SWAP_OUT_MAX_BLK_CNT];
-
-/* Block device request with a single buffer provided */
-typedef struct blkdev_request1 {
- blkdev_request req;
- blkdev_sg_buffer sg[1];
-} blkdev_request1;
-
-/* The context of buffering layer */
-struct bdbuf_context rtems_bdbuf_ctx;
-
-#define SAFE
-#ifdef SAFE
-typedef rtems_mode preemption_key;
-
-#define DISABLE_PREEMPTION(key) \
- do { \
- rtems_task_mode(RTEMS_NO_PREEMPT, RTEMS_PREEMPT_MASK, &(key)); \
- } while (0)
-
-#define ENABLE_PREEMPTION(key) \
- do { \
- rtems_mode temp; \
- rtems_task_mode((key), RTEMS_PREEMPT_MASK, &temp); \
- } while (0)
-
-#else
+#define RTEMS_BLKDEV_FATAL_ERROR(n) (((uint32_t)'B' << 24) | \
+ ((uint32_t)(n) & (uint32_t)0x00FFFFFF))
+
+#define RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY RTEMS_BLKDEV_FATAL_ERROR(1)
+#define RTEMS_BLKDEV_FATAL_BDBUF_SWAPOUT RTEMS_BLKDEV_FATAL_ERROR(2)
+#define RTEMS_BLKDEV_FATAL_BDBUF_SYNC_LOCK RTEMS_BLKDEV_FATAL_ERROR(3)
+#define RTEMS_BLKDEV_FATAL_BDBUF_SYNC_UNLOCK RTEMS_BLKDEV_FATAL_ERROR(4)
+#define RTEMS_BLKDEV_FATAL_BDBUF_POOL_LOCK RTEMS_BLKDEV_FATAL_ERROR(5)
+#define RTEMS_BLKDEV_FATAL_BDBUF_POOL_UNLOCK RTEMS_BLKDEV_FATAL_ERROR(6)
+#define RTEMS_BLKDEV_FATAL_BDBUF_POOL_WAIT RTEMS_BLKDEV_FATAL_ERROR(7)
+#define RTEMS_BLKDEV_FATAL_BDBUF_POOL_WAKE RTEMS_BLKDEV_FATAL_ERROR(8)
+#define RTEMS_BLKDEV_FATAL_BDBUF_SO_WAKE RTEMS_BLKDEV_FATAL_ERROR(9)
+#define RTEMS_BLKDEV_FATAL_BDBUF_SO_NOMEM RTEMS_BLKDEV_FATAL_ERROR(10)
+#define BLKDEV_FATAL_BDBUF_SWAPOUT_RE RTEMS_BLKDEV_FATAL_ERROR(11)
+#define BLKDEV_FATAL_BDBUF_SWAPOUT_TS RTEMS_BLKDEV_FATAL_ERROR(12)
+
+#define RTEMS_BDBUF_TRANSFER_SYNC RTEMS_EVENT_1
+#define RTEMS_BDBUF_SWAPOUT_SYNC RTEMS_EVENT_2
+
+#define SWAPOUT_TASK_STACK_SIZE (8 * 1024)
+
+/**
+ * Lock semaphore attributes. This is used for locking type mutexes.
+ */
+#define RTEMS_BDBUF_POOL_LOCK_ATTRIBS \
+ (RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE | \
+ RTEMS_INHERIT_PRIORITY | RTEMS_NO_PRIORITY_CEILING | RTEMS_LOCAL)
-typedef boolean preemption_key;
+/**
+ * Waiter semaphore attributes.
+ *
+ * @note Do not configure as inherit priority. If a driver is in the driver
+ * initialisation table this locked semaphore will have the IDLE task as
+ * the holder and a blocking task will raise the priority of the IDLE
+ *       task which can cause unusual side effects.
+ */
+#define RTEMS_BDBUF_POOL_WAITER_ATTRIBS \
+ (RTEMS_PRIORITY | RTEMS_BINARY_SEMAPHORE | \
+ RTEMS_NO_INHERIT_PRIORITY | RTEMS_NO_PRIORITY_CEILING | RTEMS_LOCAL)
-#define DISABLE_PREEMPTION(key) \
- do { \
- (key) = _Thread_Executing->is_preemptible; \
- _Thread_Executing->is_preemptible = 0; \
- } while (0)
+/*
+ * The swap out task.
+ */
+static rtems_task rtems_bdbuf_swapout_task(rtems_task_argument arg);
-#define ENABLE_PREEMPTION(key) \
- do { \
- _Thread_Executing->is_preemptible = (key); \
- if (_Thread_Evaluate_mode()) \
- _Thread_Dispatch(); \
- } while (0)
+/**
+ * The context of buffering layer.
+ */
+static rtems_bdbuf_context rtems_bdbuf_ctx;
+/**
+ * Print a message to the bdbuf trace output and flush it.
+ *
+ * @param format The format string. See printf for details.
+ * @param ... The arguments for the format text.
+ * @return int The number of bytes written to the output.
+ */
+#if RTEMS_BDBUF_TRACE
+boolean rtems_bdbuf_tracer;
+static void
+rtems_bdbuf_printf (const char *format, ...)
+{
+ va_list args;
+ va_start (args, format);
+ if (rtems_bdbuf_tracer)
+ {
+ fprintf (stdout, "bdbuf:");
+ vfprintf (stdout, format, args);
+ fflush (stdout);
+ }
+}
#endif
-
-/* The default maximum height of 32 allows for AVL trees having
- between 5,704,880 and 4,294,967,295 nodes, depending on order of
- insertion. You may change this compile-time constant as you
- wish. */
-#ifndef AVL_MAX_HEIGHT
-#define AVL_MAX_HEIGHT 32
+/**
+ * The default maximum height of 32 allows for AVL trees having between
+ * 5,704,880 and 4,294,967,295 nodes, depending on order of insertion. You may
+ * change this compile-time constant as you wish.
+ */
+#ifndef RTEMS_BDBUF_AVL_MAX_HEIGHT
+#define RTEMS_BDBUF_AVL_MAX_HEIGHT (32)
#endif
-/*
- * avl_search --
- * Searches for the node with specified dev/block.
- *
- * PARAMETERS:
- * root - pointer to the root node of the AVL-Tree.
- * dev, block - search key
+/**
+ * Searches for the node with specified dev/block.
*
- * RETURNS:
- * NULL if node with specified dev/block not found
- * non-NULL - pointer to the node with specified dev/block
+ * @param root pointer to the root node of the AVL-Tree
+ * @param dev device search key
+ * @param block block search key
+ * @retval NULL node with the specified dev/block is not found
+ * @return pointer to the node with specified dev/block
*/
-static bdbuf_buffer *
-avl_search(bdbuf_buffer **root, dev_t dev, blkdev_bnum block)
+static rtems_bdbuf_buffer *
+rtems_bdbuf_avl_search (rtems_bdbuf_buffer** root,
+ dev_t dev,
+ rtems_blkdev_bnum block)
{
- bdbuf_buffer *p = *root;
+ rtems_bdbuf_buffer* p = *root;
- while ((p != NULL) && ((p->dev != dev) || (p->block != block)))
+ while ((p != NULL) && ((p->dev != dev) || (p->block != block)))
+ {
+ if ((p->dev < dev) || ((p->dev == dev) && (p->block < block)))
{
- if ((p->dev < dev) || ((p->dev == dev) && (p->block < block)))
- {
- p = p->avl.right;
- }
- else
- {
- p = p->avl.left;
- }
+ p = p->avl.right;
+ }
+ else
+ {
+ p = p->avl.left;
}
+ }
- return p;
+ return p;
}
-
-/* avl_search_for_sync --
- * Search in AVL tree for first modified buffer belongs to specified
- * disk device.
+#if CCJ_REMOVE_IN_IN_CVS
+/**
+ * Search in the AVL tree for the first modified buffer belonging to the
+ * specified disk device.
*
- * PARAMETERS:
- * root - pointer to tree root
- * dd - disk device descriptor
- *
- * RETURNS:
- * Block buffer, or NULL if no modified blocks on specified device
- * exists.
+ * @param root pointer to the root node of the AVL-Tree
+ * @param dd - disk device descriptor
+ * @retval NULL no modified blocks on the disk device
+ * @return pointer to the modified node
*/
-static bdbuf_buffer *
-avl_search_for_sync(bdbuf_buffer **root, disk_device *dd)
+static rtems_bdbuf_buffer *
+rtems_bdbuf_avl_search_for_sync (rtems_bdbuf_buffer** root,
+ rtems_disk_device* dd)
{
- dev_t dev = dd->phys_dev->dev;
- blkdev_bnum block_start = dd->start;
- blkdev_bnum block_end = dd->start + dd->size - 1;
-
- bdbuf_buffer *buf_stack[AVL_MAX_HEIGHT];
- bdbuf_buffer **buf_prev = buf_stack;
- bdbuf_buffer *p = *root;
-
- while (p != NULL)
+ dev_t dev = dd->phys_dev->dev;
+ rtems_blkdev_bnum block_start = dd->start;
+ rtems_blkdev_bnum block_end = dd->start + dd->size - 1;
+ rtems_bdbuf_buffer* buf_stack[RTEMS_BDBUF_AVL_MAX_HEIGHT];
+ rtems_bdbuf_buffer** buf_prev = buf_stack;
+ rtems_bdbuf_buffer* p = *root;
+
+ while (p != NULL)
+ {
+ if ((p->dev < dev) || ((p->dev == dev) && (p->block < block_start)))
{
- if ((p->dev < dev) || ((p->dev == dev) && (p->block < block_start)))
- {
- p = p->avl.right;
- }
- else if ((p->dev > dev) || ((p->dev == dev) && (p->block > block_end)))
- {
- p = p->avl.left;
- }
- else if (p->modified)
- {
- return p;
- }
- else
- {
- if (p->avl.right != NULL)
- {
- *buf_prev++ = p->avl.right;
- }
- p = p->avl.left;
- }
+ p = p->avl.right;
+ }
+ else if ((p->dev > dev) || ((p->dev == dev) && (p->block > block_end)))
+ {
+ p = p->avl.left;
+ }
+ else if (p->state == RTEMS_BDBUF_STATE_MODIFIED)
+ {
+ return p;
+ }
+ else
+ {
+ if (p->avl.right != NULL)
+ {
+ *buf_prev++ = p->avl.right;
+ }
+ p = p->avl.left;
+ }
- if ((p == NULL) && (buf_prev > buf_stack))
- {
- p = *--buf_prev;
- }
+ if ((p == NULL) && (buf_prev > buf_stack))
+ {
+ p = *--buf_prev;
}
+ }
- return p;
+ return p;
}
+#endif
-
-/*
- * avl_insert --
- * Inserts the specified node to the AVl-Tree.
- *
- * PARAMETERS:
- * root - Pointer to pointer to the root node
- * node - Pointer to the node to add.
+/**
+ * Inserts the specified node to the AVl-Tree.
*
- * RETURNS:
- * 0 - The node added successfully
- * -1 - An error occured
+ * @param root pointer to the root node of the AVL-Tree
+ * @param node Pointer to the node to add.
+ * @retval 0 The node added successfully
+ * @retval -1 An error occured
*/
static int
-avl_insert(bdbuf_buffer **root, bdbuf_buffer *node)
+rtems_bdbuf_avl_insert(rtems_bdbuf_buffer** root,
+ rtems_bdbuf_buffer* node)
{
- dev_t dev = node->dev;
- blkdev_bnum block = node->block;
-
- bdbuf_buffer *p = *root;
- bdbuf_buffer *q, *p1, *p2;
- bdbuf_buffer *buf_stack[AVL_MAX_HEIGHT];
- bdbuf_buffer **buf_prev = buf_stack;
+ dev_t dev = node->dev;
+ rtems_blkdev_bnum block = node->block;
+
+ rtems_bdbuf_buffer* p = *root;
+ rtems_bdbuf_buffer* q;
+ rtems_bdbuf_buffer* p1;
+ rtems_bdbuf_buffer* p2;
+ rtems_bdbuf_buffer* buf_stack[RTEMS_BDBUF_AVL_MAX_HEIGHT];
+ rtems_bdbuf_buffer** buf_prev = buf_stack;
+
+ boolean modified = FALSE;
+
+ if (p == NULL)
+ {
+ *root = node;
+ node->avl.left = NULL;
+ node->avl.right = NULL;
+ node->avl.bal = 0;
+ return 0;
+ }
- boolean modified = FALSE;
+ while (p != NULL)
+ {
+ *buf_prev++ = p;
- if (p == NULL)
+ if ((p->dev < dev) || ((p->dev == dev) && (p->block < block)))
{
- *root = node;
- node->avl.left = NULL;
- node->avl.right = NULL;
- node->avl.bal = 0;
- return 0;
+ p->avl.cache = 1;
+ q = p->avl.right;
+ if (q == NULL)
+ {
+ q = node;
+ p->avl.right = q = node;
+ break;
+ }
}
-
- while (p != NULL)
+ else if ((p->dev != dev) || (p->block != block))
{
- *buf_prev++ = p;
-
- if ((p->dev < dev) || ((p->dev == dev) && (p->block < block)))
- {
- p->avl.cache = 1;
- q = p->avl.right;
- if (q == NULL)
- {
- q = node;
- p->avl.right = q = node;
- break;
- }
- }
- else if ((p->dev != dev) || (p->block != block))
- {
- p->avl.cache = -1;
- q = p->avl.left;
- if (q == NULL)
- {
- q = node;
- p->avl.left = q;
- break;
- }
- }
- else
- {
- return -1;
- }
-
- p = q;
+ p->avl.cache = -1;
+ q = p->avl.left;
+ if (q == NULL)
+ {
+ q = node;
+ p->avl.left = q;
+ break;
+ }
+ }
+ else
+ {
+ return -1;
}
- q->avl.left = q->avl.right = NULL;
- q->avl.bal = 0;
- modified = TRUE;
- buf_prev--;
+ p = q;
+ }
- while (modified)
- {
- if (p->avl.cache == -1)
- {
- switch (p->avl.bal)
- {
- case 1:
- p->avl.bal = 0;
- modified = FALSE;
- break;
-
- case 0:
- p->avl.bal = -1;
- break;
-
- case -1:
- p1 = p->avl.left;
- if (p1->avl.bal == -1) /* simple LL-turn */
- {
- p->avl.left = p1->avl.right;
- p1->avl.right = p;
- p->avl.bal = 0;
- p = p1;
- }
- else /* double LR-turn */
- {
- p2 = p1->avl.right;
- p1->avl.right = p2->avl.left;
- p2->avl.left = p1;
- p->avl.left = p2->avl.right;
- p2->avl.right = p;
- if (p2->avl.bal == -1) p->avl.bal = +1; else p->avl.bal = 0;
- if (p2->avl.bal == +1) p1->avl.bal = -1; else p1->avl.bal = 0;
- p = p2;
- }
- p->avl.bal = 0;
- modified = FALSE;
- break;
-
- default:
- break;
- }
- }
- else
- {
- switch (p->avl.bal)
- {
- case -1:
- p->avl.bal = 0;
- modified = FALSE;
- break;
-
- case 0:
- p->avl.bal = 1;
- break;
-
- case 1:
- p1 = p->avl.right;
- if (p1->avl.bal == 1) /* simple RR-turn */
- {
- p->avl.right = p1->avl.left;
- p1->avl.left = p;
- p->avl.bal = 0;
- p = p1;
- }
- else /* double RL-turn */
- {
- p2 = p1->avl.left;
- p1->avl.left = p2->avl.right;
- p2->avl.right = p1;
- p->avl.right = p2->avl.left;
- p2->avl.left = p;
- if (p2->avl.bal == +1) p->avl.bal = -1; else p->avl.bal = 0;
- if (p2->avl.bal == -1) p1->avl.bal = +1; else p1->avl.bal = 0;
- p = p2;
- }
- p->avl.bal = 0;
- modified = FALSE;
- break;
-
- default:
- break;
- }
- }
- q = p;
- if (buf_prev > buf_stack)
- {
- p = *--buf_prev;
+ q->avl.left = q->avl.right = NULL;
+ q->avl.bal = 0;
+ modified = TRUE;
+ buf_prev--;
- if (p->avl.cache == -1)
- {
- p->avl.left = q;
- }
- else
- {
- p->avl.right = q;
- }
- }
- else
- {
- *root = p;
- break;
- }
- };
+ while (modified)
+ {
+ if (p->avl.cache == -1)
+ {
+ switch (p->avl.bal)
+ {
+ case 1:
+ p->avl.bal = 0;
+ modified = FALSE;
+ break;
+
+ case 0:
+ p->avl.bal = -1;
+ break;
+
+ case -1:
+ p1 = p->avl.left;
+ if (p1->avl.bal == -1) /* simple LL-turn */
+ {
+ p->avl.left = p1->avl.right;
+ p1->avl.right = p;
+ p->avl.bal = 0;
+ p = p1;
+ }
+ else /* double LR-turn */
+ {
+ p2 = p1->avl.right;
+ p1->avl.right = p2->avl.left;
+ p2->avl.left = p1;
+ p->avl.left = p2->avl.right;
+ p2->avl.right = p;
+ if (p2->avl.bal == -1) p->avl.bal = +1; else p->avl.bal = 0;
+ if (p2->avl.bal == +1) p1->avl.bal = -1; else p1->avl.bal = 0;
+ p = p2;
+ }
+ p->avl.bal = 0;
+ modified = FALSE;
+ break;
+
+ default:
+ break;
+ }
+ }
+ else
+ {
+ switch (p->avl.bal)
+ {
+ case -1:
+ p->avl.bal = 0;
+ modified = FALSE;
+ break;
+
+ case 0:
+ p->avl.bal = 1;
+ break;
+
+ case 1:
+ p1 = p->avl.right;
+ if (p1->avl.bal == 1) /* simple RR-turn */
+ {
+ p->avl.right = p1->avl.left;
+ p1->avl.left = p;
+ p->avl.bal = 0;
+ p = p1;
+ }
+ else /* double RL-turn */
+ {
+ p2 = p1->avl.left;
+ p1->avl.left = p2->avl.right;
+ p2->avl.right = p1;
+ p->avl.right = p2->avl.left;
+ p2->avl.left = p;
+ if (p2->avl.bal == +1) p->avl.bal = -1; else p->avl.bal = 0;
+ if (p2->avl.bal == -1) p1->avl.bal = +1; else p1->avl.bal = 0;
+ p = p2;
+ }
+ p->avl.bal = 0;
+ modified = FALSE;
+ break;
+
+ default:
+ break;
+ }
+ }
+ q = p;
+ if (buf_prev > buf_stack)
+ {
+ p = *--buf_prev;
+
+ if (p->avl.cache == -1)
+ {
+ p->avl.left = q;
+ }
+ else
+ {
+ p->avl.right = q;
+ }
+ }
+ else
+ {
+ *root = p;
+ break;
+ }
+ };
- return 0;
+ return 0;
}
-/* avl_remove --
- * removes the node from the tree.
+/**
+ * Removes the node from the tree.
*
- * PARAMETERS:
- * root_addr - Pointer to pointer to the root node
- * node - Pointer to the node to remove
- *
- * RETURNS:
- * 0 - Item removed
- * -1 - No such item found
+ * @param root_addr Pointer to pointer to the root node
+ * @param node Pointer to the node to remove
+ * @retval 0 Item removed
+ * @retval -1 No such item found
*/
static int
-avl_remove(bdbuf_buffer **root, const bdbuf_buffer *node)
+rtems_bdbuf_avl_remove(rtems_bdbuf_buffer** root,
+ const rtems_bdbuf_buffer* node)
{
- dev_t dev = node->dev;
- blkdev_bnum block = node->block;
+ dev_t dev = node->dev;
+ rtems_blkdev_bnum block = node->block;
- bdbuf_buffer *p = *root;
- bdbuf_buffer *q, *r, *s, *p1, *p2;
- bdbuf_buffer *buf_stack[AVL_MAX_HEIGHT];
- bdbuf_buffer **buf_prev = buf_stack;
+ rtems_bdbuf_buffer* p = *root;
+ rtems_bdbuf_buffer* q;
+ rtems_bdbuf_buffer* r;
+ rtems_bdbuf_buffer* s;
+ rtems_bdbuf_buffer* p1;
+ rtems_bdbuf_buffer* p2;
+ rtems_bdbuf_buffer* buf_stack[RTEMS_BDBUF_AVL_MAX_HEIGHT];
+ rtems_bdbuf_buffer** buf_prev = buf_stack;
- boolean modified = FALSE;
+ boolean modified = FALSE;
- memset(buf_stack, 0, sizeof(buf_stack));
+ memset (buf_stack, 0, sizeof(buf_stack));
- while (p != NULL)
- {
- *buf_prev++ = p;
+ while (p != NULL)
+ {
+ *buf_prev++ = p;
- if ((p->dev < dev) || ((p->dev == dev) && (p->block < block)))
- {
- p->avl.cache = 1;
- p = p->avl.right;
- }
- else if ((p->dev != dev) || (p->block != block))
- {
- p->avl.cache = -1;
- p = p->avl.left;
- }
- else
- {
- /* node found */
- break;
- }
+ if ((p->dev < dev) || ((p->dev == dev) && (p->block < block)))
+ {
+ p->avl.cache = 1;
+ p = p->avl.right;
+ }
+ else if ((p->dev != dev) || (p->block != block))
+ {
+ p->avl.cache = -1;
+ p = p->avl.left;
}
+ else
+ {
+ /* node found */
+ break;
+ }
+ }
+
+ if (p == NULL)
+ {
+ /* there is no such node */
+ return -1;
+ }
+
+ q = p;
- if (p == NULL)
+ buf_prev--;
+ if (buf_prev > buf_stack)
+ {
+ p = *(buf_prev - 1);
+ }
+ else
+ {
+ p = NULL;
+ }
+
+ /* at this moment q - is a node to delete, p is q's parent */
+ if (q->avl.right == NULL)
+ {
+ r = q->avl.left;
+ if (r != NULL)
{
- /* there is no such node */
- return -1;
+ r->avl.bal = 0;
}
+ q = r;
+ }
+ else
+ {
+ rtems_bdbuf_buffer **t;
- q = p;
+ r = q->avl.right;
- buf_prev--;
- if (buf_prev > buf_stack)
+ if (r->avl.left == NULL)
{
- p = *(buf_prev - 1);
+ r->avl.left = q->avl.left;
+ r->avl.bal = q->avl.bal;
+ r->avl.cache = 1;
+ *buf_prev++ = q = r;
}
else
{
- p = NULL;
+ t = buf_prev++;
+ s = r;
+
+ while (s->avl.left != NULL)
+ {
+ *buf_prev++ = r = s;
+ s = r->avl.left;
+ r->avl.cache = -1;
+ }
+
+ s->avl.left = q->avl.left;
+ r->avl.left = s->avl.right;
+ s->avl.right = q->avl.right;
+ s->avl.bal = q->avl.bal;
+ s->avl.cache = 1;
+
+ *t = q = s;
}
+ }
- /* at this moment q - is a node to delete, p is q's parent */
- if (q->avl.right == NULL)
+ if (p != NULL)
+ {
+ if (p->avl.cache == -1)
{
- r = q->avl.left;
- if (r != NULL)
- {
- r->avl.bal = 0;
- }
- q = r;
+ p->avl.left = q;
}
else
{
- bdbuf_buffer **t;
-
- r = q->avl.right;
-
- if (r->avl.left == NULL)
- {
- r->avl.left = q->avl.left;
- r->avl.bal = q->avl.bal;
- r->avl.cache = 1;
- *buf_prev++ = q = r;
- }
- else
- {
- t = buf_prev++;
- s = r;
-
- while (s->avl.left != NULL)
- {
- *buf_prev++ = r = s;
- s = r->avl.left;
- r->avl.cache = -1;
- }
-
- s->avl.left = q->avl.left;
- r->avl.left = s->avl.right;
- s->avl.right = q->avl.right;
- s->avl.bal = q->avl.bal;
- s->avl.cache = 1;
-
- *t = q = s;
- }
+ p->avl.right = q;
}
+ }
+ else
+ {
+ *root = q;
+ }
+
+ modified = TRUE;
- if (p != NULL)
+ while (modified)
+ {
+ if (buf_prev > buf_stack)
{
- if (p->avl.cache == -1)
- {
- p->avl.left = q;
- }
- else
- {
- p->avl.right = q;
- }
+ p = *--buf_prev;
}
else
{
- *root = q;
+ break;
}
- modified = TRUE;
-
- while (modified)
+ if (p->avl.cache == -1)
{
- if (buf_prev > buf_stack)
- {
- p = *--buf_prev;
- }
- else
- {
- break;
- }
-
- if (p->avl.cache == -1)
- {
- /* rebalance left branch */
- switch (p->avl.bal)
+ /* rebalance left branch */
+ switch (p->avl.bal)
+ {
+ case -1:
+ p->avl.bal = 0;
+ break;
+ case 0:
+ p->avl.bal = 1;
+ modified = FALSE;
+ break;
+
+ case +1:
+ p1 = p->avl.right;
+
+ if (p1->avl.bal >= 0) /* simple RR-turn */
+ {
+ p->avl.right = p1->avl.left;
+ p1->avl.left = p;
+
+ if (p1->avl.bal == 0)
{
- case -1:
- p->avl.bal = 0;
- break;
- case 0:
- p->avl.bal = 1;
- modified = FALSE;
- break;
-
- case +1:
- p1 = p->avl.right;
-
- if (p1->avl.bal >= 0) /* simple RR-turn */
- {
- p->avl.right = p1->avl.left;
- p1->avl.left = p;
-
- if (p1->avl.bal == 0)
- {
- p1->avl.bal = -1;
- modified = FALSE;
- }
- else
- {
- p->avl.bal = 0;
- p1->avl.bal = 0;
- }
- p = p1;
- }
- else /* double RL-turn */
- {
- p2 = p1->avl.left;
-
- p1->avl.left = p2->avl.right;
- p2->avl.right = p1;
- p->avl.right = p2->avl.left;
- p2->avl.left = p;
-
- if (p2->avl.bal == +1) p->avl.bal = -1; else p->avl.bal = 0;
- if (p2->avl.bal == -1) p1->avl.bal = 1; else p1->avl.bal = 0;
-
- p = p2;
- p2->avl.bal = 0;
- }
- break;
-
- default:
- break;
+ p1->avl.bal = -1;
+ modified = FALSE;
}
- }
- else
- {
- /* rebalance right branch */
- switch (p->avl.bal)
+ else
{
- case +1:
- p->avl.bal = 0;
- break;
-
- case 0:
- p->avl.bal = -1;
- modified = FALSE;
- break;
-
- case -1:
- p1 = p->avl.left;
-
- if (p1->avl.bal <= 0) /* simple LL-turn */
- {
- p->avl.left = p1->avl.right;
- p1->avl.right = p;
- if (p1->avl.bal == 0)
- {
- p1->avl.bal = 1;
- modified = FALSE;
- }
- else
- {
- p->avl.bal = 0;
- p1->avl.bal = 0;
- }
- p = p1;
- }
- else /* double LR-turn */
- {
- p2 = p1->avl.right;
-
- p1->avl.right = p2->avl.left;
- p2->avl.left = p1;
- p->avl.left = p2->avl.right;
- p2->avl.right = p;
-
- if (p2->avl.bal == -1) p->avl.bal = 1; else p->avl.bal = 0;
- if (p2->avl.bal == +1) p1->avl.bal = -1; else p1->avl.bal = 0;
-
- p = p2;
- p2->avl.bal = 0;
- }
- break;
-
- default:
- break;
+ p->avl.bal = 0;
+ p1->avl.bal = 0;
}
- }
-
- if (buf_prev > buf_stack)
- {
- q = *(buf_prev - 1);
-
- if (q->avl.cache == -1)
+ p = p1;
+ }
+ else /* double RL-turn */
+ {
+ p2 = p1->avl.left;
+
+ p1->avl.left = p2->avl.right;
+ p2->avl.right = p1;
+ p->avl.right = p2->avl.left;
+ p2->avl.left = p;
+
+ if (p2->avl.bal == +1) p->avl.bal = -1; else p->avl.bal = 0;
+ if (p2->avl.bal == -1) p1->avl.bal = 1; else p1->avl.bal = 0;
+
+ p = p2;
+ p2->avl.bal = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ else
+ {
+ /* rebalance right branch */
+ switch (p->avl.bal)
+ {
+ case +1:
+ p->avl.bal = 0;
+ break;
+
+ case 0:
+ p->avl.bal = -1;
+ modified = FALSE;
+ break;
+
+ case -1:
+ p1 = p->avl.left;
+
+ if (p1->avl.bal <= 0) /* simple LL-turn */
+ {
+ p->avl.left = p1->avl.right;
+ p1->avl.right = p;
+ if (p1->avl.bal == 0)
{
- q->avl.left = p;
+ p1->avl.bal = 1;
+ modified = FALSE;
}
else
{
- q->avl.right = p;
+ p->avl.bal = 0;
+ p1->avl.bal = 0;
}
- }
- else
- {
- *root = p;
- break;
- }
+ p = p1;
+ }
+ else /* double LR-turn */
+ {
+ p2 = p1->avl.right;
+
+ p1->avl.right = p2->avl.left;
+ p2->avl.left = p1;
+ p->avl.left = p2->avl.right;
+ p2->avl.right = p;
+
+ if (p2->avl.bal == -1) p->avl.bal = 1; else p->avl.bal = 0;
+ if (p2->avl.bal == +1) p1->avl.bal = -1; else p1->avl.bal = 0;
+
+ p = p2;
+ p2->avl.bal = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (buf_prev > buf_stack)
+ {
+ q = *(buf_prev - 1);
+
+ if (q->avl.cache == -1)
+ {
+ q->avl.left = p;
+ }
+ else
+ {
+ q->avl.right = p;
+ }
+ }
+ else
+ {
+ *root = p;
+ break;
}
- return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Get the pool for the given pool id.
+ *
+ * @param pid The id of the pool to get.
+ */
+static rtems_bdbuf_pool*
+rtems_bdbuf_get_pool (const rtems_bdpool_id pid)
+{
+ return &rtems_bdbuf_ctx.pool[pid];
+}
+
+/**
+ * Lock the pool. A single task can nest calls.
+ *
+ * @param pool The pool to lock.
+ */
+static void
+rtems_bdbuf_lock_pool (rtems_bdbuf_pool* pool)
+{
+ rtems_status_code sc = rtems_semaphore_obtain (pool->lock,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT);
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_POOL_LOCK);
+}
+
+/**
+ * Unlock the pool.
+ *
+ * @param pool The pool to unlock.
+ */
+static void
+rtems_bdbuf_unlock_pool (rtems_bdbuf_pool* pool)
+{
+ rtems_status_code sc = rtems_semaphore_release (pool->lock);
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_POOL_UNLOCK);
+}
+
+/**
+ * Lock the pool's sync lock. A single task can nest calls.
+ *
+ * @param pool The pool whose sync lock is to be obtained.
+ */
+static void
+rtems_bdbuf_lock_sync (rtems_bdbuf_pool* pool)
+{
+ rtems_status_code sc = rtems_semaphore_obtain (pool->sync_lock,
+ RTEMS_WAIT,
+ RTEMS_NO_TIMEOUT);
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_SYNC_LOCK);
+}
+
+/**
+ * Unlock the pool's sync lock.
+ *
+ * @param pool The pool whose sync lock is to be released.
+ */
+static void
+rtems_bdbuf_unlock_sync (rtems_bdbuf_pool* pool)
+{
+ rtems_status_code sc = rtems_semaphore_release (pool->sync_lock);
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_SYNC_UNLOCK);
+}
+
+/**
+ * Wait until woken. Semaphores are used so a number of tasks can wait and
+ * all can be woken at once. Task events would require us to maintain a list
+ * of tasks to be woken and this would require storage, and we do not
+ * know the number of tasks that could be waiting.
+ *
+ * While we hold the pool lock we can try and claim the semaphore and
+ * therefore know that when we release the pool lock we will block until the
+ * semaphore is released. This may even happen before we get to block.
+ *
+ * A counter is used to avoid the release call when no one is waiting.
+ *
+ * The function assumes the pool is locked on entry and the pool will be
+ * locked again on exit.
+ *
+ * @param pool The pool to wait for a buffer to return.
+ * @param sema The semaphore to block on and wait.
+ * @param waiters The wait counter for this semaphore.
+ */
+static void
+rtems_bdbuf_wait (rtems_bdbuf_pool* pool, rtems_id* sema,
+ volatile uint32_t* waiters)
+{
+ rtems_status_code sc;
+ rtems_mode prev_mode;
+
+ /*
+ * Indicate we are waiting.
+ */
+ *waiters += 1;
+
+ /*
+ * Disable preemption then unlock the pool and block.
+ * There is no POSIX condition variable in the core API so
+   * this is a workaround.
+ *
+ * The issue is a task could preempt after the pool is unlocked
+ * because it is blocking or just hits that window, and before
+ * this task has blocked on the semaphore. If the preempting task
+ * flushes the queue this task will not see the flush and may
+   * block forever or until another transaction flushes this
+ * semaphore.
+ */
+ sc = rtems_task_mode (RTEMS_NO_PREEMPT, RTEMS_PREEMPT_MASK, &prev_mode);
+
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_POOL_WAIT);
+
+ /*
+ * Unlock the pool, wait, and lock the pool when we return.
+ */
+ rtems_bdbuf_unlock_pool (pool);
+
+ sc = rtems_semaphore_obtain (*sema, RTEMS_WAIT, RTEMS_NO_TIMEOUT);
+
+ if (sc != RTEMS_UNSATISFIED)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_POOL_WAIT);
+
+ rtems_bdbuf_lock_pool (pool);
+
+ sc = rtems_task_mode (prev_mode, RTEMS_ALL_MODE_MASKS, &prev_mode);
+
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_POOL_WAIT);
+
+ *waiters -= 1;
+}
+
+/**
+ * Wake a blocked resource. The resource has a counter that lets us know if
+ * there are any waiters.
+ *
+ * @param sema The semaphore to release.
+ * @param waiters The wait counter for this semaphore.
+ */
+static void
+rtems_bdbuf_wake (rtems_id sema, volatile uint32_t* waiters)
+{
+ if (*waiters)
+ {
+ rtems_status_code sc;
+
+ sc = rtems_semaphore_flush (sema);
+
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_POOL_WAKE);
+ }
+}
+
+/**
+ * Add a buffer descriptor to the modified list. The modified list is treated
+ * a little differently from the other lists. To access it you must have the
+ * pool locked, and this is assumed to be the case on entry to this call, and
+ * you must hold the sync lock. The sync lock is used to block writes while a
+ * sync is active.
+ *
+ * @param pool The pool the buffer belongs to.
+ * @param bd The buffer descriptor to append to the modified list.
+ */
+static void
+rtems_bdbuf_append_modified (rtems_bdbuf_pool* pool, rtems_bdbuf_buffer* bd)
+{
+ /*
+ * Take a copy of the sync_active variable as it will change after
+ * we unlock the pool and wait for the sync to finish.
+ */
+ int sync_active = pool->sync_active;
+ if (0 && sync_active)
+ {
+ rtems_bdbuf_unlock_pool (pool);
+ rtems_bdbuf_lock_sync (pool);
+ rtems_bdbuf_lock_pool (pool);
+ }
+
+ bd->state = RTEMS_BDBUF_STATE_MODIFIED;
+
+ rtems_chain_append (&pool->modified, &bd->link);
+
+ if (0 && sync_active)
+ rtems_bdbuf_unlock_sync (pool);
+}
+
+static void
+rtems_bdbuf_wake_swapper ()
+{
+ rtems_status_code sc = rtems_event_send (rtems_bdbuf_ctx.swapout,
+ RTEMS_BDBUF_SWAPOUT_SYNC);
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_SO_WAKE);
}
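
An aside on the wait/wake pair above: the Classic API has no condition variable, so the code emulates one with a counting semaphore, a waiter counter and preemption control. A rough equivalent using POSIX primitives, shown only to make the intent of the comments clearer (none of these names exist in this commit and the sketch is not part of the change), could look like this:

    #include <pthread.h>

    /* Hypothetical stand-ins for the pool lock, the waiting semaphore and
       the wake condition used by rtems_bdbuf_wait/rtems_bdbuf_wake above. */
    static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  pool_cond  = PTHREAD_COND_INITIALIZER;
    static int             buffers_available;

    static void wait_for_buffer (void)     /* role of rtems_bdbuf_wait */
    {
      pthread_mutex_lock (&pool_mutex);
      while (!buffers_available)
        pthread_cond_wait (&pool_cond, &pool_mutex); /* unlock and block atomically */
      pthread_mutex_unlock (&pool_mutex);
    }

    static void buffer_returned (void)     /* role of rtems_bdbuf_wake */
    {
      pthread_mutex_lock (&pool_mutex);
      buffers_available = 1;
      pthread_cond_broadcast (&pool_cond); /* like flushing the semaphore */
      pthread_mutex_unlock (&pool_mutex);
    }

The Classic API version has to disable preemption before dropping the pool lock because a semaphore obtain cannot unlock and block atomically the way pthread_cond_wait can, which is exactly the window the comments above describe.
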
/* bdbuf_initialize_pool --
@@ -675,70 +930,134 @@ avl_remove(bdbuf_buffer **root, const bdbuf_buffer *node)
* code if error occured.
*/
static rtems_status_code
-bdbuf_initialize_pool(rtems_bdbuf_config *config, int pool)
+rtems_bdbuf_initialize_pool (rtems_bdbuf_pool_config* config,
+ rtems_bdpool_id pid)
{
- bdbuf_pool *p = rtems_bdbuf_ctx.pool + pool;
- unsigned char *bufs;
- bdbuf_buffer *b;
- rtems_status_code rc;
- int i;
-
- p->blksize = config->size;
- p->nblks = config->num;
- p->tree = NULL;
-
- rtems_chain_initialize_empty(&p->free);
- rtems_chain_initialize_empty(&p->lru);
-
- /* Allocate memory for buffer descriptors */
- p->bdbufs = calloc(config->num, sizeof(bdbuf_buffer));
- if (p->bdbufs == NULL)
- {
- return RTEMS_NO_MEMORY;
- }
+ unsigned char* buffer = config->mem_area;
+ rtems_bdbuf_pool* pool;
+ rtems_bdbuf_buffer* bd;
+ rtems_status_code sc;
+ int b;
- /* Allocate memory for buffers if required */
- if (config->mem_area == NULL)
- {
- bufs = p->mallocd_bufs = malloc(config->num * config->size);
- if (bufs == NULL)
- {
- free(p->bdbufs);
- return RTEMS_NO_MEMORY;
- }
- }
- else
+ pool = rtems_bdbuf_get_pool (pid);
+
+ pool->blksize = config->size;
+ pool->nblks = config->num;
+ pool->tree = NULL;
+ pool->buffers = NULL;
+
+ rtems_chain_initialize_empty (&pool->ready);
+ rtems_chain_initialize_empty (&pool->lru);
+ rtems_chain_initialize_empty (&pool->modified);
+ rtems_chain_initialize_empty (&pool->sync);
+
+ pool->access = 0;
+ pool->access_waiters = 0;
+ pool->transfer = 0;
+ pool->transfer_waiters = 0;
+ pool->waiting = 0;
+ pool->wait_waiters = 0;
+
+ /*
+ * Allocate memory for buffer descriptors
+ */
+ pool->bds = calloc (config->num, sizeof (rtems_bdbuf_buffer));
+
+ if (!pool->bds)
+ return RTEMS_NO_MEMORY;
+
+ /*
+ * Allocate memory for buffers if required.
+ */
+ if (buffer == NULL)
+ {
+ buffer = pool->buffers = malloc (config->num * config->size);
+ if (!pool->buffers)
{
- bufs = config->mem_area;
- p->mallocd_bufs = NULL;
+ free (pool->bds);
+ return RTEMS_NO_MEMORY;
}
+ }
- for (i = 0, b = p->bdbufs; i < p->nblks; i++, b++, bufs += p->blksize)
- {
- b->dev = -1; b->block = 0;
- b->buffer = bufs;
- b->actual = b->modified = b->in_progress = FALSE;
- b->use_count = 0;
- b->pool = pool;
- rtems_chain_append(&p->free, &b->link);
- }
+ for (b = 0, bd = pool->bds;
+ b < pool->nblks;
+ b++, bd++, buffer += pool->blksize)
+ {
+ bd->dev = -1;
+ bd->block = 0;
+ bd->buffer = buffer;
+ bd->avl.left = NULL;
+ bd->avl.right = NULL;
+ bd->state = RTEMS_BDBUF_STATE_EMPTY;
+ bd->pool = pid;
+ bd->error = 0;
+ bd->waiters = 0;
+ bd->hold_timer = 0;
+
+ rtems_chain_append (&pool->ready, &bd->link);
+ }
- rc = rtems_semaphore_create(
- rtems_build_name('B', 'U', 'F', 'G'),
- p->nblks,
- RTEMS_FIFO | RTEMS_COUNTING_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY |
- RTEMS_NO_PRIORITY_CEILING | RTEMS_LOCAL,
- 0,
- &p->bufget_sema);
+ sc = rtems_semaphore_create (rtems_build_name ('B', 'P', '0' + pid, 'L'),
+ 1, RTEMS_BDBUF_POOL_LOCK_ATTRIBS, 0,
+ &pool->lock);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ free (pool->buffers);
+ free (pool->bds);
+ return sc;
+ }
- if (rc != RTEMS_SUCCESSFUL)
- {
- free(p->bdbufs);
- free(p->mallocd_bufs);
- return rc;
- }
+ sc = rtems_semaphore_create (rtems_build_name ('B', 'P', '0' + pid, 's'),
+ 0, RTEMS_BDBUF_POOL_WAITER_ATTRIBS, 0,
+ &pool->sync_lock);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ rtems_semaphore_delete (pool->lock);
+ free (pool->buffers);
+ free (pool->bds);
+ return sc;
+ }
+
+ sc = rtems_semaphore_create (rtems_build_name ('B', 'P', '0' + pid, 'a'),
+ 0, RTEMS_BDBUF_POOL_WAITER_ATTRIBS, 0,
+ &pool->access);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ rtems_semaphore_delete (pool->sync_lock);
+ rtems_semaphore_delete (pool->lock);
+ free (pool->buffers);
+ free (pool->bds);
+ return sc;
+ }
- return RTEMS_SUCCESSFUL;
+ sc = rtems_semaphore_create (rtems_build_name ('B', 'P', '0' + pid, 't'),
+ 0, RTEMS_BDBUF_POOL_WAITER_ATTRIBS, 0,
+ &pool->transfer);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ rtems_semaphore_delete (pool->access);
+ rtems_semaphore_delete (pool->sync_lock);
+ rtems_semaphore_delete (pool->lock);
+ free (pool->buffers);
+ free (pool->bds);
+ return sc;
+ }
+
+ sc = rtems_semaphore_create (rtems_build_name ('B', 'P', '0' + pid, 'w'),
+ 0, RTEMS_BDBUF_POOL_WAITER_ATTRIBS, 0,
+ &pool->waiting);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ rtems_semaphore_delete (pool->transfer);
+ rtems_semaphore_delete (pool->access);
+ rtems_semaphore_delete (pool->sync_lock);
+ rtems_semaphore_delete (pool->lock);
+ free (pool->buffers);
+ free (pool->bds);
+ return sc;
+ }
+
+ return RTEMS_SUCCESSFUL;
}
/* bdbuf_release_pool --
@@ -751,21 +1070,29 @@ bdbuf_initialize_pool(rtems_bdbuf_config *config, int pool)
* RTEMS_SUCCESSFUL
*/
static rtems_status_code
-bdbuf_release_pool(rtems_bdpool_id pool)
+rtems_bdbuf_release_pool (rtems_bdpool_id pid)
{
- bdbuf_pool *p = rtems_bdbuf_ctx.pool + pool;
- rtems_semaphore_delete(p->bufget_sema);
- free(p->bdbufs);
- free(p->mallocd_bufs);
- return RTEMS_SUCCESSFUL;
+ rtems_bdbuf_pool* pool = rtems_bdbuf_get_pool (pid);
+
+ rtems_bdbuf_lock_pool (pool);
+
+ rtems_semaphore_delete (pool->waiting);
+ rtems_semaphore_delete (pool->transfer);
+ rtems_semaphore_delete (pool->access);
+ rtems_semaphore_delete (pool->lock);
+
+ free (pool->buffers);
+ free (pool->bds);
+
+ return RTEMS_SUCCESSFUL;
}
/* rtems_bdbuf_init --
* Prepare buffering layer to work - initialize buffer descritors
- * and (if it is neccessary)buffers. Buffers will be allocated accoriding
+ *  and (if it is necessary) buffers. Buffers will be allocated according
* to the configuration table, each entry describes kind of block and
- * amount requested. After initialization all blocks is placed into
- * free elements lists.
+ * amount requested. After initialization all blocks are placed onto
+ *  the empty buffer lists.
*
* PARAMETERS:
* conf_table - pointer to the buffers configuration table
@@ -776,107 +1103,124 @@ bdbuf_release_pool(rtems_bdpool_id pool)
* or error code if error is occured)
*/
rtems_status_code
-rtems_bdbuf_init(rtems_bdbuf_config *conf_table, int size)
+rtems_bdbuf_init ()
{
- rtems_bdpool_id i;
- rtems_status_code rc;
+ rtems_bdpool_id p;
+ rtems_status_code sc;
- if (size <= 0)
- return RTEMS_INVALID_SIZE;
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("init\n");
+#endif
- rtems_bdbuf_ctx.npools = size;
+ if (rtems_bdbuf_pool_configuration_size <= 0)
+ return RTEMS_INVALID_SIZE;
- /*
- * Allocate memory for buffer pool descriptors
- */
- rtems_bdbuf_ctx.pool = calloc(size, sizeof(bdbuf_pool));
- if (rtems_bdbuf_ctx.pool == NULL)
- {
- return RTEMS_NO_MEMORY;
- }
+ if (rtems_bdbuf_ctx.npools)
+ return RTEMS_RESOURCE_IN_USE;
- rtems_chain_initialize_empty(&rtems_bdbuf_ctx.mod);
+ rtems_bdbuf_ctx.npools = rtems_bdbuf_pool_configuration_size;
- /* Initialize buffer pools and roll out if something failed */
- for (i = 0; i < size; i++)
+ /*
+ * Allocate memory for buffer pool descriptors
+ */
+ rtems_bdbuf_ctx.pool = calloc (rtems_bdbuf_pool_configuration_size,
+ sizeof (rtems_bdbuf_pool));
+
+ if (rtems_bdbuf_ctx.pool == NULL)
+ return RTEMS_NO_MEMORY;
+
+ /*
+   * Initialize buffer pools and roll back if something failed.
+ */
+ for (p = 0; p < rtems_bdbuf_ctx.npools; p++)
+ {
+ sc = rtems_bdbuf_initialize_pool (&rtems_bdbuf_pool_configuration[p], p);
+ if (sc != RTEMS_SUCCESSFUL)
{
- rc = bdbuf_initialize_pool(conf_table + i, i);
- if (rc != RTEMS_SUCCESSFUL)
- {
- rtems_bdpool_id j;
- for (j = 0; j < i - 1; j++)
- {
- bdbuf_release_pool(j);
- }
- return rc;
- }
+ rtems_bdpool_id j;
+ for (j = 0; j < p - 1; j++)
+ rtems_bdbuf_release_pool (j);
+ return sc;
}
+ }
- /* Create buffer flush semaphore */
- rc = rtems_semaphore_create(
- rtems_build_name('B', 'F', 'L', 'U'), 0,
- RTEMS_FIFO | RTEMS_COUNTING_SEMAPHORE | RTEMS_NO_INHERIT_PRIORITY |
- RTEMS_NO_PRIORITY_CEILING | RTEMS_LOCAL, 0,
- &rtems_bdbuf_ctx.flush_sema);
- if (rc != RTEMS_SUCCESSFUL)
- {
- for (i = 0; i < size; i++)
- bdbuf_release_pool(i);
- free(rtems_bdbuf_ctx.pool);
- return rc;
- }
+ /*
+ * Create and start swapout task
+ */
- /* Create and start swapout task */
- rc = rtems_task_create(
- rtems_build_name('B', 'S', 'W', 'P'),
- ((swapout_task_priority > 0)
- ? swapout_task_priority
- : SWAPOUT_TASK_DEFAULT_PRIORITY),
- SWAPOUT_TASK_STACK_SIZE,
- RTEMS_DEFAULT_MODES | RTEMS_NO_PREEMPT,
- RTEMS_DEFAULT_ATTRIBUTES,
- &rtems_bdbuf_ctx.swapout_task);
- if (rc != RTEMS_SUCCESSFUL)
- {
- rtems_semaphore_delete(rtems_bdbuf_ctx.flush_sema);
- for (i = 0; i < size; i++)
- bdbuf_release_pool(i);
- free(rtems_bdbuf_ctx.pool);
- return rc;
- }
+ rtems_bdbuf_ctx.swapout_enabled = TRUE;
+
+ sc = rtems_task_create (rtems_build_name('B', 'S', 'W', 'P'),
+ (rtems_bdbuf_configuration.swapout_priority ?
+ rtems_bdbuf_configuration.swapout_priority :
+ RTEMS_BDBUF_SWAPOUT_TASK_PRIORITY_DEFAULT),
+ SWAPOUT_TASK_STACK_SIZE,
+ RTEMS_PREEMPT | RTEMS_NO_TIMESLICE | RTEMS_NO_ASR,
+ RTEMS_LOCAL | RTEMS_NO_FLOATING_POINT,
+ &rtems_bdbuf_ctx.swapout);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ for (p = 0; p < rtems_bdbuf_ctx.npools; p++)
+ rtems_bdbuf_release_pool (p);
+ free (rtems_bdbuf_ctx.pool);
+ return sc;
+ }
- rc = rtems_task_start(rtems_bdbuf_ctx.swapout_task, bdbuf_swapout_task, 0);
- if (rc != RTEMS_SUCCESSFUL)
- {
- rtems_task_delete(rtems_bdbuf_ctx.swapout_task);
- rtems_semaphore_delete(rtems_bdbuf_ctx.flush_sema);
- for (i = 0; i < size; i++)
- bdbuf_release_pool(i);
- free(rtems_bdbuf_ctx.pool);
- return rc;
- }
+ sc = rtems_task_start (rtems_bdbuf_ctx.swapout,
+ rtems_bdbuf_swapout_task,
+ (rtems_task_argument) &rtems_bdbuf_ctx);
+ if (sc != RTEMS_SUCCESSFUL)
+ {
+ rtems_task_delete (rtems_bdbuf_ctx.swapout);
+ for (p = 0; p < rtems_bdbuf_ctx.npools; p++)
+ rtems_bdbuf_release_pool (p);
+ free (rtems_bdbuf_ctx.pool);
+ return sc;
+ }
- return RTEMS_SUCCESSFUL;
+ return RTEMS_SUCCESSFUL;
}
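
rtems_bdbuf_init() above walks rtems_bdbuf_pool_configuration and rtems_bdbuf_pool_configuration_size, and rtems_bdbuf_initialize_pool() reads the size, num and mem_area fields of each entry. A minimal sketch of an application-supplied table (the field names come from the code above; the exact struct layout and types are declared in rtems/bdbuf.h and the values here are only an example):

    #include <rtems/bdbuf.h>

    /* Example only: three pools; mem_area is NULL so the cache allocates the
       buffer memory itself, as the initialisation code above shows. */
    rtems_bdbuf_pool_config rtems_bdbuf_pool_configuration[] = {
      { .size = 512,  .num = 64, .mem_area = NULL },
      { .size = 1024, .num = 32, .mem_area = NULL },
      { .size = 2048, .num = 16, .mem_area = NULL }
    };

    int rtems_bdbuf_pool_configuration_size =
      sizeof (rtems_bdbuf_pool_configuration) /
      sizeof (rtems_bdbuf_pool_configuration[0]);
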
-/* find_or_assign_buffer --
- * Looks for buffer already assigned for this dev/block. If one is found
- * obtain block buffer. If specified block already cached (i.e. there's
- * block in the _modified_, or _recently_used_), return address
- * of appropriate buffer descriptor and increment reference counter to 1.
- * If block is not cached, allocate new buffer and return it. Data
- * shouldn't be read to the buffer from media; buffer contains arbitrary
- * data. This primitive may be blocked if there are no free buffer
- * descriptors available and there are no unused non-modified (or
- * synchronized with media) buffers available.
+/**
+ * Get a buffer for this device and block. This function returns a buffer once
+ * it has been placed into the AVL tree. If no buffer is available and it is
+ * not a read ahead request, wait until one is available. If the request is for
+ * a read ahead transfer, return NULL if there is no buffer or the block is
+ * already in the cache.
*
- * PARAMETERS:
- * device - device number (constructed of major and minor device number
- * block - linear media block number
- * ret_buf - address of the variable to store address of found descriptor
+ * The AVL tree of buffers for the pool is searched and if the block is not
+ * located a buffer is obtained and inserted into the AVL tree. Buffers are
+ * first obtained from the ready list until all empty buffers are used. Once
+ * all buffers are in use buffers are taken from the LRU list with the least
+ * recently used buffer taken first. A buffer taken from the LRU list is
+ * removed from the AVL tree. The empty list or LRU list buffer is initialised
+ * to this device and block. If no buffers are available because the empty and
+ * LRU lists are empty the caller is blocked on the waiting semaphore and
+ * counter. When buffers return from the upper layers (access) or the lower
+ * driver (transfer) the blocked caller task is woken and this procedure is
+ * repeated. The repeat handles the case of another thread pre-empting us,
+ * getting a buffer first and adding it to the AVL tree.
+ *
+ * A buffer located in the AVL tree means it is already in the cache and may be
+ * in use somewhere. The buffer can be either:
+ *
+ * # Cached. Not being accessed or part of a media transfer.
+ * # Access. Is with an upper layer being accessed.
+ * # Transfer. Is with the driver and part of a media transfer.
+ *
+ * If cached we assign the new state, extract the buffer from any list it may
+ * be part of and return it to the user. The buffer could be part of the LRU
+ * list or the modified list waiting to be swapped out by the swap out task.
+ *
+ * This function assumes the pool the buffer is being taken from is locked and
+ * it ensures the pool is locked when it returns.
*
+ * @param pdd The physical disk device
+ * @param pool The pool to take the buffer from
+ * @param block Linear media block number
+ * @param read_ahead TRUE if this is a read ahead request
+
* RETURNS:
- * RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
+ *     RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
* or error code if error is occured)
*
* SIDE EFFEECTS:
@@ -885,252 +1229,251 @@ rtems_bdbuf_init(rtems_bdbuf_config *conf_table, int size)
* NOTE:
* It is assumed that primitive invoked when thread preemption is disabled.
*/
-static rtems_status_code
-find_or_assign_buffer(disk_device *dd,
- blkdev_bnum block,
- bdbuf_buffer **ret_buf)
+static rtems_bdbuf_buffer*
+rtems_bdbuf_get_buffer (rtems_disk_device* pdd,
+ rtems_bdbuf_pool* pool,
+ rtems_blkdev_bnum block,
+ boolean read_ahead)
{
- bdbuf_buffer *bd_buf;
- bdbuf_pool *bd_pool;
- rtems_status_code rc;
- dev_t device;
- ISR_Level level;
-
- int blksize;
-
- device = dd->dev;
- bd_pool = rtems_bdbuf_ctx.pool + dd->pool;
- blksize = dd->block_size;
-
-again:
- /* Looking for buffer descriptor used for this dev/block. */
- bd_buf = avl_search(&bd_pool->tree, device, block);
+ dev_t device = pdd->dev;
+ rtems_bdbuf_buffer* bd;
+ boolean available;
+
+ /*
+ * Loop until we get a buffer. Under load we could find no buffers are
+ * available so in the case of the required block this task needs to wait
+   * until some return before proceeding. There is no timeout. If the request
+   * is for a read ahead buffer, return NULL rather than waiting.
+ *
+ * The search procedure is repeated as another thread could have pre-empted
+ * us while we waited for a buffer, obtained an empty buffer and loaded the
+ * AVL tree with it.
+ */
+ do
+ {
+ /*
+ * Search for buffer descriptor for this dev/block key.
+ */
+ bd = rtems_bdbuf_avl_search (&pool->tree, device, block);
- if (bd_buf == NULL)
+ /*
+ * No buffer in the cache for this block. We need to obtain a buffer and
+ * this means take a buffer that is ready to use. If all buffers are in use
+ * take the least recently used buffer. If there are none then the cache is
+ * empty. All the buffers are either queued to be written to disk or with
+ * the user. We cannot do much with the buffers with the user how-ever with
+ * the modified buffers waiting to be written to disk flush the maximum
+ * number transfered in a block to disk. After this all that be done is to
+ * wait for a buffer to return to the cache.
+ */
+ if (!bd)
{
- /* Try to obtain semaphore without waiting first. It is the most
- frequent case when reasonable number of buffers configured. If
- it is failed, obtain semaphore blocking on it. In this case
- it should be checked that appropriate buffer hasn't been loaded
- by another thread, because this thread is preempted */
- rc = rtems_semaphore_obtain(bd_pool->bufget_sema, RTEMS_NO_WAIT, 0);
- if (rc == RTEMS_UNSATISFIED)
+ /*
+ * Assign new buffer descriptor from the empty list if one is present. If
+ * the empty queue is empty get the oldest buffer from LRU list. If the
+ * LRU list is empty there are no available buffers so we need to wait
+ * until some are returned.
+ */
+ if (rtems_chain_is_empty (&pool->ready))
+ {
+ /*
+       * No unused or read-ahead buffers.
+       *
+       * If this is a read ahead buffer just return. No need to place
+       * further pressure on the cache by reading something that may only
+       * be needed when we have data in the cache that is needed.
+ */
+ if (read_ahead)
+ return NULL;
+
+ /*
+ * Check the LRU list.
+ */
+ bd = (rtems_bdbuf_buffer *) rtems_chain_get (&pool->lru);
+
+ if (bd)
{
- rc = rtems_semaphore_obtain(bd_pool->bufget_sema,
- RTEMS_WAIT, RTEMS_NO_TIMEOUT);
- bd_buf = avl_search(&bd_pool->tree, device, block);
- if (bd_buf != NULL)
- rtems_semaphore_release(bd_pool->bufget_sema);
+ /*
+ * Remove the buffer from the AVL tree.
+ */
+ if (rtems_bdbuf_avl_remove (&pool->tree, bd) != 0)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY);
}
- }
-
- if (bd_buf == NULL)
- {
- /* Assign new buffer descriptor */
- if (rtems_chain_is_empty(&bd_pool->free))
+ else
{
- bd_buf = (bdbuf_buffer *)rtems_chain_get(&bd_pool->lru);
- if (bd_buf != NULL)
+ /*
+ * If there are buffers on the modified list expire the hold timer
+ * and wake the swap out task.
+ */
+ if (!rtems_chain_is_empty (&pool->modified))
+ {
+ rtems_chain_node* node = rtems_chain_head (&pool->modified);
+ int write_blocks = 0;
+
+ node = node->next;
+ while ((write_blocks < rtems_bdbuf_configuration.max_write_blocks) &&
+ !rtems_chain_is_tail (&pool->modified, node))
{
- int avl_result;
- avl_result = avl_remove(&bd_pool->tree, bd_buf);
- if (avl_result != 0)
- {
- rtems_fatal_error_occurred(BLKDEV_FATAL_BDBUF_CONSISTENCY);
- return RTEMS_INTERNAL_ERROR;
- }
+ rtems_bdbuf_buffer* bd = (rtems_bdbuf_buffer*) node;
+ bd->hold_timer = 0;
+ write_blocks++;
+ node = node->next;
}
- }
- else
- {
- bd_buf = (bdbuf_buffer *)rtems_chain_get(&(bd_pool->free));
- }
- if (bd_buf == NULL)
- {
- goto again;
+ rtems_bdbuf_wake_swapper ();
+ }
+
+ /*
+ * Wait for a buffer to be returned to the pool. The buffer will be
+ * placed on the LRU list.
+ */
+ rtems_bdbuf_wait (pool, &pool->waiting, &pool->wait_waiters);
}
- else
- {
- bd_buf->dev = device;
- bd_buf->block = block;
-#ifdef AVL_GPL
- bd_buf->avl.link[0] = NULL;
- bd_buf->avl.link[1] = NULL;
-#else
- bd_buf->avl.left = NULL;
- bd_buf->avl.right = NULL;
-#endif
- bd_buf->use_count = 1;
- bd_buf->modified = bd_buf->actual = bd_buf->in_progress = FALSE;
- bd_buf->status = RTEMS_SUCCESSFUL;
+ }
+ else
+ {
+ bd = (rtems_bdbuf_buffer *) rtems_chain_get (&(pool->ready));
- if (avl_insert(&bd_pool->tree, bd_buf) != 0)
- {
- rtems_fatal_error_occurred(BLKDEV_FATAL_BDBUF_CONSISTENCY);
- return RTEMS_INTERNAL_ERROR;
- }
+ if ((bd->state != RTEMS_BDBUF_STATE_EMPTY) &&
+ (bd->state != RTEMS_BDBUF_STATE_READ_AHEAD))
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY);
- *ret_buf = bd_buf;
-
- return RTEMS_SUCCESSFUL;
+ if (bd->state == RTEMS_BDBUF_STATE_READ_AHEAD)
+ {
+ if (rtems_bdbuf_avl_remove (&pool->tree, bd) != 0)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY);
}
+ }
+
+ if (bd)
+ {
+ bd->dev = device;
+ bd->block = block;
+ bd->avl.left = NULL;
+ bd->avl.right = NULL;
+ bd->state = RTEMS_BDBUF_STATE_EMPTY;
+ bd->error = 0;
+ bd->waiters = 0;
+
+ if (rtems_bdbuf_avl_insert (&pool->tree, bd) != 0)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY);
+
+ return bd;
+ }
}
- else
+ }
+ while (!bd);
+
+ /*
+ * If the buffer is for read ahead and it exists in the AVL cache or is being
+   * accessed or being transferred then return NULL.
+ */
+ if (read_ahead)
+ return NULL;
+
+ /*
+ * Loop waiting for the buffer to enter the cached state. If the buffer
+ * is in the access or transfer state then wait until it is not.
+ */
+ available = FALSE;
+ while (!available)
+ {
+ switch (bd->state)
{
- /* Buffer descriptor already assigned for this dev/block */
- if (bd_buf->use_count == 0)
- {
- /* If we are removing from lru list, obtain the bufget_sema
- * first. If we are removing from mod list, obtain flush sema.
- * It should be obtained without blocking because we know
- * that our buffer descriptor is in the list. */
- if (bd_buf->modified)
- {
- rc = rtems_semaphore_obtain(rtems_bdbuf_ctx.flush_sema,
- RTEMS_NO_WAIT, 0);
- }
- else
- {
- rc = rtems_semaphore_obtain(bd_pool->bufget_sema,
- RTEMS_NO_WAIT, 0);
- }
- /* It is possible that we couldn't obtain flush or bufget sema
- * although buffer in the appropriate chain is available:
- * semaphore may be released to swapout task, but this task
- * actually did not start to process it. */
- if (rc == RTEMS_UNSATISFIED)
- rc = RTEMS_SUCCESSFUL;
- if (rc != RTEMS_SUCCESSFUL)
- {
- rtems_fatal_error_occurred(BLKDEV_FATAL_BDBUF_CONSISTENCY);
- return RTEMS_INTERNAL_ERROR;
- }
+ case RTEMS_BDBUF_STATE_CACHED:
+ case RTEMS_BDBUF_STATE_MODIFIED:
+ case RTEMS_BDBUF_STATE_READ_AHEAD:
+ available = TRUE;
+ break;
+
+ case RTEMS_BDBUF_STATE_ACCESS:
+ case RTEMS_BDBUF_STATE_ACCESS_MODIFIED:
+ bd->waiters++;
+ rtems_bdbuf_wait (pool, &pool->access, &pool->access_waiters);
+ bd->waiters--;
+ break;
+
+ case RTEMS_BDBUF_STATE_SYNC:
+ case RTEMS_BDBUF_STATE_TRANSFER:
+ bd->waiters++;
+ rtems_bdbuf_wait (pool, &pool->transfer, &pool->transfer_waiters);
+ bd->waiters--;
+ break;
+
+ default:
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY);
+ }
+ }
- /* Buffer descriptor is linked to the lru or mod chain. Remove
- it from there. */
- rtems_chain_extract(&bd_buf->link);
- }
- bd_buf->use_count++;
- while (bd_buf->in_progress != 0)
- {
- rtems_interrupt_disable(level);
- _CORE_mutex_Seize(&bd_buf->transfer_sema, 0, TRUE,
- WATCHDOG_NO_TIMEOUT, level);
- }
+ /*
+   * Buffer is linked to the LRU, modified, or sync lists. Remove it from there.
+ */
+ rtems_chain_extract (&bd->link);
- *ret_buf = bd_buf;
- return RTEMS_SUCCESSFUL;
- }
+ return bd;
}
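
The states referenced throughout rtems_bdbuf_get_buffer() are easier to follow with a one line meaning per state. A hypothetical debug helper, with the meanings inferred from the code in this file (the state constants are real, the helper and its wording are not part of the commit):

    /* Hypothetical tracing helper: map a buffer descriptor's state to a name. */
    static const char*
    rtems_bdbuf_state_name (const rtems_bdbuf_buffer* bd)
    {
      switch (bd->state)
      {
        case RTEMS_BDBUF_STATE_EMPTY:           return "empty (ready list)";
        case RTEMS_BDBUF_STATE_READ_AHEAD:      return "read ahead (ready list)";
        case RTEMS_BDBUF_STATE_CACHED:          return "cached (LRU list)";
        case RTEMS_BDBUF_STATE_ACCESS:          return "access (upper layer)";
        case RTEMS_BDBUF_STATE_ACCESS_MODIFIED: return "access, modified";
        case RTEMS_BDBUF_STATE_MODIFIED:        return "modified (awaiting swap out)";
        case RTEMS_BDBUF_STATE_SYNC:            return "sync (forced write)";
        case RTEMS_BDBUF_STATE_TRANSFER:        return "transfer (with the driver)";
        default:                                return "unknown";
      }
    }
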
-/* rtems_bdbuf_get --
- * Obtain block buffer. If specified block already cached (i.e. there's
- * block in the _modified_, or _recently_used_), return address
- * of appropriate buffer descriptor and increment reference counter to 1.
- * If block is not cached, allocate new buffer and return it. Data
- * shouldn't be read to the buffer from media; buffer may contains
- * arbitrary data. This primitive may be blocked if there are no free
- * buffer descriptors available and there are no unused non-modified
- * (or synchronized with media) buffers available.
+/**
+ * Obtain a block buffer. If the specified block is already cached (i.e. the
+ * block is in the _modified_ or _recently_used_ list), return the address of
+ * the appropriate buffer descriptor. If the block is not cached, allocate a
+ * new buffer and return it. Data should not be read into the buffer from the
+ * media; the buffer may contain arbitrary data. This primitive may be blocked
+ * if there are no free buffer descriptors
+ * available and there are no unused non-modified (or synchronized with media)
+ * buffers available.
*
- * PARAMETERS:
- * device - device number (constructed of major and minor device number)
- * block - linear media block number
- * bd - address of variable to store pointer to the buffer descriptor
- *
- * RETURNS:
- * RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
- * or error code if error is occured)
- *
- * SIDE EFFECTS:
- * bufget_sema semaphore obtained by this primitive.
+ * @param device device number (constructed of major and minor device number)
+ * @param block linear media block number
+ * @param bd address of variable to store pointer to the buffer descriptor
+ * @return RTEMS status code (RTEMS_SUCCESSFUL if the operation completed
+ *         successfully or an error code if an error occurred)
*/
rtems_status_code
-rtems_bdbuf_get(dev_t device, blkdev_bnum block, bdbuf_buffer **bd)
+rtems_bdbuf_get (dev_t device,
+ rtems_blkdev_bnum block,
+ rtems_bdbuf_buffer** bdp)
{
- rtems_status_code rc;
- disk_device *dd;
- disk_device *pdd;
- preemption_key key;
-
- /*
- * Convert logical dev/block to physical one
- */
- dd = rtems_disk_lookup(device);
- if (dd == NULL)
- return RTEMS_INVALID_ID;
+ rtems_disk_device* dd;
+ rtems_bdbuf_pool* pool;
+ rtems_bdbuf_buffer* bd;
+
+ /*
+ * Do not hold the pool lock when obtaining the disk table.
+ */
+ dd = rtems_disk_obtain (device);
+ if (dd == NULL)
+ return RTEMS_INVALID_ID;
+
+ if (block >= dd->size)
+ {
+ rtems_disk_release (dd);
+ return RTEMS_INVALID_NUMBER;
+ }
- if (block >= dd->size)
- {
- rtems_disk_release(dd);
- return RTEMS_INVALID_NUMBER;
- }
+ block += dd->start;
- pdd = dd->phys_dev;
- block += dd->start;
- rtems_disk_release(dd);
+ pool = rtems_bdbuf_get_pool (dd->phys_dev->pool);
+
+ rtems_disk_release(dd);
- DISABLE_PREEMPTION(key);
- rc = find_or_assign_buffer(pdd, block, bd);
- ENABLE_PREEMPTION(key);
+ rtems_bdbuf_lock_pool (pool);
- if (rc != RTEMS_SUCCESSFUL)
- return rc;
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("get: %d (dev = %08x)\n", block, device);
+#endif
- return RTEMS_SUCCESSFUL;
-}
+ bd = rtems_bdbuf_get_buffer (dd->phys_dev, pool, block, FALSE);
-/* bdbuf_initialize_transfer_sema --
- * Initialize transfer_sema mutex semaphore associated with buffer
- * descriptor.
- */
-static inline void
-bdbuf_initialize_transfer_sema(bdbuf_buffer *bd_buf)
-{
- CORE_mutex_Attributes mutex_attr;
- mutex_attr.lock_nesting_behavior = CORE_MUTEX_NESTING_BLOCKS;
- mutex_attr.only_owner_release = FALSE;
- mutex_attr.discipline = CORE_MUTEX_DISCIPLINES_FIFO;
- mutex_attr.priority_ceiling = 0;
-
- _CORE_mutex_Initialize(&bd_buf->transfer_sema,
- &mutex_attr, CORE_MUTEX_LOCKED);
-}
+ if (bd->state == RTEMS_BDBUF_STATE_MODIFIED)
+ bd->state = RTEMS_BDBUF_STATE_ACCESS_MODIFIED;
+ else
+ bd->state = RTEMS_BDBUF_STATE_ACCESS;
+
+ rtems_bdbuf_unlock_pool (pool);
-/* bdbuf_write_transfer_done --
- * Callout function. Invoked by block device driver when data transfer
- * to device (write) is completed. This function may be invoked from
- * interrupt handler.
- *
- * PARAMETERS:
- * arg - arbitrary argument specified in block device request
- * structure (in this case - pointer to the appropriate
- * bdbuf_buffer buffer descriptor structure).
- * status - I/O completion status
- * error - errno error code if status != RTEMS_SUCCESSFUL
- *
- * RETURNS:
- * none
- */
-static void
-bdbuf_write_transfer_done(void *arg, rtems_status_code status, int error)
-{
- int i;
- write_tfer_done_arg_t *wtd_arg = arg;
- blkdev_request *req = wtd_arg->req;
- bdbuf_buffer **bd_buf_write_store = wtd_arg->write_store;
- bdbuf_buffer *bd_buf;
- for (i = 0;i < req->count;i++) {
- bd_buf = bd_buf_write_store[i];
- bd_buf->status = status;
- bd_buf->error = RTEMS_IO_ERROR;
-
- bd_buf->in_progress = FALSE;
- _CORE_mutex_Surrender(&bd_buf->transfer_sema, 0, NULL);
- _CORE_mutex_Flush(&bd_buf->transfer_sema, NULL,
- CORE_MUTEX_STATUS_SUCCESSFUL);
- }
+ *bdp = bd;
+
+ return RTEMS_SUCCESSFUL;
}
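
A minimal usage sketch of the get/release pair, as an upper layer might write a block it does not need to read first. Illustrative only: the helper name, the block_size argument and the includes are assumptions and not part of this change; the bdbuf calls and bd->buffer are as shown above.

    #include <string.h>
    #include <rtems/bdbuf.h>

    /* Hypothetical helper: zero a block and queue it for writing. */
    rtems_status_code
    zero_block (dev_t device, rtems_blkdev_bnum block, size_t block_size)
    {
      rtems_bdbuf_buffer* bd;
      rtems_status_code   sc;

      sc = rtems_bdbuf_get (device, block, &bd); /* buffer is now being accessed */
      if (sc != RTEMS_SUCCESSFUL)
        return sc;

      memset (bd->buffer, 0, block_size);        /* contents were arbitrary */

      return rtems_bdbuf_release_modified (bd);  /* hand to the swap out task */
    }
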
/* bdbuf_read_transfer_done --
@@ -1149,658 +1492,882 @@ bdbuf_write_transfer_done(void *arg, rtems_status_code status, int error)
* none
*/
static void
-bdbuf_read_transfer_done(void *arg, rtems_status_code status, int error)
+rtems_bdbuf_read_done (void* arg, rtems_status_code status, int error)
{
-#if defined(READ_MULTIPLE)
-
- read_ahead_bd_buf_group *bd_buf_group = arg;
- bdbuf_buffer *bd_buf;
- int i;
- for (i = 0;i < bd_buf_group->cnt;i++) {
- bd_buf = bd_buf_group->bd_bufs[i];
-
- bd_buf->status = status;
- bd_buf->error = RTEMS_IO_ERROR;
- _CORE_mutex_Surrender(&bd_buf->transfer_sema, 0, NULL);
- _CORE_mutex_Flush(&bd_buf->transfer_sema, NULL,
- CORE_MUTEX_STATUS_SUCCESSFUL);
- }
-#else
- bdbuf_buffer *bd_buf = arg;
- bd_buf->status = status;
- bd_buf->error = RTEMS_IO_ERROR;
- _CORE_mutex_Surrender(&bd_buf->transfer_sema, 0, NULL);
- _CORE_mutex_Flush(&bd_buf->transfer_sema, NULL,
- CORE_MUTEX_STATUS_SUCCESSFUL);
-#endif
+ rtems_blkdev_request* req = (rtems_blkdev_request*) arg;
+
+ req->error = error;
+ req->status = status;
+
+ rtems_event_send (req->io_task, RTEMS_BDBUF_TRANSFER_SYNC);
}
-/* rtems_bdbuf_read --
- * (Similar to the rtems_bdbuf_get, except reading data from media)
- * Obtain block buffer. If specified block already cached, return address
- * of appropriate buffer and increment reference counter to 1. If block is
- * not cached, allocate new buffer and read data to it from the media.
- * This primitive may be blocked on waiting until data to be read from
- * media, if there are no free buffer descriptors available and there are
- * no unused non-modified (or synchronized with media) buffers available.
- *
- * PARAMETERS:
- * device - device number (consists of major and minor device number)
- * block - linear media block number
- * bd - address of variable to store pointer to the buffer descriptor
- *
- * RETURNS:
- * RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
- * or error code if error is occured)
+/**
+ * Read a block into memory. If the block is not in the cache it is read from
+ * the media. The caller is blocked until the block has been read and placed
+ * into the cache.
*
- * SIDE EFFECTS:
- * bufget_sema and transfer_sema semaphores obtained by this primitive.
+ * @param device The device number (consists of major and minor device number)
+ * @param block Linear media block number
+ * @param bd Pointer to the buffer BD address.
+ * @retval RTEMS_SUCCESSFUL Operation completed successfully.
+ * @return rtems_status_code An error code. Buffer still passed to caller.
*/
-#if !defined(READ_MULTIPLE)
+
rtems_status_code
-rtems_bdbuf_read(dev_t device,
- blkdev_bnum block,
- bdbuf_buffer **bd)
+rtems_bdbuf_read (dev_t device,
+ rtems_blkdev_bnum block,
+ rtems_bdbuf_buffer** bdp)
{
- preemption_key key;
- ISR_Level level;
+ rtems_disk_device* dd;
+ rtems_bdbuf_pool* pool;
+ rtems_bdbuf_buffer* bd = NULL;
+ int read_ahead_count;
+ rtems_blkdev_request* req;
+
+ /*
+ * @todo This type of request structure is wrong and should be removed.
+ */
+#define bdbuf_alloc(size) __builtin_alloca (size)
+
+ req = bdbuf_alloc (sizeof (rtems_blkdev_request) +
+ (sizeof ( rtems_blkdev_sg_buffer) *
+ rtems_bdbuf_configuration.max_read_ahead_blocks));
+
+ /*
+ * Do not hold the pool lock when obtaining the disk table.
+ */
+ dd = rtems_disk_obtain (device);
+ if (dd == NULL)
+ return RTEMS_INVALID_ID;
+
+ block += dd->start;
+
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("read: %d (dev = %08x)\n", block, device);
+#endif
+
+ if (block >= dd->size)
+ {
+ rtems_disk_release(dd);
+ return RTEMS_INVALID_NUMBER;
+ }
- bdbuf_buffer *bd_buf;
- rtems_status_code rc;
- int result;
- disk_device *dd;
- disk_device *pdd;
- blkdev_request1 req;
+ req->count = 0;
+ req->bufnum = 0;
- dd = rtems_disk_lookup(device);
- if (dd == NULL)
- return RTEMS_INVALID_ID;
+ /*
+ * Read the block plus the required number of blocks ahead. The number of
+ * blocks to read ahead is configured by the user and limited by the size of
+ * the disk or reaching a read ahead block that is also cached.
+ *
+ * Limit the blocks read by the size of the disk.
+ */
+ if ((rtems_bdbuf_configuration.max_read_ahead_blocks + block) < dd->size)
+ read_ahead_count = rtems_bdbuf_configuration.max_read_ahead_blocks;
+ else
+ read_ahead_count = dd->size - block;
- if (block >= dd->size)
- {
- rtems_disk_release(dd);
- return RTEMS_INVALID_NUMBER;
- }
+ pool = rtems_bdbuf_get_pool (dd->phys_dev->pool);
- pdd = dd->phys_dev;
- block += dd->start;
+ rtems_bdbuf_lock_pool (pool);
- DISABLE_PREEMPTION(key);
- rc = find_or_assign_buffer(pdd, block, &bd_buf);
+ while (req->count < read_ahead_count)
+ {
+ /*
+ * Get the buffer for the requested block. If the block is cached then
+ * return it. If it is not cached transfer the block from the disk media
+ * into memory.
+ *
+ * We need to clean up any buffers allocated and not passed back to the
+ * caller.
+ */
+ bd = rtems_bdbuf_get_buffer (dd->phys_dev, pool,
+ block + req->count,
+ req->count == 0 ? FALSE : TRUE);
- if (rc != RTEMS_SUCCESSFUL)
- {
- ENABLE_PREEMPTION(key);
- rtems_disk_release(dd);
- return rc;
- }
+ /*
+ * Read ahead buffer is in the cache or none available. Read what we
+ * can.
+ */
+ if (!bd)
+ break;
- if (!bd_buf->actual)
- {
- bd_buf->in_progress = 1;
-
- req.req.req = BLKDEV_REQ_READ;
- req.req.req_done = bdbuf_read_transfer_done;
- req.req.done_arg = bd_buf;
- req.req.start = block;
- req.req.count = 1;
- req.req.bufnum = 1;
- req.req.bufs[0].length = dd->block_size;
- req.req.bufs[0].buffer = bd_buf->buffer;
-
- bdbuf_initialize_transfer_sema(bd_buf);
- result = dd->ioctl(pdd->dev, BLKIO_REQUEST, &req);
- if (result == -1)
- {
- bd_buf->status = RTEMS_IO_ERROR;
- bd_buf->error = errno;
- bd_buf->actual = FALSE;
- }
- else
- {
- rtems_interrupt_disable(level);
- _CORE_mutex_Seize(&bd_buf->transfer_sema, 0, TRUE,
- WATCHDOG_NO_TIMEOUT, level);
- bd_buf->actual = TRUE;
- }
- bd_buf->in_progress = FALSE;
- }
- rtems_disk_release(dd);
+ /*
+ * Is the block we are interested in the cache ?
+ */
+ if ((bd->state == RTEMS_BDBUF_STATE_CACHED) ||
+ (bd->state == RTEMS_BDBUF_STATE_MODIFIED))
+ break;
- ENABLE_PREEMPTION(key);
+ bd->state = RTEMS_BDBUF_STATE_TRANSFER;
+ bd->error = 0;
- *bd = bd_buf;
-
- return RTEMS_SUCCESSFUL;
-}
-#else /* READ_MULTIPLE */
-rtems_status_code
-rtems_bdbuf_read(dev_t device,
- blkdev_bnum block,
- bdbuf_buffer **bd)
-{
- preemption_key key;
- ISR_Level level;
+ /*
+ * @todo The use of these req blocks is not a great design.
+ * The req is a struct with a single 'bufs' declared in the
+ * req struct and the others are added in the outer level
+ * struct. This relies on the structs joining as a single
+ * array and that assumes the compiler packs the structs.
+ * Why not just place on a list ? The BD has a node that
+ * can be used.
+ */
+ req->bufs[req->count].user = bd;
+ req->bufs[req->count].block = bd->block;
+ req->bufs[req->count].length = dd->block_size;
+ req->bufs[req->count].buffer = bd->buffer;
+ req->count++;
+ req->bufnum++;
+ }
- bdbuf_buffer *bd_buf,*first_bd_buf;
- rtems_status_code rc;
+ /*
+ * Transfer any requested buffers. If the request count is 0 we have found
+ * the block in the cache so return it.
+ */
+ if (req->count)
+ {
+ /*
+ * Unlock the pool. We have the buffer for the block and it will be in the
+ * access or transfer state. We may also have a number of read ahead blocks
+ * if we need to transfer data. At this point any other threads can gain
+ * access to the pool and if they are after any of the buffers we have they
+ * will block and be woken when the buffer is returned to the pool.
+ *
+ * If a transfer is needed the I/O operation will occur with pre-emption
+ * enabled and the pool unlocked. This is a change to the previous version
+ * of the bdbuf code.
+ */
int result;
- disk_device *dd;
- disk_device *pdd;
- blkdev_request_read_ahead req;
- read_ahead_bd_buf_group bd_buf_group;
- boolean find_more_buffers;
- int i;
-
- dd = rtems_disk_lookup(device);
- if (dd == NULL)
- return RTEMS_INVALID_ID;
-
- if (block >= dd->size)
+ int b;
+
+ rtems_bdbuf_unlock_pool (pool);
+
+ req->req = RTEMS_BLKDEV_REQ_READ;
+ req->req_done = rtems_bdbuf_read_done;
+ req->done_arg = req;
+ req->io_task = rtems_task_self ();
+ req->status = RTEMS_RESOURCE_IN_USE;
+ req->error = 0;
+ req->start = dd->start;
+
+ result = dd->ioctl (dd->phys_dev->dev, RTEMS_BLKIO_REQUEST, req);
+
+ /*
+ * Inspection of the DOS FS code shows the result from this function is
+ * handled and a buffer must be returned.
+ */
+ if (result < 0)
{
- rtems_disk_release(dd);
- return RTEMS_INVALID_NUMBER;
+ req->error = errno;
+ req->status = RTEMS_IO_ERROR;
+ }
+ else
+ {
+ rtems_status_code sc;
+ rtems_event_set out;
+ sc = rtems_event_receive (RTEMS_BDBUF_TRANSFER_SYNC,
+ RTEMS_EVENT_ALL | RTEMS_WAIT,
+ 0, &out);
+
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (BLKDEV_FATAL_BDBUF_SWAPOUT_RE);
}
-
- pdd = dd->phys_dev;
- block += dd->start;
- DISABLE_PREEMPTION(key);
- rc = find_or_assign_buffer(pdd, block, &first_bd_buf);
+ rtems_bdbuf_lock_pool (pool);
- if (rc != RTEMS_SUCCESSFUL)
+ for (b = 1; b < req->count; b++)
{
- ENABLE_PREEMPTION(key);
- rtems_disk_release(dd);
- return rc;
+ bd = req->bufs[b].user;
+ bd->error = req->error;
+ bd->state = RTEMS_BDBUF_STATE_READ_AHEAD;
+ rtems_bdbuf_release (bd);
}
- if (!first_bd_buf->actual)
- {
- bd_buf_group.bd_bufs[0] = first_bd_buf;
- bd_buf_group.cnt = 1;
+ bd = req->bufs[0].user;
+ }
- first_bd_buf->in_progress = TRUE;
+ /*
+ * The data for this block is cached in the buffer.
+ */
+ if (bd->state == RTEMS_BDBUF_STATE_MODIFIED)
+ bd->state = RTEMS_BDBUF_STATE_ACCESS_MODIFIED;
+ else
+ bd->state = RTEMS_BDBUF_STATE_ACCESS;
- req.req.req = BLKDEV_REQ_READ;
- req.req.req_done = bdbuf_read_transfer_done;
- req.req.done_arg = &bd_buf_group;
- req.req.start = block;
- req.req.count = 1;
- req.req.bufnum = 1;
- req.req.bufs[0].length = dd->block_size;
- req.req.bufs[0].buffer = first_bd_buf->buffer;
-
- bdbuf_initialize_transfer_sema(first_bd_buf);
- /*
- * FIXME: check for following blocks to be:
- * - still in range of partition size
- * - not yet assigned
- * - buffer available
- * allocate for read call, if possible
- */
- find_more_buffers = TRUE;
- while (find_more_buffers) {
- block++;
- /*
- * still bd_buf_group entries free and
- * still in range of this disk?
- */
- if ((bd_buf_group.cnt >= READ_AHEAD_MAX_BLK_CNT) ||
- (block >= dd->size)) {
- find_more_buffers = FALSE;
- }
- if (find_more_buffers) {
- rc = find_or_assign_buffer(pdd, block, &bd_buf);
- if (rc != RTEMS_SUCCESSFUL) {
- find_more_buffers = FALSE;
- }
- else if (bd_buf->actual) {
- find_more_buffers = FALSE;
- bdbuf_release(bd_buf);
- }
- }
- if (find_more_buffers) {
- bdbuf_initialize_transfer_sema(bd_buf);
- bd_buf->in_progress = TRUE;
-
- req.req.bufs[req.req.count].length = dd->block_size;
- req.req.bufs[req.req.count].buffer = bd_buf->buffer;
- req.req.count++;
- req.req.bufnum++;
- bd_buf_group.bd_bufs[bd_buf_group.cnt] = bd_buf;
- bd_buf_group.cnt++;
- }
- }
-
- /* do the actual read call here
- */
- result = dd->ioctl(pdd->dev, BLKIO_REQUEST, &req);
-
- /*
- * cleanup:
- * wait, until all bd_bufs are processed
- * set status in all bd_bufs
- */
- for (i = 0;i < bd_buf_group.cnt;i++) {
- bd_buf = bd_buf_group.bd_bufs[i];
- if (result == -1)
- {
- bd_buf->status = RTEMS_IO_ERROR;
- bd_buf->error = errno;
- bd_buf->actual = FALSE;
- }
- else
- {
- rtems_interrupt_disable(level);
- _CORE_mutex_Seize(&bd_buf->transfer_sema, 0, TRUE,
- WATCHDOG_NO_TIMEOUT, level);
- bd_buf->actual = TRUE;
- }
- bd_buf->in_progress = FALSE;
- /* release any pre-read buffers */
- if (i > 0) {
- bdbuf_release(bd_buf);
- }
- }
- }
- rtems_disk_release(dd);
-
- ENABLE_PREEMPTION(key);
+ rtems_bdbuf_unlock_pool (pool);
+ rtems_disk_release (dd);
- *bd = first_bd_buf;
-
- return RTEMS_SUCCESSFUL;
-}
-#endif /* READ_MULTIPLE */
+ *bdp = bd;
+ return RTEMS_SUCCESSFUL;
+}
-/* bdbuf_release --
- * Release buffer. Decrease buffer usage counter. If it is zero, further
- * processing depends on modified attribute. If buffer was modified, it
- * is inserted into mod chain and swapout task waken up. If buffer was
- * not modified, it is returned to the end of lru chain making it available
- * for further use.
+/**
+ * Release a buffer that has been in use. The buffer could have been in the
+ * access state and so with a user of the cache, or it was being transferred
+ * to or from the disk media and so with a driver. Wake any tasks waiting on
+ * the buffer. If no task is waiting on the buffer and this is the only buffer
+ * on the LRU or ready list, wake any tasks waiting for a buffer.
*
- * PARAMETERS:
- * bd_buf - pointer to the released buffer descriptor.
- *
- * RETURNS:
- * RTEMS_SUCCESSFUL if buffer released successfully, or error code if
- * error occured.
+ * If the buffer has been modified use the modified release call.
*
- * NOTE:
- * This is internal function. It is assumed that task made non-preemptive
- * before its invocation.
+ * @param bd The buffer to return to the pool.
+ * @retval RTEMS_SUCCESSFUL This operation always succeeds.
*/
-static rtems_status_code
-bdbuf_release(bdbuf_buffer *bd_buf)
+
+rtems_status_code
+rtems_bdbuf_release (rtems_bdbuf_buffer* bd)
{
- bdbuf_pool *bd_pool;
- rtems_status_code rc = RTEMS_SUCCESSFUL;
+ rtems_bdbuf_pool* pool;
- if (bd_buf->use_count <= 0)
- return RTEMS_INTERNAL_ERROR;
+ if (bd == NULL)
+ return RTEMS_INVALID_ADDRESS;
- bd_pool = rtems_bdbuf_ctx.pool + bd_buf->pool;
+ pool = rtems_bdbuf_get_pool (bd->pool);
- bd_buf->use_count--;
+ rtems_bdbuf_lock_pool (pool);
- if (bd_buf->use_count == 0)
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("release: %d\n", bd->block);
+#endif
+
+ if (bd->state == RTEMS_BDBUF_STATE_ACCESS_MODIFIED)
+ {
+ rtems_bdbuf_append_modified (pool, bd);
+ }
+ else
+ {
+ /*
+     * If this is a read ahead buffer place it on the ready queue. Buffers are
+     * taken from the front of this queue first, so by prepending, the buffers
+     * furthest from the read buffer will be reused first.
+ */
+ if (bd->state == RTEMS_BDBUF_STATE_READ_AHEAD)
+ rtems_chain_prepend (&pool->ready, &bd->link);
+ else
{
- if (bd_buf->modified)
- {
-
- /* Buffer was modified. Insert buffer to the modified buffers
- * list and initiate flushing. */
- rtems_chain_append(&rtems_bdbuf_ctx.mod, &bd_buf->link);
-
- /* Release the flush_sema */
- rc = rtems_semaphore_release(rtems_bdbuf_ctx.flush_sema);
- }
- else
- {
- /* Buffer was not modified. Add this descriptor to the
- * end of lru chain and make it available for reuse. */
- rtems_chain_append(&bd_pool->lru, &bd_buf->link);
- rc = rtems_semaphore_release(bd_pool->bufget_sema);
- }
+ bd->state = RTEMS_BDBUF_STATE_CACHED;
+ rtems_chain_append (&pool->lru, &bd->link);
}
- return rc;
-}
+ }
+
+ /*
+ * If there are threads waiting to access the buffer wake them. Wake any
+   * waiters if this is the first buffer to be placed back onto the queue.
+ */
+ if (bd->waiters)
+ rtems_bdbuf_wake (pool->access, &pool->access_waiters);
+ else
+ {
+ if (bd->state == RTEMS_BDBUF_STATE_READ_AHEAD)
+ {
+ if (rtems_chain_has_only_one_node (&pool->ready))
+ rtems_bdbuf_wake (pool->waiting, &pool->wait_waiters);
+ }
+ else
+ {
+ if (rtems_chain_has_only_one_node (&pool->lru))
+ rtems_bdbuf_wake (pool->waiting, &pool->wait_waiters);
+ }
+ }
+
+ rtems_bdbuf_unlock_pool (pool);
+ return RTEMS_SUCCESSFUL;
+}
-/* rtems_bdbuf_release --
- * Release buffer allocated before. This primitive decrease the
- * usage counter. If it is zero, further destiny of buffer depends on
- * 'modified' status. If buffer was modified, it is placed to the end of
- * mod list and flush task waken up. If buffer was not modified,
- * it is placed to the end of lru list, and bufget_sema released, allowing
- * to reuse this buffer.
- *
- * PARAMETERS:
- * bd_buf - pointer to the bdbuf_buffer structure previously obtained using
- * get/read primitive.
+/**
+ * Release a buffer that has been in use and has been modified. The buffer
+ * could have been in the access state and so with a user of the cache, or it
+ * was being transferred to or from the disk media and so with a driver. Wake
+ * any tasks waiting on the buffer. If no task is waiting on the buffer and it
+ * is the only buffer on the LRU list, wake any tasks waiting for a buffer.
*
- * RETURNS:
- * RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
- * or error code if error is occured)
+ * The buffer is marked as modified and queued on the modified list for the
+ * swap out task to write to disk.
*
- * SIDE EFFECTS:
- * flush_sema and bufget_sema semaphores may be released by this primitive.
+ * @param bd The buffer to return to the pool.
+ * @retval RTEMS_SUCCESSFUL This operation always succeeds.
*/
+
rtems_status_code
-rtems_bdbuf_release(bdbuf_buffer *bd_buf)
+rtems_bdbuf_release_modified (rtems_bdbuf_buffer* bd)
{
- preemption_key key;
- rtems_status_code rc = RTEMS_SUCCESSFUL;
+ rtems_bdbuf_pool* pool;
- if (bd_buf == NULL)
- return RTEMS_INVALID_ADDRESS;
+ if (bd == NULL)
+ return RTEMS_INVALID_ADDRESS;
- DISABLE_PREEMPTION(key);
+ pool = rtems_bdbuf_get_pool (bd->pool);
- rc = bdbuf_release(bd_buf);
+ rtems_bdbuf_lock_pool (pool);
- ENABLE_PREEMPTION(key);
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("release modified: %d\n", bd->block);
+#endif
- return rc;
+ bd->hold_timer = rtems_bdbuf_configuration.swap_block_hold;
+
+ rtems_bdbuf_append_modified (pool, bd);
+
+ if (bd->waiters)
+ rtems_bdbuf_wake (pool->access, &pool->access_waiters);
+
+ rtems_bdbuf_unlock_pool (pool);
+
+ return RTEMS_SUCCESSFUL;
}
-/* rtems_bdbuf_release_modified --
- * Release buffer allocated before, assuming that it is _modified_ by
- * it's owner. This primitive decrease usage counter for buffer, mark
- * buffer descriptor as modified. If usage counter is 0, insert it at
- * end of mod chain and release flush_sema semaphore to activate the
- * flush task.
- *
- * PARAMETERS:
- * bd_buf - pointer to the bdbuf_buffer structure previously obtained using
- * get/read primitive.
+/**
+ * Wait until the specified buffer is synchronized with the disk. Invoked on
+ * exchanges critical for data consistency on the media. The buffer is placed
+ * on the sync
+ * list and the swapper is woken. No sync lock is taken as the buffers on the
+ * sync list are taken first and passed to the driver before buffers on the
+ * modified list.
*
- * RETURNS:
- * RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
- * or error code if error is occured)
+ * @note This code does not lock the sync mutex and stop additions to the
+ * modified queue. This means the buffer could be written and then
+ *       returned to the modified list, but that does not happen because the
+ *       buffer's state is sync.
*
- * SIDE EFFECTS:
- * flush_sema semaphore may be released by this primitive.
+ * @param bd Pointer to the bdbuf_buffer structure previously obtained using
+ * get/read primitive.
+ * @retval RTEMS_SUCCESSFUL Always returned.
*/
+
rtems_status_code
-rtems_bdbuf_release_modified(bdbuf_buffer *bd_buf)
+rtems_bdbuf_sync (rtems_bdbuf_buffer* bd)
{
- preemption_key key;
- rtems_status_code rc = RTEMS_SUCCESSFUL;
+ rtems_bdbuf_pool* pool;
+ boolean available;
+
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("sync: %d\n", bd->block);
+#endif
+
+ if (bd == NULL)
+ return RTEMS_INVALID_ADDRESS;
- if (bd_buf == NULL)
- return RTEMS_INVALID_ADDRESS;
+ pool = rtems_bdbuf_get_pool (bd->pool);
- DISABLE_PREEMPTION(key);
+ rtems_bdbuf_lock_pool (pool);
- if (!bd_buf->modified)
+ bd->state = RTEMS_BDBUF_STATE_SYNC;
+
+ rtems_chain_append (&pool->sync, &bd->link);
+
+ rtems_bdbuf_wake_swapper ();
+
+ available = FALSE;
+ while (!available)
+ {
+ switch (bd->state)
{
- bdbuf_initialize_transfer_sema(bd_buf);
+ case RTEMS_BDBUF_STATE_CACHED:
+ case RTEMS_BDBUF_STATE_READ_AHEAD:
+ case RTEMS_BDBUF_STATE_MODIFIED:
+ case RTEMS_BDBUF_STATE_ACCESS:
+ case RTEMS_BDBUF_STATE_ACCESS_MODIFIED:
+ available = TRUE;
+ break;
+
+ case RTEMS_BDBUF_STATE_SYNC:
+ case RTEMS_BDBUF_STATE_TRANSFER:
+ bd->waiters++;
+ rtems_bdbuf_wait (pool, &pool->transfer, &pool->transfer_waiters);
+ bd->waiters--;
+ break;
+
+ default:
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_CONSISTENCY);
}
- bd_buf->modified = TRUE;
- bd_buf->actual = TRUE;
- rc = bdbuf_release(bd_buf);
-
- ENABLE_PREEMPTION(key);
+ }
- return rc;
+ rtems_bdbuf_unlock_pool (pool);
+
+ return RTEMS_SUCCESSFUL;
}
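
A companion sketch for the read/sync path above, for a block whose contents must reach the media before the caller continues. Illustrative only: update_block is a hypothetical callback, and the assumption that no separate release is needed after rtems_bdbuf_sync() follows from the code above handing the buffer to the swap out task.

    #include <rtems/bdbuf.h>

    rtems_status_code
    update_and_sync (dev_t device, rtems_blkdev_bnum block,
                     void (*update_block) (void* data))
    {
      rtems_bdbuf_buffer* bd;
      rtems_status_code   sc;

      sc = rtems_bdbuf_read (device, block, &bd); /* blocks until data is cached */
      if (sc != RTEMS_SUCCESSFUL)
        return sc;

      update_block (bd->buffer);                  /* modify the cached data */

      return rtems_bdbuf_sync (bd);               /* force the block to disk */
    }
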
-/* rtems_bdbuf_sync --
- * Wait until specified buffer synchronized with disk. Invoked on exchanges
- * critical for data consistency on the media. This primitive mark owned
- * block as modified, decrease usage counter. If usage counter is 0,
- * block inserted to the mod chain and flush_sema semaphore released.
- * Finally, primitives blocked on transfer_sema semaphore.
+/* rtems_bdbuf_syncdev --
+ * Synchronize with disk all buffers containing the blocks belonging to
+ * specified device.
*
* PARAMETERS:
- * bd_buf - pointer to the bdbuf_buffer structure previously obtained using
- * get/read primitive.
+ * dev - block device number
*
* RETURNS:
* RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
* or error code if error is occured)
- *
- * SIDE EFFECTS:
- * Primitive may be blocked on transfer_sema semaphore.
*/
rtems_status_code
-rtems_bdbuf_sync(bdbuf_buffer *bd_buf)
+rtems_bdbuf_syncdev (dev_t dev)
{
- preemption_key key;
- ISR_Level level;
- rtems_status_code rc = RTEMS_SUCCESSFUL;
+ rtems_disk_device* dd;
+ rtems_bdbuf_pool* pool;
+ rtems_status_code sc;
+ rtems_event_set out;
- if (bd_buf == NULL)
- return RTEMS_INVALID_ADDRESS;
-
- DISABLE_PREEMPTION(key);
+#if RTEMS_BDBUF_TRACE
+ rtems_bdbuf_printf ("syncdev: %08x\n", dev);
+#endif
- if (!bd_buf->modified)
- {
- bdbuf_initialize_transfer_sema(bd_buf);
- }
- bd_buf->modified = TRUE;
- bd_buf->actual = TRUE;
+ /*
+ * Do not hold the pool lock when obtaining the disk table.
+ */
+ dd = rtems_disk_obtain (dev);
+ if (dd == NULL)
+ return RTEMS_INVALID_ID;
+
+ pool = rtems_bdbuf_get_pool (dd->pool);
+
+ /*
+ * Take the sync lock before locking the pool. Once we have the sync lock
+   * we can lock the pool. If another thread has the sync lock this thread
+   * will block until it owns the sync lock, after which it can lock the
+ * pool. The sync lock can only be obtained with the pool unlocked.
+ */
+
+ rtems_bdbuf_lock_sync (pool);
+ rtems_bdbuf_lock_pool (pool);
- rc = bdbuf_release(bd_buf);
+ pool->sync_active = TRUE;
+ pool->sync_requester = rtems_task_self ();
+ pool->sync_device = dev;
+
+ rtems_bdbuf_wake_swapper ();
+ rtems_bdbuf_unlock_pool (pool);
+
+ sc = rtems_event_receive (RTEMS_BDBUF_TRANSFER_SYNC,
+ RTEMS_EVENT_ALL | RTEMS_WAIT,
+ 0, &out);
- if (rc == RTEMS_SUCCESSFUL)
- {
- rtems_interrupt_disable(level);
- _CORE_mutex_Seize(&bd_buf->transfer_sema, 0, TRUE,
- WATCHDOG_NO_TIMEOUT, level);
- }
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (BLKDEV_FATAL_BDBUF_SWAPOUT_RE);
+
+ rtems_bdbuf_lock_pool (pool);
- ENABLE_PREEMPTION(key);
+ pool->sync_active = FALSE;
- return rc;
+ rtems_bdbuf_unlock_sync (pool);
+ rtems_bdbuf_unlock_pool (pool);
+
+ return rtems_disk_release(dd);
}
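The generic block device layer uses this call to implement the RTEMS_BLKIO_SYNCDEV ioctl (see blkdev.c below). A hedged sketch of flushing a device directly, assuming dev names a registered disk; the helper name is illustrative:

    #include <rtems/bdbuf.h>
    #include <rtems/diskdevs.h>

    /* Hypothetical helper: flush all modified buffers belonging to dev. */
    static rtems_status_code
    flush_device (dev_t dev)
    {
      rtems_disk_device* dd = rtems_disk_obtain (dev);
      rtems_status_code  sc;

      if (dd == NULL)
        return RTEMS_INVALID_ID;

      /* Wakes the swapper task and waits for the transfer complete event. */
      sc = rtems_bdbuf_syncdev (dd->dev);

      rtems_disk_release (dd);
      return sc;
    }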
-/* rtems_bdbuf_syncdev --
- * Synchronize with disk all buffers containing the blocks belonging to
- * specified device.
+/* bdbuf_write_transfer_done --
+ * Callout function. Invoked by block device driver when data transfer
+ * to device (write) is completed. This function may be invoked from
+ * interrupt handler.
*
* PARAMETERS:
- * dev - block device number
+ * arg - arbitrary argument specified in block device request
+ * structure (in this case - pointer to the appropriate
+ * bdbuf_buffer buffer descriptor structure).
+ * status - I/O completion status
+ * error - errno error code if status != RTEMS_SUCCESSFUL
*
* RETURNS:
- * RTEMS status code (RTEMS_SUCCESSFUL if operation completed successfully
- * or error code if error is occured)
+ * none
*/
-rtems_status_code
-rtems_bdbuf_syncdev(dev_t dev)
+static void
+rtems_bdbuf_write_done(void *arg, rtems_status_code status, int error)
+{
+ rtems_blkdev_request* req = (rtems_blkdev_request*) arg;
+
+ req->error = error;
+ req->status = status;
+
+ rtems_event_send (req->io_task, RTEMS_BDBUF_TRANSFER_SYNC);
+}
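For reference, a sketch of the driver side of this callout. Once a queued RTEMS_BLKIO_REQUEST has completed, the driver reports the result through the request's req_done/done_arg fields; the function name below is hypothetical:

    /* Hypothetical driver completion path for a finished transfer. */
    static void
    example_driver_complete (rtems_blkdev_request* req,
                             rtems_status_code     status,
                             int                   error)
    {
      /* Invokes rtems_bdbuf_write_done above, which records the status and
       * error then sends RTEMS_BDBUF_TRANSFER_SYNC to the waiting I/O task. */
      req->req_done (req->done_arg, status, error);
    }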
+
+/**
+ * Process the modified list of buffers. We can have a sync or modified
+ * list that needs to be handled.
+ */
+static void
+rtems_bdbuf_swapout_modified_processing (rtems_bdpool_id pid,
+ dev_t* dev,
+ rtems_chain_control* chain,
+ rtems_chain_control* transfer,
+ boolean sync_active,
+ boolean update_timers,
+ uint32_t timer_delta)
{
- preemption_key key;
- ISR_Level level;
+ if (!rtems_chain_is_empty (chain))
+ {
+ rtems_chain_node* node = rtems_chain_head (chain);
+ node = node->next;
+
+ while (!rtems_chain_is_tail (chain, node))
+ {
+ rtems_bdbuf_buffer* bd = (rtems_bdbuf_buffer*) node;
+
+ if (bd->pool == pid)
+ {
+ /*
+ * Check if the buffer's hold timer has reached 0. If a sync
+ * is active force all the timers to 0.
+ *
+ * @note Lots of sync requests will skew this timer. It should
+           *       be based on TOD to be accurate. Does it matter?
+ */
+ if (sync_active)
+ bd->hold_timer = 0;
+
+ if (bd->hold_timer)
+ {
+ if (update_timers)
+ {
+ if (bd->hold_timer > timer_delta)
+ bd->hold_timer -= timer_delta;
+ else
+ bd->hold_timer = 0;
+ }
+
+ if (bd->hold_timer)
+ {
+ node = node->next;
+ continue;
+ }
+ }
- bdbuf_buffer *bd_buf;
- disk_device *dd;
- bdbuf_pool *pool;
+ /*
+           * This assumes we can set a dev_t to -1 which is just an
+           * assumption. We cannot use an empty transfer list as the
+           * test because the sync dev call sets the dev to use.
+ */
+ if (*dev == -1)
+ *dev = bd->dev;
- dd = rtems_disk_lookup(dev);
+ if (bd->dev == *dev)
+ {
+ rtems_chain_node* next_node = node->next;
+ rtems_chain_extract (node);
+ rtems_chain_append (transfer, node);
+ node = next_node;
+ bd->state = RTEMS_BDBUF_STATE_TRANSFER;
+ }
+ else
+ {
+ node = node->next;
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Process the pool.
+ */
+static boolean
+rtems_bdbuf_swapout_pool_processing (rtems_bdpool_id pid,
+ unsigned long timer_delta,
+ boolean update_timers,
+ rtems_blkdev_request* write_req)
+{
+ rtems_bdbuf_pool* pool = rtems_bdbuf_get_pool (pid);
+ rtems_chain_control transfer;
+ dev_t dev = -1;
+ rtems_disk_device* dd;
+ boolean result = TRUE;
+
+ rtems_chain_initialize_empty (&transfer);
+
+ rtems_bdbuf_lock_pool (pool);
+
+ if (pool->sync_active)
+ dev = pool->sync_device;
+
+#if 1
+ /*
+   * If we have any buffers in the sync queue move them to the
+ * modified list. The first sync buffer will select the
+ * device we use.
+ */
+ rtems_bdbuf_swapout_modified_processing (pid, &dev,
+ &pool->sync, &transfer,
+ TRUE, FALSE,
+ timer_delta);
+
+ /*
+ * Process the pool's modified list.
+ */
+ rtems_bdbuf_swapout_modified_processing (pid, &dev,
+ &pool->modified, &transfer,
+ pool->sync_active,
+ update_timers,
+ timer_delta);
+
+ /*
+ * We have all the buffers that have been modified for this device so
+ * the pool can be unlocked because the state is set to TRANSFER.
+ */
+#endif
+ rtems_bdbuf_unlock_pool (pool);
+
+ /*
+   * If there are buffers to transfer to the media transfer them.
+ */
+ if (rtems_chain_is_empty (&transfer))
+ result = FALSE;
+ else
+ {
+ /*
+     * Obtain the disk device. Release the pool mutex to avoid a
+     * deadlock.
+ */
+ dd = rtems_disk_obtain (dev);
if (dd == NULL)
- return RTEMS_INVALID_ID;
+ result = FALSE;
+ else
+ {
+ /*
+ * The last block number used when the driver only supports
+ * continuous blocks in a single request.
+ */
+ uint32_t last_block = 0;
+
+ /*
+       * Take as many buffers as configured and pass them to the driver. Note,
+ * the API to the drivers has the array of buffers and if a chain was
+ * passed we could have just passed the list. If the driver API is
+ * updated it should be possible to make this change with little effect
+ * in this code. The array that is passed is broken in design and
+       * should be removed. Merging two members of a struct into the first
+ * member is trouble waiting to happen.
+ */
+
+ write_req->status = RTEMS_RESOURCE_IN_USE;
+ write_req->start = dd->start;
+ write_req->error = 0;
+ write_req->count = 0;
+ write_req->bufnum = 0;
+
+ while (!rtems_chain_is_empty (&transfer))
+ {
+ rtems_bdbuf_buffer* bd =
+ (rtems_bdbuf_buffer*) rtems_chain_get (&transfer);
+
+ boolean write = FALSE;
+
+ /*
+ * If the device only accepts sequential buffers and
+ * this is not the first buffer (the first is always
+         * sequential), and the buffer is not sequential, then
+ * put the buffer back on the transfer chain and
+ * write the committed buffers.
+ */
+
+ if ((dd->capabilities & RTEMS_BLKDEV_CAP_MULTISECTOR_CONT) &&
+ write_req->count &&
+ (bd->block != (last_block + 1)))
+ {
+ rtems_chain_prepend (&transfer, &bd->link);
+ write = TRUE;
+ }
+ else
+ {
+ write_req->bufs[write_req->count].user = bd;
+ write_req->bufs[write_req->count].block = bd->block;
+ write_req->bufs[write_req->count].length = dd->block_size;
+ write_req->bufs[write_req->count].buffer = bd->buffer;
+ write_req->count++;
+ write_req->bufnum++;
+ last_block = bd->block;
+ }
- pool = rtems_bdbuf_ctx.pool + dd->pool;
+ /*
+ * Perform the transfer if there are no more buffers, or the
+ * transfer size has reached the configured max. value.
+ */
- DISABLE_PREEMPTION(key);
- do {
- bd_buf = avl_search_for_sync(&pool->tree, dd);
- if (bd_buf != NULL /* && bd_buf->modified */)
+ if (rtems_chain_is_empty (&transfer) ||
+ (write_req->count >= rtems_bdbuf_configuration.max_write_blocks))
+ write = TRUE;
+
+ if (write)
{
- rtems_interrupt_disable(level);
- _CORE_mutex_Seize(&bd_buf->transfer_sema, 0, TRUE,
- WATCHDOG_NO_TIMEOUT, level);
+ int result;
+ int b;
+
+ /*
+ * Perform the transfer. No pool locks, no preemption, only the
+ * disk device is being held.
+ */
+ result = dd->ioctl (dd->phys_dev->dev,
+ RTEMS_BLKIO_REQUEST, write_req);
+
+ if (result < 0)
+ {
+ rtems_bdbuf_lock_pool (pool);
+
+ for (b = 0; b < write_req->count; b++)
+ {
+ bd = write_req->bufs[b].user;
+ bd->state = RTEMS_BDBUF_STATE_MODIFIED;
+ bd->error = errno;
+
+ /*
+               * Place back on the pool's modified queue and try again.
+ *
+ * @warning Not sure this is the best option but I do
+ * not know what else can be done.
+ */
+ rtems_chain_append (&pool->modified, &bd->link);
+ }
+ }
+ else
+ {
+ rtems_status_code sc = 0;
+ rtems_event_set out;
+
+ sc = rtems_event_receive (RTEMS_BDBUF_TRANSFER_SYNC,
+ RTEMS_EVENT_ALL | RTEMS_WAIT,
+ 0, &out);
+
+ if (sc != RTEMS_SUCCESSFUL)
+ rtems_fatal_error_occurred (BLKDEV_FATAL_BDBUF_SWAPOUT_RE);
+
+ rtems_bdbuf_lock_pool (pool);
+
+ for (b = 0; b < write_req->count; b++)
+ {
+ bd = write_req->bufs[b].user;
+ bd->state = RTEMS_BDBUF_STATE_CACHED;
+ bd->error = 0;
+
+ rtems_chain_append (&pool->lru, &bd->link);
+
+ if (bd->waiters)
+ rtems_bdbuf_wake (pool->transfer, &pool->transfer_waiters);
+ else
+ {
+ if (rtems_chain_has_only_one_node (&pool->lru))
+ rtems_bdbuf_wake (pool->waiting, &pool->wait_waiters);
+ }
+ }
+ }
+
+ rtems_bdbuf_unlock_pool (pool);
+
+ write_req->status = RTEMS_RESOURCE_IN_USE;
+ write_req->error = 0;
+ write_req->count = 0;
+ write_req->bufnum = 0;
}
- } while (bd_buf != NULL);
- ENABLE_PREEMPTION(key);
- return rtems_disk_release(dd);
+ }
+
+ rtems_disk_release (dd);
+ }
+ }
+
+ if (pool->sync_active)
+ rtems_event_send (pool->sync_requester, RTEMS_BDBUF_TRANSFER_SYNC);
+
+ return result;
}
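The sequential-block test above depends on the driver advertising RTEMS_BLKDEV_CAP_MULTISECTOR_CONT. A hedged sketch of how a driver ioctl handler could answer the new RTEMS_BLKDEV_CAPABILITIES request, assuming argp points to the capabilities word as used by rtems_disk_create_phys below; the handler name is illustrative:

    #include <errno.h>
    #include <rtems/blkdev.h>

    static int
    example_driver_ioctl (dev_t dev, uint32_t req, void* argp)
    {
      switch (req)
      {
        case RTEMS_BLKDEV_CAPABILITIES:
          /* This driver only accepts contiguous blocks in a single request. */
          *((uint32_t*) argp) = RTEMS_BLKDEV_CAP_MULTISECTOR_CONT;
          return 0;

        default:
          errno = EBADRQC;
          return -1;
      }
    }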
-/* bdbuf_swapout_task --
- * Body of task which take care on flushing modified buffers to the
- * disk.
+/**
+ * Body of the task which takes care of flushing modified buffers to the disk.
*/
static rtems_task
-bdbuf_swapout_task(rtems_task_argument unused)
+rtems_bdbuf_swapout_task (rtems_task_argument arg)
{
- rtems_status_code rc;
- int result;
- int i;
- ISR_Level level;
- bdbuf_buffer *bd_buf;
- bdbuf_buffer *nxt_bd_buf;
- bdbuf_pool *bd_pool = NULL;
- disk_device *dd = NULL;
- struct {
- blkdev_request req;
- blkdev_sg_buffer sg[SWAP_OUT_MAX_BLK_CNT];
- } req;
- write_tfer_done_arg_t write_tfer_done_arg;
+ rtems_bdbuf_context* context = (rtems_bdbuf_context*) arg;
+ rtems_blkdev_request* write_req;
+ uint32_t period_in_ticks;
+ const uint32_t period_in_msecs = rtems_bdbuf_configuration.swapout_period;
+ uint32_t timer_delta;
+ rtems_status_code sc;
+
+ /*
+ * @note chrisj The rtems_blkdev_request and the array at the end is a hack.
+   * I am disappointed at finding code like this in RTEMS. The request should
+ * have been a rtems_chain_control. Simple, fast and less storage as the node
+ * is already part of the buffer structure.
+ */
+ write_req =
+ malloc (sizeof (rtems_blkdev_request) +
+ (rtems_bdbuf_configuration.max_write_blocks *
+ sizeof (rtems_blkdev_sg_buffer)));
+
+ if (!write_req)
+ rtems_fatal_error_occurred (RTEMS_BLKDEV_FATAL_BDBUF_SO_NOMEM);
+
+ write_req->req = RTEMS_BLKDEV_REQ_WRITE;
+ write_req->req_done = rtems_bdbuf_write_done;
+ write_req->done_arg = write_req;
+ write_req->io_task = rtems_task_self ();
+
+ period_in_ticks = TOD_MICROSECONDS_TO_TICKS (period_in_msecs * 1000);
+
+ /*
+ * This is temporary. Needs to be changed to use the real clock.
+ */
+ timer_delta = period_in_msecs;
+
+ while (context->swapout_enabled)
+ {
+ rtems_event_set out;
/*
- * provide info needed for write_transfer_done function
+ * Only update the timers once in the processing cycle.
*/
- write_tfer_done_arg.req = (blkdev_request *)&req.req;
- write_tfer_done_arg.write_store = bd_buf_write_store;
- nxt_bd_buf = NULL;
- while (1)
+ boolean update_timers = TRUE;
+
+ /*
+ * If we write buffers to any disk perform a check again. We only
+ * write a single device at a time and a pool may have more than
+     * one device's buffers modified and waiting to be written.
+ */
+ boolean transfered_buffers;
+
+ do
{
- req.req.req = BLKDEV_REQ_WRITE;
- req.req.req_done = bdbuf_write_transfer_done;
- req.req.done_arg = &write_tfer_done_arg;
- req.req.count = 0;
- req.req.bufnum = 0;
- bd_buf = NULL;
- do {
- /*
- * if a buffer was left over from last loop, then use this buffer
- * otherwise fetch new buffer from chain.
- * Wait for buffer, if this is the first one of the request,
- * otherwise do not wait, if no buffer available
- */
- if (nxt_bd_buf == NULL) {
- rc = rtems_semaphore_obtain(rtems_bdbuf_ctx.flush_sema,
- (req.req.count == 0)
- ? RTEMS_WAIT
- : RTEMS_NO_WAIT,
- 0);
- if (rc == RTEMS_SUCCESSFUL) {
- nxt_bd_buf = (bdbuf_buffer *)rtems_chain_get(&rtems_bdbuf_ctx.mod);
- if (nxt_bd_buf != NULL) {
- nxt_bd_buf->in_progress = TRUE;
- /* IMD try: clear "modified" bit early */
- /* (and not in bdbuf_write_transfer_done) to allow */
- /* another modification during write processing */
- nxt_bd_buf->modified = FALSE;
-
- nxt_bd_buf->use_count++;
- }
- }
- else if ((rc != RTEMS_UNSATISFIED) &&
- (rc != RTEMS_TIMEOUT)) {
- rtems_fatal_error_occurred(BLKDEV_FATAL_BDBUF_SWAPOUT);
- }
- }
- /*
- * It is possible that flush_sema semaphore will be released, but
- * buffer to be removed from mod chain before swapout task start
- * its processing.
- */
- if ((req.req.count == 0) || /* first bd_buf for this request */
- ((nxt_bd_buf != NULL) &&
- (nxt_bd_buf->dev == bd_buf->dev) && /* same device */
- (nxt_bd_buf->block == bd_buf->block+1))) {/* next block */
- bd_buf = nxt_bd_buf;
- nxt_bd_buf = NULL;
- }
- else {
- bd_buf = NULL;
- }
- /*
- * here we have three possible states:
- * bd_buf == NULL, nxt_bd_buf == NULL: no further block available
- * bd_buf != NULL, nxt_bd_buf == NULL: append bd_buf to request
- * bd_buf == NULL, nxt_bd_buf != NULL: nxt_bd_buf canot be appended
- * to current request, keep it
- * for next main loop
- */
- if (bd_buf != NULL) {
- bd_pool = rtems_bdbuf_ctx.pool + bd_buf->pool;
- if (req.req.count == 0) {
- /*
- * this is the first block, so use its address
- */
- dd = rtems_disk_lookup(bd_buf->dev);
- req.req.start = bd_buf->block + dd->start;
- }
- req.req.bufs[req.req.bufnum].length = dd->block_size;
- req.req.bufs[req.req.bufnum].buffer = bd_buf->buffer;
- /*
- * keep bd_buf for postprocessing
- */
- bd_buf_write_store[req.req.bufnum] = bd_buf;
- req.req.count++;
- req.req.bufnum++;
- }
- } while ((bd_buf != NULL) &&
- (req.req.count < SWAP_OUT_MAX_BLK_CNT));
-
- /* transfer_sema initialized when bd_buf inserted in the mod chain
- first time */
- result = dd->ioctl(dd->phys_dev->dev, BLKIO_REQUEST, &req);
-
- rtems_disk_release(dd);
-
- for (i = 0;i < req.req.count;i++) {
- bd_buf = bd_buf_write_store[i];
- if (result == -1)
- {
-
- bd_buf->status = RTEMS_IO_ERROR;
- bd_buf->error = errno;
- /* Release tasks waiting on syncing this buffer */
- _CORE_mutex_Flush(&bd_buf->transfer_sema, NULL,
- CORE_MUTEX_STATUS_SUCCESSFUL);
- }
- else
- {
- if (bd_buf->in_progress)
- {
- rtems_interrupt_disable(level);
- _CORE_mutex_Seize(&bd_buf->transfer_sema, 0, TRUE, 0, level);
- }
- }
- bd_buf->use_count--;
-
- /* Another task have chance to use this buffer, or even
- * modify it. If buffer is not in use, insert it in appropriate chain
- * and release semaphore */
- if (bd_buf->use_count == 0)
- {
- if (bd_buf->modified)
- {
- rtems_chain_append(&rtems_bdbuf_ctx.mod, &bd_buf->link);
- rc = rtems_semaphore_release(rtems_bdbuf_ctx.flush_sema);
- }
- else
- {
- rtems_chain_append(&bd_pool->lru, &bd_buf->link);
- rc = rtems_semaphore_release(bd_pool->bufget_sema);
- }
- }
+ rtems_bdpool_id pid;
+
+ transfered_buffers = FALSE;
+
+ /*
+       * Loop over each pool extracting all the buffers we find for a specific
+ * device. The device is the first one we find on a modified list of a
+ * pool. Process the sync queue of buffers first.
+ */
+ for (pid = 0; pid < context->npools; pid++)
+ {
+ if (rtems_bdbuf_swapout_pool_processing (pid,
+ timer_delta,
+ update_timers,
+ write_req))
+ {
+ transfered_buffers = TRUE;
}
+ }
+
+ /*
+ * Only update the timers once.
+ */
+ update_timers = FALSE;
}
+ while (transfered_buffers);
+
+ sc = rtems_event_receive (RTEMS_BDBUF_SWAPOUT_SYNC,
+ RTEMS_EVENT_ALL | RTEMS_WAIT,
+ period_in_ticks,
+ &out);
+
+ if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
+ rtems_fatal_error_occurred (BLKDEV_FATAL_BDBUF_SWAPOUT_RE);
+ }
+
+ free (write_req);
+
+ rtems_task_delete (RTEMS_SELF);
}
-/* rtems_bdbuf_find_pool --
- * Find first appropriate buffer pool. This primitive returns the index
- * of first buffer pool which block size is greater than or equal to
- * specified size.
+/**
+ * Find the first appropriate buffer pool. This primitive returns the index
+ * of the first buffer pool whose block size is greater than or equal to the
+ * specified size.
*
* PARAMETERS:
* block_size - requested block size
@@ -1813,44 +2380,45 @@ bdbuf_swapout_task(rtems_task_argument unused)
* is not configured.
*/
rtems_status_code
-rtems_bdbuf_find_pool(int block_size, rtems_bdpool_id *pool)
+rtems_bdbuf_find_pool (int block_size, rtems_bdpool_id *pool)
{
- rtems_bdpool_id i;
- bdbuf_pool *p;
- int cursize = INT_MAX;
- rtems_bdpool_id curid = -1;
- rtems_boolean found = FALSE;
- int j;
-
- for (j = block_size; (j != 0) && ((j & 1) == 0); j >>= 1);
- if (j != 1)
- return RTEMS_INVALID_SIZE;
-
- for (i = 0, p = rtems_bdbuf_ctx.pool; i < rtems_bdbuf_ctx.npools; i++, p++)
+ rtems_bdbuf_pool* p;
+ rtems_bdpool_id i;
+ rtems_bdpool_id curid = -1;
+ rtems_boolean found = FALSE;
+ int cursize = INT_MAX;
+ int j;
+
+ for (j = block_size; (j != 0) && ((j & 1) == 0); j >>= 1);
+ if (j != 1)
+ return RTEMS_INVALID_SIZE;
+
+ for (i = 0; i < rtems_bdbuf_ctx.npools; i++)
+ {
+ p = rtems_bdbuf_get_pool (i);
+ if ((p->blksize >= block_size) &&
+ (p->blksize < cursize))
{
- if ((p->blksize >= block_size) &&
- (p->blksize < cursize))
- {
- curid = i;
- cursize = p->blksize;
- found = TRUE;
- }
+ curid = i;
+ cursize = p->blksize;
+ found = TRUE;
}
+ }
- if (found)
- {
- if (pool != NULL)
- *pool = curid;
- return RTEMS_SUCCESSFUL;
- }
- else
- {
- return RTEMS_NOT_DEFINED;
- }
+ if (found)
+ {
+ if (pool != NULL)
+ *pool = curid;
+ return RTEMS_SUCCESSFUL;
+ }
+ else
+ {
+ return RTEMS_NOT_DEFINED;
+ }
}
-/* rtems_bdbuf_get_pool_info --
- * Obtain characteristics of buffer pool with specified number.
+/**
+ * Obtain the characteristics of the buffer pool with the specified number.
*
* PARAMETERS:
* pool - buffer pool number
@@ -1866,21 +2434,20 @@ rtems_bdbuf_find_pool(int block_size, rtems_bdpool_id *pool)
* Buffer pools enumerated contiguously starting from 0.
*/
rtems_status_code
-rtems_bdbuf_get_pool_info(rtems_bdpool_id pool, int *block_size,
- int *blocks)
+rtems_bdbuf_get_pool_info(rtems_bdpool_id pool, int* block_size, int* blocks)
{
- if (pool >= rtems_bdbuf_ctx.npools)
- return RTEMS_INVALID_NUMBER;
+ if (pool >= rtems_bdbuf_ctx.npools)
+ return RTEMS_INVALID_NUMBER;
- if (block_size != NULL)
- {
- *block_size = rtems_bdbuf_ctx.pool[pool].blksize;
- }
+ if (block_size != NULL)
+ {
+ *block_size = rtems_bdbuf_ctx.pool[pool].blksize;
+ }
- if (blocks != NULL)
- {
- *blocks = rtems_bdbuf_ctx.pool[pool].nblks;
- }
+ if (blocks != NULL)
+ {
+ *blocks = rtems_bdbuf_ctx.pool[pool].nblks;
+ }
- return RTEMS_SUCCESSFUL;
+ return RTEMS_SUCCESSFUL;
}
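A short usage sketch of the two pool query primitives above, selecting the pool for a 512 byte block size; the function name and printed text are illustrative only:

    #include <stdio.h>
    #include <rtems/bdbuf.h>

    static void
    show_pool_for_512 (void)
    {
      rtems_bdpool_id pool;
      int             block_size;
      int             blocks;

      /* Find the pool whose block size best fits 512 byte blocks. */
      if (rtems_bdbuf_find_pool (512, &pool) != RTEMS_SUCCESSFUL)
        return;

      /* Report its characteristics. */
      if (rtems_bdbuf_get_pool_info (pool, &block_size, &blocks) == RTEMS_SUCCESSFUL)
        printf ("pool %d: %d byte blocks, %d blocks\n",
                (int) pool, block_size, blocks);
    }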
diff --git a/cpukit/libblock/src/blkdev.c b/cpukit/libblock/src/blkdev.c
index e133e5eaf2..a93875c9a2 100644
--- a/cpukit/libblock/src/blkdev.c
+++ b/cpukit/libblock/src/blkdev.c
@@ -38,10 +38,10 @@ rtems_blkdev_generic_read(
unsigned int block;
unsigned int blkofs;
dev_t dev;
- disk_device *dd;
+ rtems_disk_device *dd;
dev = rtems_filesystem_make_dev_t(major, minor);
- dd = rtems_disk_lookup(dev);
+ dd = rtems_disk_obtain(dev);
if (dd == NULL)
return RTEMS_INVALID_NUMBER;
@@ -57,7 +57,7 @@ rtems_blkdev_generic_read(
while (count > 0)
{
- bdbuf_buffer *diskbuf;
+ rtems_bdbuf_buffer *diskbuf;
int copy;
rtems_status_code rc;
@@ -99,10 +99,10 @@ rtems_blkdev_generic_write(
unsigned int blkofs;
dev_t dev;
rtems_status_code rc;
- disk_device *dd;
+ rtems_disk_device *dd;
dev = rtems_filesystem_make_dev_t(major, minor);
- dd = rtems_disk_lookup(dev);
+ dd = rtems_disk_obtain(dev);
if (dd == NULL)
return RTEMS_INVALID_NUMBER;
@@ -118,7 +118,7 @@ rtems_blkdev_generic_write(
while (count > 0)
{
- bdbuf_buffer *diskbuf;
+ rtems_bdbuf_buffer *diskbuf;
int copy;
if ((blkofs == 0) && (count >= block_size))
@@ -156,10 +156,10 @@ rtems_blkdev_generic_open(
void * arg)
{
dev_t dev;
- disk_device *dd;
+ rtems_disk_device *dd;
dev = rtems_filesystem_make_dev_t(major, minor);
- dd = rtems_disk_lookup(dev);
+ dd = rtems_disk_obtain(dev);
if (dd == NULL)
return RTEMS_INVALID_NUMBER;
@@ -181,10 +181,10 @@ rtems_blkdev_generic_close(
void * arg)
{
dev_t dev;
- disk_device *dd;
+ rtems_disk_device *dd;
dev = rtems_filesystem_make_dev_t(major, minor);
- dd = rtems_disk_lookup(dev);
+ dd = rtems_disk_obtain(dev);
if (dd == NULL)
return RTEMS_INVALID_NUMBER;
@@ -206,32 +206,32 @@ rtems_blkdev_generic_ioctl(
{
rtems_libio_ioctl_args_t *args = arg;
dev_t dev;
- disk_device *dd;
+ rtems_disk_device *dd;
int rc;
dev = rtems_filesystem_make_dev_t(major, minor);
- dd = rtems_disk_lookup(dev);
+ dd = rtems_disk_obtain(dev);
if (dd == NULL)
return RTEMS_INVALID_NUMBER;
switch (args->command)
{
- case BLKIO_GETBLKSIZE:
+ case RTEMS_BLKIO_GETBLKSIZE:
args->ioctl_return = dd->block_size;
break;
- case BLKIO_GETSIZE:
+ case RTEMS_BLKIO_GETSIZE:
args->ioctl_return = dd->size;
break;
- case BLKIO_SYNCDEV:
+ case RTEMS_BLKIO_SYNCDEV:
rc = rtems_bdbuf_syncdev(dd->dev);
args->ioctl_return = (rc == RTEMS_SUCCESSFUL ? 0 : -1);
break;
- case BLKIO_REQUEST:
+ case RTEMS_BLKIO_REQUEST:
{
- blkdev_request *req = args->buffer;
+ rtems_blkdev_request *req = args->buffer;
req->start += dd->start;
args->ioctl_return = dd->ioctl(dd->phys_dev->dev, args->command,
req);
diff --git a/cpukit/libblock/src/diskdevs.c b/cpukit/libblock/src/diskdevs.c
index 545a4096e7..94a58daa6f 100644
--- a/cpukit/libblock/src/diskdevs.c
+++ b/cpukit/libblock/src/diskdevs.c
@@ -23,13 +23,13 @@
#define DISKTAB_INITIAL_SIZE 32
/* Table of disk devices having the same major number */
-struct disk_device_table {
- disk_device **minor; /* minor-indexed disk device table */
+typedef struct rtems_disk_device_table {
+ rtems_disk_device **minor; /* minor-indexed disk device table */
int size; /* Number of entries in the table */
-};
+} rtems_disk_device_table;
/* Pointer to [major].minor[minor] indexed array of disk devices */
-static struct disk_device_table *disktab;
+static rtems_disk_device_table *disktab;
/* Number of allocated entries in disktab table */
static int disktab_size;
@@ -64,24 +64,24 @@ static volatile rtems_boolean diskdevs_protected;
* pointer to the disk device descriptor entry, or NULL if no memory
* available for its creation.
*/
-static disk_device *
+static rtems_disk_device *
create_disk_entry(dev_t dev)
{
rtems_device_major_number major;
rtems_device_minor_number minor;
- struct disk_device **d;
+ rtems_disk_device **d;
rtems_filesystem_split_dev_t (dev, major, minor);
if (major >= disktab_size)
{
- struct disk_device_table *p;
+ rtems_disk_device_table *p;
int newsize;
int i;
newsize = disktab_size * 2;
if (major >= newsize)
newsize = major + 1;
- p = realloc(disktab, sizeof(struct disk_device_table) * newsize);
+ p = realloc(disktab, sizeof(rtems_disk_device_table) * newsize);
if (p == NULL)
return NULL;
p += disktab_size;
@@ -97,7 +97,7 @@ create_disk_entry(dev_t dev)
(minor >= disktab[major].size))
{
int newsize;
- disk_device **p;
+ rtems_disk_device **p;
int i;
int s = disktab[major].size;
@@ -108,7 +108,8 @@ create_disk_entry(dev_t dev)
if (minor >= newsize)
newsize = minor + 1;
- p = realloc(disktab[major].minor, sizeof(disk_device *) * newsize);
+ p = realloc(disktab[major].minor,
+ sizeof(rtems_disk_device *) * newsize);
if (p == NULL)
return NULL;
disktab[major].minor = p;
@@ -121,7 +122,7 @@ create_disk_entry(dev_t dev)
d = disktab[major].minor + minor;
if (*d == NULL)
{
- *d = calloc(1, sizeof(disk_device));
+ *d = calloc(1, sizeof(rtems_disk_device));
}
return *d;
}
@@ -136,12 +137,12 @@ create_disk_entry(dev_t dev)
* Pointer to the disk device descriptor corresponding to the specified
* device number, or NULL if disk device with such number not exists.
*/
-static inline disk_device *
+static rtems_disk_device *
get_disk_entry(dev_t dev)
{
rtems_device_major_number major;
rtems_device_minor_number minor;
- struct disk_device_table *dtab;
+ rtems_disk_device_table *dtab;
rtems_filesystem_split_dev_t (dev, major, minor);
@@ -171,9 +172,9 @@ get_disk_entry(dev_t dev)
* no memory available).
*/
static rtems_status_code
-create_disk(dev_t dev, const char *name, disk_device **diskdev)
+create_disk(dev_t dev, const char *name, rtems_disk_device **diskdev)
{
- disk_device *dd;
+ rtems_disk_device *dd;
char *n;
dd = get_disk_entry(dev);
@@ -234,12 +235,12 @@ create_disk(dev_t dev, const char *name, disk_device **diskdev)
*/
rtems_status_code
rtems_disk_create_phys(dev_t dev, int block_size, int disk_size,
- block_device_ioctl handler,
+ rtems_block_device_ioctl handler,
const char *name)
{
int bs_log2;
int i;
- disk_device *dd;
+ rtems_disk_device *dd;
rtems_status_code rc;
rtems_bdpool_id pool;
rtems_device_major_number major;
@@ -284,6 +285,11 @@ rtems_disk_create_phys(dev_t dev, int block_size, int disk_size,
rc = rtems_io_register_name(name, major, minor);
+ if (handler (dd->phys_dev->dev,
+ RTEMS_BLKDEV_CAPABILITIES,
+ &dd->capabilities) < 0)
+ dd->capabilities = 0;
+
diskdevs_protected = FALSE;
rtems_semaphore_release(diskdevs_mutex);
@@ -317,8 +323,8 @@ rtems_disk_create_phys(dev_t dev, int block_size, int disk_size,
rtems_status_code
rtems_disk_create_log(dev_t dev, dev_t phys, int start, int size, char *name)
{
- disk_device *dd;
- disk_device *pdd;
+ rtems_disk_device *dd;
+ rtems_disk_device *pdd;
rtems_status_code rc;
rtems_device_major_number major;
rtems_device_minor_number minor;
@@ -393,12 +399,12 @@ rtems_disk_delete(dev_t dev)
used = 0;
for (maj = 0; maj < disktab_size; maj++)
{
- struct disk_device_table *dtab = disktab + maj;
+ rtems_disk_device_table *dtab = disktab + maj;
if (dtab != NULL)
{
for (min = 0; min < dtab->size; min++)
{
- disk_device *dd = dtab->minor[min];
+ rtems_disk_device *dd = dtab->minor[min];
if ((dd != NULL) && (dd->phys_dev->dev == dev))
used += dd->uses;
}
@@ -415,12 +421,12 @@ rtems_disk_delete(dev_t dev)
/* Delete this device and all of its logical devices */
for (maj = 0; maj < disktab_size; maj++)
{
- struct disk_device_table *dtab = disktab +maj;
+ rtems_disk_device_table *dtab = disktab +maj;
if (dtab != NULL)
{
for (min = 0; min < dtab->size; min++)
{
- disk_device *dd = dtab->minor[min];
+ rtems_disk_device *dd = dtab->minor[min];
if ((dd != NULL) && (dd->phys_dev->dev == dev))
{
unlink(dd->name);
@@ -437,7 +443,7 @@ rtems_disk_delete(dev_t dev)
return rc;
}
-/* rtems_disk_lookup --
+/* rtems_disk_obtain --
* Find block device descriptor by its device identifier.
*
* PARAMETERS:
@@ -447,11 +453,11 @@ rtems_disk_delete(dev_t dev)
* pointer to the block device descriptor, or NULL if no such device
* exists.
*/
-disk_device *
-rtems_disk_lookup(dev_t dev)
+rtems_disk_device *
+rtems_disk_obtain(dev_t dev)
{
rtems_interrupt_level level;
- disk_device *dd;
+ rtems_disk_device *dd;
rtems_status_code rc;
rtems_interrupt_disable(level);
@@ -480,7 +486,7 @@ rtems_disk_lookup(dev_t dev)
}
/* rtems_disk_release --
- * Release disk_device structure (decrement usage counter to 1).
+ * Release rtems_disk_device structure (decrement usage counter to 1).
*
* PARAMETERS:
* dd - pointer to disk device structure
@@ -489,7 +495,7 @@ rtems_disk_lookup(dev_t dev)
* RTEMS_SUCCESSFUL
*/
rtems_status_code
-rtems_disk_release(disk_device *dd)
+rtems_disk_release(rtems_disk_device *dd)
{
rtems_interrupt_level level;
rtems_interrupt_disable(level);
@@ -510,12 +516,12 @@ rtems_disk_release(disk_device *dd)
* Pointer to the disk descriptor for next disk device, or NULL if all
* devices enumerated.
*/
-disk_device *
+rtems_disk_device *
rtems_disk_next(dev_t dev)
{
rtems_device_major_number major;
rtems_device_minor_number minor;
- struct disk_device_table *dtab;
+ rtems_disk_device_table *dtab;
dev++;
rtems_filesystem_split_dev_t (dev, major, minor);
@@ -562,7 +568,7 @@ rtems_disk_io_initialize(void)
return RTEMS_SUCCESSFUL;
disktab_size = DISKTAB_INITIAL_SIZE;
- disktab = calloc(disktab_size, sizeof(struct disk_device_table));
+ disktab = calloc(disktab_size, sizeof(rtems_disk_device_table));
if (disktab == NULL)
return RTEMS_NO_MEMORY;
@@ -578,8 +584,7 @@ rtems_disk_io_initialize(void)
return rc;
}
- rc = rtems_bdbuf_init(rtems_bdbuf_configuration,
- rtems_bdbuf_configuration_size);
+ rc = rtems_bdbuf_init();
if (rc != RTEMS_SUCCESSFUL)
{
@@ -612,12 +617,12 @@ rtems_disk_io_done(void)
/* Free data structures */
for (maj = 0; maj < disktab_size; maj++)
{
- struct disk_device_table *dtab = disktab + maj;
+ rtems_disk_device_table *dtab = disktab + maj;
if (dtab != NULL)
{
for (min = 0; min < dtab->size; min++)
{
- disk_device *dd = dtab->minor[min];
+ rtems_disk_device *dd = dtab->minor[min];
unlink(dd->name);
free(dd->name);
free(dd);
diff --git a/cpukit/libblock/src/flashdisk.c b/cpukit/libblock/src/flashdisk.c
index 4e8b261a93..d67d4a02bf 100644
--- a/cpukit/libblock/src/flashdisk.c
+++ b/cpukit/libblock/src/flashdisk.c
@@ -2057,14 +2057,13 @@ rtems_fdisk_write_block (rtems_flashdisk* fd,
* @retval int The ioctl return value.
*/
static int
-rtems_fdisk_read (rtems_flashdisk* fd, blkdev_request* req)
+rtems_fdisk_read (rtems_flashdisk* fd, rtems_blkdev_request* req)
{
- blkdev_sg_buffer* sg = req->bufs;
- uint32_t block = req->start;
- uint32_t b;
- int ret = 0;
+ rtems_blkdev_sg_buffer* sg = req->bufs;
+ uint32_t b;
+ int ret = 0;
- for (b = 0; b < req->bufnum; b++, block++, sg++)
+ for (b = 0; b < req->bufnum; b++, sg++)
{
uint32_t length = sg->length;
@@ -2077,7 +2076,7 @@ rtems_fdisk_read (rtems_flashdisk* fd, blkdev_request* req)
length = fd->block_size;
}
- ret = rtems_fdisk_read_block (fd, block, sg->buffer);
+ ret = rtems_fdisk_read_block (fd, sg->block, sg->buffer);
if (ret)
break;
@@ -2098,14 +2097,13 @@ rtems_fdisk_read (rtems_flashdisk* fd, blkdev_request* req)
* @retval int The ioctl return value.
*/
static int
-rtems_fdisk_write (rtems_flashdisk* fd, blkdev_request* req)
+rtems_fdisk_write (rtems_flashdisk* fd, rtems_blkdev_request* req)
{
- blkdev_sg_buffer* sg = req->bufs;
- uint32_t block = req->start;
- uint32_t b;
- int ret = 0;
+ rtems_blkdev_sg_buffer* sg = req->bufs;
+ uint32_t b;
+ int ret = 0;
- for (b = 0; b < req->bufnum; b++, block++, sg++)
+ for (b = 0; b < req->bufnum; b++, sg++)
{
if (sg->length != fd->block_size)
{
@@ -2113,7 +2111,7 @@ rtems_fdisk_write (rtems_flashdisk* fd, blkdev_request* req)
"bd:%d fd:%d", sg->length, fd->block_size);
}
- ret = rtems_fdisk_write_block (fd, block, sg->buffer);
+ ret = rtems_fdisk_write_block (fd, sg->block, sg->buffer);
if (ret)
break;
@@ -2356,7 +2354,7 @@ static int
rtems_fdisk_ioctl (dev_t dev, uint32_t req, void* argp)
{
rtems_device_minor_number minor = rtems_filesystem_dev_minor_t (dev);
- blkdev_request* r = argp;
+ rtems_blkdev_request* r = argp;
rtems_status_code sc;
errno = 0;
@@ -2368,7 +2366,7 @@ rtems_fdisk_ioctl (dev_t dev, uint32_t req, void* argp)
{
switch (req)
{
- case BLKIO_REQUEST:
+ case RTEMS_BLKIO_REQUEST:
if ((minor >= rtems_flashdisk_count) ||
(rtems_flashdisks[minor].device_count == 0))
{
@@ -2378,11 +2376,11 @@ rtems_fdisk_ioctl (dev_t dev, uint32_t req, void* argp)
{
switch (r->req)
{
- case BLKDEV_REQ_READ:
+ case RTEMS_BLKDEV_REQ_READ:
errno = rtems_fdisk_read (&rtems_flashdisks[minor], r);
break;
- case BLKDEV_REQ_WRITE:
+ case RTEMS_BLKDEV_REQ_WRITE:
errno = rtems_fdisk_write (&rtems_flashdisks[minor], r);
break;
diff --git a/cpukit/libblock/src/ide_part_table.c b/cpukit/libblock/src/ide_part_table.c
index 2b5b46138b..5d44c9a960 100644
--- a/cpukit/libblock/src/ide_part_table.c
+++ b/cpukit/libblock/src/ide_part_table.c
@@ -46,18 +46,18 @@
* and does not support devices with sector size other than 512 bytes
*/
static rtems_status_code
-get_sector(dev_t dev, uint32_t sector_num, sector_data_t **sector)
+get_sector(dev_t dev, uint32_t sector_num, rtems_sector_data_t **sector)
{
- sector_data_t *s;
- bdbuf_buffer *buf;
- rtems_status_code rc;
+ rtems_sector_data_t *s;
+ rtems_bdbuf_buffer *buf;
+ rtems_status_code rc;
if (sector == NULL)
{
return RTEMS_INTERNAL_ERROR;
}
- s = (sector_data_t *) malloc(sizeof(sector_data_t) + RTEMS_IDE_SECTOR_SIZE);
+ s = (rtems_sector_data_t *) malloc(sizeof(rtems_sector_data_t) + RTEMS_IDE_SECTOR_SIZE);
if (s == NULL)
{
return RTEMS_NO_MEMORY;
@@ -92,7 +92,7 @@ get_sector(dev_t dev, uint32_t sector_num, sector_data_t **sector)
* TRUE if sector has msdos signature, FALSE otherwise
*/
static rtems_boolean
-msdos_signature_check (sector_data_t *sector)
+msdos_signature_check (rtems_sector_data_t *sector)
{
uint8_t *p = sector->data + RTEMS_IDE_PARTITION_MSDOS_SIGNATURE_OFFSET;
@@ -156,10 +156,10 @@ is_fat_partition(uint8_t type)
* RTEMS_INTERNAL_ERROR, if other error occurs.
*/
static rtems_status_code
-data_to_part_desc(uint8_t *data, part_desc_t **new_part_desc)
+data_to_part_desc(uint8_t *data, rtems_part_desc_t **new_part_desc)
{
- part_desc_t *part_desc;
- uint32_t temp;
+ rtems_part_desc_t *part_desc;
+ uint32_t temp;
if (new_part_desc == NULL)
{
@@ -168,7 +168,7 @@ data_to_part_desc(uint8_t *data, part_desc_t **new_part_desc)
*new_part_desc = NULL;
- if ((part_desc = calloc(1, sizeof(part_desc_t))) == NULL)
+ if ((part_desc = calloc(1, sizeof(rtems_part_desc_t))) == NULL)
{
return RTEMS_NO_MEMORY;
}
@@ -219,15 +219,15 @@ data_to_part_desc(uint8_t *data, part_desc_t **new_part_desc)
* RTEMS_INTERNAL_ERROR if other error occurs.
*/
static rtems_status_code
-read_extended_partition(uint32_t start, part_desc_t *ext_part)
+read_extended_partition(uint32_t start, rtems_part_desc_t *ext_part)
{
- int i;
- dev_t dev;
- sector_data_t *sector;
- uint32_t here;
- uint8_t *data;
- part_desc_t *new_part_desc;
- rtems_status_code rc;
+ int i;
+ dev_t dev;
+ rtems_sector_data_t *sector;
+ uint32_t here;
+ uint8_t *data;
+ rtems_part_desc_t *new_part_desc;
+ rtems_status_code rc;
if ((ext_part == NULL) || (ext_part->disk_desc == NULL))
{
@@ -286,7 +286,7 @@ read_extended_partition(uint32_t start, part_desc_t *ext_part)
}
else
{
- disk_desc_t *disk_desc = new_part_desc->disk_desc;
+ rtems_disk_desc_t *disk_desc = new_part_desc->disk_desc;
disk_desc->partitions[disk_desc->last_log_id] = new_part_desc;
new_part_desc->log_id = ++disk_desc->last_log_id;
new_part_desc->start += here;
@@ -314,14 +314,14 @@ read_extended_partition(uint32_t start, part_desc_t *ext_part)
* RTEMS_INTERNAL_ERROR otherwise
*/
static rtems_status_code
-read_mbr(disk_desc_t *disk_desc)
+read_mbr(rtems_disk_desc_t *disk_desc)
{
- int part_num;
- sector_data_t *sector;
- part_desc_t *part_desc;
- uint8_t *data;
- rtems_status_code rc;
- dev_t dev = disk_desc->dev;
+ int part_num;
+ rtems_sector_data_t *sector;
+ rtems_part_desc_t *part_desc;
+ uint8_t *data;
+ rtems_status_code rc;
+ dev_t dev = disk_desc->dev;
/* get MBR sector */
rc = get_sector(dev, 0, &sector);
@@ -398,7 +398,7 @@ read_mbr(disk_desc_t *disk_desc)
* N/A
*/
static void
-partition_free(part_desc_t *part_desc)
+partition_free(rtems_part_desc_t *part_desc)
{
int part_num;
@@ -429,7 +429,7 @@ partition_free(part_desc_t *part_desc)
* N/A
*/
void
-rtems_ide_part_table_free(disk_desc_t *disk_desc)
+rtems_ide_part_table_free(rtems_disk_desc_t *disk_desc)
{
int part_num;
@@ -455,7 +455,7 @@ rtems_ide_part_table_free(disk_desc_t *disk_desc)
* RTEMS_INTERNAL_ERROR otherwise
*/
rtems_status_code
-rtems_ide_part_table_get(const char *dev_name, disk_desc_t *disk_desc)
+rtems_ide_part_table_get(const char *dev_name, rtems_disk_desc_t *disk_desc)
{
struct stat dev_stat;
rtems_status_code rc;
@@ -494,16 +494,16 @@ rtems_ide_part_table_initialize(char *dev_name)
{
int part_num;
dev_t dev;
- disk_desc_t *disk_desc;
+ rtems_disk_desc_t *disk_desc;
rtems_device_major_number major;
rtems_device_minor_number minor;
rtems_status_code rc;
- part_desc_t *part_desc;
+ rtems_part_desc_t *part_desc;
/* logical device name /dev/hdxyy */
char name[RTEMS_IDE_PARTITION_DEV_NAME_LENGTH_MAX];
- disk_desc = (disk_desc_t *) calloc(1, sizeof(disk_desc_t));
+ disk_desc = (rtems_disk_desc_t *) calloc(1, sizeof(rtems_disk_desc_t));
if (disk_desc == NULL)
{
return RTEMS_NO_MEMORY;
diff --git a/cpukit/libblock/src/nvdisk.c b/cpukit/libblock/src/nvdisk.c
index 0ee094bdc6..4166d79c98 100644
--- a/cpukit/libblock/src/nvdisk.c
+++ b/cpukit/libblock/src/nvdisk.c
@@ -51,7 +51,7 @@
* footprint targets. Leave in by default.
*/
#if !defined (RTEMS_NVDISK_TRACE)
-#define RTEMS_NVDISK_TRACE 1
+#define RTEMS_NVDISK_TRACE 0
#endif
/**
@@ -165,7 +165,7 @@ rtems_nvdisk_crc16_gen_factors (uint16_t pattern)
/**
* Print a message to the nvdisk output and flush it.
*
- * @param fd The flashdisk control structure.
+ * @param nvd The nvdisk control structure.
* @param format The format string. See printf for details.
* @param ... The arguments for the format text.
* @return int The number of bytes written to the output.
@@ -189,7 +189,7 @@ rtems_nvdisk_printf (const rtems_nvdisk* nvd, const char *format, ...)
/**
* Print a info message to the nvdisk output and flush it.
*
- * @param fd The flashdisk control structure.
+ * @param nvd The nvdisk control structure.
* @param format The format string. See printf for details.
* @param ... The arguments for the format text.
* @return int The number of bytes written to the output.
@@ -213,7 +213,7 @@ rtems_nvdisk_info (const rtems_nvdisk* nvd, const char *format, ...)
/**
* Print a warning to the nvdisk output and flush it.
*
- * @param fd The flashdisk control structure.
+ * @param nvd The nvdisk control structure.
* @param format The format string. See printf for details.
* @param ... The arguments for the format text.
* @return int The number of bytes written to the output.
@@ -256,24 +256,6 @@ rtems_nvdisk_error (const char *format, ...)
}
/**
- * Print an abort message, flush it then abort the program.
- *
- * @param format The format string. See printf for details.
- * @param ... The arguments for the format text.
- */
-static void
-rtems_nvdisk_abort (const char *format, ...)
-{
- va_list args;
- va_start (args, format);
- fprintf (stderr, "nvdisk:abort:");
- vfprintf (stderr, format, args);
- fprintf (stderr, "\n");
- fflush (stderr);
- exit (1);
-}
-
-/**
* Get the descriptor for a device.
*/
static const rtems_nvdisk_device_desc*
@@ -324,6 +306,7 @@ rtems_nvdisk_device_write (const rtems_nvdisk* nvd,
return ops->write (device, dd->flags, dd->base, offset, buffer, size);
}
+#if NOT_USED
/**
* Verify the data with the data in a segment.
*/
@@ -344,6 +327,7 @@ rtems_nvdisk_device_verify (const rtems_nvdisk* nvd,
#endif
return ops->verify (device, dd->flags, dd->base, offset, buffer, size);
}
+#endif
/**
* Read a page of data from the device.
@@ -374,20 +358,6 @@ rtems_nvdisk_write_page (const rtems_nvdisk* nvd,
}
/**
- * Verify a page of data with the data in the device.
- */
-static int
-rtems_nvdisk_verify_page (const rtems_nvdisk* nvd,
- uint32_t device,
- uint32_t page,
- const void* buffer)
-{
- return rtems_nvdisk_device_verify (nvd, device,
- page * nvd->block_size,
- buffer, nvd->block_size);
-}
-
-/**
* Read the checksum from the device.
*/
static int
@@ -532,7 +502,9 @@ rtems_nvdisk_read_block (rtems_nvdisk* nvd, uint32_t block, uint8_t* buffer)
if (crc == 0xffff)
{
+#if RTEMS_NVDISK_TRACE
rtems_nvdisk_warning (nvd, "read-block: crc not set: %d", block);
+#endif
memset (buffer, 0, nvd->block_size);
return 0;
}
@@ -605,13 +577,12 @@ rtems_nvdisk_write_block (rtems_nvdisk* nvd,
* @retval int The ioctl return value.
*/
static int
-rtems_nvdisk_read (rtems_nvdisk* nvd, blkdev_request* req)
+rtems_nvdisk_read (rtems_nvdisk* nvd, rtems_blkdev_request* req)
{
- blkdev_sg_buffer* sg = req->bufs;
- uint32_t block = req->start;
- uint32_t b;
- int32_t remains;
- int ret = 0;
+ rtems_blkdev_sg_buffer* sg = req->bufs;
+ uint32_t b;
+ int32_t remains;
+ int ret = 0;
#if RTEMS_NVDISK_TRACE
rtems_nvdisk_info (nvd, "read: blocks=%d", req->bufnum);
@@ -619,7 +590,7 @@ rtems_nvdisk_read (rtems_nvdisk* nvd, blkdev_request* req)
remains = req->count * nvd->block_size;
- for (b = 0; b < req->bufnum; b++, block++, sg++)
+ for (b = 0; b < req->bufnum; b++, sg++)
{
uint32_t length = sg->length;
@@ -658,18 +629,17 @@ rtems_nvdisk_read (rtems_nvdisk* nvd, blkdev_request* req)
* @retval int The ioctl return value.
*/
static int
-rtems_nvdisk_write (rtems_nvdisk* nvd, blkdev_request* req)
+rtems_nvdisk_write (rtems_nvdisk* nvd, rtems_blkdev_request* req)
{
- blkdev_sg_buffer* sg = req->bufs;
- uint32_t block = req->start;
- uint32_t b;
- int ret = 0;
+ rtems_blkdev_sg_buffer* sg = req->bufs;
+ uint32_t b;
+ int ret = 0;
#if RTEMS_NVDISK_TRACE
rtems_nvdisk_info (nvd, "write: blocks=%d", req->bufnum);
#endif
- for (b = 0; b < req->bufnum; b++, block++, sg++)
+ for (b = 0; b < req->bufnum; b++, sg++)
{
if (sg->length != nvd->block_size)
{
@@ -720,7 +690,7 @@ rtems_nvdisk_erase_disk (rtems_nvdisk* nvd)
}
/**
- * MV disk IOCTL handler.
+ * NV disk IOCTL handler.
*
* @param dev Device number (major, minor number).
* @param req IOCTL request code.
@@ -731,9 +701,21 @@ static int
rtems_nvdisk_ioctl (dev_t dev, uint32_t req, void* argp)
{
rtems_device_minor_number minor = rtems_filesystem_dev_minor_t (dev);
- blkdev_request* r = argp;
+ rtems_blkdev_request* r = argp;
rtems_status_code sc;
+ if (minor >= rtems_nvdisk_count)
+ {
+ errno = ENODEV;
+ return -1;
+ }
+
+ if (rtems_nvdisks[minor].device_count == 0)
+ {
+ errno = ENODEV;
+ return -1;
+ }
+
errno = 0;
sc = rtems_semaphore_obtain (rtems_nvdisks[minor].lock, RTEMS_WAIT, 0);
@@ -743,39 +725,31 @@ rtems_nvdisk_ioctl (dev_t dev, uint32_t req, void* argp)
{
switch (req)
{
- case BLKIO_REQUEST:
- if ((minor >= rtems_nvdisk_count) ||
- (rtems_nvdisks[minor].device_count == 0))
- {
- errno = ENODEV;
- }
- else
+ case RTEMS_BLKIO_REQUEST:
+ switch (r->req)
{
- switch (r->req)
- {
- case BLKDEV_REQ_READ:
- errno = rtems_nvdisk_read (&rtems_nvdisks[minor], r);
- break;
-
- case BLKDEV_REQ_WRITE:
- errno = rtems_nvdisk_write (&rtems_nvdisks[minor], r);
- break;
-
- default:
- errno = EBADRQC;
- break;
- }
+ case RTEMS_BLKDEV_REQ_READ:
+ errno = rtems_nvdisk_read (&rtems_nvdisks[minor], r);
+ break;
+
+ case RTEMS_BLKDEV_REQ_WRITE:
+ errno = rtems_nvdisk_write (&rtems_nvdisks[minor], r);
+ break;
+
+ default:
+ errno = EBADRQC;
+ break;
}
break;
case RTEMS_NVDISK_IOCTL_ERASE_DISK:
errno = rtems_nvdisk_erase_disk (&rtems_nvdisks[minor]);
break;
-
+
case RTEMS_NVDISK_IOCTL_INFO_LEVEL:
rtems_nvdisks[minor].info_level = (uint32_t) argp;
break;
-
+
default:
errno = EBADRQC;
break;
diff --git a/cpukit/libblock/src/ramdisk.c b/cpukit/libblock/src/ramdisk.c
index b72e3ae795..3b3f3f29b0 100644
--- a/cpukit/libblock/src/ramdisk.c
+++ b/cpukit/libblock/src/ramdisk.c
@@ -22,6 +22,14 @@
#include "rtems/diskdevs.h"
#include "rtems/ramdisk.h"
+/**
+ * Control tracing. It can be compiled out of the code for small
+ * footprint targets. Leave in by default.
+ */
+#if !defined (RTEMS_RAMDISK_TRACE)
+#define RTEMS_RAMDISK_TRACE 0
+#endif
+
#define RAMDISK_DEVICE_BASE_NAME "/dev/ramdisk"
/* Internal RAM disk descriptor */
@@ -32,11 +40,40 @@ struct ramdisk {
rtems_boolean initialized;/* RAM disk is initialized */
rtems_boolean malloced; /* != 0, if memory allocated by malloc for this
RAM disk */
+#if RTEMS_RAMDISK_TRACE
+ int info_level; /* Trace level */
+#endif
};
static struct ramdisk *ramdisk;
static int nramdisks;
+#if RTEMS_RAMDISK_TRACE
+/**
+ * Print a message to the ramdisk output and flush it.
+ *
+ * @param rd The ramdisk control structure.
+ * @param format The format string. See printf for details.
+ * @param ... The arguments for the format text.
+ * @return int The number of bytes written to the output.
+ */
+static int
+rtems_ramdisk_printf (struct ramdisk *rd, const char *format, ...)
+{
+ int ret = 0;
+ if (rd->info_level >= 1)
+ {
+ va_list args;
+ va_start (args, format);
+ fprintf (stdout, "ramdisk:");
+ ret = vfprintf (stdout, format, args);
+ fprintf (stdout, "\n");
+ fflush (stdout);
+ }
+ return ret;
+}
+#endif
+
/* ramdisk_read --
* RAM disk READ request handler. This primitive copies data from RAM
* disk to supplied buffer and invoke the callout function to inform
@@ -49,19 +86,25 @@ static int nramdisks;
* ioctl return value
*/
static int
-ramdisk_read(struct ramdisk *rd, blkdev_request *req)
+ramdisk_read(struct ramdisk *rd, rtems_blkdev_request *req)
{
char *from;
uint32_t i;
- blkdev_sg_buffer *sg;
+ rtems_blkdev_sg_buffer *sg;
uint32_t remains;
- from = (char *)rd->area + (req->start * rd->block_size);
+#if RTEMS_RAMDISK_TRACE
+ rtems_ramdisk_printf (rd, "ramdisk read: start=%d, blocks=%d remains=%d",
+ req->bufs[0].block, req->bufnum,
+ rd->block_size * req->count);
+#endif
+
remains = rd->block_size * req->count;
sg = req->bufs;
for (i = 0; (remains > 0) && (i < req->bufnum); i++, sg++)
{
int count = sg->length;
+ from = ((char *)rd->area + (sg->block * rd->block_size));
if (count > remains)
count = remains;
memcpy(sg->buffer, from, count);
@@ -84,19 +127,24 @@ ramdisk_read(struct ramdisk *rd, blkdev_request *req)
* ioctl return value
*/
static int
-ramdisk_write(struct ramdisk *rd, blkdev_request *req)
+ramdisk_write(struct ramdisk *rd, rtems_blkdev_request *req)
{
char *to;
uint32_t i;
- blkdev_sg_buffer *sg;
+ rtems_blkdev_sg_buffer *sg;
uint32_t remains;
- to = (char *)rd->area + (req->start * rd->block_size);
+#if RTEMS_RAMDISK_TRACE
+ rtems_ramdisk_printf (rd, "ramdisk write: start=%d, blocks=%d remains=%d",
+ req->bufs[0].block, req->bufnum,
+ rd->block_size * req->count);
+#endif
remains = rd->block_size * req->count;
sg = req->bufs;
for (i = 0; (remains > 0) && (i < req->bufnum); i++, sg++)
{
int count = sg->length;
+ to = ((char *)rd->area + (sg->block * rd->block_size));
if (count > remains)
count = remains;
memcpy(to, sg->buffer, count);
@@ -123,10 +171,10 @@ ramdisk_ioctl(dev_t dev, uint32_t req, void *argp)
{
switch (req)
{
- case BLKIO_REQUEST:
+ case RTEMS_BLKIO_REQUEST:
{
rtems_device_minor_number minor;
- blkdev_request *r = argp;
+ rtems_blkdev_request *r = argp;
struct ramdisk *rd;
minor = rtems_filesystem_dev_minor_t(dev);
@@ -140,10 +188,10 @@ ramdisk_ioctl(dev_t dev, uint32_t req, void *argp)
switch (r->req)
{
- case BLKDEV_REQ_READ:
+ case RTEMS_BLKDEV_REQ_READ:
return ramdisk_read(rd, r);
- case BLKDEV_REQ_WRITE:
+ case RTEMS_BLKDEV_REQ_WRITE:
return ramdisk_write(rd, r);
default:
@@ -188,7 +236,9 @@ ramdisk_initialize(
r = ramdisk = calloc(rtems_ramdisk_configuration_size,
sizeof(struct ramdisk));
-
+#if RTEMS_RAMDISK_TRACE
+ r->info_level = 1;
+#endif
for (i = 0; i < rtems_ramdisk_configuration_size; i++, c++, r++)
{
dev_t dev = rtems_filesystem_make_dev_t(major, i);