path: root/freebsd/sys/sys/aio.h
Diffstat (limited to 'freebsd/sys/sys/aio.h')
-rw-r--r--  freebsd/sys/sys/aio.h | 148
1 file changed, 135 insertions, 13 deletions
diff --git a/freebsd/sys/sys/aio.h b/freebsd/sys/sys/aio.h
index 02e34a85..ab6f766f 100644
--- a/freebsd/sys/sys/aio.h
+++ b/freebsd/sys/sys/aio.h
@@ -21,6 +21,11 @@
#include <sys/types.h>
#include <sys/signal.h>
+#ifdef _KERNEL
+#include <sys/queue.h>
+#include <sys/event.h>
+#include <sys/signalvar.h>
+#endif
/*
* Returned by aio_cancel:
@@ -37,6 +42,7 @@
#define LIO_READ 0x2
#ifdef _KERNEL
#define LIO_SYNC 0x3
+#define LIO_MLOCK 0x4
#endif
/*
@@ -45,10 +51,30 @@
#define LIO_NOWAIT 0x0
#define LIO_WAIT 0x1
+#ifndef __rtems__
/*
* Maximum number of allowed LIO operations
*/
#define AIO_LISTIO_MAX 16
+#endif /* __rtems__ */
+
+#ifdef _KERNEL
+
+/* Default values of tunables for the AIO worker pool. */
+
+#ifndef MAX_AIO_PROCS
+#define MAX_AIO_PROCS 32
+#endif
+
+#ifndef TARGET_AIO_PROCS
+#define TARGET_AIO_PROCS 4
+#endif
+
+#ifndef AIOD_LIFETIME_DEFAULT
+#define AIOD_LIFETIME_DEFAULT (30 * hz)
+#endif
+
+#endif
/*
* Private members for aiocb -- don't access
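
Because each of the worker-pool defaults added above is wrapped in an #ifndef guard, a build can pre-define them and this header will leave those definitions alone. A minimal kernel-side sketch of such an override; the values shown are arbitrary and purely illustrative:

/*
 * Hypothetical override of the AIO worker-pool defaults.  Any earlier
 * definition (for example -DMAX_AIO_PROCS=64 on the compiler command
 * line) wins, because the header only defines a macro when it is unset.
 */
#define MAX_AIO_PROCS		64		/* allow up to 64 aiod worker threads */
#define TARGET_AIO_PROCS	8		/* keep 8 workers around when idle */
#define AIOD_LIFETIME_DEFAULT	(60 * hz)	/* assumed: idle worker lifetime, in ticks */

#include <sys/aio.h>				/* the #ifndef guards now skip these three */
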
@@ -78,7 +104,105 @@ typedef struct aiocb {
#endif
} aiocb_t;
-#ifndef _KERNEL
+#ifdef _KERNEL
+
+typedef void aio_cancel_fn_t(struct kaiocb *);
+typedef void aio_handle_fn_t(struct kaiocb *);
+
+/*
+ * Kernel version of an I/O control block.
+ *
+ * Locking key:
+ * * - need not be protected
+ * a - locked by kaioinfo lock
+ * b - locked by backend lock
+ * c - locked by aio_job_mtx
+ */
+struct kaiocb {
+ TAILQ_ENTRY(kaiocb) list; /* (b) backend-specific list of jobs */
+ TAILQ_ENTRY(kaiocb) plist; /* (a) lists of pending / done jobs */
+ TAILQ_ENTRY(kaiocb) allist; /* (a) list of all jobs in proc */
+ int jobflags; /* (a) job flags */
+ int inblock; /* (*) input blocks */
+ int outblock; /* (*) output blocks */
+ int msgsnd; /* (*) messages sent */
+ int msgrcv; /* (*) messages received */
+ struct proc *userproc; /* (*) user process */
+ struct ucred *cred; /* (*) active credential when created */
+ struct file *fd_file; /* (*) pointer to file structure */
+ struct aioliojob *lio; /* (*) optional lio job */
+ struct aiocb *ujob; /* (*) pointer in userspace of aiocb */
+ struct knlist klist; /* (a) list of knotes */
+ struct aiocb uaiocb; /* (*) copy of user I/O control block */
+ ksiginfo_t ksi; /* (a) realtime signal info */
+ uint64_t seqno; /* (*) job number */
+ aio_cancel_fn_t *cancel_fn; /* (a) backend cancel function */
+ aio_handle_fn_t *handle_fn; /* (c) backend handle function */
+ union { /* Backend-specific data fields */
+ struct { /* BIO backend */
+ struct bio *bp; /* (*) BIO pointer */
+ struct buf *pbuf; /* (*) buffer pointer */
+ struct vm_page *pages[btoc(MAXPHYS)+1]; /* (*) */
+ int npages; /* (*) number of pages */
+ };
+ struct { /* fsync() requests */
+ int pending; /* (a) number of pending I/O */
+ };
+ struct {
+ void *backend1;
+ void *backend2;
+ long backend3;
+ int backend4;
+ };
+ };
+};
+
+struct socket;
+struct sockbuf;
+
+/*
+ * AIO backends should permit cancellation of queued requests waiting to
+ * be serviced by installing a cancel routine while the request is
+ * queued. The cancellation routine should dequeue the request if
+ * necessary and cancel it. Care must be taken to handle races between
+ * queueing and dequeueing requests and cancellation.
+ *
+ * When queueing a request somewhere such that it can be cancelled, the
+ * caller should:
+ *
+ * 1) Acquire lock that protects the associated queue.
+ * 2) Call aio_set_cancel_function() to install the cancel routine.
+ * 3) If that fails, the request has a pending cancel and should be
+ * cancelled via aio_cancel().
+ * 4) Queue the request.
+ *
+ * When dequeueing a request to service it or hand it off to somewhere else,
+ * the caller should:
+ *
+ * 1) Acquire the lock that protects the associated queue.
+ * 2) Dequeue the request.
+ * 3) Call aio_clear_cancel_function() to clear the cancel routine.
+ * 4) If that fails, the cancel routine is about to be called. The
+ * caller should ignore the request.
+ *
+ * The cancel routine should:
+ *
+ * 1) Acquire the lock that protects the associated queue.
+ * 2) Call aio_cancel_cleared() to determine if the request is already
+ * dequeued due to a race with the dequeueing thread.
+ * 3) If that fails, dequeue the request.
+ * 4) Cancel the request via aio_cancel().
+ */
+
+bool aio_cancel_cleared(struct kaiocb *job);
+void aio_cancel(struct kaiocb *job);
+bool aio_clear_cancel_function(struct kaiocb *job);
+void aio_complete(struct kaiocb *job, long status, int error);
+void aio_schedule(struct kaiocb *job, aio_handle_fn_t *func);
+bool aio_set_cancel_function(struct kaiocb *job, aio_cancel_fn_t *func);
+void aio_switch_vmspace(struct kaiocb *job);
+
+#else /* !_KERNEL */
struct timespec;
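
Below is a sketch of a backend that follows the protocol documented above. The xbe_* names, the job queue, and its mutex are hypothetical and exist only for illustration; the aio_* calls are the ones declared in this header.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/aio.h>

static struct mtx xbe_lock;		/* backend lock, protects xbe_jobs */
static TAILQ_HEAD(, kaiocb) xbe_jobs = TAILQ_HEAD_INITIALIZER(xbe_jobs);
MTX_SYSINIT(xbe_lock, &xbe_lock, "xbe job queue", MTX_DEF);

/* Cancel routine: acquire the queue lock, dequeue unless already done, cancel. */
static void
xbe_cancel(struct kaiocb *job)
{

	mtx_lock(&xbe_lock);
	if (!aio_cancel_cleared(job))	/* not yet dequeued by a servicing thread */
		TAILQ_REMOVE(&xbe_jobs, job, list);
	mtx_unlock(&xbe_lock);
	aio_cancel(job);
}

/* Queueing: install the cancel routine before making the job visible. */
static void
xbe_enqueue(struct kaiocb *job)
{

	mtx_lock(&xbe_lock);
	if (!aio_set_cancel_function(job, xbe_cancel)) {
		/* A cancel is already pending for this job. */
		mtx_unlock(&xbe_lock);
		aio_cancel(job);
		return;
	}
	TAILQ_INSERT_TAIL(&xbe_jobs, job, list);
	mtx_unlock(&xbe_lock);
}

/* Dequeueing: clear the cancel routine; on failure the job must be ignored. */
static struct kaiocb *
xbe_dequeue(void)
{
	struct kaiocb *job;

	mtx_lock(&xbe_lock);
	while ((job = TAILQ_FIRST(&xbe_jobs)) != NULL) {
		TAILQ_REMOVE(&xbe_jobs, job, list);
		if (aio_clear_cancel_function(job))
			break;		/* the job is now ours to service */
		/* Cancel routine is about to run; skip this job. */
	}
	mtx_unlock(&xbe_lock);
	return (job);
}
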
@@ -99,7 +223,8 @@ int aio_write(struct aiocb *);
* "acb_list" is an array of "nacb_listent" I/O control blocks.
 * When all I/Os are complete, the optional signal "sig" is sent.
*/
-int lio_listio(int, struct aiocb * const [], int, struct sigevent *);
+int lio_listio(int, struct aiocb *__restrict const *__restrict, int,
+ struct sigevent *);
/*
* Get completion status
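
For reference, a minimal userspace sketch using the updated lio_listio() prototype; the file name, buffer sizes, and offsets are placeholders and error handling is trimmed:

#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	static char buf0[512], buf1[512];
	struct aiocb acb0, acb1;
	struct aiocb *list[2] = { &acb0, &acb1 };
	int fd;

	fd = open("/etc/motd", O_RDONLY);	/* placeholder file */

	memset(&acb0, 0, sizeof(acb0));
	acb0.aio_fildes = fd;
	acb0.aio_buf = buf0;
	acb0.aio_nbytes = sizeof(buf0);
	acb0.aio_offset = 0;
	acb0.aio_lio_opcode = LIO_READ;

	acb1 = acb0;				/* second read, next 512 bytes */
	acb1.aio_buf = buf1;
	acb1.aio_offset = sizeof(buf0);

	/* LIO_WAIT: block until every control block in the list completes. */
	if (lio_listio(LIO_WAIT, list, 2, NULL) == -1)
		perror("lio_listio");
	else
		printf("read %zd and %zd bytes\n",
		    aio_return(&acb0), aio_return(&acb1));
	return (0);
}
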
@@ -126,21 +251,18 @@ int aio_cancel(int, struct aiocb *);
*/
int aio_suspend(const struct aiocb * const[], int, const struct timespec *);
+/*
+ * Asynchronous mlock
+ */
+int aio_mlock(struct aiocb *);
+
#ifdef __BSD_VISIBLE
-int aio_waitcomplete(struct aiocb **, struct timespec *);
+ssize_t aio_waitcomplete(struct aiocb **, struct timespec *);
#endif
int aio_fsync(int op, struct aiocb *aiocbp);
__END_DECLS
-#else
+#endif /* !_KERNEL */
-/* Forward declarations for prototypes below. */
-struct socket;
-struct sockbuf;
-
-extern void (*aio_swake)(struct socket *, struct sockbuf *);
-
-#endif
-
-#endif
+#endif /* !_SYS_AIO_H_ */
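
To round off the userspace additions in the final hunk, a hedged sketch combining aio_mlock() with the ssize_t-returning aio_waitcomplete(). That aio_mlock() wires the region described by aio_buf and aio_nbytes follows the FreeBSD aio_mlock(2) convention rather than anything stated in this header; the buffer size is a placeholder.

#include <aio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	struct aiocb mlcb, *done;
	size_t len = 1024 * 1024;	/* placeholder: wire 1 MiB */
	void *buf = malloc(len);
	ssize_t status;

	memset(&mlcb, 0, sizeof(mlcb));
	mlcb.aio_buf = buf;		/* assumed: start of the range to wire */
	mlcb.aio_nbytes = len;		/* assumed: length of the range */

	if (aio_mlock(&mlcb) == -1) {	/* queue the asynchronous mlock */
		perror("aio_mlock");
		return (1);
	}

	/* Wait for any outstanding request to finish; NULL means no timeout. */
	status = aio_waitcomplete(&done, NULL);
	if (status == -1)
		perror("aio_waitcomplete");
	else if (done == &mlcb)
		printf("aio_mlock completed with status %zd\n", status);
	return (0);
}
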