author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-11-06 15:42:44 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-11-15 10:56:14 +0100
commit     e0b4edbdcc3558d3f38af8398f995c2e9f019f07 (patch)
tree       ea91a5fcfb9b6a66a8c0b74cf68ff8d450ce17e0 /freebsd/sys/kern
parent     Disable or make static kern_* functions (diff)
Update to FreeBSD head 2018-11-15
Git mirror commit a18b0830c4be01b39489a891b63d6023ada6358a. Update #3472.
Diffstat (limited to 'freebsd/sys/kern')
-rw-r--r--  freebsd/sys/kern/init_main.c         2
-rw-r--r--  freebsd/sys/kern/kern_mbuf.c        14
-rw-r--r--  freebsd/sys/kern/kern_sysctl.c      47
-rw-r--r--  freebsd/sys/kern/subr_blist.c      597
-rw-r--r--  freebsd/sys/kern/subr_gtaskqueue.c  56
-rw-r--r--  freebsd/sys/kern/sys_generic.c      31
-rwxr-xr-x  freebsd/sys/kern/sys_pipe.c          5
-rw-r--r--  freebsd/sys/kern/tty.c              11
-rw-r--r--  freebsd/sys/kern/uipc_syscalls.c    28
9 files changed, 377 insertions, 414 deletions
diff --git a/freebsd/sys/kern/init_main.c b/freebsd/sys/kern/init_main.c
index 43eaea5c..8fcc314b 100644
--- a/freebsd/sys/kern/init_main.c
+++ b/freebsd/sys/kern/init_main.c
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
+#include <sys/epoch.h>
#include <sys/exec.h>
#include <sys/file.h>
#include <sys/filedesc.h>
@@ -538,6 +539,7 @@ proc0_init(void *dummy __unused)
td->td_pflags = TDP_KTHREAD;
td->td_cpuset = cpuset_thread0();
td->td_domain.dr_policy = td->td_cpuset->cs_domain;
+ epoch_thread_init(td);
prison0_init();
p->p_peers = 0;
p->p_leader = p;
diff --git a/freebsd/sys/kern/kern_mbuf.c b/freebsd/sys/kern/kern_mbuf.c
index 09cae9b2..f94eda5b 100644
--- a/freebsd/sys/kern/kern_mbuf.c
+++ b/freebsd/sys/kern/kern_mbuf.c
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/conf.h>
+#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
@@ -423,8 +424,6 @@ nd_buf_import(void *arg, void **store, int count, int domain __unused,
struct mbuf *m;
int i;
- KASSERT(!dumping, ("%s: ran out of pre-allocated mbufs", __func__));
-
q = arg;
for (i = 0; i < count; i++) {
@@ -434,6 +433,8 @@ nd_buf_import(void *arg, void **store, int count, int domain __unused,
trash_init(m, q == &nd_mbufq ? MSIZE : nd_clsize, flags);
store[i] = m;
}
+ KASSERT((flags & M_WAITOK) == 0 || i == count,
+ ("%s: ran out of pre-allocated mbufs", __func__));
return (i);
}
@@ -460,8 +461,6 @@ nd_pack_import(void *arg __unused, void **store, int count, int domain __unused,
void *clust;
int i;
- KASSERT(!dumping, ("%s: ran out of pre-allocated mbufs", __func__));
-
for (i = 0; i < count; i++) {
m = m_get(MT_DATA, M_NOWAIT);
if (m == NULL)
@@ -474,6 +473,8 @@ nd_pack_import(void *arg __unused, void **store, int count, int domain __unused,
mb_ctor_clust(clust, nd_clsize, m, 0);
store[i] = m;
}
+ KASSERT((flags & M_WAITOK) == 0 || i == count,
+ ("%s: ran out of pre-allocated mbufs", __func__));
return (i);
}
@@ -605,8 +606,9 @@ mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
#ifndef __rtems__
- return ((void *)kmem_alloc_contig_domain(domain, bytes, wait,
- (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
+ return ((void *)kmem_alloc_contig_domainset(DOMAINSET_FIXED(domain),
+ bytes, wait, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
+ VM_MEMATTR_DEFAULT));
#else /* __rtems__ */
return ((void *)malloc(bytes, M_TEMP, wait));
#endif /* __rtems__ */
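
The KASSERT moves above change the contract of the netdump import callbacks: a short count is now acceptable whenever the caller did not pass M_WAITOK, instead of asserting against the global 'dumping' state. A minimal sketch of that uma_import contract, with hypothetical names:

    static int
    example_import(void *arg, void **store, int count, int domain __unused,
        int flags)
    {
    	int i;

    	for (i = 0; i < count; i++) {
    		/* example_get_item() is hypothetical. */
    		store[i] = example_get_item(arg, flags);
    		if (store[i] == NULL)
    			break;
    	}
    	/* A short import is only a bug when the caller could sleep. */
    	KASSERT((flags & M_WAITOK) == 0 || i == count,
    	    ("%s: short import under M_WAITOK", __func__));
    	return (i);
    }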
diff --git a/freebsd/sys/kern/kern_sysctl.c b/freebsd/sys/kern/kern_sysctl.c
index 3baea2e4..ea9c1821 100644
--- a/freebsd/sys/kern/kern_sysctl.c
+++ b/freebsd/sys/kern/kern_sysctl.c
@@ -1704,6 +1704,53 @@ retry:
}
/*
+ * Based on sysctl_handle_int(), convert microseconds to an sbintime.
+ */
+int
+sysctl_usec_to_sbintime(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int64_t tt;
+ sbintime_t sb;
+
+ tt = *(int64_t *)arg1;
+ sb = ustosbt(tt);
+
+ error = sysctl_handle_64(oidp, &sb, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ tt = sbttous(sb);
+ *(int64_t *)arg1 = tt;
+
+ return (0);
+}
+
+/*
+ * Based on sysctl_handle_int(), convert milliseconds to an sbintime.
+ */
+int
+sysctl_msec_to_sbintime(SYSCTL_HANDLER_ARGS)
+{
+ int error;
+ int64_t tt;
+ sbintime_t sb;
+
+ tt = *(int64_t *)arg1;
+ sb = mstosbt(tt);
+
+ error = sysctl_handle_64(oidp, &sb, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ tt = sbttoms(sb);
+ *(int64_t *)arg1 = tt;
+
+ return (0);
+}
+
+
+/*
* Transfer functions to/from kernel space.
* XXX: rather untested at this point
*/
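
The two handlers added above let the kernel keep a 64-bit tunable in microseconds or milliseconds while the value crossing the sysctl boundary is the corresponding raw sbintime_t. A hedged registration sketch (the variable and OID names are hypothetical):

    static int64_t example_timeout_us = 500000;	/* stored in microseconds */
    SYSCTL_PROC(_kern, OID_AUTO, example_timeout,
        CTLTYPE_S64 | CTLFLAG_RW | CTLFLAG_MPSAFE, &example_timeout_us, 0,
        sysctl_usec_to_sbintime, "Q", "Example timeout (as sbintime)");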
diff --git a/freebsd/sys/kern/subr_blist.c b/freebsd/sys/kern/subr_blist.c
index e5b40e62..79a5a7b4 100644
--- a/freebsd/sys/kern/subr_blist.c
+++ b/freebsd/sys/kern/subr_blist.c
@@ -48,11 +48,11 @@
* upper bound on a potential allocation, but not necessarily a tight upper
* bound.
*
- * The radix tree also implements two collapsed states for meta nodes:
- * the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is
- * in either of these two states, all information contained underneath
- * the node is considered stale. These states are used to optimize
- * allocation and freeing operations.
+ * The bitmap field in each node directs the search for available blocks.
+ * For a leaf node, a bit is set if the corresponding block is free. For a
+ * meta node, a bit is set if the corresponding subtree contains a free
+ * block somewhere within it. The search at a meta node considers only
+ * children of that node that represent a range that includes a free block.
*
* The hinting greatly increases code efficiency for allocations while
* the general radix structure optimizes both allocations and frees. The
@@ -61,19 +61,19 @@
*
* The blist code wires all necessary memory at creation time. Neither
* allocations nor frees require interaction with the memory subsystem.
- * The non-blocking features of the blist code are used in the swap code
- * (vm/swap_pager.c).
+ * The non-blocking nature of allocations and frees is required by swap
+ * code (vm/swap_pager.c).
*
- * LAYOUT: The radix tree is laid out recursively using a
- * linear array. Each meta node is immediately followed (laid out
- * sequentially in memory) by BLIST_META_RADIX lower level nodes. This
- * is a recursive structure but one that can be easily scanned through
- * a very simple 'skip' calculation. In order to support large radixes,
- * portions of the tree may reside outside our memory allocation. We
- * handle this with an early-termination optimization (when bighint is
- * set to -1) on the scan. The memory allocation is only large enough
- * to cover the number of blocks requested at creation time even if it
- * must be encompassed in larger root-node radix.
+ * LAYOUT: The radix tree is laid out recursively using a linear array.
+ * Each meta node is immediately followed (laid out sequentially in
+ * memory) by BLIST_META_RADIX lower level nodes. This is a recursive
+ * structure but one that can be easily scanned through a very simple
+ * 'skip' calculation. The memory allocation is only large enough to
+ * cover the number of blocks requested at creation time. Nodes that
+ * represent blocks beyond that limit, nodes that would never be read
+ * or written, are not allocated, so that the last of the
+ * BLIST_META_RADIX lower level nodes of some nodes may not be
+ * allocated.
*
* NOTE: the allocator cannot currently allocate more than
* BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too
@@ -107,6 +107,7 @@ __FBSDID("$FreeBSD$");
#define BLIST_DEBUG
#endif
+#include <sys/errno.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
@@ -120,7 +121,7 @@ __FBSDID("$FreeBSD$");
#define bitcount64(x) __bitcount64((uint64_t)(x))
#define malloc(a,b,c) calloc(a, 1)
#define free(a,b) free(a)
-static __inline int imax(int a, int b) { return (a > b ? a : b); }
+#define ummin(a,b) ((a) < (b) ? (a) : (b))
#include <sys/blist.h>
@@ -181,6 +182,18 @@ radix_to_skip(daddr_t radix)
}
/*
+ * Provide a mask with count bits set, starting at position n.
+ */
+static inline u_daddr_t
+bitrange(int n, int count)
+{
+
+ return (((u_daddr_t)-1 << n) &
+ ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - (n + count))));
+}
+
+
+/*
* Use binary search, or a faster method, to find the 1 bit in a u_daddr_t.
* Assumes that the argument has only one bit set.
*/
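
The new bitrange() helper builds a contiguous run of set bits and replaces two open-coded copies further down (in blst_leaf_free() and blst_leaf_fill()). A standalone sketch of its behavior, assuming the usual 64-bit u_daddr_t:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u_daddr_t;		/* assumption: 64-bit daddr_t */
    #define	BLIST_BMAP_RADIX	64

    static inline u_daddr_t
    bitrange(int n, int count)
    {
    	return (((u_daddr_t)-1 << n) &
    	    ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - (n + count))));
    }

    int
    main(void)
    {
    	/* Bits 4..11 set: prints 0000000000000ff0. */
    	printf("%016jx\n", (uintmax_t)bitrange(4, 8));
    	return (0);
    }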
@@ -222,9 +235,7 @@ blist_t
blist_create(daddr_t blocks, int flags)
{
blist_t bl;
- daddr_t i, last_block;
- u_daddr_t nodes, radix, skip;
- int digit;
+ u_daddr_t nodes, radix;
if (blocks == 0)
panic("invalid block count");
@@ -232,30 +243,13 @@ blist_create(daddr_t blocks, int flags)
/*
* Calculate the radix and node count used for scanning.
*/
- last_block = blocks - 1;
+ nodes = 1;
radix = BLIST_BMAP_RADIX;
- while (radix < blocks) {
- if (((last_block / radix + 1) & BLIST_META_MASK) != 0)
- /*
- * We must widen the blist to avoid partially
- * filled nodes.
- */
- last_block |= radix - 1;
+ while (radix <= blocks) {
+ nodes += 1 + (blocks - 1) / radix;
radix *= BLIST_META_RADIX;
}
- /*
- * Count the meta-nodes in the expanded tree, including the final
- * terminator, from the bottom level up to the root.
- */
- nodes = 1;
- if (radix - blocks >= BLIST_BMAP_RADIX)
- nodes++;
- last_block /= BLIST_BMAP_RADIX;
- while (last_block > 0) {
- nodes += last_block + 1;
- last_block /= BLIST_META_RADIX;
- }
bl = malloc(offsetof(struct blist, bl_root[nodes]), M_SWAP, flags |
M_ZERO);
if (bl == NULL)
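
The simplified node count can be checked by hand. Assuming BLIST_BMAP_RADIX = 64 and BLIST_META_RADIX = 16 (values taken from sys/blist.h; treat them as assumptions here), a blist for blocks = 1024 runs the loop twice: the radix = 64 pass adds 1 + 1023/64 = 16 leaf nodes, the radix = 1024 pass adds 1 + 1023/1024 = 1 meta node, and with the initial root node the allocation holds 18 nodes, leaving bl_radix at 16384.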
@@ -263,33 +257,6 @@ blist_create(daddr_t blocks, int flags)
bl->bl_blocks = blocks;
bl->bl_radix = radix;
- bl->bl_cursor = 0;
-
- /*
- * Initialize the empty tree by filling in root values, then initialize
- * just the terminators in the rest of the tree.
- */
- bl->bl_root[0].bm_bighint = 0;
- if (radix == BLIST_BMAP_RADIX)
- bl->bl_root[0].u.bmu_bitmap = 0;
- else
- bl->bl_root[0].u.bmu_avail = 0;
- last_block = blocks - 1;
- i = 0;
- while (radix > BLIST_BMAP_RADIX) {
- radix /= BLIST_META_RADIX;
- skip = radix_to_skip(radix);
- digit = last_block / radix;
- i += 1 + digit * skip;
- if (digit != BLIST_META_MASK) {
- /*
- * Add a terminator.
- */
- bl->bl_root[i + skip].bm_bighint = (daddr_t)-1;
- bl->bl_root[i + skip].u.bmu_bitmap = 0;
- }
- last_block %= radix;
- }
#if defined(BLIST_DEBUG)
printf(
@@ -323,6 +290,9 @@ blist_alloc(blist_t bl, daddr_t count)
{
daddr_t blk;
+ if (count > BLIST_MAX_ALLOC)
+ panic("allocation too large");
+
/*
* This loop iterates at most twice. An allocation failure in the
* first iteration leads to a second iteration only if the cursor was
@@ -333,12 +303,13 @@ blist_alloc(blist_t bl, daddr_t count)
blk = blst_meta_alloc(bl->bl_root, bl->bl_cursor, count,
bl->bl_radix);
if (blk != SWAPBLK_NONE) {
+ bl->bl_avail -= count;
bl->bl_cursor = blk + count;
if (bl->bl_cursor == bl->bl_blocks)
bl->bl_cursor = 0;
return (blk);
- } else if (bl->bl_cursor != 0)
- bl->bl_cursor = 0;
+ }
+ bl->bl_cursor = 0;
}
return (SWAPBLK_NONE);
}
@@ -350,10 +321,7 @@ daddr_t
blist_avail(blist_t bl)
{
- if (bl->bl_radix == BLIST_BMAP_RADIX)
- return (bitcount64(bl->bl_root->u.bmu_bitmap));
- else
- return (bl->bl_root->u.bmu_avail);
+ return (bl->bl_avail);
}
/*
@@ -365,7 +333,10 @@ void
blist_free(blist_t bl, daddr_t blkno, daddr_t count)
{
+ if (blkno < 0 || blkno + count > bl->bl_blocks)
+ panic("freeing invalid range");
blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix);
+ bl->bl_avail += count;
}
/*
@@ -377,8 +348,13 @@ blist_free(blist_t bl, daddr_t blkno, daddr_t count)
daddr_t
blist_fill(blist_t bl, daddr_t blkno, daddr_t count)
{
+ daddr_t filled;
- return (blst_meta_fill(bl->bl_root, blkno, count, bl->bl_radix));
+ if (blkno < 0 || blkno + count > bl->bl_blocks)
+ panic("filling invalid range");
+ filled = blst_meta_fill(bl->bl_root, blkno, count, bl->bl_radix);
+ bl->bl_avail -= filled;
+ return (filled);
}
/*
@@ -416,8 +392,11 @@ blist_resize(blist_t *pbl, daddr_t count, int freenew, int flags)
void
blist_print(blist_t bl)
{
- printf("BLIST cursor = %08jx {\n", (uintmax_t)bl->bl_cursor);
- blst_radix_print(bl->bl_root, 0, bl->bl_radix, 4);
+ printf("BLIST avail = %jd, cursor = %08jx {\n",
+ (uintmax_t)bl->bl_avail, (uintmax_t)bl->bl_cursor);
+
+ if (bl->bl_root->bm_bitmap != 0)
+ blst_radix_print(bl->bl_root, 0, bl->bl_radix, 4);
printf("}\n");
}
@@ -571,16 +550,11 @@ blist_stats(blist_t bl, struct sbuf *s)
* Check for skippable subtrees starting at i.
*/
while (radix > BLIST_BMAP_RADIX) {
- if (bl->bl_root[nodes].u.bmu_avail == 0) {
+ if (bl->bl_root[nodes].bm_bitmap == 0) {
if (gap_stats_counting(stats))
update_gap_stats(stats, i);
break;
}
- if (bl->bl_root[nodes].u.bmu_avail == radix) {
- if (!gap_stats_counting(stats))
- update_gap_stats(stats, i);
- break;
- }
/*
* Skip subtree root.
@@ -592,7 +566,7 @@ blist_stats(blist_t bl, struct sbuf *s)
/*
* Scan leaf.
*/
- mask = bl->bl_root[nodes].u.bmu_bitmap;
+ mask = bl->bl_root[nodes].bm_bitmap;
diff = mask ^ (mask << 1);
if (gap_stats_counting(stats))
diff ^= 1;
@@ -620,7 +594,57 @@ blist_stats(blist_t bl, struct sbuf *s)
*/
/*
- * blist_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap).
+ * BLST_NEXT_LEAF_ALLOC() - allocate the first few blocks in the next leaf.
+ *
+ * 'scan' is a leaf node, associated with a block containing 'blk'.
+ * The next leaf node could be adjacent, or several nodes away if the
+ * least common ancestor of 'scan' and its neighbor is several levels
+ * up. Use 'blk' to determine how many meta-nodes lie between the
+ * leaves. If the next leaf has enough initial bits set, clear them
+ * and clear the bits in the meta nodes on the path up to the least
+ * common ancestor to mark any subtrees made completely empty.
+ */
+static int
+blst_next_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
+{
+ blmeta_t *next;
+ daddr_t skip;
+ u_daddr_t radix;
+ int digit;
+
+ next = scan + 1;
+ blk += BLIST_BMAP_RADIX;
+ radix = BLIST_BMAP_RADIX;
+ while ((digit = ((blk / radix) & BLIST_META_MASK)) == 0 &&
+ (next->bm_bitmap & 1) == 1) {
+ next++;
+ radix *= BLIST_META_RADIX;
+ }
+ if (((next->bm_bitmap + 1) & ~((u_daddr_t)-1 << count)) != 0) {
+ /*
+ * The next leaf doesn't have enough free blocks at the
+ * beginning to complete the spanning allocation.
+ */
+ return (ENOMEM);
+ }
+ /* Clear the first 'count' bits in the next leaf to allocate. */
+ next->bm_bitmap &= (u_daddr_t)-1 << count;
+
+ /*
+ * Update bitmaps of next-ancestors, up to least common ancestor.
+ */
+ skip = radix_to_skip(radix);
+ while (radix != BLIST_BMAP_RADIX && next->bm_bitmap == 0) {
+ (--next)->bm_bitmap ^= 1;
+ radix /= BLIST_META_RADIX;
+ }
+ if (next->bm_bitmap == 0)
+ scan[-digit * skip].bm_bitmap ^= (u_daddr_t)1 << digit;
+ return (0);
+}
+
+/*
+ * BLST_LEAF_ALLOC() - allocate at a leaf in the radix tree (a bitmap).
*
* This is the core of the allocator and is optimized for the
* BLIST_BMAP_RADIX block allocation case. Otherwise, execution
@@ -635,15 +659,15 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
range1 = 0;
count1 = count - 1;
num_shifts = fls(count1);
- mask = scan->u.bmu_bitmap;
+ mask = scan->bm_bitmap;
while ((-mask & ~mask) != 0 && num_shifts > 0) {
/*
* If bit i is set in mask, then bits in [i, i+range1] are set
- * in scan->u.bmu_bitmap. The value of range1 is equal to
+ * in scan->bm_bitmap. The value of range1 is equal to
* count1 >> num_shifts. Grow range and reduce num_shifts to 0,
* while preserving these invariants. The updates to mask leave
* fewer bits set, but each bit that remains set represents a
- * longer string of consecutive bits set in scan->u.bmu_bitmap.
+ * longer string of consecutive bits set in scan->bm_bitmap.
* If more updates to mask cannot clear more bits, because mask
* is partitioned with all 0 bits preceding all 1 bits, the loop
* terminates immediately.
@@ -687,31 +711,14 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
* An allocation within this leaf is impossible, so a successful
* allocation depends on the next leaf providing some of the blocks.
*/
- if (((blk / BLIST_BMAP_RADIX + 1) & BLIST_META_MASK) == 0) {
- /*
- * The next leaf has a different meta-node parent, so it
- * is not necessarily initialized. Update bighint,
- * comparing the range found at the end of mask to the
- * largest earlier range that could have been made to
- * vanish in the initial processing of mask.
- */
- scan->bm_bighint = imax(BLIST_BMAP_RADIX - lo, range1);
- return (SWAPBLK_NONE);
- }
- hi -= BLIST_BMAP_RADIX;
- if (((scan[1].u.bmu_bitmap + 1) & ~((u_daddr_t)-1 << hi)) != 0) {
+ if (blst_next_leaf_alloc(scan, blk, hi - BLIST_BMAP_RADIX) != 0)
/*
- * The next leaf doesn't have enough free blocks at the
- * beginning to complete the spanning allocation. The
- * hint cannot be updated, because the same allocation
- * request could be satisfied later, by this leaf, if
- * the state of the next leaf changes, and without any
- * changes to this leaf.
+ * The hint cannot be updated, because the same
+ * allocation request could be satisfied later, by this
+ * leaf, if the state of the next leaf changes, and
+ * without any changes to this leaf.
*/
return (SWAPBLK_NONE);
- }
- /* Clear the first 'hi' bits in the next leaf, allocating them. */
- scan[1].u.bmu_bitmap &= (u_daddr_t)-1 << hi;
hi = BLIST_BMAP_RADIX;
}
@@ -726,12 +733,9 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
} else {
/* Clear the bits of mask at position 'hi' and higher. */
mask &= (u_daddr_t)-1 >> (BLIST_BMAP_RADIX - hi);
- /* If this allocation uses all the bits, clear the hint. */
- if (mask == scan->u.bmu_bitmap)
- scan->bm_bighint = 0;
}
/* Clear the allocated bits from this leaf. */
- scan->u.bmu_bitmap &= ~mask;
+ scan->bm_bitmap &= ~mask;
return ((blk & ~BLIST_BMAP_MASK) + lo);
}
@@ -746,81 +750,61 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count)
static daddr_t
blst_meta_alloc(blmeta_t *scan, daddr_t cursor, daddr_t count, u_daddr_t radix)
{
- daddr_t blk, i, next_skip, r, skip;
- int child;
+ daddr_t blk, i, r, skip;
+ u_daddr_t bit, mask;
bool scan_from_start;
+ int digit;
if (radix == BLIST_BMAP_RADIX)
return (blst_leaf_alloc(scan, cursor, count));
- if (scan->u.bmu_avail < count) {
- /*
- * The meta node's hint must be too large if the allocation
- * exceeds the number of free blocks. Reduce the hint, and
- * return failure.
- */
- scan->bm_bighint = scan->u.bmu_avail;
- return (SWAPBLK_NONE);
- }
blk = cursor & -radix;
+ scan_from_start = (cursor == blk);
+ radix /= BLIST_META_RADIX;
skip = radix_to_skip(radix);
- next_skip = skip / BLIST_META_RADIX;
+ mask = scan->bm_bitmap;
+
+ /* Discard any candidates that appear before cursor. */
+ digit = (cursor / radix) & BLIST_META_MASK;
+ mask &= (u_daddr_t)-1 << digit;
/*
- * An ALL-FREE meta node requires special handling before allocating
- * any of its blocks.
+ * If the first try is for a block that includes the cursor, pre-undo
+ * the digit * radix offset in the first call; otherwise, ignore the
+ * cursor entirely.
*/
- if (scan->u.bmu_avail == radix) {
- radix /= BLIST_META_RADIX;
-
- /*
- * Reinitialize each of the meta node's children. An ALL-FREE
- * meta node cannot have a terminator in any subtree.
- */
- for (i = 1; i < skip; i += next_skip) {
- if (next_skip == 1)
- scan[i].u.bmu_bitmap = (u_daddr_t)-1;
- else
- scan[i].u.bmu_avail = radix;
- scan[i].bm_bighint = radix;
- }
- } else {
- radix /= BLIST_META_RADIX;
- }
+ if (((mask >> digit) & 1) == 1)
+ cursor -= digit * radix;
+ else
+ cursor = blk;
- if (count > radix) {
- /*
- * The allocation exceeds the number of blocks that are
- * managed by a subtree of this meta node.
- */
- panic("allocation too large");
- }
- scan_from_start = cursor == blk;
- child = (cursor - blk) / radix;
- blk += child * radix;
- for (i = 1 + child * next_skip; i < skip; i += next_skip) {
+ /*
+ * Examine the nonempty subtree associated with each bit set in mask.
+ */
+ do {
+ bit = mask & -mask;
+ digit = bitpos(bit);
+ i = 1 + digit * skip;
if (count <= scan[i].bm_bighint) {
/*
* The allocation might fit beginning in the i'th subtree.
*/
- r = blst_meta_alloc(&scan[i],
- cursor > blk ? cursor : blk, count, radix);
+ r = blst_meta_alloc(&scan[i], cursor + digit * radix,
+ count, radix);
if (r != SWAPBLK_NONE) {
- scan->u.bmu_avail -= count;
+ if (scan[i].bm_bitmap == 0)
+ scan->bm_bitmap ^= bit;
return (r);
}
- } else if (scan[i].bm_bighint == (daddr_t)-1) {
- /*
- * Terminator
- */
- break;
}
- blk += radix;
- }
+ cursor = blk;
+ } while ((mask ^= bit) != 0);
/*
- * We couldn't allocate count in this subtree, update bighint.
+ * We couldn't allocate count in this subtree. If the whole tree was
+ * scanned, and the last tree node is allocated, update bighint.
*/
- if (scan_from_start && scan->bm_bighint >= count)
+ if (scan_from_start && !(digit == BLIST_META_RADIX - 1 &&
+ scan[i].bm_bighint == BLIST_MAX_ALLOC))
scan->bm_bighint = count - 1;
return (SWAPBLK_NONE);
@@ -834,7 +818,6 @@ static void
blst_leaf_free(blmeta_t *scan, daddr_t blk, int count)
{
u_daddr_t mask;
- int n;
/*
* free some data in this bitmap
@@ -842,20 +825,10 @@ blst_leaf_free(blmeta_t *scan, daddr_t blk, int count)
* \_________/\__/
* count n
*/
- n = blk & BLIST_BMAP_MASK;
- mask = ((u_daddr_t)-1 << n) &
- ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n));
- if (scan->u.bmu_bitmap & mask)
+ mask = bitrange(blk & BLIST_BMAP_MASK, count);
+ if (scan->bm_bitmap & mask)
panic("freeing free block");
- scan->u.bmu_bitmap |= mask;
-
- /*
- * We could probably do a better job here. We are required to make
- * bighint at least as large as the biggest contiguous block of
- * data. If we just shoehorn it, a little extra overhead will
- * be incured on the next allocation (but only that one typically).
- */
- scan->bm_bighint = BLIST_BMAP_RADIX;
+ scan->bm_bitmap |= mask;
}
/*
@@ -871,79 +844,37 @@ blst_leaf_free(blmeta_t *scan, daddr_t blk, int count)
static void
blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, u_daddr_t radix)
{
- daddr_t blk, i, next_skip, skip, v;
- int child;
-
- if (scan->bm_bighint == (daddr_t)-1)
- panic("freeing invalid range");
- if (radix == BLIST_BMAP_RADIX)
- return (blst_leaf_free(scan, freeBlk, count));
- skip = radix_to_skip(radix);
- next_skip = skip / BLIST_META_RADIX;
-
- if (scan->u.bmu_avail == 0) {
- /*
- * ALL-ALLOCATED special case, with possible
- * shortcut to ALL-FREE special case.
- */
- scan->u.bmu_avail = count;
- scan->bm_bighint = count;
-
- if (count != radix) {
- for (i = 1; i < skip; i += next_skip) {
- if (scan[i].bm_bighint == (daddr_t)-1)
- break;
- scan[i].bm_bighint = 0;
- if (next_skip == 1) {
- scan[i].u.bmu_bitmap = 0;
- } else {
- scan[i].u.bmu_avail = 0;
- }
- }
- /* fall through */
- }
- } else {
- scan->u.bmu_avail += count;
- /* scan->bm_bighint = radix; */
- }
+ daddr_t blk, endBlk, i, skip;
+ int digit, endDigit;
/*
- * ALL-FREE special case.
+ * We could probably do a better job here. We are required to make
+ * bighint at least as large as the biggest allocable block of data.
+ * If we just shoehorn it, a little extra overhead will be incurred
+ * on the next allocation (but only that one typically).
*/
+ scan->bm_bighint = BLIST_MAX_ALLOC;
- if (scan->u.bmu_avail == radix)
- return;
- if (scan->u.bmu_avail > radix)
- panic("blst_meta_free: freeing already free blocks (%lld) %lld/%lld",
- (long long)count, (long long)scan->u.bmu_avail,
- (long long)radix);
-
- /*
- * Break the free down into its components
- */
+ if (radix == BLIST_BMAP_RADIX)
+ return (blst_leaf_free(scan, freeBlk, count));
- blk = freeBlk & -radix;
+ endBlk = ummin(freeBlk + count, (freeBlk + radix) & -radix);
radix /= BLIST_META_RADIX;
-
- child = (freeBlk - blk) / radix;
- blk += child * radix;
- i = 1 + child * next_skip;
- while (i < skip && blk < freeBlk + count) {
- v = blk + radix - freeBlk;
- if (v > count)
- v = count;
- blst_meta_free(&scan[i], freeBlk, v, radix);
- if (scan->bm_bighint < scan[i].bm_bighint)
- scan->bm_bighint = scan[i].bm_bighint;
- count -= v;
- freeBlk += v;
+ skip = radix_to_skip(radix);
+ blk = freeBlk & -radix;
+ digit = (blk / radix) & BLIST_META_MASK;
+ endDigit = 1 + (((endBlk - 1) / radix) & BLIST_META_MASK);
+ scan->bm_bitmap |= bitrange(digit, endDigit - digit);
+ for (i = 1 + digit * skip; blk < endBlk; i += skip) {
blk += radix;
- i += next_skip;
+ count = ummin(blk, endBlk) - freeBlk;
+ blst_meta_free(&scan[i], freeBlk, count, radix);
+ freeBlk = blk;
}
}
/*
- * BLIST_RADIX_COPY() - copy one radix tree to another
+ * BLST_COPY() - copy one radix tree to another
*
* Locates free space in the source tree and frees it in the destination
* tree. The space may not already be free in the destination.
@@ -952,21 +883,21 @@ static void
blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, blist_t dest,
daddr_t count)
{
- daddr_t i, next_skip, skip;
+ daddr_t endBlk, i, skip;
/*
* Leaf node
*/
if (radix == BLIST_BMAP_RADIX) {
- u_daddr_t v = scan->u.bmu_bitmap;
+ u_daddr_t v = scan->bm_bitmap;
if (v == (u_daddr_t)-1) {
blist_free(dest, blk, count);
} else if (v != 0) {
int i;
- for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
+ for (i = 0; i < count; ++i) {
if (v & ((u_daddr_t)1 << i))
blist_free(dest, blk + i, 1);
}
@@ -978,42 +909,22 @@ blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, blist_t dest,
* Meta node
*/
- if (scan->u.bmu_avail == 0) {
+ if (scan->bm_bitmap == 0) {
/*
* Source all allocated, leave dest allocated
*/
return;
}
- if (scan->u.bmu_avail == radix) {
- /*
- * Source all free, free entire dest
- */
- if (count < radix)
- blist_free(dest, blk, count);
- else
- blist_free(dest, blk, radix);
- return;
- }
-
- skip = radix_to_skip(radix);
- next_skip = skip / BLIST_META_RADIX;
+ endBlk = blk + count;
radix /= BLIST_META_RADIX;
-
- for (i = 1; count && i < skip; i += next_skip) {
- if (scan[i].bm_bighint == (daddr_t)-1)
- break;
-
- if (count >= radix) {
- blst_copy(&scan[i], blk, radix, dest, radix);
- count -= radix;
- } else {
- if (count) {
- blst_copy(&scan[i], blk, radix, dest, count);
- }
- count = 0;
- }
+ skip = radix_to_skip(radix);
+ for (i = 1; blk < endBlk; i += skip) {
blk += radix;
+ count = radix;
+ if (blk >= endBlk)
+ count -= blk - endBlk;
+ blst_copy(&scan[i], blk - radix, radix, dest, count);
}
}
@@ -1029,16 +940,13 @@ blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count)
{
daddr_t nblks;
u_daddr_t mask;
- int n;
- n = blk & BLIST_BMAP_MASK;
- mask = ((u_daddr_t)-1 << n) &
- ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n));
+ mask = bitrange(blk & BLIST_BMAP_MASK, count);
/* Count the number of blocks that we are allocating. */
- nblks = bitcount64(scan->u.bmu_bitmap & mask);
+ nblks = bitcount64(scan->bm_bitmap & mask);
- scan->u.bmu_bitmap &= ~mask;
+ scan->bm_bitmap &= ~mask;
return (nblks);
}
@@ -1053,70 +961,27 @@ blst_leaf_fill(blmeta_t *scan, daddr_t blk, int count)
static daddr_t
blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count, u_daddr_t radix)
{
- daddr_t blk, i, nblks, next_skip, skip, v;
- int child;
+ daddr_t blk, endBlk, i, nblks, skip;
+ int digit;
- if (scan->bm_bighint == (daddr_t)-1)
- panic("filling invalid range");
- if (count > radix) {
- /*
- * The allocation exceeds the number of blocks that are
- * managed by this node.
- */
- panic("fill too large");
- }
if (radix == BLIST_BMAP_RADIX)
return (blst_leaf_fill(scan, allocBlk, count));
- if (count == radix || scan->u.bmu_avail == 0) {
- /*
- * ALL-ALLOCATED special case
- */
- nblks = scan->u.bmu_avail;
- scan->u.bmu_avail = 0;
- scan->bm_bighint = 0;
- return (nblks);
- }
+
+ endBlk = ummin(allocBlk + count, (allocBlk + radix) & -radix);
+ radix /= BLIST_META_RADIX;
skip = radix_to_skip(radix);
- next_skip = skip / BLIST_META_RADIX;
blk = allocBlk & -radix;
-
- /*
- * An ALL-FREE meta node requires special handling before allocating
- * any of its blocks.
- */
- if (scan->u.bmu_avail == radix) {
- radix /= BLIST_META_RADIX;
-
- /*
- * Reinitialize each of the meta node's children. An ALL-FREE
- * meta node cannot have a terminator in any subtree.
- */
- for (i = 1; i < skip; i += next_skip) {
- if (next_skip == 1)
- scan[i].u.bmu_bitmap = (u_daddr_t)-1;
- else
- scan[i].u.bmu_avail = radix;
- scan[i].bm_bighint = radix;
- }
- } else {
- radix /= BLIST_META_RADIX;
- }
-
nblks = 0;
- child = (allocBlk - blk) / radix;
- blk += child * radix;
- i = 1 + child * next_skip;
- while (i < skip && blk < allocBlk + count) {
- v = blk + radix - allocBlk;
- if (v > count)
- v = count;
- nblks += blst_meta_fill(&scan[i], allocBlk, v, radix);
- count -= v;
- allocBlk += v;
+ while (blk < endBlk) {
+ digit = (blk / radix) & BLIST_META_MASK;
+ i = 1 + digit * skip;
blk += radix;
- i += next_skip;
+ count = ummin(blk, endBlk) - allocBlk;
+ nblks += blst_meta_fill(&scan[i], allocBlk, count, radix);
+ if (scan[i].bm_bitmap == 0)
+ scan->bm_bitmap &= ~((u_daddr_t)1 << digit);
+ allocBlk = blk;
}
- scan->u.bmu_avail -= nblks;
return (nblks);
}
@@ -1125,64 +990,44 @@ blst_meta_fill(blmeta_t *scan, daddr_t allocBlk, daddr_t count, u_daddr_t radix)
static void
blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int tab)
{
- daddr_t i, next_skip, skip;
+ daddr_t skip;
+ u_daddr_t bit, mask;
+ int digit;
if (radix == BLIST_BMAP_RADIX) {
printf(
- "%*.*s(%08llx,%lld): bitmap %016llx big=%lld\n",
+ "%*.*s(%08llx,%lld): bitmap %0*llx big=%lld\n",
tab, tab, "",
(long long)blk, (long long)radix,
- (long long)scan->u.bmu_bitmap,
+ 1 + (BLIST_BMAP_RADIX - 1) / 4,
+ (long long)scan->bm_bitmap,
(long long)scan->bm_bighint
);
return;
}
- if (scan->u.bmu_avail == 0) {
- printf(
- "%*.*s(%08llx,%lld) ALL ALLOCATED\n",
- tab, tab, "",
- (long long)blk,
- (long long)radix
- );
- return;
- }
- if (scan->u.bmu_avail == radix) {
- printf(
- "%*.*s(%08llx,%lld) ALL FREE\n",
- tab, tab, "",
- (long long)blk,
- (long long)radix
- );
- return;
- }
-
printf(
- "%*.*s(%08llx,%lld): subtree (%lld/%lld) big=%lld {\n",
+ "%*.*s(%08llx): subtree (%lld/%lld) bitmap %0*llx big=%lld {\n",
tab, tab, "",
(long long)blk, (long long)radix,
- (long long)scan->u.bmu_avail,
(long long)radix,
+ 1 + (BLIST_META_RADIX - 1) / 4,
+ (long long)scan->bm_bitmap,
(long long)scan->bm_bighint
);
- skip = radix_to_skip(radix);
- next_skip = skip / BLIST_META_RADIX;
radix /= BLIST_META_RADIX;
+ skip = radix_to_skip(radix);
tab += 4;
- for (i = 1; i < skip; i += next_skip) {
- if (scan[i].bm_bighint == (daddr_t)-1) {
- printf(
- "%*.*s(%08llx,%lld): Terminator\n",
- tab, tab, "",
- (long long)blk, (long long)radix
- );
- break;
- }
- blst_radix_print(&scan[i], blk, radix, tab);
- blk += radix;
- }
+ mask = scan->bm_bitmap;
+ /* Examine the nonempty subtree associated with each bit set in mask */
+ do {
+ bit = mask & -mask;
+ digit = bitpos(bit);
+ blst_radix_print(&scan[1 + digit * skip], blk + digit * radix,
+ radix, tab);
+ } while ((mask ^= bit) != 0);
tab -= 4;
printf(
@@ -1198,7 +1043,7 @@ blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int tab)
int
main(int ac, char **av)
{
- int size = 1024;
+ int size = BLIST_META_RADIX * BLIST_BMAP_RADIX;
int i;
blist_t bl;
struct sbuf *s;
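
Despite the internal rework, the allocator keeps its small public surface. A hedged usage sketch (sizes arbitrary; note that a new blist starts fully allocated, so its usable range is freed first, as the swap code does):

    blist_t bl;
    daddr_t blk;

    bl = blist_create(1024, M_WAITOK);	/* manages blocks 0..1023 */
    blist_free(bl, 0, 1024);		/* make the whole range available */
    blk = blist_alloc(bl, 16);		/* contiguous run of 16 blocks */
    if (blk != SWAPBLK_NONE)
    	blist_free(bl, blk, 16);	/* give them back */
    blist_destroy(bl);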
diff --git a/freebsd/sys/kern/subr_gtaskqueue.c b/freebsd/sys/kern/subr_gtaskqueue.c
index e56e90d7..3f80cd2c 100644
--- a/freebsd/sys/kern/subr_gtaskqueue.c
+++ b/freebsd/sys/kern/subr_gtaskqueue.c
@@ -56,6 +56,8 @@ __FBSDID("$FreeBSD$");
static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static void gtaskqueue_thread_enqueue(void *);
static void gtaskqueue_thread_loop(void *arg);
+static int task_is_running(struct gtaskqueue *queue, struct gtask *gtask);
+static void gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask);
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);
TASKQGROUP_DEFINE(config, 1, 1);
@@ -188,6 +190,44 @@ gtaskqueue_free(struct gtaskqueue *queue)
free(queue, M_GTASKQUEUE);
}
+/*
+ * Wait for the task to complete, then prevent it from being enqueued
+ */
+void
+grouptask_block(struct grouptask *grouptask)
+{
+ struct gtaskqueue *queue = grouptask->gt_taskqueue;
+ struct gtask *gtask = &grouptask->gt_task;
+
+#ifdef INVARIANTS
+ if (queue == NULL) {
+ gtask_dump(gtask);
+ panic("queue == NULL");
+ }
+#endif
+ TQ_LOCK(queue);
+ gtask->ta_flags |= TASK_NOENQUEUE;
+ gtaskqueue_drain_locked(queue, gtask);
+ TQ_UNLOCK(queue);
+}
+
+void
+grouptask_unblock(struct grouptask *grouptask)
+{
+ struct gtaskqueue *queue = grouptask->gt_taskqueue;
+ struct gtask *gtask = &grouptask->gt_task;
+
+#ifdef INVARIANTS
+ if (queue == NULL) {
+ gtask_dump(gtask);
+ panic("queue == NULL");
+ }
+#endif
+ TQ_LOCK(queue);
+ gtask->ta_flags &= ~TASK_NOENQUEUE;
+ TQ_UNLOCK(queue);
+}
+
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
@@ -202,6 +242,10 @@ grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
TQ_UNLOCK(queue);
return (0);
}
+ if (gtask->ta_flags & TASK_NOENQUEUE) {
+ TQ_UNLOCK(queue);
+ return (EAGAIN);
+ }
STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
gtask->ta_flags |= TASK_ENQUEUED;
TQ_UNLOCK(queue);
@@ -383,6 +427,13 @@ gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
return (error);
}
+static void
+gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask)
+{
+ while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
+ TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
+}
+
void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{
@@ -391,8 +442,7 @@ gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
TQ_LOCK(queue);
- while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
- TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
+ gtaskqueue_drain_locked(queue, gtask);
TQ_UNLOCK(queue);
}
@@ -857,6 +907,7 @@ taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
int i;
+ grouptask_block(gtask);
mtx_lock(&qgroup->tqg_lock);
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
@@ -867,6 +918,7 @@ taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
LIST_REMOVE(gtask, gt_list);
mtx_unlock(&qgroup->tqg_lock);
gtask->gt_taskqueue = NULL;
+ gtask->gt_task.ta_flags &= ~TASK_NOENQUEUE;
}
static void
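
grouptask_block() combines the drain with a no-enqueue flag: once it returns, the task is neither queued nor running, and grouptaskqueue_enqueue() fails with EAGAIN until grouptask_unblock(). A hedged sketch of the intended pairing around a reconfiguration (the softc and helper are hypothetical):

    grouptask_block(&sc->sc_refill_task);
    example_reconfigure(sc);	/* hypothetical: the task cannot run here */
    grouptask_unblock(&sc->sc_refill_task);
    /* Re-queue once, since enqueues were dropped while blocked. */
    GROUPTASK_ENQUEUE(&sc->sc_refill_task);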
diff --git a/freebsd/sys/kern/sys_generic.c b/freebsd/sys/kern/sys_generic.c
index b0da68a8..70638ad0 100644
--- a/freebsd/sys/kern/sys_generic.c
+++ b/freebsd/sys/kern/sys_generic.c
@@ -1366,16 +1366,15 @@ sys_poll(struct thread *td, struct poll_args *uap)
}
int
-kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
+kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
struct timespec *tsp, sigset_t *uset)
{
- struct pollfd *bits;
- struct pollfd smallbits[32];
+ struct pollfd *kfds;
+ struct pollfd stackfds[32];
sbintime_t sbt, precision, tmp;
time_t over;
struct timespec ts;
int error;
- size_t ni;
precision = 0;
if (tsp != NULL) {
@@ -1402,18 +1401,24 @@ kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
} else
sbt = -1;
+ /*
+ * This is kinda bogus. We have fd limits, but that is not
+ * really related to the size of the pollfd array. Make sure
+ * we let the process use at least FD_SETSIZE entries and at
+ * least enough for the system-wide limits. We want to be reasonably
+ * safe, but not overly restrictive.
+ */
#ifndef __rtems__
if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
#else /* __rtems__ */
if (nfds > rtems_libio_number_iops)
#endif /* __rtems__ */
return (EINVAL);
- ni = nfds * sizeof(struct pollfd);
- if (ni > sizeof(smallbits))
- bits = malloc(ni, M_TEMP, M_WAITOK);
+ if (nfds > nitems(stackfds))
+ kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
else
- bits = smallbits;
- error = copyin(fds, bits, ni);
+ kfds = stackfds;
+ error = copyin(ufds, kfds, nfds * sizeof(*kfds));
if (error)
goto done;
@@ -1438,7 +1443,7 @@ kern_poll(struct thread *td, struct pollfd *fds, u_int nfds,
seltdinit(td);
/* Iterate until the timeout expires or descriptors become ready. */
for (;;) {
- error = pollscan(td, bits, nfds);
+ error = pollscan(td, kfds, nfds);
if (error || td->td_retval[0] != 0)
break;
error = seltdwait(td, sbt, precision);
@@ -1457,13 +1462,13 @@ done:
if (error == EWOULDBLOCK)
error = 0;
if (error == 0) {
- error = pollout(td, bits, fds, nfds);
+ error = pollout(td, kfds, ufds, nfds);
if (error)
goto out;
}
out:
- if (ni > sizeof(smallbits))
- free(bits, M_TEMP);
+ if (nfds > nitems(stackfds))
+ free(kfds, M_TEMP);
return (error);
}
#ifdef __rtems__
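
Besides the rename from bits/smallbits to kfds/stackfds, the hunk modernizes a common kernel pattern: a fixed on-stack array for the typical case and an overflow-checked heap allocation via mallocarray(9) for large requests. The pattern, reduced to its essentials:

    struct pollfd stackfds[32], *kfds;

    if (nfds > nitems(stackfds))
    	kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
    else
    	kfds = stackfds;
    /* ... use kfds[0 .. nfds-1] ... */
    if (nfds > nitems(stackfds))
    	free(kfds, M_TEMP);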
diff --git a/freebsd/sys/kern/sys_pipe.c b/freebsd/sys/kern/sys_pipe.c
index f562b38f..b6616271 100755
--- a/freebsd/sys/kern/sys_pipe.c
+++ b/freebsd/sys/kern/sys_pipe.c
@@ -593,9 +593,8 @@ retry:
#ifndef __rtems__
buffer = (caddr_t) vm_map_min(pipe_map);
- error = vm_map_find(pipe_map, NULL, 0,
- (vm_offset_t *) &buffer, size, 0, VMFS_ANY_SPACE,
- VM_PROT_ALL, VM_PROT_ALL, 0);
+ error = vm_map_find(pipe_map, NULL, 0, (vm_offset_t *)&buffer, size, 0,
+ VMFS_ANY_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
if (error != KERN_SUCCESS) {
#else /* __rtems__ */
(void)error;
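
The sys_pipe.c hunk narrows the pipe buffer mapping from VM_PROT_ALL to read/write, dropping execute permission. VM_PROT_RW is the shorthand from sys/vm/vm.h:

    #define	VM_PROT_RW	(VM_PROT_READ|VM_PROT_WRITE)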
diff --git a/freebsd/sys/kern/tty.c b/freebsd/sys/kern/tty.c
index f4a2b01f..aa9734cf 100644
--- a/freebsd/sys/kern/tty.c
+++ b/freebsd/sys/kern/tty.c
@@ -35,6 +35,7 @@
__FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_capsicum.h>
+#include <rtems/bsd/local/opt_printf.h>
#include <sys/param.h>
#include <sys/capsicum.h>
@@ -108,6 +109,12 @@ SYSCTL_INT(_kern, OID_AUTO, tty_drainwait, CTLFLAG_RWTUN,
#define TTYBUF_MAX 65536
+#ifdef PRINTF_BUFR_SIZE
+#define TTY_PRBUF_SIZE PRINTF_BUFR_SIZE
+#else
+#define TTY_PRBUF_SIZE 256
+#endif
+
/*
* Allocate buffer space if necessary, and set low watermarks, based on speed.
* Note that the ttyxxxq_setsize() functions may drop and then reacquire the tty
@@ -1065,7 +1072,9 @@ tty_alloc_mutex(struct ttydevsw *tsw, void *sc, struct mtx *mutex)
PATCH_FUNC(busy);
#undef PATCH_FUNC
- tp = malloc(sizeof(struct tty), M_TTY, M_WAITOK|M_ZERO);
+ tp = malloc(sizeof(struct tty) + TTY_PRBUF_SIZE, M_TTY,
+ M_WAITOK | M_ZERO);
+ tp->t_prbufsz = TTY_PRBUF_SIZE;
tp->t_devsw = tsw;
tp->t_devswsoftc = sc;
tp->t_flags = tsw->tsw_flags;
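
tty_alloc_mutex() now carves the printf buffer out of the same allocation as the tty itself. Assuming struct tty gains a trailing buffer sized by t_prbufsz (which is what the sizeof(struct tty) + TTY_PRBUF_SIZE sizing implies; the field name below is illustrative), this is the usual flexible-array-member layout:

    struct tty {
    	/* ... existing fields ... */
    	size_t	t_prbufsz;	/* bytes available in the buffer */
    	char	t_prbuf[];	/* printf buffer, allocated inline */
    };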
diff --git a/freebsd/sys/kern/uipc_syscalls.c b/freebsd/sys/kern/uipc_syscalls.c
index 992df712..f338629e 100644
--- a/freebsd/sys/kern/uipc_syscalls.c
+++ b/freebsd/sys/kern/uipc_syscalls.c
@@ -90,10 +90,12 @@ struct getsockaddr_sockaddr {
char data[SOCK_MAXADDRLEN - sizeof(struct sockaddr)];
} __aligned(sizeof(long));
-static int getsockaddr(struct sockaddr **, caddr_t, size_t);
+static int getsockaddr(struct sockaddr **, const struct sockaddr *, size_t);
static int kern_getsockname(struct thread *, int, struct sockaddr **,
socklen_t *);
static int kern_listen(struct thread *, int, int);
+static int kern_setsockopt(struct thread *, int, int, int, const void *,
+ enum uio_seg, socklen_t);
static int kern_shutdown(struct thread *, int, int);
static int kern_socket(struct thread *, int, int, int);
static int kern_socketpair(struct thread *, int, int, int, int *);
@@ -287,7 +289,7 @@ bind(int socket, const struct sockaddr *address, socklen_t address_len)
struct thread *td = rtems_bsd_get_curthread_or_null();
struct bind_args ua = {
.s = socket,
- .name = (caddr_t) address,
+ .name = address,
.namelen = address_len
};
int error;
@@ -363,6 +365,9 @@ sys_bindat(struct thread *td, struct bindat_args *uap)
}
#endif /* __rtems__ */
+#ifdef __rtems__
+static
+#endif /* __rtems__ */
int
sys_listen(struct thread *td, struct listen_args *uap)
{
@@ -681,7 +686,7 @@ connect(int socket, const struct sockaddr *address, socklen_t address_len)
struct thread *td = rtems_bsd_get_curthread_or_null();
struct connect_args ua = {
.s = socket,
- .name = (caddr_t) address,
+ .name = address,
.namelen = address_len
};
int error;
@@ -1111,7 +1116,7 @@ sys_sendto(struct thread *td, struct sendto_args *uap)
struct msghdr msg;
struct iovec aiov;
- msg.msg_name = uap->to;
+ msg.msg_name = __DECONST(void *, uap->to);
msg.msg_namelen = uap->tolen;
msg.msg_iov = &aiov;
msg.msg_iovlen = 1;
@@ -1119,7 +1124,7 @@ sys_sendto(struct thread *td, struct sendto_args *uap)
#ifdef COMPAT_OLDSOCK
msg.msg_flags = 0;
#endif
- aiov.iov_base = uap->buf;
+ aiov.iov_base = __DECONST(void *, uap->buf);
aiov.iov_len = uap->len;
return (sendit(td, uap->s, &msg, uap->flags));
}
@@ -1134,7 +1139,7 @@ sendto(int socket, const void *message, size_t length, int flags,
.buf = (caddr_t) message,
.len = length,
.flags = flags,
- .to = (caddr_t) dest_addr,
+ .to = dest_addr,
.tolen = dest_len
};
int error;
@@ -1189,7 +1194,7 @@ osend(struct thread *td, struct osend_args *uap)
msg.msg_namelen = 0;
msg.msg_iov = &aiov;
msg.msg_iovlen = 1;
- aiov.iov_base = uap->buf;
+ aiov.iov_base = __DECONST(void *, uap->buf);
aiov.iov_len = uap->len;
msg.msg_control = 0;
msg.msg_flags = 0;
@@ -1669,9 +1674,6 @@ shutdown(int socket, int how)
#endif /* __rtems__ */
#ifdef __rtems__
-static int kern_setsockopt( struct thread *td, int s, int level, int name,
- void *val, enum uio_seg valseg, socklen_t valsize);
-
static
#endif /* __rtems__ */
int
@@ -1707,7 +1709,7 @@ setsockopt(int socket, int level, int option_name, const void *option_value,
#endif /* __rtems__ */
int
-kern_setsockopt(struct thread *td, int s, int level, int name, void *val,
+kern_setsockopt(struct thread *td, int s, int level, int name, const void *val,
enum uio_seg valseg, socklen_t valsize)
{
struct socket *so;
@@ -1723,7 +1725,7 @@ kern_setsockopt(struct thread *td, int s, int level, int name, void *val,
sopt.sopt_dir = SOPT_SET;
sopt.sopt_level = level;
sopt.sopt_name = name;
- sopt.sopt_val = val;
+ sopt.sopt_val = __DECONST(void *, val);
sopt.sopt_valsize = valsize;
switch (valseg) {
case UIO_USERSPACE:
@@ -2111,7 +2113,7 @@ sockargs(struct mbuf **mp, char *buf, socklen_t buflen, int type)
}
int
-getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len)
+getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr, size_t len)
{
struct sockaddr *sa;
#ifndef __rtems__
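
The const-correctness sweep in uipc_syscalls.c funnels every remaining cast through __DECONST instead of a bare (caddr_t) cast, so the const qualifier survives until the pointer reaches a structure field that predates const. __DECONST is the standard macro from sys/cdefs.h:

    #define	__DECONST(type, var)	((type)(__uintptr_t)(const void *)(var))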