Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c
- index a1ab9229bd1..172668a433d 100644
- --- a/sys/kern/kern_mbuf.c
- +++ b/sys/kern/kern_mbuf.c
- @@ -444,10 +444,11 @@ mb_dtor_mbuf(void *mem, int size, void *arg)
- m = (struct mbuf *)mem;
- flags = (unsigned long)arg;
- -
- - KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
- - if (!(flags & MB_DTOR_SKIP) && (m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
- - m_tag_delete_chain(m, NULL);
- + if (!(flags & MB_DTOR_SKIP)) {
- + KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
- + if ((m->m_flags & M_PKTHDR) && !SLIST_EMPTY(&m->m_pkthdr.tags))
- + m_tag_delete_chain(m, NULL);
- + }
- #ifdef INVARIANTS
- trash_dtor(mem, size, arg);
- #endif
- @@ -653,6 +654,16 @@ mb_free_ext(struct mbuf *m)
- /* Free attached storage if this mbuf is the only reference to it. */
- if (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1) {
- + /*
- + * An mvec has been converted to an mbuf chain, but is still
- + * owned by the mvec.
- + */
- + if (__predict_false(m->m_ext.ext_flags & EXT_FLAG_MVECREF)) {
- + MPASS(!(m->m_ext.ext_flags & EXT_FLAG_EMBREF));
- + MPASS(mref != m);
- + mvec_free((struct mbuf_ext *)mref);
- + goto skip;
- + }
- switch (m->m_ext.ext_type) {
- case EXT_PACKET:
- /* The packet zone is special. */
- @@ -676,6 +687,18 @@ mb_free_ext(struct mbuf *m)
- uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
- uma_zfree(zone_mbuf, mref);
- break;
- + case EXT_MBUF:
- + uma_zfree(zone_mbuf, m->m_ext.ext_buf);
- + break;
- + case EXT_MVEC:
- + if (m->m_ext.ext_flags & EXT_FLAG_EXTFREE) {
- + KASSERT(m->m_ext.ext_free != NULL,
- + ("%s: ext_free not set", __func__));
- + m->m_ext.ext_free(m);
- + }
- + mvec_free((struct mbuf_ext*)m);
- + return;
- + break;
- case EXT_SFBUF:
- case EXT_NET_DRV:
- case EXT_MOD_TYPE:
- @@ -695,7 +718,7 @@ mb_free_ext(struct mbuf *m)
- ("%s: unknown ext_type", __func__));
- }
- }
- -
- + skip:
- if (freembuf && m != mref)
- uma_zfree(zone_mbuf, m);
- }
- @@ -796,36 +819,6 @@ m_get2(int size, int how, short type, int flags)
- return (m);
- }
- -/*
- - * m_getjcl() returns an mbuf with a cluster of the specified size attached.
- - * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
- - */
- -struct mbuf *
- -m_getjcl(int how, short type, int flags, int size)
- -{
- - struct mb_args args;
- - struct mbuf *m, *n;
- - uma_zone_t zone;
- -
- - if (size == MCLBYTES)
- - return m_getcl(how, type, flags);
- -
- - args.flags = flags;
- - args.type = type;
- -
- - m = uma_zalloc_arg(zone_mbuf, &args, how);
- - if (m == NULL)
- - return (NULL);
- -
- - zone = m_getzone(size);
- - n = uma_zalloc_arg(zone, m, how);
- - if (n == NULL) {
- - uma_zfree(zone_mbuf, m);
- - return (NULL);
- - }
- - return (m);
- -}
- -
- /*
- * Allocate a given length worth of mbufs and/or clusters (whatever fits
- * best) and return a pointer to the top of the allocated chain. If an
- diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
- index ea2791673ed..a035f6d4eb3 100644
- --- a/sys/kern/kern_switch.c
- +++ b/sys/kern/kern_switch.c
- @@ -188,71 +188,38 @@ choosethread(void)
- return (td);
- }
- -/*
- - * Kernel thread preemption implementation. Critical sections mark
- - * regions of code in which preemptions are not allowed.
- - *
- - * It might seem a good idea to inline critical_enter() but, in order
- - * to prevent instructions reordering by the compiler, a __compiler_membar()
- - * would have to be used here (the same as sched_pin()). The performance
- - * penalty imposed by the membar could, then, produce slower code than
- - * the function call itself, for most cases.
- - */
- void
- -critical_enter(void)
- +critical_preempt(struct thread *td)
- {
- - struct thread *td;
- + int flags;
- - td = curthread;
- - td->td_critnest++;
- - CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
- - (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
- + /*
- + * Microoptimization: we committed to switch,
- + * disable preemption in interrupt handlers
- + * while spinning for the thread lock.
- + */
- + td->td_critnest = 1;
- + thread_lock(td);
- + td->td_critnest--;
- + flags = SW_INVOL | SW_PREEMPT;
- + if (TD_IS_IDLETHREAD(td))
- + flags |= SWT_IDLE;
- + else
- + flags |= SWT_OWEPREEMPT;
- + mi_switch(flags, NULL);
- + thread_unlock(td);
- }
- void
- -critical_exit(void)
- +critical_enter(void)
- {
- - struct thread *td;
- - int flags;
- -
- - td = curthread;
- - KASSERT(td->td_critnest != 0,
- - ("critical_exit: td_critnest == 0"));
- -
- - if (td->td_critnest == 1) {
- - td->td_critnest = 0;
- -
- - /*
- - * Interrupt handlers execute critical_exit() on
- - * leave, and td_owepreempt may be left set by an
- - * interrupt handler only when td_critnest > 0. If we
- - * are decrementing td_critnest from 1 to 0, read
- - * td_owepreempt after decrementing, to not miss the
- - * preempt. Disallow compiler to reorder operations.
- - */
- - __compiler_membar();
- - if (td->td_owepreempt && !kdb_active) {
- - /*
- - * Microoptimization: we committed to switch,
- - * disable preemption in interrupt handlers
- - * while spinning for the thread lock.
- - */
- - td->td_critnest = 1;
- - thread_lock(td);
- - td->td_critnest--;
- - flags = SW_INVOL | SW_PREEMPT;
- - if (TD_IS_IDLETHREAD(td))
- - flags |= SWT_IDLE;
- - else
- - flags |= SWT_OWEPREEMPT;
- - mi_switch(flags, NULL);
- - thread_unlock(td);
- - }
- - } else
- - td->td_critnest--;
- + _critical_enter();
- +}
- - CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
- - (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
- +void
- +critical_exit(void)
- +{
- + _critical_exit();
- }
- /************************************************************************
- diff --git a/sys/kern/subr_sglist.c b/sys/kern/subr_sglist.c
- index ff002038393..894d7272a2d 100644
- --- a/sys/kern/subr_sglist.c
- +++ b/sys/kern/subr_sglist.c
- @@ -319,6 +319,37 @@ sglist_append_phys(struct sglist *sg, vm_paddr_t paddr, size_t len)
- return (error);
- }
- +int
- +sglist_append_mvec(struct sglist *sg, struct mbuf *m0)
- +{
- + struct sgsave save;
- + struct mbuf_ext *mext;
- + struct mvec_header *mh;
- + struct mvec_ent *me;
- + int i, error;
- +
- + MPASS(m0 != NULL);
- +
- + mext = (void*)m0;
- + mh = &mext->me_mh;
- + me = &mext->me_ents[mh->mh_start];
- +
- + if (__predict_false(sg->sg_maxseg == 0))
- + return (EINVAL);
- +
- + SGLIST_SAVE(sg, save);
- + for (i = 0; i < mh->mh_used; i++, me++) {
- + if (__predict_false(me->me_len == 0))
- + continue;
- + error = sglist_append(sg, me_data(me), me->me_len);
- + if (__predict_false(error)) {
- + SGLIST_RESTORE(sg, save);
- + return (error);
- + }
- + }
- + return (0);
- +}
- +
- /*
- * Append the segments that describe a single mbuf chain to a
- * scatter/gather list. If there are insufficient segments, then this
- @@ -334,6 +365,9 @@ sglist_append_mbuf(struct sglist *sg, struct mbuf *m0)
- if (sg->sg_maxseg == 0)
- return (EINVAL);
- + if (m_ismvec(m0))
- + return (sglist_append_mvec(sg, m0));
- +
- error = 0;
- SGLIST_SAVE(sg, save);
- for (m = m0; m != NULL; m = m->m_next) {
- diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
- index a4bdf6f062a..04d0c1c267e 100644
- --- a/sys/kern/uipc_mbuf.c
- +++ b/sys/kern/uipc_mbuf.c
- @@ -724,6 +724,10 @@ m_adj(struct mbuf *mp, int req_len)
- if ((m = mp) == NULL)
- return;
- + if (m_ismvec(mp)) {
- + mvec_adj(mp, req_len);
- + return;
- + }
- if (len >= 0) {
- /*
- * Trim from head.
- @@ -803,6 +807,9 @@ m_pullup(struct mbuf *n, int len)
- int count;
- int space;
- + if (m_ismvec(n))
- + return (mvec_pullup(n, 0, len));
- +
- /*
- * If first mbuf has no cluster, and has room for len bytes
- * without shifting current data, pullup into it,
- diff --git a/sys/kern/uipc_mvec.c b/sys/kern/uipc_mvec.c
- new file mode 100644
- index 00000000000..2968d40a33d
- --- /dev/null
- +++ b/sys/kern/uipc_mvec.c
- @@ -0,0 +1,1258 @@
- +/*
- + * Copyright (C) 2017 Matthew Macy <matt.macy@joyent.com>
- + * Copyright (C) 2017 Joyent Inc.
- + * All rights reserved.
- + *
- + * Redistribution and use in source and binary forms, with or without
- + * modification, are permitted provided that the following conditions
- + * are met:
- + * 1. Redistributions of source code must retain the above copyright
- + * notice, this list of conditions and the following disclaimer.
- + * 2. Redistributions in binary form must reproduce the above copyright
- + * notice, this list of conditions and the following disclaimer in the
- + * documentation and/or other materials provided with the distribution.
- + *
- + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- + * SUCH DAMAGE.
- + */
- +
- +#include <sys/cdefs.h>
- +__FBSDID("$FreeBSD$");
- +
- +#include <sys/param.h>
- +#include <sys/malloc.h>
- +#include <sys/types.h>
- +#include <sys/systm.h>
- +#include <sys/mbuf.h>
- +#include <sys/kernel.h>
- +#include <sys/lock.h>
- +#include <sys/mutex.h>
- +#include <sys/smp.h>
- +#include <sys/sysctl.h>
- +
- +#include <machine/in_cksum.h>
- +
- +
- +#define MVEC_DEBUG
- +
- +#ifdef MVEC_DEBUG
- +#define DPRINTF printf
- +#else
- +#define DPRINTF(...)
- +#endif
- +
- +static MALLOC_DEFINE(M_MVEC, "mvec", "mbuf vector");
- +
- +static int type2len[] = {-1, MCLBYTES, -1, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES, -1, MSIZE};
- +#ifdef INVARIANTS
- +static int validtypes = ((1<<EXT_CLUSTER)|(1<<EXT_JUMBOP)|(1<<EXT_JUMBO9)|(1<<EXT_JUMBO16)|(1<<EXT_MBUF));
- +#endif
- +
- +#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
- +#define REDUCE32 \
- + { \
- + q_util.q = sum; \
- + sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
- + }
- +#define REDUCE16 \
- + { \
- + q_util.q = sum; \
- + l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
- + sum = l_util.s[0] + l_util.s[1]; \
- + ADDCARRY(sum); \
- + }
- +
- +uint64_t in_cksumdata(const void *buf, int len);
- +
- +union l_util {
- + u_int16_t s[2];
- + u_int32_t l;
- +};
- +union q_util {
- + u_int16_t s[4];
- + u_int32_t l[2];
- + u_int64_t q;
- +};
- +
- +
- +#ifdef INVARIANTS
- +void
- +mvec_sanity(struct mbuf *m)
- +{
- + struct mbuf_ext *mext;
- + struct mvec_header *mh;
- + struct mvec_ent *me;
- + int i, total;
- +
- + mext = (void*)m;
- + mh = &mext->me_mh;
- + me = &mext->me_ents[mh->mh_start];
- + total = 0;
- + MPASS(m->m_len == me->me_len);
- + MPASS(m->m_data == (me->me_cl + me->me_off));
- + MPASS(mh->mh_count >= (mh->mh_start + mh->mh_used));
- + for (i = mh->mh_start; i < mh->mh_used + mh->mh_start; i++, me++) {
- + if (__predict_false(me->me_len == 0))
- + continue;
- +
- + MPASS(me->me_cl);
- + MPASS(me->me_cl != (void *)0xdeadc0dedeadc0de);
- + total += me->me_len;
- + }
- + MPASS(total == m->m_pkthdr.len);
- +}
- +#endif
- +
- +static void
- +mvec_buffer_free(struct mbuf *m)
- +{
- + struct mvec_header *mh;
- +
- + mh = MBUF2MH(m);
- + switch (mh->mh_mvtype) {
- + case MVALLOC_MALLOC:
- + free(m, M_MVEC);
- + break;
- + case MVALLOC_MBUF:
- + uma_zfree_arg(zone_mbuf, m, (void *)MB_DTOR_SKIP);
- + break;
- + }
- +}
- +
- +
- +static void
- +mvec_clfree(struct mvec_ent *me, m_refcnt_t *refcntp, bool dupref)
- +{
- + bool free = true;
- + struct mbuf *mref;
- + volatile uint32_t *refcnt;
- +
- + mref = NULL;
- + if (dupref) {
- + if (me->me_ext_flags & EXT_FLAG_EMBREF) {
- + refcnt = &refcntp->ext_count;
- + } else {
- + refcnt = refcntp->ext_cnt;
- + }
- + free = (*refcnt == 1 || atomic_fetchadd_int(refcnt, -1) == 1);
- + }
- + if (!free)
- + return;
- + if (dupref && !(me->me_ext_flags & EXT_FLAG_NOFREE))
- + mref = __containerof(refcnt, struct mbuf, m_ext.ext_count);
- +
- + switch (me->me_ext_type) {
- + case EXT_CLUSTER:
- + uma_zfree(zone_clust, me->me_cl);
- + break;
- + case EXT_JUMBOP:
- + uma_zfree(zone_jumbop, me->me_cl);
- + break;
- + case EXT_JUMBO9:
- + uma_zfree(zone_jumbo9, me->me_cl);
- + break;
- + case EXT_JUMBO16:
- + uma_zfree(zone_jumbo16, me->me_cl);
- + break;
- + default:
- + panic("unsupported ext_type: %d\n", me->me_ext_type);
- + }
- + if (mref != NULL)
- + uma_zfree_arg(zone_mbuf, mref, (void *)MB_DTOR_SKIP);
- +}
- +
- +static void
- +mvec_ent_free(struct mvec_header *mh, int idx)
- +{
- + struct mvec_ent *me = (struct mvec_ent *)(mh + 1);
- + m_refcnt_t *me_count = (m_refcnt_t *)(me + mh->mh_count);
- +
- + me += idx;
- + me_count += idx;
- + switch (me->me_type) {
- + case MVEC_MBUF:
- + uma_zfree_arg(zone_mbuf, me->me_cl, (void *)MB_DTOR_SKIP);
- + break;
- + case MVEC_MANAGED:
- + mvec_clfree(me, me_count, mh->mh_multiref);
- + break;
- + default:
- + /* ... */
- + break;
- + }
- +}
- +
- +void *
- +mvec_seek(struct mbuf *m, struct mvec_cursor *mc, int offset)
- +{
- + struct mvec_ent *me = MBUF2ME(m);
- + struct mvec_header *mh = MBUF2MH(m);
- + int rem;
- +
- + mc->mc_idx = mc->mc_off = 0;
- + MPASS(offset <= m->m_pkthdr.len);
- + rem = offset;
- +
- + me = MHMEI(m, mh, 0);
- + do {
- + if (rem > me->me_len) {
- + rem -= me->me_len;
- + me++;
- + mc->mc_idx++;
- + } else if (rem < me->me_len) {
- + mc->mc_off = rem;
- + rem = 0;
- + } else {
- + rem = 0;
- + mc->mc_idx++;
- + me++;
- + }
- + } while(rem);
- +
- + return (void *)(me_data(me) + mc->mc_off);
- +}
- +
- +static void
- +mvec_trim_head(struct mbuf *m, int offset)
- +{
- + struct mvec_header *mh = MBUF2MH(m);
- + struct mvec_ent *me = MBUF2ME(m);
- + int rem;
- + bool owned;
- +
- + MPASS(offset <= m->m_pkthdr.len);
- + rem = offset;
- + if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
- + owned = (m->m_ext.ext_count == 1);
- + } else {
- + owned = (*(m->m_ext.ext_cnt) == 1);
- + }
- + do {
- + if (rem > me->me_len) {
- + rem -= me->me_len;
- + if (owned)
- + mvec_ent_free(mh, mh->mh_start);
- + mh->mh_start++;
- + mh->mh_used--;
- + me++;
- + } else if (rem < me->me_len) {
- + me->me_off += rem;
- + me->me_len -= rem;
- + rem = 0;
- + } else {
- + rem = 0;
- + mvec_ent_free(mh, mh->mh_start);
- + mh->mh_start++;
- + mh->mh_used--;
- + }
- + } while(rem);
- + m->m_pkthdr.len -= offset;
- + m->m_data = ME_SEG(m, mh, 0);
- +}
- +
- +static void
- +mvec_trim_tail(struct mbuf *m, int offset)
- +{
- + struct mvec_header *mh = MBUF2MH(m);
- + struct mvec_ent *me = MBUF2ME(m);
- + int i, rem;
- + bool owned;
- +
- + MPASS(offset <= m->m_pkthdr.len);
- + rem = offset;
- + if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
- + owned = (m->m_ext.ext_count == 1);
- + } else {
- + owned = (*(m->m_ext.ext_cnt) == 1);
- + }
- + i = mh->mh_count-1;
- + me = &me[i];
- + do {
- + if (rem > me->me_len) {
- + rem -= me->me_len;
- + me->me_len = 0;
- + if (owned)
- + mvec_ent_free(mh, i);
- + me--;
- + mh->mh_used--;
- + } else if (rem < me->me_len) {
- + me->me_len -= rem;
- + rem = 0;
- + } else {
- + rem = 0;
- + me->me_len = 0;
- + if (owned)
- + mvec_ent_free(mh, i);
- + mh->mh_used--;
- + }
- + i--;
- + } while(rem);
- + m->m_pkthdr.len -= offset;
- +}
- +
- +void
- +mvec_adj(struct mbuf *m, int req_len)
- +{
- + if (__predict_false(req_len == 0))
- + return;
- + if (req_len > 0)
- + mvec_trim_head(m, req_len);
- + else
- + mvec_trim_tail(m, -req_len);
- +}
- +
- +void
- +mvec_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
- +{
- + panic("%s unimplemented", __func__);
- +}
- +
- +struct mbuf *
- +mvec_dup(const struct mbuf *m, int how)
- +{
- + panic("%s unimplemented", __func__);
- + return (NULL);
- +}
- +
- +struct mbuf *
- +mvec_defrag(const struct mbuf *m, int how)
- +{
- + panic("%s unimplemented", __func__);
- + return (NULL);
- +}
- +
- +struct mbuf *
- +mvec_collapse(struct mbuf *m, int how, int maxfrags)
- +{
- + panic("%s unimplemented", __func__);
- + return (NULL);
- +}
- +
- +uint16_t
- +mvec_cksum_skip(struct mbuf *m, int len, int skip)
- +{
- + u_int64_t sum = 0;
- + int mlen = 0;
- + int clen = 0;
- + caddr_t addr;
- + union q_util q_util;
- + union l_util l_util;
- + struct mvec_cursor mc;
- + struct mvec_header mh;
- + struct mvec_ent *me;
- +
- + MPASS(m_ismvec(m));
- +
- + len -= skip;
- + mvec_seek(m, &mc, skip);
- + mh = *(MBUF2MH(m));
- +
- + /* XXX */
- + if (mh.mh_multipkt)
- + return (0);
- +
- + me = MHMEI(m, &mh, mc.mc_idx);
- + addr = me->me_cl + me->me_off;
- + goto skip_start;
- +
- + for (; mh.mh_used && len; me++) {
- + mh.mh_used--;
- + if (me->me_len == 0)
- + continue;
- + mlen = me->me_len;
- + addr = me->me_cl + me->me_off;
- +skip_start:
- + if (len < mlen)
- + mlen = len;
- + if ((clen ^ (long) addr) & 1)
- + sum += in_cksumdata(addr, mlen) << 8;
- + else
- + sum += in_cksumdata(addr, mlen);
- +
- + clen += mlen;
- + len -= mlen;
- + }
- + REDUCE16;
- + return (~sum & 0xffff);
- +}
- +
- +struct mbuf *
- +mvec_prepend(struct mbuf *m, int size)
- +{
- + struct mvec_header *mh;
- + struct mvec_ent *me;
- + struct mbuf *data;
- + struct mbuf_ext *mext;
- +
- + MPASS(size <= MSIZE);
- + if (__predict_false((data = m_get(M_NOWAIT, MT_NOINIT)) == NULL))
- + return (NULL);
- +
- + mext = (struct mbuf_ext *)m;
- + mh = &mext->me_mh;
- + if (__predict_true(mh->mh_start)) {
- + mh->mh_start--;
- + mh->mh_used++;
- + me = MHMEI(m, mh, 0);
- + me->me_len = size;
- + me->me_cl = (caddr_t)data;
- + me->me_off = 0;
- + me->me_type = MVEC_MBUF;
- + me->me_eop = 0;
- + me->me_ext_flags = 0;
- + me->me_ext_type = EXT_MBUF;
- + m->m_pkthdr.len += size;
- + m->m_len = size;
- + m->m_data = me->me_cl;
- + } else {
- + panic("implement fallback path for %s", __func__);
- + }
- + return (m);
- +}
- +
- +struct mbuf *
- +mvec_append(struct mbuf *m, caddr_t cl, uint16_t off,
- + uint16_t len, uint8_t cltype)
- +{
- + struct mvec_header *mh;
- + struct mvec_ent *me;
- +
- + mh = MBUF2MH(m);
- + KASSERT(mh->mh_used < mh->mh_count,
- + ("need to add support for growing mvec on append"));
- + me = MHMEI(m, mh, mh->mh_used);
- + me->me_cl = cl;
- + me->me_off = off;
- + me->me_len = len;
- + me->me_ext_type = cltype;
- + me->me_ext_flags = 0;
- + m->m_pkthdr.len += len;
- + if (mh->mh_used == 0) {
- + m->m_len = len;
- + m->m_data = (cl + off);
- + }
- + mh->mh_used++;
- + return (m);
- +}
- +
- +static int
- +mvec_init_mbuf_(struct mbuf *m, uint8_t count, uint8_t type, int len)
- +{
- + struct mvec_header *mh;
- + int rc;
- +
- + mh = MBUF2MH(m);
- + *((uint64_t *)mh) = 0;
- + if (type == MVALLOC_MBUF && len == 0)
- + mh->mh_count = MBUF_ME_MAX;
- + else
- + mh->mh_count = count;
- + mh->mh_mvtype = type;
- + /* leave room for prepend */
- + mh->mh_start = 1;
- + rc = m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
- + if (__predict_false(rc))
- + return (rc);
- +
- + m->m_next = m->m_nextpkt = NULL;
- + m->m_len = 0;
- + m->m_data = NULL;
- + m->m_flags = M_PKTHDR|M_EXT;
- + m->m_ext.ext_free = NULL;
- + m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
- + m->m_ext.ext_flags = EXT_FLAG_EMBREF;
- + m->m_ext.ext_type = EXT_MVEC;
- + m->m_ext.ext_size = MSIZE;
- + m->m_ext.ext_buf = (caddr_t)m;
- + m->m_ext.ext_cnt = NULL;
- + m->m_ext.ext_count = 1;
- + return (0);
- +}
- +
- +int
- +mvec_init_mbuf(struct mbuf *m, uint8_t count, uint8_t type)
- +{
- +
- + return (mvec_init_mbuf_(m, count, type, 0));
- +}
- +
- +struct mbuf_ext *
- +mvec_alloc(uint8_t count, int len, int how)
- +{
- + int size;
- + uint8_t type;
- + struct mbuf_ext *m;
- +
- + size = sizeof(*m) + count*sizeof(struct mvec_ent);
- + size += len;
- + if (size <= MSIZE) {
- + m = (void*)m_get(how, MT_NOINIT);
- + type = MVALLOC_MBUF;
- + } else {
- + m = malloc(size, M_MVEC, how);
- + type = MVALLOC_MALLOC;
- + }
- + if (__predict_false(m == NULL))
- + return (NULL);
- + mvec_init_mbuf_((struct mbuf *)m, count, type, len);
- + return (m);
- +}
- +
- +static int
- +mvec_ent_size(struct mvec_ent *me)
- +{
- + int type;
- +
- + MPASS(me->me_ext_type && (me->me_ext_type < 32));
- +
- + type = me->me_ext_type;
- + MPASS((1<<type) & validtypes);
- + return (type2len[type]);
- +}
- +
- +struct mbuf *
- +mvec_pullup(struct mbuf *m, int idx, int count)
- +{
- + struct mvec_header *mh;
- + struct mvec_ent *mecur, *menxt;
- + int tailroom, size, copylen, doff, i, len;
- +
- + /* XXX --- fix */
- + MPASS(idx == 0);
- + mvec_sanity(m);
- + MPASS(count <= m->m_pkthdr.len);
- + mh = MBUF2MH(m);
- + mecur = MHMEI(m, mh, 0);
- + size = mvec_ent_size(mecur);
- + tailroom = size - mecur->me_off - mecur->me_len;
- + MPASS(tailroom >= 0);
- + copylen = count - mecur->me_len;
- +
- + if (__predict_false(count <= mecur->me_len))
- + return (m);
- + /*
- + * XXX - If we're not the exclusive owner we need to allocate a new
- + * buffer regardless.
- + */
- + if (copylen > size) {
- + /* allocate new buffer */
- + panic("allocate new buffer copylen=%d size=%d", copylen, size);
- + } else if (copylen > tailroom) {
- + /*
- + * move data up if possible
- + * else allocate new buffer
- + */
- + panic("relocate data copylen=%d size=%d tailroom=%d", copylen, size, tailroom);
- + }
- + doff = mecur->me_off + mecur->me_len;
- + i = 1;
- + do {
- + menxt = MHMEI(m, mh, i);
- + len = min(copylen, menxt->me_len);
- + bcopy(ME_SEG(m, mh, i), mecur->me_cl + doff, len);
- + doff += len;
- + mecur->me_len += len;
- + menxt->me_off += len;
- + menxt->me_len -= len;
- + copylen -= len;
- + i++;
- + } while (copylen);
- + m->m_data = ME_SEG(m, mh, 0);
- + m->m_len = ME_LEN(m, mh, 0);
- + mvec_sanity(m);
- + return (m);
- +}
- +
- +void
- +mvec_free(struct mbuf_ext *m)
- +{
- + struct mvec_header *mh;
- + struct mvec_ent *me;
- + m_refcnt_t *me_count;
- + int i;
- +
- + mh = &m->me_mh;
- + me = m->me_ents;
- + me_count = (m_refcnt_t *)(me + mh->mh_count);
- +
- + for (i = 0; i < mh->mh_count; i++, me_count++, me++) {
- + if (__predict_false(me->me_cl == NULL))
- + continue;
- + switch (me->me_type) {
- + case MVEC_MBUF:
- + uma_zfree_arg(zone_mbuf, me->me_cl, (void *)MB_DTOR_SKIP);
- + break;
- + case MVEC_MANAGED:
- + mvec_clfree(me, me_count, mh->mh_multiref);
- + break;
- + default:
- + /* ... */
- + break;
- + }
- + }
- + mvec_buffer_free((void*)m);
- +}
- +
- +struct mbuf_ext *
- +mchain_to_mvec(struct mbuf *m, int how)
- +{
- + struct mbuf *mp, *mnext;
- + struct mbuf_ext *mnew;
- + struct mvec_header *mh;
- + struct mvec_ent *me;
- + int count, size;
- + bool dupref;
- + m_refcnt_t *me_count;
- +
- + if (__predict_false(m_ismvec(m)))
- + return ((struct mbuf_ext *)m);
- +
- + size = count = 0;
- + mp = m;
- + dupref = false;
- + do {
- + mnext = mp->m_next;
- + count++;
- + if (mp->m_flags & M_EXT) {
- + /*
- + * bail on ext_free -- we can't efficiently pass an mbuf
- + * at free time and m_ext adds up to a lot of space
- + */
- + if (mp->m_ext.ext_free != NULL) {
- + DPRINTF("%s ext_free is set: %p\n", __func__, mp->m_ext.ext_free);
- + return (NULL);
- + }
- + if (!(mp->m_ext.ext_flags & EXT_FLAG_EMBREF && mp->m_ext.ext_count == 1))
- + dupref = true;
- + }
- + mp = mnext;
- + } while (mp);
- +
- + /* add spare */
- + count++;
- + if (dupref)
- + size = count*sizeof(void*);
- + mnew = mvec_alloc(count, size, how);
- +
- + if (mnew == NULL) {
- + DPRINTF("%s malloc failed\n", __func__);
- + return (NULL);
- + }
- + mh = &mnew->me_mh;
- + mh->mh_used = count-1;
- + MPASS(mh->mh_count == mh->mh_used+1);
- + mh->mh_multiref = dupref;
- + /* leave first entry open for encap */
- + bcopy(&m->m_pkthdr, &mnew->me_mbuf.m_pkthdr, sizeof(struct pkthdr));
- +
- + me = mnew->me_ents;
- + MPASS(mh->mh_start == 1);
- + me->me_cl = NULL;
- + me->me_off = me->me_len = 0;
- + me->me_ext_type = me->me_ext_flags = 0;
- + me++;
- + me_count = MBUF2REF(mnew);
- + if (dupref)
- + bzero(me_count, count*sizeof(void *));
- + me_count++;
- + mp = m;
- + do {
- + mnext = mp->m_next;
- + if (mp->m_flags & M_EXT) {
- + me->me_cl = mp->m_ext.ext_buf;
- + me->me_off = ((uintptr_t)mp->m_data - (uintptr_t)mp->m_ext.ext_buf);
- + me->me_type = MVEC_MANAGED;
- + me->me_ext_flags = mp->m_ext.ext_flags;
- + MPASS(mp->m_ext.ext_type < 32);
- + me->me_ext_type = mp->m_ext.ext_type;
- +#ifdef INVARIANTS
- + (void)mvec_ent_size(me);
- +#endif
- + } else {
- + me->me_cl = (caddr_t)mp;
- + me->me_off = ((uintptr_t)(mp->m_data) - (uintptr_t)mp);
- + me->me_type = MVEC_MBUF;
- + me->me_ext_flags = 0;
- + me->me_ext_type = EXT_MBUF;
- + }
- + me->me_len = mp->m_len;
- + me->me_eop = 0;
- + if (dupref) {
- + if (mp->m_flags & M_EXT) {
- + if (mp->m_ext.ext_flags & EXT_FLAG_EMBREF) {
- + me_count->ext_cnt = &mp->m_ext.ext_count;
- + me->me_ext_flags &= ~EXT_FLAG_EMBREF;
- + } else
- + me_count->ext_cnt = mp->m_ext.ext_cnt;
- + }
- + if (mp->m_flags & M_NOFREE)
- + me->me_ext_flags |= EXT_FLAG_NOFREE;
- + }
- + me_count++;
- + mp = mnext;
- + me++;
- + } while (mp);
- + mnew->me_mbuf.m_len = mnew->me_ents[1].me_len;
- + mnew->me_mbuf.m_data = (mnew->me_ents[1].me_cl + mnew->me_ents[1].me_off);
- + mh = MBUF2MH(mnew);
- + MPASS(mh->mh_count == mh->mh_start + mh->mh_used);
- + mvec_sanity((void*)mnew);
- + return (mnew);
- +}
- +
- +struct mbuf_ext *
- +pktchain_to_mvec(struct mbuf *m, int mtu, int how)
- +{
- + struct mbuf *mp, *mnext;
- + struct mbuf_ext *mnew, *mh, *mt;
- +
- + mp = m;
- + mh = mt = NULL;
- + while (mp) {
- + mnext = mp->m_nextpkt;
- + mnew = mchain_to_mvec(mp, how);
- + if (__predict_false(mnew == NULL)) {
- + m_freem(mp);
- + mp = mnext;
- + continue;
- + }
- + if (mh == NULL) {
- + mh = mt = mnew;
- + } else {
- + mt->me_mbuf.m_nextpkt = (void*)mnew;
- + mt = mnew;
- + }
- + mp = mnext;
- + }
- + return (mh);
- +}
- +
- +static void
- +m_ext_init(struct mbuf *m, struct mbuf_ext *head, struct mvec_header *mh)
- +{
- + struct mvec_ent *me;
- + struct mbuf *headm;
- + bool doref;
- +
- + headm = &head->me_mbuf;
- + doref = true;
- + me = &head->me_ents[mh->mh_start];
- + m->m_ext.ext_buf = me->me_cl;
- + m->m_ext.ext_arg1 = headm->m_ext.ext_arg1;
- + m->m_ext.ext_arg2 = headm->m_ext.ext_arg2;
- + m->m_ext.ext_free = headm->m_ext.ext_free;
- + m->m_ext.ext_type = me->me_ext_type;
- + if (me->me_ext_type) {
- + m->m_ext.ext_flags = me->me_ext_flags;
- + m->m_ext.ext_size = mvec_ent_size(me);
- + } else {
- + m->m_ext.ext_flags = EXT_FLAG_NOFREE;
- + /* Only used by m_sanity so just call it our size */
- + m->m_ext.ext_size = me->me_len + me->me_off;
- + }
- + /*
- + * There are 2 cases for refcount transfer:
- + * 1) all clusters are owned by the mvec [default]
- + * - point at mvec refcnt and increment
- + * 2) cluster has a normal external refcount
- + */
- + if (__predict_true(!head->me_mh.mh_multiref)) {
- + m->m_ext.ext_flags = EXT_FLAG_MVECREF;
- + if (headm->m_ext.ext_flags & EXT_FLAG_EMBREF)
- + m->m_ext.ext_cnt = &headm->m_ext.ext_count;
- + else
- + m->m_ext.ext_cnt = headm->m_ext.ext_cnt;
- + } else {
- + m_refcnt_t *ref = MHREFI(headm, mh, 0);
- +
- + m->m_ext.ext_cnt = ref->ext_cnt;
- + if (ref->ext_cnt == NULL) {
- + m->m_ext.ext_flags |= EXT_FLAG_EMBREF;
- + m->m_ext.ext_type = 0;
- + m->m_ext.ext_count = 1;
- + doref = false;
- + }
- + }
- + if (doref)
- + atomic_add_int(m->m_ext.ext_cnt, 1);
- +}
- +
- +static struct mbuf *
- +mvec_to_mchain_pkt(struct mbuf_ext *mp, struct mvec_header *mhdr, int how)
- +{
- + struct mvec_ent *me;
- + struct mbuf *m, *mh, *mt, *mpm;
- +
- + if (__predict_false((mh = m_gethdr(how, MT_DATA)) == NULL))
- + return (NULL);
- +
- + mpm = &mp->me_mbuf;
- + me = MHMEI(mp, mhdr, 0);
- + mh->m_flags |= M_EXT;
- + mh->m_flags |= mpm->m_flags & (M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG|M_VXLANTAG);
- + /* XXX update csum_data after encap */
- + mh->m_pkthdr.csum_data = mpm->m_pkthdr.csum_data;
- + mh->m_pkthdr.csum_flags = mpm->m_pkthdr.csum_flags;
- + mh->m_pkthdr.vxlanid = mpm->m_pkthdr.vxlanid;
- + m_ext_init(mh, mp, mhdr);
- + mh->m_data = me->me_cl + me->me_off;
- + mh->m_pkthdr.len = mh->m_len = me->me_len;
- + mhdr->mh_start++;
- + mhdr->mh_used--;
- + mt = mh;
- + while (!me->me_eop && mhdr->mh_used) {
- + if (__predict_false((m = m_get(how, MT_DATA)) == NULL))
- + goto fail;
- + me++;
- + mt->m_next = m;
- + mt = m;
- + mt->m_flags |= M_EXT;
- + m_ext_init(mt, mp, mhdr);
- + mt->m_len = me->me_len;
- + mh->m_pkthdr.len += mt->m_len;
- + mt->m_data = me->me_cl + me->me_off;
- + mhdr->mh_start++;
- + mhdr->mh_used--;
- + }
- +#ifdef INVARIANTS
- + m_sanity(mh, 0);
- +#endif
- + return (mh);
- + fail:
- + if (mh)
- + m_freem(mh);
- + return (NULL);
- +}
- +
- +struct mbuf *
- +mvec_to_mchain(struct mbuf *mp, int how)
- +{
- + struct mvec_header *pmhdr, mhdr;
- + struct mbuf *mh, *mt, *m;
- +#ifdef INVARIANTS
- + int count = 0;
- +#endif
- +
- + mvec_sanity(mp);
- + pmhdr = MBUF2MH(mp);
- + bcopy(pmhdr, &mhdr, sizeof(mhdr));
- + mh = mt = NULL;
- + while (mhdr.mh_used) {
- +#ifdef INVARIANTS
- + count++;
- +#endif
- + if (__predict_false((m = mvec_to_mchain_pkt((struct mbuf_ext *)mp, &mhdr, how)) == NULL)) {
- + DPRINTF("mvec_to_mchain_pkt failed\n");
- + goto fail;
- + }
- + if (mh != NULL) {
- + mt->m_nextpkt = m;
- + mt = m;
- + } else
- + mh = mt = m;
- + }
- +#ifdef INVARIANTS
- + m = mh;
- + while (m) {
- + MPASS(m->m_data);
- + m_sanity(m, 0);
- + m = m->m_nextpkt;
- + count--;
- + }
- + MPASS(count == 0);
- +#endif
- + return (mh);
- + fail:
- + m_freechain(mh);
- + return (NULL);
- +}
- +
- +/*
- + * Move the below to net/ once working
- + */
- +
- +#include <sys/socket.h>
- +#include <net/if.h>
- +#include <net/if_var.h>
- +#include <net/ethernet.h>
- +#include <net/iflib.h>
- +#include <netinet/in.h>
- +#include <netinet/ip.h>
- +#include <netinet/tcp.h>
- +#include <netinet/udp.h>
- +
- +#include <machine/in_cksum.h>
- +
- +#define MIN_HDR_LEN (ETHER_HDR_LEN + sizeof(struct ip) + sizeof(struct tcphdr))
- +
- +static int
- +mvec_parse_header(struct mbuf_ext *mp, int prehdrlen, if_pkt_info_t pi)
- +{
- + struct ether_vlan_header *evh;
- + struct mvec_header *mh = &mp->me_mh;
- + struct mbuf *m;
- +
- + m = (void*)mp;
- + mvec_sanity(m);
- + if (__predict_false(m->m_len < MIN_HDR_LEN + prehdrlen) &&
- + __predict_false(mvec_pullup(m, 0, prehdrlen + MIN_HDR_LEN) == NULL))
- + return (ENOMEM);
- + evh = (struct ether_vlan_header *)(ME_SEG(m, mh, 0) + prehdrlen);
- + if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
- + pi->ipi_etype = ntohs(evh->evl_proto);
- + pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
- + } else {
- + pi->ipi_etype = ntohs(evh->evl_encap_proto);
- + pi->ipi_ehdrlen = ETHER_HDR_LEN;
- + }
- + switch (pi->ipi_etype) {
- + case ETHERTYPE_IP: {
- + struct ip *ip = NULL;
- + struct tcphdr *th = NULL;
- + int minthlen;
- +
- + minthlen = pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th);
- + if (__predict_false(m->m_len < minthlen + prehdrlen) &&
- + __predict_false(mvec_pullup(m, 0, prehdrlen + minthlen) == NULL))
- + return (ENOMEM);
- + ip = (struct ip *)(ME_SEG(m, mh, 0) + prehdrlen + pi->ipi_ehdrlen);
- + pi->ipi_ip_hlen = ip->ip_hl << 2;
- + pi->ipi_ipproto = ip->ip_p;
- + if (ip->ip_p != IPPROTO_TCP)
- + return (EINVAL);
- + minthlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + sizeof(*th);
- + if (__predict_false(m->m_len < minthlen + prehdrlen) &&
- + __predict_false(mvec_pullup(m, 0, prehdrlen + minthlen) == NULL))
- + return (ENOMEM);
- + th = (struct tcphdr *)(ME_SEG(m, mh, 0) + prehdrlen + pi->ipi_ehdrlen + pi->ipi_ip_hlen);
- + pi->ipi_tcp_hflags = th->th_flags;
- + pi->ipi_tcp_hlen = th->th_off << 2;
- + pi->ipi_tcp_seq = th->th_seq;
- + minthlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
- + if (__predict_false(m->m_len < minthlen + prehdrlen) &&
- + __predict_false(mvec_pullup(m, 0, prehdrlen + minthlen) == NULL))
- + return (ENOMEM);
- + if (prehdrlen == 0) {
- + th->th_sum = in_pseudo(ip->ip_src.s_addr,
- + ip->ip_dst.s_addr, htons(IPPROTO_TCP));
- + ip->ip_sum = 0;
- + ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz);
- +
- + }
- + break;
- + }
- + case ETHERTYPE_IPV6: {
- + break;
- + }
- + default:
- + /* XXX unsupported -- error */
- + break;
- + }
- + mvec_sanity(m);
- + return (0);
- +}
- +
- +struct tso_state {
- + if_pkt_info_t ts_pi;
- + tcp_seq ts_seq;
- + uint16_t ts_idx;
- + uint16_t ts_prehdrlen;
- + uint16_t ts_hdrlen;
- + uint16_t ts_ip_len_off;
- + uint16_t ts_uh_len_off;
- +};
- +
- +static void
- +tso_init(struct tso_state *state, caddr_t hdr, if_pkt_info_t pi, int prehdrlen, int hdrlen)
- +{
- + struct ip *ip;
- +
- + MPASS(hdrlen > prehdrlen);
- + ip = (struct ip *)(hdr + prehdrlen + pi->ipi_ehdrlen);
- + state->ts_pi = pi;
- + state->ts_idx = ntohs(ip->ip_id);
- + state->ts_prehdrlen = prehdrlen;
- + state->ts_hdrlen = hdrlen;
- + state->ts_seq = ntohl(pi->ipi_tcp_seq);
- + state->ts_uh_len_off = state->ts_ip_len_off = 0;
- + /* XXX assuming !VLAN */
- + if (prehdrlen) {
- + state->ts_uh_len_off = ETHER_HDR_LEN + sizeof(*ip) + offsetof(struct udphdr, uh_ulen);
- + state->ts_ip_len_off = ETHER_HDR_LEN + offsetof(struct ip, ip_len);
- + }
- +}
- +
- +static void
- +tso_fixup(struct tso_state *state, caddr_t hdr, int len, bool last)
- +{
- + if_pkt_info_t pi = state->ts_pi;
- + struct ip *ip;
- + struct tcphdr *th;
- + uint16_t encap_len, *hdr_lenp;
- +
- + encap_len = len + state->ts_hdrlen - state->ts_prehdrlen - pi->ipi_ehdrlen;
- + if (state->ts_prehdrlen) {
- + hdr_lenp = (uint16_t *)(hdr + state->ts_uh_len_off);
- + *hdr_lenp = htons(len + state->ts_hdrlen - ETHER_HDR_LEN - sizeof(*ip));
- + hdr_lenp = (uint16_t *)(hdr + state->ts_ip_len_off);
- + *hdr_lenp = htons(len + state->ts_hdrlen - ETHER_HDR_LEN);
- + }
- + if (pi->ipi_etype == ETHERTYPE_IP) {
- + ip = (struct ip *)(hdr + state->ts_prehdrlen + pi->ipi_ehdrlen);
- + ip->ip_len = htons(encap_len);
- + ip->ip_id = htons(state->ts_idx);
- + ip->ip_sum = 0;
- + state->ts_idx++;
- + } else if (pi->ipi_etype == ETHERTYPE_IPV6) {
- + /* XXX notyet */
- + } else {
- + panic("bad ethertype %d in tso_fixup", pi->ipi_etype);
- + }
- + if (pi->ipi_ipproto == IPPROTO_TCP) {
- + th = (struct tcphdr *)(hdr + state->ts_prehdrlen + pi->ipi_ehdrlen + pi->ipi_ip_hlen);
- + th->th_seq = htonl(state->ts_seq);
- + state->ts_seq += len;
- + th->th_sum = 0;
- +
- + /* Zero the PSH and FIN TCP flags if this is not the last
- + segment. */
- + if (!last)
- + th->th_flags &= ~(0x8 | 0x1);
- + } else {
- + panic("non TCP IPPROTO %d in tso_fixup", pi->ipi_ipproto);
- + }
- +}
- +
- +struct mbuf_ext *
- +mvec_tso(struct mbuf_ext *mprev, int prehdrlen, bool freesrc)
- +{
- + struct mvec_header *mh, *newmh;
- + struct mvec_cursor mc;
- + struct mvec_ent *me, *mesrc, *medst, *newme;
- + struct mbuf_ext *mnew;
- + struct mbuf *m;
- + struct if_pkt_info pi;
- + struct tso_state state;
- + m_refcnt_t *newme_count, *medst_count, *mesrc_count;
- + int segcount, soff, segrem, srem;
- + int i, segsz, nheaders, hdrsize;
- + int refsize, count, pktrem, srci, dsti;
- + volatile uint32_t *refcnt;
- + bool dupref, dofree;
- + caddr_t hdrbuf;
- +
- + m = (void*)mprev;
- + mvec_sanity(m);
- + dofree = false;
- + if (m->m_ext.ext_flags & EXT_FLAG_EMBREF) {
- + refcnt = &m->m_ext.ext_count;
- + } else {
- + refcnt = m->m_ext.ext_cnt;
- + }
- + if (freesrc && (*refcnt == 1))
- + dofree = true;
- +
- + segsz = m->m_pkthdr.tso_segsz;
- + mh = &mprev->me_mh;
- + me = mprev->me_ents;
- + dupref = mh->mh_multiref;
- + pi.ipi_tso_segsz = segsz;
- + if (mvec_parse_header(mprev, prehdrlen, &pi))
- + return (NULL);
- + hdrsize = prehdrlen + pi.ipi_ehdrlen + pi.ipi_ip_hlen + pi.ipi_tcp_hlen;
- + pktrem = m->m_pkthdr.len - hdrsize;
- + nheaders = pktrem / segsz;
- + if (nheaders*segsz != pktrem)
- + nheaders++;
- + segrem = segsz;
- + segcount = refsize = 0;
- + mvec_seek(m, &mc, hdrsize);
- + soff = mc.mc_off;
- + srci = mc.mc_idx;
- + while (pktrem > 0) {
- + MPASS(pktrem >= segrem);
- + MPASS(srci < mprev->me_mh.mh_count);
- + if (__predict_false(me[srci].me_len == 0)) {
- + srci++;
- + continue;
- + }
- + segrem = min(pktrem, segsz);
- + do {
- + int used;
- +
- + srem = me[srci].me_len - soff;
- + used = min(segrem, srem);
- + srem -= used;
- + if (srem) {
- + soff += segrem;
- + } else {
- + srci++;
- + soff = 0;
- + }
- + segrem -= used;
- + pktrem -= used;
- + segcount++;
- + } while (segrem);
- + }
- +
- + count = segcount + nheaders;
- + if (mh->mh_multiref)
- + refsize = count*sizeof(void*);
- +
- + mnew = mvec_alloc(count, refsize + (nheaders * hdrsize), M_NOWAIT);
- + if (__predict_false(mnew == NULL))
- + return (NULL);
- + bcopy(&m->m_pkthdr, &mnew->me_mbuf.m_pkthdr, sizeof(struct pkthdr));
- + newmh = &mnew->me_mh;
- + newmh->mh_start = 0;
- + newmh->mh_used = count;
- + newmh->mh_multiref = mh->mh_multiref;
- + newmh->mh_multipkt = true;
- + newme = mnew->me_ents;
- + newme_count = MBUF2REF(mnew);
- + __builtin_prefetch(newme_count);
- + medst_count = newme_count;
- + medst = newme;
- +
- + /*
- + * skip past header info
- + */
- + mvec_seek(m, &mc, hdrsize);
- + mesrc = mprev->me_ents;
- + mesrc_count = &MBUF2REF(m)[mc.mc_idx];
- + if (dupref) {
- + bzero(medst_count, count*sizeof(void *));
- + medst_count++;
- + }
- + medst[0].me_cl = NULL;
- + medst[0].me_len = 0;
- + /*
- + * Packet segmentation loop
- + */
- + srci = mc.mc_idx;
- + soff = mc.mc_off;
- + pktrem = m->m_pkthdr.len - hdrsize;
- + for (dsti = i = 0; i < nheaders; i++) {
- + /* skip header */
- + medst[dsti].me_cl = NULL;
- + medst[dsti].me_len = 0;
- + dsti++;
- + medst_count++;
- +
- + MPASS(pktrem > 0);
- + segrem = min(segsz, pktrem);
- + do {
- + int used;
- +
- + MPASS(pktrem > 0);
- + MPASS(srci < mprev->me_mh.mh_count);
- + MPASS(dsti < mnew->me_mh.mh_count);
- + /*
- + * Skip past any empty slots
- + */
- + if (__predict_false(mesrc[srci].me_len == 0)) {
- + srci++;
- + mesrc_count++;
- + continue;
- + }
- + /*
- + * At the start of a source descriptor:
- + * copy its attributes and, if dupref,
- + * its refcnt
- + */
- + if (soff == 0) {
- + if (dupref) {
- + *medst_count = *mesrc_count;
- + if (!dofree && (mesrc_count->ext_cnt != NULL))
- + atomic_add_int(mesrc_count->ext_cnt, 1);
- + }
- + medst[dsti].me_type = mesrc[srci].me_type;
- + medst[dsti].me_ext_flags = mesrc[srci].me_ext_flags;
- + medst[dsti].me_ext_type = mesrc[srci].me_ext_type;
- + } else {
- + medst[dsti].me_type = MVEC_UNMANAGED;
- + medst[dsti].me_ext_flags = 0;
- + medst[dsti].me_ext_type = 0;
- + }
- + /*
- + * Remaining value is len - off
- + */
- + srem = mesrc[srci].me_len - soff;
- + medst[dsti].me_cl = mesrc[srci].me_cl;
- + medst[dsti].me_off = mesrc[srci].me_off + soff;
- + used = min(segrem, srem);
- + srem -= used;
- + if (srem) {
- + soff += segrem;
- + } else {
- + srci++;
- + mesrc_count++;
- + soff = 0;
- + }
- + segrem -= used;
- + pktrem -= used;
- + medst[dsti].me_eop = (segrem == 0);
- + medst[dsti].me_len = used;
- + dsti++;
- + medst_count++;
- + } while (segrem);
- + }
- + /*
- + * Special case first header
- + */
- + medst = newme;
- + mesrc = MHMEI(m, MBUF2MH(m), 0);
- + /*
- + * Header initialization loop
- + */
- + hdrbuf = ((caddr_t)(newme + count)) + refsize;
- + tso_init(&state, mesrc->me_cl + mesrc->me_off, &pi, prehdrlen, hdrsize);
- + pktrem = m->m_pkthdr.len - hdrsize;
- + for (dsti = i = 0; i < nheaders; i++) {
- + MPASS(pktrem > 0);
- + /* skip ahead to next header slot */
- + while (medst[dsti].me_cl != NULL)
- + dsti++;
- + bcopy(mesrc->me_cl + mesrc->me_off, hdrbuf, hdrsize);
- + tso_fixup(&state, hdrbuf, min(pktrem, segsz), (pktrem <= segsz));
- + pktrem -= segsz;
- + medst[dsti].me_cl = hdrbuf;
- + medst[dsti].me_off = 0;
- + medst[dsti].me_len = hdrsize;
- + medst[dsti].me_type = MVEC_UNMANAGED;
- + medst[dsti].me_ext_flags = 0;
- + medst[dsti].me_ext_type = 0;
- + medst[dsti].me_eop = 0;
- + hdrbuf += hdrsize;
- + }
- +
- + mnew->me_mbuf.m_len = mnew->me_ents->me_len;
- + mnew->me_mbuf.m_data = (mnew->me_ents->me_cl + mnew->me_ents->me_off);
- + mnew->me_mbuf.m_pkthdr.len = m->m_pkthdr.len + (nheaders - 1)*hdrsize;
- + mvec_sanity((struct mbuf *)mnew);
- + if (dofree) {
- + if (mesrc->me_cl && (mesrc->me_type == MVEC_MBUF) && mesrc->me_len == hdrsize)
- + uma_zfree_arg(zone_mbuf, mesrc->me_cl, (void *)MB_DTOR_SKIP);
- + mnew->me_mbuf.m_ext.ext_count = 1;
- + if (!(m->m_ext.ext_flags & EXT_FLAG_EMBREF))
- + mvec_buffer_free(__containerof(refcnt, struct mbuf, m_ext.ext_count));
- + /* XXX we're leaking here */
- + mvec_buffer_free(m);
- + } else {
- + if (m->m_ext.ext_flags & EXT_FLAG_EMBREF)
- + mnew->me_mbuf.m_ext.ext_cnt = m->m_ext.ext_cnt;
- + else
- + mnew->me_mbuf.m_ext.ext_cnt = &m->m_ext.ext_count;
- + atomic_add_int(mnew->me_mbuf.m_ext.ext_cnt, 1);
- + }
- + return (mnew);
- +}
- diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
- index ba1e88c6175..b7d23354eb3 100644
- --- a/sys/sys/mbuf.h
- +++ b/sys/sys/mbuf.h
- @@ -167,7 +167,8 @@ struct pkthdr {
- uint8_t l3hlen; /* layer 3 hdr len */
- uint8_t l4hlen; /* layer 4 hdr len */
- uint8_t l5hlen; /* layer 5 hdr len */
- - uint32_t spare;
- + uint32_t spare:8;
- + uint32_t vxlanid:24;
- };
- };
- union {
- @@ -285,6 +286,172 @@ struct mbuf {
- };
- };
- +#define MVEC_MANAGED 0x0 /* cluster should be freed when refcnt goes to 0 */
- +#define MVEC_UNMANAGED 0x1 /* memory managed elsewhere */
- +#define MVEC_MBUF 0x2 /* free to mbuf zone */
- +
- +#define MVALLOC_MALLOC 0x0 /* mvec was malloced with type M_MVEC */
- +#define MVALLOC_MBUF 0x1 /* mvec was allocated from zone_mbuf */
- +
- +/*
- + * | mbuf { }| pkthdr { } | m_ext { }| mvec_header { } | mvec_ent[] | refcnt[] (optional) |
- + */
- +struct mvec_header {
- + uint64_t mh_count:7; /* number of segments */
- + uint64_t mh_start:7; /* starting segment */
- + uint64_t mh_used:7; /* segments in use */
- + uint64_t mh_mvtype:3; /* mvec allocation */
- + uint64_t mh_multiref:1; /* the clusters have independent ref counts so
- + * an array of refcounts sits after the mvec_ents
- + */
- + uint64_t mh_multipkt:1; /* contains multiple packets */
- + uint64_t mh_flags:38;
- +};
- +
- +struct mvec_ent {
- + caddr_t me_cl;
- + uint16_t me_off;
- + uint16_t me_len;
- + uint16_t me_eop:1;
- + uint16_t me_type:2;
- + uint16_t me_spare:13;
- + uint8_t me_ext_flags;
- + uint8_t me_ext_type;
- +};
- +
- +struct mbuf_ext {
- + struct mbuf me_mbuf;
- + struct mvec_header me_mh;
- + struct mvec_ent me_ents[0];
- +};
- +
- +#ifdef _KERNEL
- +#define MBUF2MH(m_) (&(((struct mbuf_ext *)(m_))->me_mh))
- +#define MBUF2ME(m_) (((struct mbuf_ext *)(m_))->me_ents)
- +#define MBUF2REF(m_) ((m_refcnt_t *)(MBUF2ME(m_) + MBUF2MH(m_)->mh_count))
- +
- +#define MHMEI(m_, mh_, idx_) (MBUF2ME(m_) + (mh_)->mh_start + (idx_))
- +#define MHREFI(m_, mh_, idx_) (MBUF2REF(m_) + (mh_)->mh_start + (idx_))
- +
- +#define ME_SEG(m_, mh_, idx_) (MHMEI(m_, mh_,idx_)->me_cl + MHMEI(m_, mh_, idx_)->me_off)
- +#define ME_LEN(m_, mh_, idx_) (MHMEI(m_, mh_,idx_)->me_len)
- +
- +#define MBUF_ME_MAX ((MHLEN - sizeof(struct m_ext) - sizeof(struct mvec_header))/sizeof(struct mvec_ent))
- +
- +#define m_ismvec(m) (((m)->m_flags & M_EXT) && ((m)->m_ext.ext_type == EXT_MVEC))
- +#define me_data(me) ((me)->me_cl + (me)->me_off)
- +/* XXX --- fix */
- +#define ME_WRITABLE(m, i) (0)
- +
- +struct mvec_cursor {
- + uint16_t mc_idx;
- + uint16_t mc_off;
- +};
- +
- +typedef union {
- + /*
- + * If EXT_FLAG_EMBREF is set, then we use refcount in the
- + * mbuf, the 'ext_count' member. Otherwise, we have a
- + * shadow copy and we use pointer 'ext_cnt'. The original
- + * mbuf is responsible to carry the pointer to free routine
- + * and its arguments. They aren't copied into shadows in
- + * mb_dupcl() to avoid dereferencing next cachelines.
- + */
- + volatile u_int ext_count;
- + volatile u_int *ext_cnt;
- +} m_refcnt_t;
- +
- +/*
- + * Get the index and relative offset of `off` into mvec `m`
- + */
- +void *mvec_seek(struct mbuf *m, struct mvec_cursor *mc, int off);
- +
- +/*
- + * Trim (destructively if unshared) `req_len` bytes of `m`.
- + * Will trim the front if req_len is positive and the tail
- + * if req_len is negative.
- + */
- +void mvec_adj(struct mbuf *m, int req_len);
- +
- +/*
- + * Make the first `count` bytes of `m` index `idx` contiguous
- + */
- +struct mbuf *mvec_pullup(struct mbuf *m, int idx, int count);
- +
- +/*
- + * Perform the accounting necessary to free all references contained
- + * in `m`, as well as `m` itself
- + */
- +void mvec_free(struct mbuf_ext *m);
- +
- +/*
- + * Convert mbuf chain `m` to mvec non-destructively. Returns
- + * NULL on failure. It is the caller's responsibility to free
- + * the source on success.
- + */
- +struct mbuf_ext *mchain_to_mvec(struct mbuf *m, int how);
- +
- +struct mbuf_ext *pktchain_to_mvec(struct mbuf *m, int mtu, int how);
- +
- +
- +/*
- + * Convert mvec `m` to mbuf chain non-destructively.
- + * Returns NULL if not successful. It is the caller's
- + * responsibility to free the source on success.
- + */
- +struct mbuf *mvec_to_mchain(struct mbuf *m, int how);
- +/*
- + * Given an mvec `m` returns a new mvec of segmented packets.
- + * If prehdrlen is non-zero the first prehdrlen bytes are
- + * treated as encapsulation and copied to the front of every
- + * packet. Non-destructive.
- + */
- +struct mbuf_ext *mvec_tso(struct mbuf_ext *m, int prehdrlen, bool freesrc);
- +
- +/*
- + * Create size bytes of room at the front of `m`. Will allocate a
- + * new mvec if there is no room for an additional mvec_ent.
- + */
- +struct mbuf *mvec_prepend(struct mbuf *m, int size);
- +
- +/*
- + * Append `cl` of type `cltype` and length `len` starting at `off`
- + * to mvec `m` - return a new mvec if `cl` won't fit in the existing
- + * entries.
- + */
- +struct mbuf *mvec_append(struct mbuf *m, caddr_t cl, uint16_t off,
- + uint16_t len, uint8_t cltype);
- +
- +/*
- + * Allocate mvec with `count` entries and `len` additional bytes.
- + */
- +struct mbuf_ext *mvec_alloc(uint8_t count, int len, int how);
- +
- +/*
- + * Initialize an mbuf `m` from zone_mbuf as an mvec.
- + */
- +int mvec_init_mbuf(struct mbuf *m, uint8_t count, uint8_t type);
- +
- +
- +uint16_t mvec_cksum_skip(struct mbuf *m, int len, int skip);
- +
- +
- +/*
- + * Mvec analogs to mbuf helpers that should be implemented sooner
- + * rather than later.
- + */
- +void mvec_copydata(const struct mbuf *m, int off, int len, caddr_t cp);
- +struct mbuf *mvec_dup(const struct mbuf *m, int how);
- +struct mbuf *mvec_defrag(const struct mbuf *m, int how);
- +struct mbuf *mvec_collapse(struct mbuf *m, int how, int maxfrags);
- +
- +#ifdef INVARIANTS
- +void mvec_sanity(struct mbuf *m);
- +#else
- +static __inline void mvec_sanity(struct mbuf *m __unused) {}
- +#endif
- +
- +#endif
- /*
- * mbuf flags of global significance and layer crossing.
- * Those of only protocol/layer specific significance are to be mapped
- @@ -317,7 +484,7 @@ struct mbuf {
- #define M_PROTO9 0x00100000 /* protocol-specific */
- #define M_PROTO10 0x00200000 /* protocol-specific */
- #define M_PROTO11 0x00400000 /* protocol-specific */
- -#define M_PROTO12 0x00800000 /* protocol-specific */
- +#define M_VXLANTAG 0x00800000 /* vxlanid is valid */
- #define MB_DTOR_SKIP 0x1 /* don't pollute the cache by touching a freed mbuf */
- @@ -326,14 +493,14 @@ struct mbuf {
- */
- #define M_PROTOFLAGS \
- (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8|\
- - M_PROTO9|M_PROTO10|M_PROTO11|M_PROTO12)
- + M_PROTO9|M_PROTO10|M_PROTO11)
- /*
- * Flags preserved when copying m_pkthdr.
- */
- #define M_COPYFLAGS \
- (M_PKTHDR|M_EOR|M_RDONLY|M_BCAST|M_MCAST|M_PROMISC|M_VLANTAG|M_TSTMP| \
- - M_TSTMP_HPREC|M_PROTOFLAGS)
- + M_TSTMP_HPREC|M_PROTOFLAGS|M_VXLANTAG)
- /*
- * Mbuf flag description for use with printf(9) %b identifier.
- @@ -438,6 +605,7 @@ struct mbuf {
- #define EXT_JUMBO16 5 /* jumbo cluster 16184 bytes */
- #define EXT_PACKET 6 /* mbuf+cluster from packet zone */
- #define EXT_MBUF 7 /* external mbuf reference */
- +#define EXT_MVEC 8 /* pointer to mbuf vector */
- #define EXT_VENDOR1 224 /* for vendor-internal use */
- #define EXT_VENDOR2 225 /* for vendor-internal use */
- @@ -460,6 +628,8 @@ struct mbuf {
- */
- #define EXT_FLAG_EMBREF 0x000001 /* embedded ext_count */
- #define EXT_FLAG_EXTREF 0x000002 /* external ext_cnt, notyet */
- +#define EXT_FLAG_MVECREF 0x000004 /* reference is an mvec */
- +#define EXT_FLAG_EXTFREE 0x000008 /* ext_free is valid */
- #define EXT_FLAG_NOFREE 0x000010 /* don't free mbuf to pool, notyet */
- @@ -637,7 +807,6 @@ u_int m_fixhdr(struct mbuf *);
- struct mbuf *m_fragment(struct mbuf *, int, int);
- void m_freem(struct mbuf *);
- struct mbuf *m_get2(int, int, short, int);
- -struct mbuf *m_getjcl(int, short, int, int);
- struct mbuf *m_getm2(struct mbuf *, int, int, short, int);
- struct mbuf *m_getptr(struct mbuf *, int, int *);
- u_int m_length(struct mbuf *, struct mbuf **);
- @@ -751,6 +920,7 @@ m_init(struct mbuf *m, int how, short type, int flags)
- m->m_len = 0;
- m->m_flags = flags;
- m->m_type = type;
- + m->m_ext.ext_free = NULL;
- if (flags & M_PKTHDR)
- error = m_pkthdr_init(m, how);
- else
- @@ -786,19 +956,6 @@ m_gethdr(int how, short type)
- return (m);
- }
- -static __inline struct mbuf *
- -m_getcl(int how, short type, int flags)
- -{
- - struct mbuf *m;
- - struct mb_args args;
- -
- - args.flags = flags;
- - args.type = type;
- - m = uma_zalloc_arg(zone_pack, &args, how);
- - MBUF_PROBE4(m__getcl, how, type, flags, m);
- - return (m);
- -}
- -
- /*
- * XXX: m_cljset() is a dangerous API. One must attach only a new,
- * unreferenced cluster to an mbuf(9). It is not possible to assert
- @@ -838,12 +995,37 @@ m_cljset(struct mbuf *m, void *cl, int type)
- m->m_flags |= M_EXT;
- MBUF_PROBE3(m__cljset, m, cl, type);
- }
- -
- -static __inline void
- -m_chtype(struct mbuf *m, short new_type)
- +/*
- + * m_getjcl() returns an mbuf with a cluster of the specified size attached.
- + * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
- + */
- +static __inline struct mbuf *
- +m_getjcl(int how, short type, int flags, int size)
- {
- + struct mb_args args;
- + struct mbuf *m, *n;
- + uma_zone_t zone;
- +
- + args.flags = flags;
- + args.type = type;
- +
- + m = uma_zalloc_arg(zone_mbuf, &args, how);
- + if (m == NULL)
- + return (NULL);
- +
- + zone = m_getzone(size);
- + n = uma_zalloc_arg(zone, m, how);
- + if (n == NULL) {
- + uma_zfree(zone_mbuf, m);
- + return (NULL);
- + }
- + return (m);
- +}
- - m->m_type = new_type;
- +static __inline struct mbuf *
- +m_getcl(int how, short type, int flags)
- +{
- + return (m_getjcl(how, type, flags, MCLBYTES));
- }
- static __inline void
- @@ -1008,12 +1190,6 @@ m_align(struct mbuf *m, int len)
- *_mmp = _mm; \
- } while (0)
- -/*
- - * Change mbuf to new type. This is a relatively expensive operation and
- - * should be avoided.
- - */
- -#define MCHTYPE(m, t) m_chtype((m), (t))
- -
- /* Length to m_copy to copy all. */
- #define M_COPYALL 1000000000
- @@ -1219,6 +1395,19 @@ m_free(struct mbuf *m)
- return (n);
- }
- +static __inline void
- +m_freechain(struct mbuf *m)
- +{
- + struct mbuf *mp, *mnext;
- +
- + mp = m;
- + while (mp != NULL) {
- + mnext = mp->m_nextpkt;
- + m_freem(mp);
- + mp = mnext;
- + }
- +}
- +
- static __inline int
- rt_m_getfib(struct mbuf *m)
- {
- diff --git a/sys/sys/proc.h b/sys/sys/proc.h
- index 4af92a8297b..c015889570c 100644
- --- a/sys/sys/proc.h
- +++ b/sys/sys/proc.h
- @@ -51,6 +51,7 @@
- #include <sys/lock_profile.h>
- #include <sys/_mutex.h>
- #include <sys/osd.h>
- +#include <sys/ktr.h>
- #include <sys/priority.h>
- #include <sys/rtprio.h> /* XXX. */
- #include <sys/runq.h>
- @@ -1136,6 +1137,52 @@ td_softdep_cleanup(struct thread *td)
- softdep_ast_cleanup(td);
- }
- +extern u_char kdb_active;
- +void critical_preempt(struct thread *td);
- +
- +static __inline void
- +_critical_enter()
- +{
- + struct thread *td;
- +
- + td = curthread;
- + td->td_critnest++;
- + __compiler_membar();
- +
- + CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
- + (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
- +}
- +
- +static __inline void
- +_critical_exit()
- +{
- + struct thread *td;
- +
- + td = curthread;
- + KASSERT(td->td_critnest != 0,
- + ("critical_exit: td_critnest == 0"));
- + __compiler_membar();
- + if (__predict_true(td->td_critnest == 1)) {
- + td->td_critnest = 0;
- +
- + /*
- + * Interrupt handlers execute critical_exit() on
- + * leave, and td_owepreempt may be left set by an
- + * interrupt handler only when td_critnest > 0. If we
- + * are decrementing td_critnest from 1 to 0, read
- + * td_owepreempt after decrementing, to not miss the
- + * preempt. Disallow compiler to reorder operations.
- + */
- + __compiler_membar();
- + if (__predict_false(td->td_owepreempt && !kdb_active))
- + critical_preempt(td);
- + } else
- + td->td_critnest--;
- +
- + CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
- + (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
- +}
- +
- #endif /* _KERNEL */
- #endif /* !_SYS_PROC_H_ */
- diff --git a/sys/sys/sglist.h b/sys/sys/sglist.h
- index 5674416c07a..ba1dad1bc7d 100644
- --- a/sys/sys/sglist.h
- +++ b/sys/sys/sglist.h
- @@ -88,6 +88,7 @@ struct sglist *sglist_alloc(int nsegs, int mflags);
- int sglist_append(struct sglist *sg, void *buf, size_t len);
- int sglist_append_bio(struct sglist *sg, struct bio *bp);
- int sglist_append_mbuf(struct sglist *sg, struct mbuf *m0);
- +int sglist_append_mvec(struct sglist *sg, struct mbuf *m0);
- int sglist_append_phys(struct sglist *sg, vm_paddr_t paddr,
- size_t len);
- int sglist_append_sglist(struct sglist *sg, struct sglist *source,
- diff --git a/sys/sys/sockio.h b/sys/sys/sockio.h
- index cc335e88707..14e5e7d8da6 100644
Add Comment
Please, Sign In to add comment