/* @(#)01 1.7 src/bos/kernel/sys/mbuf_macro.h, sysuipc, bos72X, x2021_50A7 10/28/21 16:34:22 */

#ifndef _SYS_MBUF_MACRO_H_
#define _SYS_MBUF_MACRO_H_

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE(review): the two include directives below carry no operand -- the
 * header names appear to have been lost in a text conversion.  Restore them
 * from the shipped header before building; do not guess them here. */
#include
#include

/*
 * Macros for type conversion
 * mtod(m,t)	- convert mbuf pointer to data pointer of correct type
 * dtom(x)	- convert data pointer within mbuf to mbuf pointer (XXX)
 */
#define mtod(m,t) ((t)((m)->m_data))
/* dtom() masks the low bits of the address; it is only valid for pointers
 * into the mbuf's internal data area (mbufs are MSIZE-aligned), never for
 * data living in an external cluster. */
#define dtom(x) ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)))
/* mtocl(): address of the external cluster attached to this mbuf. */
#define mtocl(m) ((m)->m_ext.ext_buf)

/* Upper-case aliases for the conversion macros above. */
#define MTOD(m,t) mtod(m,t)
#define DTOM(x) dtom(x)
#define MTOCL(x) mtocl(x)
#define CLTOM(x) cltom(x)

/*
 * Macros to access data structure elements
 * (shorthand names for the nested header/union members of struct mbuf)
 */
#define m_next m_hdr.mh_next
#define m_len m_hdr.mh_len
#define m_data m_hdr.mh_data
#define m_type m_hdr.mh_type
#define m_flags m_hdr.mh_flags
#define m_nextpkt m_hdr.mh_nextpkt
#define m_eyecat m_hdr.mh_eyecat
#define m_options m_hdr.mh_options
#define m_flags2 m_hdr.mh_flags2
#define m_event m_hdr.mh_event
#define m_contig m_hdr.mh_contig
#define m_cid m_hdr.mh_cid
#define m_mls m_hdr.mh_mls
/* BSD-compat alias: m_act is the packet-chain link. */
#define m_act m_nextpkt

/* Packet-header variant members (only valid when M_PKTHDR is set). */
#define m_pkthdr M_dat.MH.MH_pkthdr
#define m_ext M_dat.MH.MH_dat.MH_ext
#define m_pktdat M_dat.MH.MH_dat.MH_databuf
#define m_qosdat M_dat.MH.MH_dat.MH_qos.MH_qosdatabuf
#define m_qoshdr M_dat.MH.MH_dat.MH_qos.MH_qoshdr
#define m_sumdat M_dat.MH.MH_dat.MH_sum.MH_sumdat
#define m_suminfo M_dat.MH.MH_dat.MH_sum.MH_suminfo
#define m_qossuminfo M_dat.MH.MH_dat.MH_qos.MH_suminfo
#define m_dat M_dat.M_databuf
#define m_dcbflushinfo M_dat.MH.MH_dat.MH_dcbflush.MH_dcbflushinfo
#define m_dcbflushdat M_dat.MH.MH_dat.MH_dcbflush.MH_dcbflushdatabuf

/* External-cluster bookkeeping (only valid when M_EXT is set). */
#define m_extfree m_ext.ext_free
#define m_extarg m_ext.ext_arg
#define m_extpool m_ext.ext_mpool
#define m_extdebug m_ext.ext_debug
#define m_forw m_ext.ext_ref.forw
#define m_back m_ext.ext_ref.back
#define m_hasxm m_ext.ext_hasxm
#define m_xmemd m_ext.ext_xmemd
#define m_extbuf m_ext.ext_buf
#define m_size m_ext.ext_size
#define m_bktidx m_ext.ext_bktidx

/* does
mbuf hold a broadcast packet? */
#define m_broadcast(m) ((m)->m_flags & (M_BCAST|M_MCAST|M_WCARD))

/**
 * Initializes the main mbuf fields
 * (shared field-reset sequence; the MGET* allocation macros below repeat
 * this same initialization inline)
 */
#define MBUF_INIT(_mbuf_p, _mbuf_type) \
do { \
    (_mbuf_p)->m_next = (_mbuf_p)->m_nextpkt = 0; \
    (_mbuf_p)->m_type = (_mbuf_type); \
    (_mbuf_p)->m_data = (_mbuf_p)->m_dat; \
    (_mbuf_p)->m_flags = 0; \
    (_mbuf_p)->m_flags2 = 0; \
    (_mbuf_p)->m_eyecat = EYEC_MBUFA; \
    (_mbuf_p)->m_contig = NULL; \
    (_mbuf_p)->m_mls = NULL; \
    (_mbuf_p)->m_cid = 0; \
    (_mbuf_p)->m_ext.ext_mpool = NULL; \
    (_mbuf_p)->m_ext.ext_debug = NULL; \
} while (0)

/*
 * mbuf allocation/deallocation macros:
 *
 *	MGET(struct mbuf *m, int how, int type)
 *	allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 *	allocates an mbuf and initializes it to contain a packet header
 *	and internal data.
 */

/* -------------------------------------------------------------------------------------
//
// NAME: MGET
//
// DESC: Allocate mbuf from network memory
//
// ARGS: m	mbuf
//	 how	M_WAIT if waiting allowed, M_DONTWAIT otherwise
//	 type	Type of mbuf to create
//
// ------------------------------------------------------------------------------------ */
#define MGET(m, how, type) \
{ \
    MTRCHKL2T(HKWD_MBUF | hkwd_m_get_in, how, type); \
    TRCHKL3T_NOMTRC(HKWD_MBUF | hkwd_m_get_in, how, type, getcaller()); \
\
    /* (!how): M_WAIT==1 maps to a non-blocking-flag of 0 for MALLOC. */ \
    MALLOC((m), struct mbuf *, MSIZE, M_MBUF, (!how)); \
    if (m) \
    { \
        MBSTAT2(mbstat.m_mtypes[type], 1); \
        MBSTAT2(mbstat.m_mbufs, 1); \
        (m)->m_next = (m)->m_nextpkt = 0; \
        (m)->m_type = (type); \
        (m)->m_data = (m)->m_dat; \
        (m)->m_flags = 0; \
        (m)->m_flags2 = 0; \
        (m)->m_eyecat = EYEC_MBUFA; \
        (m)->m_contig = NULL; \
        (m)->m_mls = NULL; \
        (m)->m_cid = 0; \
        (m)->m_ext.ext_mpool = NULL; \
        (m)->m_ext.ext_debug = NULL; \
        TRCHKL2T_NOMTRC(HKWD_MBUF | hkwd_m_get_out, m, mtod(m, caddr_t)); \
    } \
    else \
    { \
        MBSTAT(mbstat.m_drops, 1); \
    } \
}

/* -------------------------------------------------------------------------------------
//
// NAME:
MGET_CPU
//
// DESC: Allocate mbuf from network memory
//
// ARGS: m	mbuf
//	 how	M_WAIT if waiting allowed, M_DONTWAIT otherwise
//	 type	Type of mbuf to create
//	 cpu	CPU id
//
// ------------------------------------------------------------------------------------ */
#define MGET_CPU(m, how, type, cpu) \
{ \
    MTRCHKL2T(HKWD_MBUF | hkwd_m_get_in, how, type); \
    TRCHKL3T_NOMTRC(HKWD_MBUF | hkwd_m_get_in, how, type, getcaller()); \
\
    /* Same as MGET() but allocates from the given CPU's pool. */ \
    MALLOC_CPU((m), struct mbuf *, MSIZE, M_MBUF, (!how), cpu); \
    if (m) \
    { \
        MBSTAT2(mbstat.m_mtypes[type], 1); \
        MBSTAT2(mbstat.m_mbufs, 1); \
        (m)->m_next = (m)->m_nextpkt = 0; \
        (m)->m_type = (type); \
        (m)->m_data = (m)->m_dat; \
        (m)->m_flags = 0; \
        (m)->m_flags2 = 0; \
        (m)->m_eyecat = EYEC_MBUFA; \
        (m)->m_contig = NULL; \
        (m)->m_mls = NULL; \
        (m)->m_cid = 0; \
        (m)->m_ext.ext_mpool = NULL; \
        (m)->m_ext.ext_debug = NULL; \
        TRCHKL2T_NOMTRC(HKWD_MBUF | hkwd_m_get_out, m, mtod(m, caddr_t)); \
    } \
    else \
    { \
        MBSTAT(mbstat.m_drops, 1); \
    } \
}

/* -------------------------------------------------------------------------------------
//
// NAME: MPOOL_CACHE_LOCK
//
// DESC: Lock the mbuf cache pool and raise interrupts.
//
// ARGS: p	pool cache pointer
//	 ipr	old interrupt priority
//
// ------------------------------------------------------------------------------------ */
#define MPOOL_CACHE_LOCK(p, ipr) (ipr) = disable_lock(PL_IMP, &(p)->pool_lock);

/* -------------------------------------------------------------------------------------
//
// NAME: MPOOL_CACHE_UNLOCK
//
// DESC: Unlock the mbuf cache pool and resume interrupts.
//
// ARGS: p	pool cache pointer
//	 ipr	old interrupt priority
//
// ------------------------------------------------------------------------------------ */
#define MPOOL_CACHE_UNLOCK(p, ipr) unlock_enable((ipr), &(p)->pool_lock);

/* -------------------------------------------------------------------------------------
//
// NAME: MGET_POOL
//
// DESC: Allocate mbuf from a header cache pool
//
// ARGS: p	pool cache pointer
//	 m	mbuf
//	 how	M_WAIT if waiting allowed, M_DONTWAIT otherwise
//	 type	Type of mbuf to create
//	 cpu	CPU id
//
// ------------------------------------------------------------------------------------ */
#define MGET_POOL(p, m, how, type, cpu) \
{ \
    int __old_priority; \
    int __space_available; \
    MPOOL_CACHE_LOCK(p, __old_priority); \
    if ((p)->free_list != NULL) \
    { \
        /* Fast path: pop a cached mbuf off the pool's free list. */ \
        (m) = (p)->free_list; \
        (p)->free_list = (m)->m_nextpkt; \
        (p)->pool_current_size--; \
        MPOOL_CACHE_UNLOCK(p, __old_priority); \
        /* NOTE: m_eyecat is not rewritten here -- presumably still \
         * EYEC_MBUFA from the mbuf's original allocation; mbstat \
         * counters are likewise not re-adjusted on a pool hit. */ \
        (m)->m_next = (m)->m_nextpkt = 0; \
        (m)->m_type = (type); \
        (m)->m_data = (m)->m_dat; \
        (m)->m_flags = 0; \
        (m)->m_flags2 = M_USE_MCACHE; \
        (m)->m_contig = NULL; \
        (m)->m_mls = NULL; \
        (m)->m_cid = 0; \
        (m)->m_ext.ext_mpool = NULL; \
        (m)->m_ext.ext_debug = NULL; \
        /* m_options[2] remembers the owning pool for MPUT_POOL(). */ \
        (m)->m_options[2] = (long)(p); \
    } \
    else \
    { \
        (p)->missed_allocations++; \
        /* determine if we can add this mbuf to the cache pool */ \
        __space_available = (p)->pool_maximum_size - (p)->pool_current_size; \
        MPOOL_CACHE_UNLOCK(p, __old_priority); \
        MGET_CPU(m, how, type, cpu); \
        if ((m) && (__space_available > 0)) \
        { \
            (m)->m_flags2 |= M_USE_MCACHE; \
            (m)->m_options[2] = (long)(p); \
        } \
    } \
}

/* -------------------------------------------------------------------------------------
//
// NAME: MPUT_POOL
//
// DESC: Attempt to place a mbuf into a header cache pool
//
// NOTE: The mbuf arg 'm' will be set to NULL if the pool put
//	 operation was successful.
//
// ARGS: m	mbuf
//
// ------------------------------------------------------------------------------------ */
#define MPUT_POOL(m) \
{ \
    struct mpool_cache *__pool; \
    int __old_priority; \
    /* Owning pool was stashed in m_options[2] by MGET_POOL(). */ \
    __pool = (struct mpool_cache *)(m)->m_options[2]; \
\
    if (__pool->pool_eyecat == EYEC_MCACHE) \
    { \
        MPOOL_CACHE_LOCK(__pool, __old_priority); \
        if (__pool->pool_current_size < __pool->pool_maximum_size) \
        { \
            /* Room in the pool: push onto the free list and hand \
             * ownership back; caller sees (m) == NULL on success. */ \
            (m)->m_nextpkt = __pool->free_list; \
            __pool->free_list = (m); \
            __pool->pool_current_size++; \
            MPOOL_CACHE_UNLOCK(__pool, __old_priority); \
            (m) = NULL; \
        } \
        else \
        { \
            /* Pool full: detach from the cache so the caller frees it. */ \
            MPOOL_CACHE_UNLOCK(__pool, __old_priority); \
            (m)->m_flags2 &= ~M_USE_MCACHE; \
            (m)->m_options[2] = NULL; \
        } \
    } \
    else \
    { \
        /* Eyecatcher mismatch: pool pointer is stale/invalid. */ \
        (m)->m_flags2 &= ~M_USE_MCACHE; \
        (m)->m_options[2] = NULL; \
    } \
}

/* -------------------------------------------------------------------------------------
//
// NAME: MPOOL_CACHE_INIT
//
// DESC: Initialize a new mbuf header pool object
//
// ARGS: p	pool cache pointer
//	 size	maximum size of the pool
//
// ------------------------------------------------------------------------------------ */
#define MPOOL_CACHE_INIT(p, size) \
do \
{ \
    (p)->pool_eyecat = EYEC_MCACHE; \
    (p)->free_list = NULL; \
    (p)->pool_maximum_size = size; \
    (p)->pool_current_size = 0; \
    (p)->missed_allocations = 0; \
\
    lock_alloc(&(p)->pool_lock, LOCK_ALLOC_PIN, 0, -1); \
    simple_lock_init(&(p)->pool_lock); \
\
} while (0)

/* -------------------------------------------------------------------------------------
//
// NAME: MGET_SRAD
//
// DESC: Allocate mbuf from network memory
//
// ARGS: m	mbuf
//	 how	M_WAIT if waiting allowed, M_DONTWAIT otherwise
//	 type	Type of mbuf to create
//	 srad	srad value
//
// ------------------------------------------------------------------------------------ */
#define MGET_SRAD(m, how, type, srad) \
{ \
    MTRCHKL3T(HKWD_MBUF | hkwd_m_get_in, how, type, srad); \
    TRCHKL4T_NOMTRC(HKWD_MBUF | hkwd_m_get_in, how, type, getcaller(), srad); \
\
    /* Allocate with affinity to the given scheduler resource \
     * allocation domain (SRAD). */ \
    MALLOC_SRAD((m), struct mbuf *, MSIZE, M_MBUF, (!how), srad); \
    if (m) \
    { \
        MBSTAT2(mbstat.m_mtypes[type], 1); \
        MBSTAT2(mbstat.m_mbufs, 1); \
        (m)->m_next = (m)->m_nextpkt = 0; \
        (m)->m_type = (type); \
        (m)->m_data = (m)->m_dat; \
        (m)->m_flags = 0; \
        (m)->m_flags2 = 0; \
        (m)->m_eyecat = EYEC_MBUFA; \
        (m)->m_contig = NULL; \
        (m)->m_mls = NULL; \
        (m)->m_cid = 0; \
        (m)->m_ext.ext_mpool = NULL; \
        (m)->m_ext.ext_debug = NULL; \
        TRCHKL2T_NOMTRC(HKWD_MBUF | hkwd_m_get_out, m, mtod(m, caddr_t)); \
    } \
    else \
    { \
        MBSTAT(mbstat.m_drops, 1); \
    } \
}

/* -------------------------------------------------------------------------------------
//
// NAME: MGETHDR
//
// DESC: Allocate mbuf header from network memory
//
// ARGS: m	mbuf
//	 how	M_WAIT if waiting allowed, M_DONTWAIT otherwise
//	 type	Type of mbuf to create
//
// ------------------------------------------------------------------------------------ */
#define MGETHDR(m, how, type) \
{ \
    MTRCHKL2T(HKWD_MBUF | hkwd_m_get_in, how, type); \
    TRCHKL3T_NOMTRC(HKWD_MBUF | hkwd_m_get_in, how, type, getcaller()); \
\
    MALLOC((m), struct mbuf *, MSIZE, M_MBUF, (!how)); \
    if (m) \
    { \
        MBSTAT2(mbstat.m_mtypes[type], 1); \
        MBSTAT2(mbstat.m_mbufs, 1); \
        (m)->m_next = (m)->m_nextpkt = 0; \
        (m)->m_type = (type); \
        /* Header variant: data area starts after the packet header. */ \
        (m)->m_data = (m)->m_pktdat; \
        (m)->m_flags = M_PKTHDR; \
        (m)->m_flags2 = 0; \
        (m)->m_eyecat = EYEC_MBUFA; \
        (m)->m_contig = NULL; \
        (m)->m_mls = NULL; \
        (m)->m_cid = 0; \
        (m)->m_ext.ext_mpool = NULL; \
        (m)->m_ext.ext_debug = NULL; \
        TRCHKL2T_NOMTRC(HKWD_MBUF | hkwd_m_get_out, m, mtod(m, caddr_t)); \
    } \
    else \
    { \
        MBSTAT(mbstat.m_drops, 1); \
    } \
}

/* -------------------------------------------------------------------------------------
//
// NAME: MGETHDR_CPU
//
// DESC: Allocate mbuf header from network memory
//
// ARGS: m	mbuf
//	 how	M_WAIT if waiting allowed, M_DONTWAIT otherwise
//	 type	Type of mbuf to create
//	 cpu	CPU id
//
// ------------------------------------------------------------------------------------ */
#define MGETHDR_CPU(m, how, type, cpu) \
{ \
MTRCHKL2T(HKWD_MBUF | hkwd_m_get_in, how, type); \ TRCHKL3T_NOMTRC(HKWD_MBUF | hkwd_m_get_in, how, type, getcaller()); \ \ MALLOC_CPU((m), struct mbuf *, MSIZE, M_MBUF, (!how), cpu); \ if (m) \ { \ MBSTAT2(mbstat.m_mtypes[type], 1); \ MBSTAT2(mbstat.m_mbufs, 1); \ (m)->m_next = (m)->m_nextpkt = 0; \ (m)->m_type = (type); \ (m)->m_data = (m)->m_pktdat; \ (m)->m_flags = M_PKTHDR; \ (m)->m_flags2 = 0; \ (m)->m_eyecat = EYEC_MBUFA; \ (m)->m_contig = NULL; \ (m)->m_mls = NULL; \ (m)->m_cid = 0; \ (m)->m_ext.ext_mpool = NULL; \ (m)->m_ext.ext_debug = NULL; \ TRCHKL2T_NOMTRC(HKWD_MBUF | hkwd_m_get_out, m, mtod(m, caddr_t)); \ } \ else \ { \ MBSTAT(mbstat.m_drops, 1); \ } \ } /* ------------------------------------------------------------------------------------- // // NAME: MGETHDR_SRAD // // DESC: Allocate mbuf header from network memory // // ARGS: m mbuf // how M_WAIT if waiting allowed, M_DONTWAIT otherwise // type Type of mbuf to create // srad srad value // // ------------------------------------------------------------------------------------ */ #define MGETHDR_SRAD(m, how, type, srad) \ { \ MTRCHKL3T(HKWD_MBUF | hkwd_m_get_in, how, type, srad); \ TRCHKL4T_NOMTRC(HKWD_MBUF | hkwd_m_get_in, how, type, getcaller(), srad); \ \ MALLOC_SRAD((m), struct mbuf *, MSIZE, M_MBUF, (!how), srad); \ if (m) \ { \ MBSTAT2(mbstat.m_mtypes[type], 1); \ MBSTAT2(mbstat.m_mbufs, 1); \ (m)->m_next = (m)->m_nextpkt = 0; \ (m)->m_type = (type); \ (m)->m_data = (m)->m_pktdat; \ (m)->m_flags = M_PKTHDR; \ (m)->m_flags2 = 0; \ (m)->m_eyecat = EYEC_MBUFA; \ (m)->m_contig = NULL; \ (m)->m_mls = NULL; \ (m)->m_cid = 0; \ (m)->m_ext.ext_mpool = NULL; \ (m)->m_ext.ext_debug = NULL; \ TRCHKL2T_NOMTRC(HKWD_MBUF | hkwd_m_get_out, m, mtod(m, caddr_t)); \ } \ else \ { \ MBSTAT(mbstat.m_drops, 1); \ } \ } #define MGETBUF(len, flag) ((len) <= MHLEN) ? m_gethdr((flag), MT_DATA) : m_getclustm((flag), MT_DATA, (len)) /* * Mbuf cluster macros. * MCLALLOC(caddr_t p, int how) allocates an mbuf cluster. 
* MCLGET adds such clusters to a normal mbuf; * the flag M_EXT is set upon success. * MCLFREE unconditionally frees a cluster allocated by MCLALLOC, */ #define MCLALLOC(p, how) \ (p = m_clalloc((struct mbuf *)0, MCLBYTES, (how))) #define MCLALLOC_SRAD(p, how, srad) \ (p = m_clalloc((struct mbuf *)0, MCLBYTES, (how))) #define MCLFREE(p) { \ MBSTAT2(mbstat.m_clusters, -1); \ FREE((p), M_CLUSTER); \ } #define MCLGET(m, how) \ (void) m_clalloc((m), MCLBYTES, (how)) #define MCLGET_SRAD(m, how, srad) \ (void) m_clalloc((m), MCLBYTES, (how)) #define MCLREFERENCED(m) \ ((m)->m_ext.ext_ref.forw != &((m)->m_ext.ext_ref)) /* ------------------------------------------------------------------------------------- // // NAME: MFREE // // DESC: Free a single mbuf and associated external storage // also, place the successor, if any, in n // // ARGS: m mbuf to free // n successor mbuf // // ------------------------------------------------------------------------------------ */ #define MFREE(m, n) \ { \ MBUF_LOCK_DECL() \ struct mbuf *mnext = (m)->m_next; \ void (*extfree)() = (m)->m_ext.ext_free; \ __mh_flags_t __mflags = (m)->m_flags; \ __mh_flags_t __mflags2 = (m)->m_flags2; \ long __moptions2 = (m)->m_options[2]; \ \ MTRCHKL2T(HKWD_MBUF | hkwd_m_free_in, (m), mtod(m, caddr_t)); \ TRCHKL4T_NOMTRC(HKWD_MBUF | hkwd_m_free_in, (m), mtod(m, caddr_t), \ getcaller(), 0); \ \ if (MBUFLABELED((m))) \ { \ ncred_free(MBUFSEC((m))); \ MBUFSEC((m)) = NULL; \ } \ \ if ((m)->m_flags & M_EXT) \ { \ MBUF_LOCK((m)->m_ext.ext_buf); \ \ if (MCLREFERENCED((m))) \ { \ /* Unlink with lock held */ \ remque(&(m)->m_ext.ext_ref); \ __mflags &= ~M_EXT2; \ MBUF_UNLOCK((m)->m_ext.ext_buf); \ } \ else if ((m)->m_ext.ext_free == NULL) \ { \ MBUF_UNLOCK((m)->m_ext.ext_buf); \ MBSTAT2(mbstat.m_clusters, -1); \ FREE((m)->m_ext.ext_buf, M_CLUSTER); \ } \ else if ((m)->m_flags & M_EXT2) \ { \ MBUF_UNLOCK((m)->m_ext.ext_buf); \ (*((m)->m_ext.ext_free))((caddr_t)(m), \ (m)->m_ext.ext_size, (m)->m_ext.ext_arg); \ } \ 
        else \
        { \
            /* Private external storage: run the owner's free routine, \
             * then fall through to free the mbuf itself below. */ \
            MBUF_UNLOCK((m)->m_ext.ext_buf); \
            (*((m)->m_ext.ext_free))((m)->m_ext.ext_buf, \
                (m)->m_ext.ext_size, (m)->m_ext.ext_arg); \
            (m)->m_flags &= ~M_EXT; \
        } \
    } \
\
    (n) = mnext; \
    /* When M_EXT2 storage with a free routine owned the mbuf, the ext_free \
     * call above already released the mbuf memory -- skip the FREE path. */ \
    if (!((__mflags & M_EXT2) && extfree)) \
    { \
        if ((__mflags2 & M_USE_MCACHE) && (__moptions2 != NULL)) \
        { \
            /* Pool-cached mbuf: MPUT_POOL sets (m) to NULL on success. */ \
            MPUT_POOL((m)); \
        } \
\
        if ((m) != NULL) \
        { \
            MBSTAT2(mbstat.m_mbufs, -1); \
            MBSTAT2(mbstat.m_mtypes[(m)->m_type], -1); \
            /* Stamp the "freed" eyecatcher before returning the memory. */ \
            (m)->m_eyecat = EYEC_MBUFF; \
            FREE((m), M_MBUF); \
        } \
    } \
\
    TRCHKL1T_NOMTRC(HKWD_MBUF | hkwd_m_free_out, (m)); \
}

#define m_getclust(h, t) m_getclustm(h, t, MCLBYTES)

/* Does the mbuf carry an external cluster? */
#define M_HASCL(m) ((m)->m_flags & M_EXT)
#define M_XMEMD(m) (m_getxmemd((m)))
#define MCLREFERENCE(m, n) m_clreference((m), (n))
#define MCLUNREFERENCE(m) m_clunreference((m))

/* Pointer to the checksum info area, or 0 when no checksum offload flag is
 * set.  QOS/EXT mbufs keep it in the qos union, others in the sum union. */
#define M_SUMINFOP(m) ((m)->m_flags & (M_CHECKSUM_RX|M_CHECKSUM_TX) ? \
    ((m)->m_flags & (M_QOS|M_EXT) ? \
        &(m)->m_qossuminfo : \
        &(m)->m_suminfo) : \
    0)

/* Populate suminfo irrespective of M_CHECKSUM_TX */
#define LARGE_SUMINFOP(m) ((m)->m_flags & (M_QOS|M_EXT) ? &(m)->m_qossuminfo : &(m)->m_suminfo)

/* -------------------------------------------------------------------------------------
//
// NAME: M_COPY_PKTHDR
//
// DESC: Copy mbuf pkthdr from from to to.
//	 from must have M_PKTHDR set, and to must be empty.
//	 The logic is a bit confusing, could be rewritten to match struct mbuf def.
//	 If to's M_EXT is set, then the user NEEDS to adjust the m_data, see m_copym,
//	 i.e. no need to set it in the macro to be reset again. It is implied when
//	 M_EXT and checksum is on that m_qossuminfo is used. The dcbflushinfo is not
//	 copied, the flag is turned off.
//
// ARGS: to	target mbuf pointer
//	 from	source mbuf pointer
//
// ------------------------------------------------------------------------------------ */
#define M_COPY_PKTHDR(to, from) \
do \
{ \
    (to)->m_pkthdr = (from)->m_pkthdr; \
    /* Point m_data at the data area that matches the source's layout. */ \
    if ((from)->m_flags & M_QOS) \
    { \
        (to)->m_qoshdr = (from)->m_qoshdr; \
        (to)->m_data = (to)->m_qosdat; \
    } \
    else if ((from)->m_flags & (M_CHECKSUM_TX|M_CHECKSUM_RX)) \
    { \
        (to)->m_data = (to)->m_sumdat; \
    } \
    else \
    { \
        (to)->m_data = (to)->m_pktdat; \
    } \
\
    /* Only the M_COPYFLAGS/M_COPYFLAGS2 subsets carry over. */ \
    (to)->m_flags = (from)->m_flags & M_COPYFLAGS; \
    (to)->m_flags2 = (from)->m_flags2 & M_COPYFLAGS2; \
    (to)->m_cid = (from)->m_cid; \
\
    if (M_SUMINFOP(from)) \
    { \
        *M_SUMINFOP(to) = *M_SUMINFOP(from); \
    } \
\
} while(0)

/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define M_ALIGN(m, len) \
    { (m)->m_data += (MLEN - (len)) &~ (sizeof(long) - 1); }

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define MH_ALIGN(m, len) \
{ \
    int bufferlength; \
    /* The usable buffer size depends on which union member is active. */ \
    if ((m)->m_flags & M_QOS) \
    { \
        bufferlength = MQOSLEN; \
    } \
    else if ((m)->m_flags & (M_CHECKSUM_TX|M_CHECKSUM_RX)) \
    { \
        bufferlength = MSUMLEN; \
    } \
    else \
    { \
        bufferlength = MHLEN; \
    } \
    (m)->m_data += (bufferlength - (len)) & ~(sizeof(long) - 1); \
}

/*
 * Similar to MH_ALIGN, but for mbufs with a struct qoshdr.
 */
#define MQOS_ALIGN(m, len) \
    { (m)->m_data += (MQOSLEN - (len)) &~ (sizeof(long) - 1); }

/*
 * Compute the amount of space available
 * before the current start of data in an mbuf.
 * (A shared cluster reports 0 -- the space cannot be claimed safely.)
 */
#define M_LEADINGSPACE(m) \
    ((m)->m_flags & M_EXT ? ( MCLREFERENCED((m)) ? 0 : \
        (m)->m_data - (m)->m_ext.ext_buf ) : \
    (m)->m_flags & M_PKTHDR ? \
        ((m)->m_flags & M_QOS ? (m)->m_data - (m)->m_qosdat : \
        ((m)->m_flags & (M_CHECKSUM_TX|M_CHECKSUM_RX) ? \
        (m)->m_data - (m)->m_sumdat : (m)->m_data - (m)->m_pktdat)) : \
    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available
 * after the end of data in an mbuf.
 */
#define M_TRAILINGSPACE(m) \
    ((m)->m_flags & M_EXT ? ( MCLREFERENCED((m)) ? 0 : \
        (m)->m_ext.ext_buf + (m)->m_ext.ext_size - \
        ((m)->m_data + (m)->m_len) ) : \
    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is M_DONTWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 */
#define M_PREPEND(m, plen, how) \
{ \
    if (M_LEADINGSPACE(m) >= (plen)) \
    { \
        (m)->m_data -= (plen); \
        (m)->m_len += (plen); \
    } \
    else \
    { \
        (m) = m_prepend((m), (plen), (how)); \
    } \
\
    if ((m) && (m)->m_flags & M_PKTHDR) \
        (m)->m_pkthdr.len += (plen); \
}

/* change mbuf to new type */
#define MCHTYPE(m, t) \
{ \
    MBSTAT2(mbstat.m_mtypes[(m)->m_type], -1); \
    MBSTAT2(mbstat.m_mtypes[t], 1); \
    (m)->m_type = t; \
}

/* compatibility with 4.3 */
#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)
#define m_clget(m) m_clgetm((m), M_DONTWAIT, MCLBYTES)
/* NOTE(review): identical redefinition of m_getclust (also defined above
 * after MFREE); benign, but one copy could be dropped. */
#define m_getclust(h, t) m_getclustm(h, t, MCLBYTES)

/* MLS macros */
#define MBUFSEC(m) (((m)->m_mls))
/* Does mbuf have a MLS label ? */
#define MBUFLABELED(m) ((((m)->m_mls) != NULL))
/* Sets the MLS label on the mbuf */
#define LABELMBUF(m, cred) \
{ \
    MBUFSEC(m)=(cred); \
}

/*
 * Macros for dealing with buffers in private segments
 */

/* Does this buffer contain private segment buffer and need to be mapped in? */
#define M_NEED_PSEG_MAP(a) (M_HASCL(a) && (a)->m_hasxm && \
    (a)->m_xmemd.aspace_id != XMEM_GLOBAL && \
    !((a)->m_flags & M_MAPPED))

/* Does this buffer contain private segment buffer and need to be unmapped?
 */
#define M_NEED_PSEG_UNMAP(a) (M_HASCL(a) && (a)->m_hasxm && \
    (a)->m_xmemd.aspace_id != XMEM_GLOBAL && \
    ((a)->m_flags & M_MAPPED))

/* Map in a private segment */
#define M_PSEG_MAP(a) \
{ \
    long off; \
    int __rv; \
\
    /* Preserve the data offset across the remapping of ext_buf. */ \
    off = a->m_data - a->m_ext.ext_buf; \
    __rv = xm_mapin(&(a->m_xmemd), a->m_ext.ext_buf, \
        a->m_ext.ext_size, &(a->m_ext.ext_buf)); \
    /* NOTE(review): __rv is never checked -- a failed xm_mapin still \
     * marks the mbuf M_MAPPED; confirm this is intentional. */ \
    a->m_flags |= M_MAPPED; \
    a->m_data = a->m_ext.ext_buf + off; \
}

/* Unmap a private segment */
#define M_PSEG_UNMAP(a) \
{ \
    xm_det(a->m_ext.ext_buf, &(a->m_xmemd)); \
    a->m_flags &= ~M_MAPPED; \
}

/* Map in any private segments in the chain */
#define M_DO_PSEG_MAP(a) \
{ \
    if ((a)->m_flags & M_PSEG) \
    { \
        struct mbuf *nextp; \
\
        nextp = a; \
        do \
        { \
            if (M_NEED_PSEG_MAP(nextp)) \
            { \
                M_PSEG_MAP(nextp); \
            } \
            nextp = nextp->m_next; \
\
        } while (nextp); \
    } \
}

/* Unmap any private segments in the chain */
#define M_DO_PSEG_UNMAP(a) \
{ \
    if ((a)->m_flags & M_PSEG) \
    { \
        struct mbuf *nextp; \
\
        nextp = a; \
        do \
        { \
            if (M_NEED_PSEG_UNMAP(nextp)) \
            { \
                M_PSEG_UNMAP(nextp); \
            } \
            nextp = nextp->m_next; \
\
        } while (nextp); \
    } \
}

/* This function checks if the len we are copying into nam is greater than MLEN.
 * If so we attach a cluster to the mbuf and then do the copy.
 * This function was added due to changes made in sockargs for long user names.
 * The sun_path in sockaddr_un was changed to PATH_MAX. Hence we allocate a
 * mbuf cluster to hold MT_SONAME. However some functions like sogetaddr etc
 * allocate mbuf's using m_get and we try to copy the address from a cluster
 * to mbuf of size MLEN. This caused the system to crash.
 */
#define CHECKBUFF(error, nam, len) \
do { \
    if(!((nam)->m_flags & M_EXT) && ((len) > MLEN)) \
    { \
        if(m_clgetm((nam), M_DONTWAIT, (len)) == 0) \
        { \
            (error) = ENOBUFS; \
        } \
    } \
} while (0)

/* Contiguous buffer backing a cluster pool. */
#define MPOOL_CBUF(mpool) ((mpool)->mclpCBufAddr)
/* Free elements remaining in a cluster bucket. */
#define MBUCKET_FREECNT(bucket) ((bucket)->mclbElemCnt - (bucket)->mclbOutCnt)

/* Derive the owning CPU of an allocation; fall back to the current CPU
 * when the recorded owner is unknown or offline. */
#define addr2cpu(cpu, addr) \
do { \
    register struct kmemusage *kup; \
    btokup(kup, (addr)); \
    (cpu) = ((kup->ku_cpu >= 0) && CPU_IS_ONL(kup->ku_cpu)) ? \
        kup->ku_cpu : CPUID ; \
} while (0)

/* Error-path trace: marked __INFREQUENT to keep it off the hot path. */
#define MPOOL_ETRACE(comp, lvl, mem, a, b, c, d, e) \
do { \
    __INFREQUENT; \
    MPOOL_TRACE((comp), (lvl), (mem), (a), (b), (c), (d), (e)); \
} while (0)

#ifdef _KERNEL
/* -------------------------------------------------------------------------------------
//
// NAME: mclSetAttrFree()
//
// DESC: Set the free flag and hide the cluster if errlevel is set.
//
// ENV: Interrupt/Process
//
// ARGS: m	mbuf pointer
//
// RETURN: n/a
//
// ------------------------------------------------------------------------------------ */
static __inline__ void
mclSetAttrFree(struct mbuf *m, int mpflags)
{
    long errlevel = ERR_LEVEL(m->m_extpool->mclpRasb);

    if (errlevel >= ERRCHECK_DETAIL)
    {
        /* Debug record must still match the live cluster address. */
        assert(m->m_ext.ext_buf == m->m_extdebug->ext_addr);
        if (!(m->m_extdebug->ext_flags & CLUST_HIDDEN))
            mclpHideClusterBase(m, mpflags);
    }
    m->m_extdebug->ext_flags |= CLUST_FREE;
}

/* -------------------------------------------------------------------------------------
//
// NAME: mclSetAttrGet()
//
// DESC: Unset the free flag and unhide the cluster if hidden.
// // ENV: Interrupt/Process // // ARGS: m mbuf pointer // // RETURN: n/a // // ------------------------------------------------------------------------------------ */ static __inline__ void mclSetAttrGet(struct mbuf *m) { long errlevel = ERR_LEVEL(m->m_extpool->mclpRasb); if (m->m_extdebug->ext_flags & CLUST_HIDDEN) mclpUnhideCluster(m, 0); if (errlevel >= ERRCHECK_DETAIL) { assert(m->m_ext.ext_buf == m->m_extdebug->ext_addr); log_clust_traceback(m->m_extdebug); } m->m_extdebug->ext_flags &= ~CLUST_FREE; } /* device drivers needing to clear a flush, should use this macro */ #define M_CLEAR_DCBFLUSH_INFO(_m, _global) \ { \ (_m)->m_flags &= ~M_DCBFLUSH_LOCAL; \ (_m)->m_dcbflushinfo.m = 0; \ (_m)->m_dcbflushinfo.data = 0; \ (_m)->m_dcbflushinfo.len = 0; \ } /* device drivers needing to do a flush, should use this macro */ #define M_SET_DCBFLUSH_INFO(_m, _data, _len, _global) \ { \ if ( (_m)->m_flags & M_EXT) \ { \ if (_global == FALSE) \ { \ (_m)->m_flags |= M_DCBFLUSH_LOCAL; \ } \ (_m)->m_dcbflushinfo.m = (uint64_t)_m; \ (_m)->m_dcbflushinfo.data = (uint64_t)_data; \ (_m)->m_dcbflushinfo.len = (uint64_t)_len; \ } \ } /* to obtain the immediate data location */ #define M_IMD_PTR(_m, _imdptr) \ { \ if ((_m)->m_flags & (M_PKTHDR|M_EXT)) \ { \ if ((_m)->m_flags & M_DCBFLUSH_LOCAL) \ { \ (caddr_t)(_imdptr) = (_m).m_dcbflushdat; \ } \ else if ((_m)->m_flags & M_QOS) \ { \ (caddr_t)(_imdptr) = (_m).m_qosdat; \ } \ else if ((_m)->m_flags & (M_CHECKSUM_TX|M_CHECKSUM_RX)) \ { \ if ((_m)->m_flags & M_EXT) \ { \ (caddr_t)(_imdptr) = (_m).m_qosdat; \ } \ else \ { \ (caddr_t)(_imdptr) = (_m).m_sumdat; \ } \ } \ else if ((_m)->m_flags & (M_EXT)) \ { \ (caddr_t)(_imdptr) = \ ((_m).m_pktdat+sizeof(struct __m_ext)); \ } \ else \ { \ (caddr_t)(_imdptr) = (_m).m_pktdat; \ } \ } \ else \ { \ (caddr_t)(_imdptr) = (_m).m_dat; \ } \ } /* To obtain the immediate data length*/ #define M_IMD_LEN(_m, _imdlen) \ { \ if ((_m)->m_flags & (M_PKTHDR|M_EXT)) \ { \ if ( (_m)->m_flags & 
M_DCBFLUSH_LOCAL) \ { \ (caddr_t)(_imdlen) = MDCBFLUSHLEN; \ } \ else if ((_m)->m_flags & M_QOS) \ { \ (caddr_t)(_imdlen) = MQOSLEN; \ } \ else if ((_m)->m_flags & (M_CHECKSUM_TX|M_CHECKSUM_RX)) \ { \ if ((_m)->m_flags & M_EXT) \ { \ (caddr_t)(_imdlen) = MQOSLEN; \ } \ else \ { \ (caddr_t)(_imdlen) = MSUMLEN; \ } \ } \ else if ((_m)->m_flags & (M_EXT)) \ { \ (caddr_t)(_imdlen) = (MHLEN-sizeof(struct __m_ext)); \ } \ else \ { \ (caddr_t)(_imdlen) = MHLEN; \ } \ } \ else \ { \ (caddr_t)(_imdlen) = MLEN; \ } \ } #define MCLP_LOCKINIT(_lock) \ do \ { \ NET_MALLOC((_lock), DRW_lock *, sizeof(DRW_lock), \ M_LOCKF, M_WAITOK); \ lock_alloc(_lock, LOCK_ALLOC_PIN, MCLUST_POOL_LOCK, -1); \ drw_lock_init(_lock); \ \ } while (0) #define MCLP_LOCK_FREE(_lock) \ do \ { \ drw_lock_free(_lock); \ NET_FREE((_lock), M_LOCKF); \ \ } while (0) /* * We only update the _old_pri place holder after we grab the write lock. * This will avoid side-effects when that place holder is shared across * other functions. */ #define MCLP_WRITE_LOCK(_lock, _old_pri, _new_pri) \ do \ { \ int _tmp_ret_i_disable = i_disable(_new_pri); \ drw_lock_write(_lock); \ (_old_pri) = _tmp_ret_i_disable; \ \ } while (0) /* * The _old_pri value is saved before releasing the lock because after * releasing it, a different thread could grab the write lock and update * that variable before we call i_enable. */ #define MCLP_WRITE_UNLOCK(_lock, _old_pri) \ do \ { \ int _tmp_old_pri = (_old_pri); \ drw_lock_done(_lock); \ i_enable(_tmp_old_pri); \ \ } while (0) /* * _old_pri must be a local variable to the calling function to avoid * side-effects that otherwise would occur when it's shared across * functions. */ #define MCLP_READ_LOCK(_lock, _old_pri, _new_pri) \ do \ { \ (_old_pri) = i_disable(_new_pri); \ drw_lock_read(_lock); \ \ } while (0) /* * _old_pri must be a local variable to the calling function to avoid * side-effects that otherwise would occur when it's shared across * functions. 
*/ #define MCLP_READ_UNLOCK(_lock, _old_pri) \ do \ { \ drw_lock_done(_lock); \ i_enable(_old_pri); \ \ } while (0) #define MCLP_WRITE_TO_READ(_lock) \ drw_lock_write_to_read(_lock) #define MCLP_READ_TO_WRITE(_lock, _ret_val) \ (_ret_val) = drw_lock_read_to_write(_lock) #define MCLP_WRITE_LOCK_NO_DISABLE(_lock) \ drw_lock_write(_lock) #define MBUF_LOCK_HASH(claddr) ((unsigned long)(SEGOFFSET(claddr)) % MBLOCK_HASHSZ) #define MBUF_LOCK_DECL() int _mbufs, _mbcell; #define MBUF_UNLOCK(claddr) unlock_enable(_mbufs, &mbuf_slock[_mbcell].lock) #define MBUF_LOCK(claddr) \ { \ _mbufs = disable_lock(PL_IMP, &mbuf_slock[(_mbcell = MBUF_LOCK_HASH(claddr))].lock); \ mbuf_slock[_mbcell].counter++; \ } \ #define MBUF_LOCKINIT() \ { \ int i; \ for (i = MBLOCK_HASHSZ - 1; i >= 0; i--) \ { \ lock_alloc(&mbuf_slock[i].lock, \ LOCK_ALLOC_PIN, \ MBUF_LOCK_FAMILY, i); \ \ simple_lock_init(&mbuf_slock[i].lock); \ } \ } #define MBSTAT(x, i) (x) += (i) #define MBSTAT2(x, i) #define M_MEMREG(m) (((m)->m_contig) ? (m)->m_contig->mclcMemReg: 0) #define M_CB(m) ((m)->m_contig->mclcCB) #define M_MPOOLID(m) ((M_HASCL(m) && (m)->m_extpool) ? (m)->m_extpool->mclpID: 0) /* Tell if a value is 64K-aligned */ #define IS_64K_ALIGNED(_val) (((uintptr_t)(_val) & ((uintptr_t)PSIZE_64K - 1)) == 0) /* Tell if a value is page-aligned */ #define IS_PAGE_ALIGNED(_val) (((uintptr_t)(_val) & ((uintptr_t)PAGE_SIZE - 1)) == 0) /* * Check that if the size of a given mbuf cluster is multiple of * the page size, the address of the cluster must be page-aligned */ #define CHECK_CLUST_ALIGNMENT(_mbuf_p, _kerrno) \ NET_MEDSEV_CHECK3( \ !IS_PAGE_ALIGNED((_mbuf_p)->m_ext.ext_size) || \ IS_PAGE_ALIGNED((_mbuf_p)->m_ext.ext_buf), \ ras_mbuf_cbp, \ _kerrno, \ (_mbuf_p)->m_ext.ext_size, \ (_mbuf_p)->m_ext.ext_buf, \ _mbuf_p) #endif /* _KERNEL */ #ifdef __cplusplus } #endif #endif /* _SYS_MBUF_MACRO_H_ */