/* IBM_PROLOG_BEGIN_TAG */
/* This is an automatically generated prolog. */
/* */
/* bos72Q src/bos/kernel/net/net_globals.h 1.57.4.2 */
/* */
/* Licensed Materials - Property of IBM */
/* */
/* COPYRIGHT International Business Machines Corp. 1988,2019 */
/* All Rights Reserved */
/* */
/* US Government Users Restricted Rights - Use, duplication or */
/* disclosure restricted by GSA ADP Schedule Contract with IBM Corp. */
/* */
/* IBM_PROLOG_END_TAG */
/* @(#)14 1.57.4.2 src/bos/kernel/net/net_globals.h, sysnet, bos72Q, q2019_13A4 2/6/19 00:43:42 */
/*
 * COMPONENT_NAME: SYSNET
 *
 * FUNCTIONS:
 *
 * ORIGINS: 27,85
 *
 *
 * (C) COPYRIGHT International Business Machines Corp. 1988,1993
 * All Rights Reserved
 * Licensed Materials - Property of IBM
 * US Government Users Restricted Rights - Use, duplication or
 * disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
 */
/*
 * (c) Copyright 1990, 1991, 1992, 1993 OPEN SOFTWARE FOUNDATION, INC.
 * ALL RIGHTS RESERVED
 */
/*
 * OSF/1 1.2
 */
/*
 * Copyright (C) 1988,1989 Encore Computer Corporation. All Rights Reserved
 *
 * Property of Encore Computer Corporation.
 * This software is made available solely pursuant to the terms of
 * a software license agreement which governs its use. Unauthorized
 * duplication, distribution or sale are strictly prohibited.
 *
 */

/*
 * Global #defines for OSF/1 networking.
 *
 * Ugly as this file is, it makes the code a lot cleaner!
 */

#ifndef _NET_GLOBALS_H_
#define _NET_GLOBALS_H_

#include

#ifdef _KERNEL
#include
#include
#include
#include
#include
#include
#include
#ifndef _H_TYPES
#include <sys/types.h>		/* eye_catch8b_t */
#endif
#ifndef _H_EYEC
#include <sys/eyec.h>		/* __EYEC8() */
#endif
#endif /* _KERNEL */

#ifdef __cplusplus
extern "C" {
#endif

void NET_LWSYNC(void);
#pragma mc_func NET_LWSYNC { "7c2004ac" }	/* lwsync */
#pragma reg_killed_by NET_LWSYNC

/*
 * Stuff to fix #defines in if.h.
 */
#ifdef simple_lock_data_t
#undef simple_lock_data_t
#endif
#ifdef lock_data_t
#undef lock_data_t
#endif

typedef Simple_lock	simple_lock_data_t;
typedef Complex_lock	lock_data_t;
typedef int		*task_t;

#define lock_init2(lp, s, type)	lock_init(lp, s)

#define NETNHSQUE 128
#define netsqhash(X) \
	(&nethsque[(((long)(X) >> 12) + ((long)(X) >> 8) + (long)(X)) & (NETNHSQUE-1)])
extern tid_t nethsque[];

#define assert_wait(addr, intr) \
	e_assert_wait(netsqhash(addr), intr)
#define assert_wait_mesg(addr, intr, msg) \
	e_assert_wait(netsqhash(addr), intr)
#define clear_wait(thread, result, flag) \
	e_clear_wait((thread)->t_tid, result)
#define wakeup_one(addr) \
	e_wakeup_one(netsqhash(addr))
#define wakeup(addr) \
	e_wakeup(netsqhash(addr))
#define thread_wakeup(addr) \
	e_wakeup(netsqhash(addr))
#define current_thread()	(curthread)
#define thread_block()		e_block_thread()
#define thread_swappable(a, b)

#define PAGE_SIZE	4096
#define MAXALLOCSAVE	(32 * PAGE_SIZE)	/* param.h in osf */
#define THEWALL_MAX_64BIT	(MAXNETKMEM << ...)
#define queue_init(q)	((q)->next = (q)->prev = q)
#define pfind(pgid)	(pgid)
#define BM(x)		x
#define P_UNREF(p)
task_t first_task;

/*
 * These are default settings.  Either or both of locking and spl are valid
 * for 1 or more cpus.  However, recommend locks for multis, non-locks for
 * unis.  The thread decision is dependent on several things.  To configure
 * both sockets and streams to use softnets requires locore or hardware
 * support.
 */
#define NETNCPUS NCPUS
#define NET_CACHELINESIZE 128

typedef int spl_t;

#define LOCK_ASSERTL_DECL
#ifdef DEBUG
#define LOCK_ASSERT(string, cond)	assert(cond)
#else /* DEBUG */
#define LOCK_ASSERT(string, cond)
#endif /* DEBUG */

#define LOCK_NETSTATS 0

#define LOCK_FREE(lp)		lock_free(lp)
#define NETSPL_DECL(s)		spl_t s;
#define NETSPL(s,level)		s = spl##level()
#define NETSPLX(s)		splx(s)
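
/*
 * Illustrative usage sketch for the NETSPL macros above; the spl level
 * name 'imp' (expanding to splimp()) and the surrounding code are
 * hypothetical:
 *
 *	NETSPL_DECL(s)
 *	NETSPL(s, imp);		raise the level, saving the old one in 's'
 *	... touch interrupt-protected state ...
 *	NETSPLX(s);		restore the saved level
 */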
#define NETSTAT_LOCK_DECL()	int _stats;
#if LOCK_NETSTATS
#define NETSTAT_LOCK(lockp)	_stats = disable_lock(PL_IMP, lockp)
#define NETSTAT_UNLOCK(lockp)	unlock_enable(_stats, lockp)
#define NETSTAT_LOCKINIT(lockp) { \
	lock_alloc(lockp, LOCK_ALLOC_PIN, IF_SLOCK, (short)lockp); \
	simple_lock_init(lockp); \
}
#else
#define NETSTAT_LOCKINIT(lockp)
#define NETSTAT_LOCK(lockp)
#define NETSTAT_UNLOCK(lockp)
#endif

/* ANSI-C compatibility */
#ifndef CONST
#define CONST const
#endif
#ifndef VOLATILE
#define VOLATILE volatile
#endif

/* types for 64 bit */
#ifdef __64BIT_KERNEL
typedef ulong	int32ulong64_t;
typedef caddr_t	int32caddr64_t;
typedef uint	short32uint64_t;
#else
typedef int	int32ulong64_t;
typedef int	int32caddr64_t;
typedef short	short32uint64_t;
#endif

#ifdef __cplusplus
}
#endif

/* Global function prototypes */
#include
#include
#include

#ifdef __cplusplus
extern "C" {
#endif

#ifdef _KERNEL
#define RTO_DFLT_LOW	1
#define RTO_DFLT_HIGH	64
#define RTO_DFLT_LIMIT	7
#define RTO_DFLT_LENGTH	13
#define RTO_DFLT_SHIFT	7

#ifdef __ia64
#define INET_STACK_DFLT	32
#else
#define INET_STACK_DFLT	16
#endif /* __ia64 */
#endif /* _KERNEL */

struct iftrace {
	int	kmid;
	int	promisc;
};

#define TCP_NDEBUG 100

#define IF_SIZE 256
extern long ifsize;

#ifdef _KERNEL
/* Kernel and Kernel Extensions should get these from one place */
extern CONST u_char etherbroadcastaddr[6];
extern CONST u_char ie5_broadcastaddr[6];
extern CONST u_char fddi_broadcastaddr[6];
#else
/* Others may still need these */
#ifndef etherbroadcastaddr
static CONST u_char etherbroadcastaddr[6] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#endif
#ifndef ie5_broadcastaddr
static CONST u_char ie5_broadcastaddr[6] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#endif
#ifndef fddi_broadcastaddr
static CONST u_char fddi_broadcastaddr[6] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#endif
#endif /* _KERNEL */

struct rw_lock {
	char	dummy1[NET_CACHELINESIZE - sizeof(simple_lock_data_t)];
	simple_lock_data_t simple_lock;
	/*--------------------------cache line boundary-----------------------*/
	int	read_cnt;

	/*
	 * Padding to make the hole explicit
	 */
	int32_t	rwl_padding;

	/*
	 * Maximum number of spins in RW_WRITE_LOCK before assuming that we
	 * have spun too long.
	 *
	 * NOTE: To minimize performance impact in RW_WRITE_LOCK(), this field
	 * needs to be in the same cache line as the 'read_cnt' field.
	 */
	uint64_t rwl_rw_write_lock_max_spins;

	/*
	 * Eye-catcher for this structure
	 */
	eye_catch8b_t rwl_eyec;

	/*
	 * Eye-catcher values
	 */
#	define EYEC_RW_LOCK_INITIALIZED   __EYEC8('r','w','l','k','N','E','T','I')
#	define EYEC_RW_LOCK_UNINITIALIZED __EYEC8('r','w','l','k','N','E','T','U')

	/*
	 * Padding to next cache line boundary
	 */
	char	dummy2[NET_CACHELINESIZE - (3 * sizeof(uint64_t))];
	/*--------------------------cache line boundary-----------------------*/
};

void network_rtec_rw_lock_hang(struct rw_lock *);
extern const uint64_t net_rw_write_lock_max_spins;

/*
 * NOTE: Callers of RW_LOCKINIT() are expected to pass 'M_WAITOK' in the
 * 'malloc_flags' arg, as this macro assumes that memory allocation done
 * by NET_MALLOC() always succeeds.
 */
#define RW_LOCKINIT(lock, size, type, malloc_flags, lock_flags, class, occurrence)\
{ \
	NET_MALLOC((lock), struct rw_lock *, (size), (type), (malloc_flags));\
	bzero( (lock), (size) ); \
	lock_alloc(&((lock)->simple_lock), (lock_flags), (class), (occurrence)); \
	simple_lock_init(&((lock)->simple_lock)); \
	(lock)->read_cnt = 0; \
	(lock)->rwl_rw_write_lock_max_spins = net_rw_write_lock_max_spins; \
	(lock)->rwl_eyec = EYEC_RW_LOCK_INITIALIZED; \
}

/*
 * NOTE: rw locks allocated with RW_LOCK_ALLOC() must be freed by calling
 * RW_LOCK_FREE().
 */
#define RW_LOCK_ALLOC(_lock, _class, _occurrence) \
	RW_LOCKINIT(_lock, sizeof(struct rw_lock), M_LOCKF, M_WAITOK, \
		    LOCK_ALLOC_PIN, _class, _occurrence)

#define RW_LOCK_FREE(_lock) \
	do { \
		(_lock)->rwl_eyec = EYEC_RW_LOCK_UNINITIALIZED; \
		lock_free(&(_lock)->simple_lock); \
		NET_FREE(_lock, M_LOCKF); \
	} while (0)
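
/*
 * Illustrative lifecycle sketch for the allocation macros above;
 * 'foo_lock' and the lock class 'FOO_LOCK_CLASS' are hypothetical
 * placeholders for a real class/occurrence pair:
 *
 *	struct rw_lock *foo_lock;
 *
 *	RW_LOCK_ALLOC(foo_lock, FOO_LOCK_CLASS, 0);
 *	... take and release foo_lock with the RW_* macros below ...
 *	RW_LOCK_FREE(foo_lock);
 */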
#define RW_WRITE_LOCK(lock, old_pri, new_pri) \
{ \
	(old_pri) = disable_lock((new_pri), &((lock)->simple_lock)); \
	{ \
		volatile int *myvalptr = &(lock)->read_cnt; \
		uint64_t spin_count = 0; \
		uint64_t max_spin_count = \
			(lock)->rwl_rw_write_lock_max_spins; \
		boolean_t hang_detected = FALSE; \
		while (*myvalptr) { \
			spin_count++; \
			/* Check if we have spun too long: */ \
			if (spin_count == max_spin_count) { \
				__INFREQUENT; \
				if (!hang_detected) { \
					hang_detected = TRUE; \
					network_rtec_rw_lock_hang(lock); \
				} \
			} \
		} \
	} \
}

#define RW_WRITE_UNLOCK(lock, old_pri) \
	unlock_enable((old_pri), &((lock)->simple_lock));

#define RW_READ_LOCK(lock, old_pri, new_pri, read_pri) \
{ \
	(read_pri) = i_disable(new_pri); \
	(old_pri) = disable_lock((new_pri), &((lock)->simple_lock)); \
	fetch_and_add(&((lock)->read_cnt), 1); \
	unlock_enable((old_pri), &((lock)->simple_lock)); \
}

#define RW_READ_UNLOCK(lock, old_pri) { \
	NET_LWSYNC(); \
	fetch_and_add(&((lock)->read_cnt), -1); \
	i_enable(old_pri); \
}

/*-----------------------------------------
 * Defect 629937
 *
 * RW_WRITE_TO_READ can be called when you wish to convert a WRITE lock
 * to a READ lock.
 *
 * To switch back to a WRITE lock, call RW_READ_TO_WRITE.
 *
 * You must convert back to the WRITE lock and unlock it -- do not simply
 * call RW_READ_UNLOCK, because that may enable an incorrect priority.
 */
#define RW_WRITE_TO_READ(lock) \
{ \
	fetch_and_add(&((lock)->read_cnt), 1); \
	simple_unlock(&((lock)->simple_lock)); \
}

#define RW_READ_TO_WRITE(lock) \
{ \
	fetch_and_add(&((lock)->read_cnt), -1); \
	simple_lock(&((lock)->simple_lock)); \
}
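
/*
 * Illustrative sketch of the downgrade/upgrade sequence described in the
 * note above; 'foo_lock' is hypothetical, and PL_IMP is used as in the
 * NETSTAT macros earlier in this file.  Per the note, the final release
 * goes through RW_READ_TO_WRITE + RW_WRITE_UNLOCK so that the priority
 * saved by RW_WRITE_LOCK is the one restored:
 *
 *	int opri;
 *
 *	RW_WRITE_LOCK(foo_lock, opri, PL_IMP);
 *	... modify the protected state exclusively ...
 *	RW_WRITE_TO_READ(foo_lock);
 *	... read-only phase; other readers may now enter ...
 *	RW_READ_TO_WRITE(foo_lock);
 *	RW_WRITE_UNLOCK(foo_lock, opri);
 */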
/*
 * This version of RW_WRITE_LOCK does not spin while holding the lock,
 * but rather releases it and re-acquires it before checking the
 * reference count again.  Because the lock is repeatedly released and
 * re-acquired, nested readers (i.e. functions that grab the read lock
 * and that are called by functions which already hold the read lock)
 * get a chance to grab the lock, increment the reference count, and
 * proceed, even if another thread is trying to grab the same lock as a
 * write lock.
 */
#define RW_WRITE_LOCK_RELEASE(lock, old_pri, new_pri) \
{ \
	(old_pri) = disable_lock((new_pri), &((lock)->simple_lock)); \
	{ \
		volatile int *myvalptr = &(lock)->read_cnt; \
		while (*myvalptr) \
		{ \
			unlock_enable((old_pri), &((lock)->simple_lock)); \
			(old_pri) = disable_lock((new_pri), \
						 &((lock)->simple_lock)); \
		} \
	} \
}

/*
 * These macros implement read locks *without* disabling interrupts
 * (i.e. without issuing i_disable()/i_enable() calls).
 * They are useful when the code is such that, although a read lock must
 * be held to prevent the state from changing during an operation,
 * interrupts must remain enabled to obtain the desired behavior.
 * They must be used with caution: if a thread that is holding such a
 * read lock is interrupted by a thread trying to grab the same lock as
 * a write lock, a deadlock occurs.
 * Thus, this version of read locks, where interrupts are NOT disabled
 * and re-enabled, must ONLY be used when ALL functions that attempt to
 * grab the write lock are called exclusively in the process context
 * (and NEVER in the interrupt context).
 */
#define RW_READ_LOCK_NO_DISABLE(lock, old_pri, new_pri) \
{ \
	(old_pri) = disable_lock((new_pri), &((lock)->simple_lock)); \
	fetch_and_add(&((lock)->read_cnt), 1); \
	unlock_enable((old_pri), &((lock)->simple_lock)); \
}

#define RW_READ_UNLOCK_NO_ENABLE(lock) \
{ \
	fetch_and_add(&((lock)->read_cnt), -1); \
}

/*
 * Macro for going from an exclusive write lock to a shared read lock
 * where interrupts are not being disabled for the read lock: the read
 * count is incremented with the lock held, and then the lock is released.
 */
#define RW_WRITE_TO_READ_LOCK_NO_DISABLE(lock, old_pri) \
{ \
	fetch_and_add(&((lock)->read_cnt), 1); \
	unlock_enable((old_pri), &((lock)->simple_lock)); \
}

/*
 * Macro for going from a shared read lock to an exclusive write lock
 * where interrupts are not being re-enabled for the read lock: we
 * decrement the read count and then attempt to grab the write lock for
 * exclusive use.
 */
#define RW_READ_TO_WRITE_LOCK_NO_ENABLE(lock, old_pri, new_pri) \
{ \
	RW_READ_UNLOCK_NO_ENABLE(lock) \
	RW_WRITE_LOCK_RELEASE(lock, old_pri, new_pri) \
}

#ifdef __cplusplus
}
#endif

#endif /* _NET_GLOBALS_H_ */