/* IBM_PROLOG_BEGIN_TAG                                                   */
/* This is an automatically generated prolog.                             */
/*                                                                        */
/* bos72V src/bos/kernel/sys/buf.h 1.51.1.11                              */
/*                                                                        */
/* Licensed Materials - Property of IBM                                   */
/*                                                                        */
/* COPYRIGHT International Business Machines Corp. 1989,2020              */
/* All Rights Reserved                                                    */
/*                                                                        */
/* US Government Users Restricted Rights - Use, duplication or            */
/* disclosure restricted by GSA ADP Schedule Contract with IBM Corp.      */
/*                                                                        */
/* IBM_PROLOG_END_TAG                                                     */
/* @(#)05     1.51.1.11  src/bos/kernel/sys/buf.h, sysios, bos72V, v2020_37A7 9/3/20 13:50:20 */
#ifndef _H_BUF
#define _H_BUF
/*
 * COMPONENT_NAME: (SYSIOS) Buffer Header structure
 *
 * ORIGINS: 3, 27, 83
 *
 * (C) COPYRIGHT International Business Machines Corp. 1989, 2013
 * All Rights Reserved
 * Licensed Materials - Property of IBM
 *
 * US Government Users Restricted Rights - Use, duplication or
 * disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
 */
/*
 *   LEVEL 1,  5 Years Bull Confidential Information
 */

/*
 *  Buffer cache buffer header
 *  A buffer header contains all the information required to perform block
 *  I/O.  It is the primary interface to the bottom half of block device
 *  drivers.  These drivers are needed for all file system and paging
 *  devices.  In AIX version 3, the traditional strategy() interface is
 *  extended as follows:
 *
 *    1.   The device driver strategy() routine is called with a
 *	   list of buf structures, chained using the av_forw pointer.
 *	   The last entry in this list has a NULL av_forw pointer.
 *
 *    2.   When the operation is completed, and the driver calls
 *	   iodone(), the b_iodone function is scheduled to run as an
 *	   INTIODONE software interrupt handler.  This function is
 *	   passed the buf struct address as its argument.
 *
 *  Buf structures are allocated by I/O requesters, and contain fields
 *  representing the state of some associated data page.  The device driver
 *  must leave most of the fields in this structure intact.  It is allowed
 *  to use av_forw and av_back to queue active requests, and it must set
 *  b_resid on return.	If there was an error, it sets B_ERROR in b_flags,
 *  and returns an errno value in b_error.
 *
 *  The buf struct and its associated data page must be pinned before
 *  calling the strategy() routine.  Block driver bottom halves run
 *  without access to user process context, and are not allowed to page
 *  fault.
 *
 *		    Kernel buffer cache management
 *
 * The block I/O routines use a pool of buf structures to manage
 * the kernel buffer cache.  Each buffer in the pool is usually
 * doubly linked into 2 lists:
 *
 *   1)  a hash list if it has useful contents, and
 *   2)  a list of blocks available for allocation.
 *
 * A buffer is on the available list, and can be reassigned to another
 * disk block, if and only if it is not marked BUSY.  When a buffer
 * is busy, the available-list pointers can be used for other purposes.
 * Most drivers use the forward ptr as a link in their I/O active queue.
 * A buffer header contains all the information required to perform I/O.
 * Most of the routines which manipulate these things are in bio.c.
 */
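
/*
 *  Illustrative sketch (not part of this interface): one way a requester
 *  could chain two pinned buf headers through av_forw and hand them to a
 *  block device driver, per the description above.  The exact use of the
 *  devstrat() kernel service, the example_iodone() handler, and log_error()
 *  are assumptions used only for illustration.
 *
 *	static void
 *	example_iodone(struct buf *bp)
 *	{
 *		if (bp->b_flags & B_ERROR)
 *			log_error(bp->b_error, bp->b_resid);
 *	}
 *
 *	b1->b_flags  = B_READ;
 *	b1->b_iodone = example_iodone;
 *	b1->av_forw  = b2;
 *	b2->b_flags  = B_READ;
 *	b2->b_iodone = example_iodone;
 *	b2->av_forw  = NULL;
 *	rc = devstrat(b1);
 */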

#ifdef __cplusplus
extern "C" {
#endif

#ifndef _H_TYPES
#include <sys/types.h>
#endif
 
#ifndef _H_KERRNO
#include <sys/kerrno.h>
#endif
 
#ifndef _H_XMEM
#include <sys/xmem.h>
#endif
 
#ifndef _H_SYS_TIME
#include <sys/time.h>
#endif
 
#ifndef _H_SYSRAS
#include <sys/ras.h>
#endif

#ifdef __cplusplus
struct vnode;
#endif

struct buf {				/* buffer header		 */
	__long64_t b_flags;		/* flag word (see defines below) */

	struct	buf *b_forw;		/* hash list forward link	 */
	struct	buf *b_back;		/* hash list backward link	 */
	struct	buf *av_forw;		/* free list forward link	 */
	struct	buf *av_back;		/* free list backward link	 */

	void	(*b_iodone)();		/* ptr to iodone routine	 */
	struct	vnode *b_vp;		/* vnode associated with block	 */
	dev_t	b_dev;			/* major+minor device name	 */
	daddr_t b_blkno;		/* block # on device or in file  */

	union {
	    caddr_t  b_addr;		/* buffer address		 */
	} b_un;

	__ulong64_t  b_bcount;		/* transfer count, OR		  */
					/* #blks in list (bfreelist only) */
	char	     b_error;		/* returned after I/O		  */
	__ulong64_t  b_resid;		/* words not xferred after error  */
	__long64_t   b_work;		/* work area for device drivers   */
	int	     b_options; 	/* readx/writex extension options */
	tid_t	     b_event;		/* anchor for event list	  */
	struct timestruc_t b_start;	/* request start time		  */
	struct	     xmem b_xmemd;	/* xmem descriptor		  */
};

#define	b_baddr		b_un.b_addr	/* address of data		  */

/*
 * These flags are kept in b_flags.
 */
#define B_WRITE 	(long)0x0000	/* non-read pseudo-flag */
#define B_READ		(long)0x0001	/* read when I/O occurs */
#define B_DONE	 	(long)0x0002	/* I/O complete */
#define B_ERROR 	(long)0x0004	/* error detected */
#define B_BUSY		(long)0x0008	/* in use or I/O in progress */
#define B_WLM_UNMNGD	(long)0x0010	/* not managed by WLM regulation */
#define B_INFLIGHT	(long)0x0020	/* this request is in-flight */
#define B_LOGDEV        (long)0x0040    /* request to/from the log device */
#define B_AGE		(long)0x0080	/* put at hd of freelst when released */
#define B_ASYNC 	(long)0x0100	/* don't wait for I/O completion */
#define B_DELWRI	(long)0x0200	/* don't write till blk is reassigned */
#define B_NOHIDE	(long)0x0400	/* don't hide pages during dma xfer */
#define B_STALE 	(long)0x0800	/* data in buffer is no longer valid */
#define B_MORE_DONE	(long)0x1000	/* more buffers to be processed */
#define B_PFSTORE 	(long)0x2000	/* store operation */
#define B_PFPROT 	(long)0x4000	/* protection violation */
#define B_SPLIT 	(long)0x8000	/* ok to enable split read/write */
#define B_PFEOF 	(long)0x10000	/* check for ref. beyond end-of-file */

				/* write of smudged page (async client seg) */
#define B_COMMIT	(long)0x20000 	
#define B_MPSAFE	(long)0x40000 /* Invoker of strategy() is MP safe */

#ifdef _KERNEL
#define B_MPSAFE_INITIAL (long)0x80000 /* devstrat() converts B_MPSAFE into */
				/*	this flag */
#endif /* _KERNEL */

#ifdef _KERNSYS
#define B_COMPACTED	(long)0x100000 /* compacted coalesce list */
#define	B_DONTUNPIN	(long)0x200000 /* pin() failed - don't unpin buf */

#endif /* _KERNSYS */

#define B_BUSDMA        (long)0x400000
#define B_DONE_ASYNC	(long)0x800000 /* ASYNC write completed */
#define B_SETMOD        (long)0x1000000 /* fs -> VMM: page is modified */

/*
 * LVM lost I/O detection for system hang feature
 */
#define	B_LIOS		(long)0x1000000		/* buf scanned for lost I/O */
#define	B_LIOD		(long)0x2000000		/* buf detected as lost I/O */

/*
 * External pager buffer
 */
#define	B_XPAGER	(long)0x80000000	/* D_XPAGER */
#define	B_XREADONLY	(long)0x40000000	/* read-only page */
/* lvm logical track split flag */
#define	B_XSPLIT	(long)0x30000000	/* split i/o across lt boundary */
#define	B_XSPLIT1	(long)0x10000000	/* split i/o 1st half */
#define	B_XSPLIT2	(long)0x20000000	/* split i/o 2nd half */

/* special iodone processing */
#define B_MIGRATABLE	(long)0x100000000	/* Can be moved to another CPU*/
#define B_LIST		(long)0x200000000	/* Chained by av_forw pointer */

#define B_RESVD_FLAG1	(long)0x400000000LL	/* Reserved flag */

/*
 * These flags are kept in bx_flags.
 *
 * The Lower 32 bits are reserved for use by the Kernel.
 * The Upper 32 bits are reserved for use by Kernel Extensions.
 */
#define BX_FLAGS_KERNEL_MASK 0x00000000FFFFFFFFLL /* Mask for KERNEL Flags  */
#define BX_FLAGS_KEXT_MASK   0xFFFFFFFF00000000LL /* Mask for KERNEXT Flags */

/* Kernel Extension bx_flags. */
#define BX_DK_EXT 0x0000000100000000LL /* bx_work2 contains pointer to      */
                                       /* disk_bufx_ext for disk driver     */
                                       /* specific information.             */
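
/*
 * Illustrative sketch: testing a kernel-extension flag before using the
 * field it qualifies.  The struct disk_bufx_ext layout and the bufxp and
 * dbep pointers are assumptions shown only to demonstrate the
 * bx_flags/bx_work2 convention.
 *
 *	struct disk_bufx_ext *dbep = NULL;
 *
 *	if (bufxp->bx_flags & BX_DK_EXT)
 *		dbep = (struct disk_bufx_ext *)bufxp->bx_work2;
 */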


/* Request the underlying storage controller to release the block ranges
 * specified in the bufx.
 */
#define BX_FLAGS_LBP_REL                    (0x0000000200000000LL)

/* Valid only with BX_FLAGS_LBP_REL. Tells the disk driver that the caller 
 * does not want to wait if no buffer is available to process the requested 
 * block release operation.
 */
#define BX_FLAGS_NOWAIT_4BUF                (0x0000000400000000LL)

/* 
 * Set by the caller to indicate that the bufx structure includes a pointer to
 * an extended error structure in the 'bufx.bx_error_detail'. This extended
 * error structure shall be operation specific. For example, if BX_FLAGS_LBP_REL
 * is set then the extended error structure shall be of type 'disk_lpb_err_t'.
 *
 * It is also assumed that the extended error structure uses the same kernel
 * protection keys as the 'bufx'.
 */
#define BX_FLAGS_EXT_ERR                    (0x0000000800000000LL)


/* Set by the disk driver to indicate that it has filled in appropriate values
 * in the extended error structure (provided by the caller via the
 * 'bufx.bx_error_detail' field).
 *
 * This value shall only be set if the caller had set BX_FLAGS_EXT_ERR in
 * 'bx_flags' and had provided a non-NULL 'bufx.bx_error_detail'. 
 */ 
#define BX_FLAGS_EXT_ERR_VALID              (0x0000001000000000LL)
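
/*
 * Illustrative sketch of the extended-error handshake described above.
 * The err variable, its initialization, and handle_lbp_error() are
 * assumptions; only the flag and field names come from this header.
 *
 *  Caller, before issuing the request:
 *	disk_lpb_err_t err;
 *	bufxp->bx_flags |= BX_FLAGS_LBP_REL | BX_FLAGS_EXT_ERR;
 *	bufxp->bx_error_detail = (__ulong64_t)&err;
 *
 *  Caller, after I/O completion:
 *	if ((bufxp->b_flags & B_ERROR) &&
 *	    (bufxp->bx_flags & BX_FLAGS_EXT_ERR_VALID))
 *		handle_lbp_error(&err);
 */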

/* The following 2 flags are reserved for future use. */
#define BX_FLAGS_USE_UNMAP                  (0x0000002000000000LL)
#define BX_FLAGS_USE_WS                     (0x0000004000000000LL)


/* The following flag is specifically for VIOS support of iSCSI disks       */
#define BX_DK_VIO_HANDLE (0x0000008000000000LL)  /* bx_work3 contains the   */
                                       /* vio handle to be used for calls   */
                                       /* to functions to map and copy data */

/* Set if a command issued with this bufx should use the fast fail timeout
 * attribute, failing the command once the fast fail timeout period expires.
 */
#define BX_FLAGS_FAST_FAIL (0x0000010000000000LL)

struct bufx {				/* buffer header		    */
	__long64_t b_flags;		/* flag word (see defines below)    */

	struct	bufx *b_forw;		/* hash list forward link	    */
	struct	bufx *b_back;		/* hash list backward link	    */
	struct	bufx *av_forw;		/* free list forward link	    */
	struct	bufx *av_back;		/* free list backward link	    */

	void	(*b_iodone)();		/* ptr to iodone routine            */
	struct	vnode *b_vp;		/* vnode associated with block	    */
	dev_t	b_dev;			/* major+minor device name	    */
	daddr_t b_blkno;		/* block # on device or in file     */

	union {
	    caddr_t  b_addr;		/* buffer address	            */
	} b_un;

	__ulong64_t  b_bcount;		/* transfer count, OR		    */
					/* #blks in list (bfreelist only)   */
	char	    b_error;		/* returned after I/O		    */
	char	    b_pad[3];		/* 3 bytes of padding		    */
	uint	    bx_refptrtop;	/* top half of reference pointer    */
	__ulong64_t  b_resid;		/* words not xferred after error    */
	__long64_t   b_work;		/* work area for device drivers     */
	int	    b_options;		/* readx/writex extension options   */
	uint	    bx_refptrbot;	/* bottom half of reference pointer */
	tid_t	    b_event;		/* anchor for event list	    */
	struct timestruc_t b_start;	/* request start time		    */
	struct xmem b_xmemd;		/* xmem descriptor		    */
	int	    bx_version;		/* bufx version number		    */
	eye_catch4b_t bx_eyecatcher;	/* eyecatcher: "bufx"               */
	__long64_t   bx_flags;		/* bufx flags; this field has been  */
					/* added because b_flags is running */
					/* out of bits.                     */
	__ulong64_t bx_error_detail;	/* additional info about error      */
	__ulong64_t bx_work2;		/* Extra work area                  */
	ushort    bx_io_priority;	/* IO Priority			    */
	ushort    bx_io_cache_hint;	/* IO cache hint		    */
	ushort    bx_resvd1;		/* Padding for 8 bytes alignment    */

	/* eRAS fields */
	char      bx_originator;	/* The originator of bufx           */
	char      bx_dbyte_trc;		/* Data byte tracing enable/disable */
	ushort    bx_state;		/* The state of bufx                */
	short     bx_memtrclvl;		/* private/LMT trace level          */
	short     bx_systrclvl;		/* system trace level               */
	short     bx_errlvl;		/* The runtime error check level    */
	__ulong64_t bx_correlator;	/* Correlator                       */
	__ulong64_t bx_c_correlator;	/* Coalesced correlator             */
	__ulong64_t bx_IO_init_time;	/* The bufx initialization time     */
	__ulong64_t bx_IO_time;		/* Time of last major state change  */
	__ulong64_t bx_work3;		/* Field for the originator's use   */
	__ulong64_t bx_vio_tag;		/* Field for VSCSI initiator/target */
	__ulong64_t bx_kerrno;		/* kerrno or errno if b_error is set*/

/*
 * Add new fields as needed by claiming some of the padding space.
 * There should be enough padding space that it will not be necessary
 * to increase the size of the structure in the foreseeable future.
 */
};

#define BUFX_VER0		0
#define BUFX_VER1		1
#define BUFX_VER0_SIZE (offsetof(struct bufx, bx_kerrno)+sizeof(__ulong64_t))
#define BUFX_VER1_SIZE BUFX_VER0_SIZE
#define BUFX_VERSION	BUFX_VER1	 /* current version - to be updated  *
					  * when new version is created      */
#define BUFX_SIZE	sizeof(struct bufx)

/* b_flags value for bufx */
#define B_BUFX		(long)0x4000000	 /* structure is a bufx */
#define B_BUFX_INITIAL	(long)0x8000000	 /* structure is a bufx */

/* eye catchers */
#define EYEC_BUFX	__EYEC4('b','u','f','x') /* bufx */
#define EYEC_BUFX_INVAL	__EYEC4('b','u','f','i') /* bufi */

#define BUFX_ADDR(bufxp)						 \
	(((unsigned long)(((struct bufx *)bufxp)->bx_refptrtop) << 32) | \
		(unsigned long)(((struct bufx *)bufxp)->bx_refptrbot))

#define IS_BUFX(bufxp)							\
	((((struct bufx *)bufxp)->b_flags & (B_BUFX | B_BUFX_INITIAL)) 	\
	&& (BUFX_ADDR(bufxp) == (unsigned long)bufxp)			\
	&& (((struct bufx *)bufxp)->bx_eyecatcher == EYEC_BUFX))

#define BUFX_VALIDATE(bufxp)						   \
	if ((((struct bufx *)bufxp)->b_flags & (B_BUFX | B_BUFX_INITIAL))  \
			&& !(IS_BUFX(bufxp)))				   \
		((struct bufx *)bufxp)->b_flags &= ~(B_BUFX | B_BUFX_INITIAL)

#define BUF_INIT(bufp)		memset((void *)bufp, 0, sizeof(struct buf))
#define BUF_INVALIDATE(bufp)	memset((void *)bufp, 0, sizeof(struct buf))
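
/*
 * Illustrative sketch: a requester initializes a pinned bufx with
 * bufx_init() (declared later in this header) before issuing it, and a
 * driver strategy() routine uses IS_BUFX() to decide whether the extended
 * fields are present.  The bxp and bp pointers are hypothetical.
 *
 *  Requester:
 *	struct bufx *bxp;		(points to pinned storage)
 *	long rc = bufx_init(bxp, BUFX_VERSION);
 *	bxp->bx_originator = B_ORIG_LVM;
 *
 *  Driver strategy() routine, given struct buf *bp:
 *	if (IS_BUFX(bp))
 *		((struct bufx *)bp)->bx_state = B_ST_DISK_ACTIVE;
 */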

/* Defines for the bx_state field: Block values from 0x00 to 0x9F are reserved
 * for IBM use. Block values from 0xA0 to 0xFF are available to third party
 * software. As a convention, the lowest nibble (4 bits) of the state is
 * consistent across all layers to make it easier to remember while debugging:
 * PENDING is always 1, ACTIVE is always 2, DONE is always 3, and ERROR is
 * always 4. (An illustrative third-party sketch follows the state defines
 * below.)
 */
#define B_ST_MACRO(_block, _state)  (((_block & 0xFF) << 8) | (_state & 0xFF))

#define B_ST_FREE		B_ST_MACRO(0x00,0x00)		/* 0x0000 */

/* VMM */
#define B_ST_VMM_PENDING	B_ST_MACRO(0x01,0x01)		/* 0x0101 */
#define B_ST_VMM_ACTIVE		B_ST_MACRO(0x01,0x02)		/* 0x0102 */
#define B_ST_VMM_DONE		B_ST_MACRO(0x01,0x03)		/* 0x0103 */
#define B_ST_VMM_ERROR		B_ST_MACRO(0x01,0x04)		/* 0x0104 */

/* LVM */
#define B_ST_LVM_PENDING	B_ST_MACRO(0x02,0x01)		/* 0x0201 */
#define B_ST_LVM_ACTIVE		B_ST_MACRO(0x02,0x02)		/* 0x0202 */
#define B_ST_LVM_DONE		B_ST_MACRO(0x02,0x03)		/* 0x0203 */
#define B_ST_LVM_ERROR		B_ST_MACRO(0x02,0x04)		/* 0x0204 */
#define B_ST_LVM_PENDQ		B_ST_MACRO(0x02,0x05)		/* 0x0205 */
#define B_ST_LVM_REFRESH	B_ST_MACRO(0x02,0x06)		/* 0x0206 */
#define B_ST_LVM_PVWAIT		B_ST_MACRO(0x02,0x07)		/* 0x0207 */
#define B_ST_LVM_CA_HLD 	B_ST_MACRO(0x02,0x08)		/* 0x0208 */
#define B_ST_LVM_BB_HLD		B_ST_MACRO(0x02,0x09)		/* 0x0209 */
#define B_ST_LVM_CONCSYNC_TERM 	B_ST_MACRO(0x02,0x0A)		/* 0x020A */
#define B_ST_LVM_SA_HOLDING	B_ST_MACRO(0x02,0xA1)		/* 0x02A1 */
#define B_ST_LVM_SA_ACTIVE 	B_ST_MACRO(0x02,0xA2)		/* 0x02A2 */
#define B_ST_LVM_MERGE_WAIT  	B_ST_MACRO(0x02,0xA3) 		/* 0x02A3 */

/* DISK driver */
#define B_ST_DISK_PENDING	B_ST_MACRO(0x03,0x01)		/* 0x0301 */
#define B_ST_DISK_ACTIVE	B_ST_MACRO(0x03,0x02)		/* 0x0302 */
#define B_ST_DISK_DONE		B_ST_MACRO(0x03,0x03)		/* 0x0303 */
#define B_ST_DISK_ERROR		B_ST_MACRO(0x03,0x04)		/* 0x0304 */

/* Protocol Driver */
#define B_ST_PROTO_PENDING	B_ST_MACRO(0x04,0x01)		/* 0x0401 */
#define B_ST_PROTO_ACTIVE	B_ST_MACRO(0x04,0x02)		/* 0x0402 */
#define B_ST_PROTO_DONE		B_ST_MACRO(0x04,0x03)		/* 0x0403 */
#define B_ST_PROTO_ERROR	B_ST_MACRO(0x04,0x04)		/* 0x0404 */

/* Adapter Driver */
#define B_ST_ADAPT_PENDING	B_ST_MACRO(0x05,0x01)		/* 0x0501 */
#define B_ST_ADAPT_ACTIVE	B_ST_MACRO(0x05,0x02)		/* 0x0502 */
#define B_ST_ADAPT_DONE		B_ST_MACRO(0x05,0x03)		/* 0x0503 */
#define B_ST_ADAPT_ERROR	B_ST_MACRO(0x05,0x04)		/* 0x0504 */
#define B_ST_ADAPT_PEND_ERROR	B_ST_MACRO(0x05,0x05)		/* 0x0505 */

/* uphysio() */
#define B_ST_UPHYSIO_PENDING	B_ST_MACRO(0x06,0x01)		/* 0x0601 */

/* Buffer cache */
#define B_ST_BUFCACHE_PENDING	B_ST_MACRO(0x07,0x01)		/* 0x0701 */

/* JFS2 */
#define B_ST_J2_UIO_PENDING	B_ST_MACRO(0x08,0x01)		/* 0x0801 */
#define B_ST_J2_UIO_ACTIVE	B_ST_MACRO(0x08,0x02)		/* 0x0802 */
#define B_ST_J2_UIO_SERV_Q	B_ST_MACRO(0x08,0x05)		/* 0x0805 */
#define B_ST_J2_SMREAD_ACTIVE	B_ST_MACRO(0x08,0x22)		/* 0x0822 */
#define B_ST_J2_SMREAD_ERROR	B_ST_MACRO(0x08,0x24)		/* 0x0824 */
#define B_ST_J2_COWPO_PENDING	B_ST_MACRO(0x08,0x31)		/* 0x0831 */
#define B_ST_J2_COWPO_DONE	B_ST_MACRO(0x08,0x33)		/* 0x0833 */
#define B_ST_J2_COWPI_PENDING	B_ST_MACRO(0x08,0x41)		/* 0x0841 */
#define B_ST_J2_BC_PENDING	B_ST_MACRO(0x08,0x51)		/* 0x0851 */
#define B_ST_J2_BC_ACTIVE	B_ST_MACRO(0x08,0x52)		/* 0x0852 */
#define B_ST_J2_BC_ERROR	B_ST_MACRO(0x08,0x54)		/* 0x0854 */
#define B_ST_J2_BC_CACHED	B_ST_MACRO(0x08,0x55)		/* 0x0855 */
#define B_ST_J2_LC_PENDING	B_ST_MACRO(0x08,0x61)		/* 0x0861 */
#define B_ST_J2_DIO_PENDING	B_ST_MACRO(0x08,0x71)		/* 0x0871 */

/* AIO */
#define B_ST_AIO_PENDING	B_ST_MACRO(0x09,0x01)		/* 0x0901 */
#define B_ST_AIO_ACTIVE		B_ST_MACRO(0x09,0x02)		/* 0x0902 */
#define B_ST_AIO_DONE		B_ST_MACRO(0x09,0x03)		/* 0x0903 */
#define B_ST_AIO_ERROR		B_ST_MACRO(0x09,0x04)		/* 0x0904 */

/* VSCSI Initiator */
#define B_ST_VSCSI_INIT_PENDING	B_ST_MACRO(0x0A,0x01)		/* 0x0A01 */

/* VSCSI Target */
#define B_ST_VSCSI_TARGET_ACTIVE	B_ST_MACRO(0x0B,0x02)	/* 0x0B02 */
#define B_ST_VSCSI_TARGET_DONE		B_ST_MACRO(0x0B,0x03)	/* 0x0B03 */
#define B_ST_VSCSI_TARGET_ERROR		B_ST_MACRO(0x0B,0x04)	/* 0x0B04 */

/* Loopback Device */
#define B_ST_LOOPBACK_PENDING		B_ST_MACRO(0x0C, 0x01)	/* 0x0C01 */
#define B_ST_LOOPBACK_ACTIVE		B_ST_MACRO(0x0C, 0x02)	/* 0x0C02 */
#define B_ST_LOOPBACK_DONE		B_ST_MACRO(0x0C, 0x03)	/* 0x0C03 */
#define B_ST_LOOPBACK_ERROR		B_ST_MACRO(0x0C, 0x04)	/* 0x0C04 */

/* hdcrypt encryption driver */
#define B_ST_HDCRYPT_PENDING	B_ST_MACRO(0x0D, 0x01) /* 0x0D01 */
#define B_ST_HDCRYPT_ACTIVE	B_ST_MACRO(0x0D, 0x02) /* 0x0D02 */
#define B_ST_HDCRYPT_DONE	B_ST_MACRO(0x0D, 0x03) /* 0x0D03 */
#define B_ST_HDCRYPT_ERROR	B_ST_MACRO(0x0D, 0x04) /* 0x0D04 */
#define B_ST_HDCRYPT_OP_DONE	B_ST_MACRO(0x0D, 0x13) /* 0x0D13 */
#define B_ST_HDCRYPT_OP_ERROR	B_ST_MACRO(0x0D, 0x14) /* 0x0D14 */

/* DPCOM */
#define B_ST_DPCOM_PENDING      B_ST_MACRO(0x10, 0x01) /* 0x1001 */
#define B_ST_DPCOM_ACTIVE       B_ST_MACRO(0x10, 0x02) /* 0x1002 */
#define B_ST_DPCOM_DONE         B_ST_MACRO(0x10, 0x03) /* 0x1003 */
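
/*
 * Illustrative sketch: third-party software claiming a state block above
 * 0x9F would follow the same low-nibble convention.  The MYDRV names below
 * are hypothetical and not part of this header.
 *
 *	#define B_ST_MYDRV_PENDING	B_ST_MACRO(0xA0,0x01)	(0xA001)
 *	#define B_ST_MYDRV_ACTIVE	B_ST_MACRO(0xA0,0x02)	(0xA002)
 *	#define B_ST_MYDRV_DONE		B_ST_MACRO(0xA0,0x03)	(0xA003)
 *	#define B_ST_MYDRV_ERROR	B_ST_MACRO(0xA0,0x04)	(0xA004)
 */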


/* Defines for bx_originator field: Values 0x00 to 0x9F are reserved for IBM 
 * use. Values above 0x9F are available for third party software.
 */
#define B_ORIG_UNKNOWN		0x00
#define B_ORIG_VMM		0x01
#define B_ORIG_LVM		0x02
#define B_ORIG_DISK		0x03
#define B_ORIG_PROTO		0x04
#define B_ORIG_UPHYSIO		0x05
#define B_ORIG_BUFCACHE		0x06
#define B_ORIG_J2		0x07
#define B_ORIG_AIO		0x08
#define B_ORIG_VSCSI_INIT	0x09
#define B_ORIG_VSCSI_TARGET	0x0A
#define B_ORIG_HDCRYPT		0x0B
#define B_ORIG_DPCOM		0x10

#define BUFX_TRC_PACK(_bpx) \
	((((__ulong64_t)(_bpx)->b_flags & 0xFFFFFFFFFFULL) << 24) | \
	((__ulong64_t)(_bpx)->bx_originator << 16) | \
	(__ulong64_t)(_bpx)->bx_state)

/* bufx tracing macros */
#define BUFX_HOOK0(cb, level, mem_dest, subhkid, bufx)                        \
    do {                                                                      \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
				((short)(level) <= (bufx)->bx_memtrclvl))     \
           mtrchook1(HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,     \
						(ulong_t) (bufx), MTRC_RARE); \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook1(HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,     \
					(ulong_t) (bufx), MTRC_COMMON);       \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) &&  \
				((short)(level) <= (bufx)->bx_memtrclvl))     \
           ct_hook1((cb), HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,\
							(ulong_t) (bufx));    \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
            __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,    \
							(ulong_t) (bufx));    \
	}                                                                     \
     } while (0)

#define BUFX_HOOK1(cb, level, mem_dest, subhkid, bufx, d1)                    \
    do {                                                                      \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0))     \
			&&((short)(level) <= (bufx)->bx_memtrclvl))           \
           mtrchook2(HKWD64_EXPAND((HKWD_BUFX|subhkid),16) | HKWD64_TMASK,    \
				(ulong_t) (bufx), (ulong_t) (d1), MTRC_RARE); \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook2(HKWD64_EXPAND((HKWD_BUFX|subhkid),16) | HKWD64_TMASK,    \
			(ulong_t) (bufx), (ulong_t) (d1), MTRC_COMMON);       \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) &&  \
			((short)(level) <= (bufx)->bx_memtrclvl))             \
           ct_hook2((cb), HKWD64_EXPAND((HKWD_BUFX|subhkid),16)|HKWD64_TMASK, \
				       (ulong_t) (bufx), (ulong_t) (d1));     \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
            __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),16) | HKWD64_TMASK,   \
					(ulong_t) (bufx), (ulong_t) (d1));    \
	}                                                                     \
     } while (0)

#define BUFX_HOOK2(cb, level, mem_dest, subhkid, bufx, d1, d2)                \
  do {                                                                        \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
			((short)(level) <= (bufx)->bx_memtrclvl))             \
           mtrchook3(HKWD64_EXPAND((HKWD_BUFX|subhkid),24) | HKWD64_TMASK,    \
	     (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), MTRC_RARE);    \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook3(HKWD64_EXPAND((HKWD_BUFX|subhkid),24) | HKWD64_TMASK,    \
	     (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), MTRC_COMMON);  \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           ct_hook3((cb), HKWD64_EXPAND((HKWD_BUFX|subhkid),24)|HKWD64_TMASK, \
		       (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2));     \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
            __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),24) | HKWD64_TMASK,   \
		      (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2));      \
	}                                                                     \
     } while (0)

#define BUFX_HOOK3(cb, level, mem_dest, subhkid, bufx, d1, d2, d3)            \
  do {                                                                        \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           mtrchook4(HKWD64_EXPAND((HKWD_BUFX|subhkid),32) | HKWD64_TMASK,    \
		(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2),             \
						(ulong_t) (d3), MTRC_RARE);   \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook4(HKWD64_EXPAND((HKWD_BUFX|subhkid),32) | HKWD64_TMASK,    \
		(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2),             \
						(ulong_t) (d3), MTRC_COMMON); \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           ct_hook4((cb), HKWD64_EXPAND((HKWD_BUFX|subhkid),32)|HKWD64_TMASK, \
       (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3));     \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
           __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),32) | HKWD64_TMASK,    \
	     (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3));\
	}                                                                     \
     } while (0)

#define BUFX_HOOK4(cb, level, mem_dest, subhkid, bufx, d1, d2, d3, d4)        \
  do {                                                                        \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           mtrchook5(HKWD64_EXPAND((HKWD_BUFX|subhkid),40) | HKWD64_TMASK,    \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4), MTRC_RARE);                           \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook5(HKWD64_EXPAND((HKWD_BUFX|subhkid),40) | HKWD64_TMASK,    \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4), MTRC_COMMON);                         \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           ct_hook5((cb), HKWD64_EXPAND((HKWD_BUFX|subhkid),40)|HKWD64_TMASK, \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4));                                      \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
           __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),40) | HKWD64_TMASK,    \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4));                                      \
	}                                                                     \
     } while (0)

/* The following macros can only be used by component-serialized code.
 * They are more efficient than the macros above.
 */
#define CTCS_BUFX_HOOK0(cb, level, mem_dest, subhkid, bufx)                   \
    do {                                                                      \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
				((short)(level) <= (bufx)->bx_memtrclvl))     \
           mtrchook1(HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,     \
						(ulong_t) (bufx), MTRC_RARE); \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook1(HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,     \
					(ulong_t) (bufx), MTRC_COMMON);       \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) && \
				((short)(level) <= (bufx)->bx_memtrclvl))     \
           ctcs_hook1((cb),                                                   \
			HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,  \
							(ulong_t) (bufx));    \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
            __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),8) | HKWD64_TMASK,    \
							(ulong_t) (bufx));    \
	}                                                                     \
     } while (0)

#define CTCS_BUFX_HOOK1(cb, level, mem_dest, subhkid, bufx, d1)               \
    do {                                                                      \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0))     \
			&&((short)(level) <= (bufx)->bx_memtrclvl))           \
           mtrchook2(HKWD64_EXPAND((HKWD_BUFX|subhkid),16) | HKWD64_TMASK,    \
				(ulong_t) (bufx), (ulong_t) (d1), MTRC_RARE); \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook2(HKWD64_EXPAND((HKWD_BUFX|subhkid),16) | HKWD64_TMASK,    \
			(ulong_t) (bufx), (ulong_t) (d1), MTRC_COMMON);       \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) && \
			((short)(level) <= (bufx)->bx_memtrclvl))             \
           ctcs_hook2((cb),                                                   \
			HKWD64_EXPAND((HKWD_BUFX|subhkid),16)|HKWD64_TMASK,   \
				       (ulong_t) (bufx), (ulong_t) (d1));     \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
            __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),16) | HKWD64_TMASK,   \
					(ulong_t) (bufx), (ulong_t) (d1));    \
	}                                                                     \
     } while (0)

#define CTCS_BUFX_HOOK2(cb, level, mem_dest, subhkid, bufx, d1, d2)           \
  do {                                                                        \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
			((short)(level) <= (bufx)->bx_memtrclvl))             \
           mtrchook3(HKWD64_EXPAND((HKWD_BUFX|subhkid),24) | HKWD64_TMASK,    \
	     (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), MTRC_RARE);    \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook3(HKWD64_EXPAND((HKWD_BUFX|subhkid),24) | HKWD64_TMASK,    \
	     (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), MTRC_COMMON);  \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) && \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           ctcs_hook3((cb),                                                   \
			HKWD64_EXPAND((HKWD_BUFX|subhkid),24)|HKWD64_TMASK,   \
		       (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2));     \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
            __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),24) | HKWD64_TMASK,   \
		      (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2));      \
	}                                                                     \
     } while (0)

#define CTCS_BUFX_HOOK3(cb, level, mem_dest, subhkid, bufx, d1, d2, d3)       \
  do {                                                                        \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           mtrchook4(HKWD64_EXPAND((HKWD_BUFX|subhkid),32) | HKWD64_TMASK,    \
		(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2),             \
						(ulong_t) (d3), MTRC_RARE);   \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook4(HKWD64_EXPAND((HKWD_BUFX|subhkid),32) | HKWD64_TMASK,    \
		(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2),             \
						(ulong_t) (d3), MTRC_COMMON); \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) && \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           ctcs_hook4((cb),                                                   \
			HKWD64_EXPAND((HKWD_BUFX|subhkid),32)|HKWD64_TMASK,   \
            (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3));\
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
           __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),32) | HKWD64_TMASK,    \
	     (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3));\
	}                                                                     \
     } while (0)

#define CTCS_BUFX_HOOK4(cb, level, mem_dest, subhkid, bufx, d1, d2, d3, d4)   \
  do {                                                                        \
        if (((mem_dest) & MT_RARE) && (!(rasrb_trace_memlevel((cb)) < 0)) &&  \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           mtrchook5(HKWD64_EXPAND((HKWD_BUFX|subhkid),40) | HKWD64_TMASK,    \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4), MTRC_RARE);                           \
                                                                              \
        if (((mem_dest) & MT_COMMON) && (!(rasrb_trace_memlevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_memtrclvl))                      \
           mtrchook5(HKWD64_EXPAND((HKWD_BUFX|subhkid),40) | HKWD64_TMASK,    \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4), MTRC_COMMON);                         \
                                                                              \
        if (((mem_dest) & MT_PRIV) && (!(rasrb_trace_privlevel((cb)) < 0)) && \
		((short)(level) <= (bufx)->bx_memtrclvl))                     \
           ctcs_hook5((cb),                                                   \
			HKWD64_EXPAND((HKWD_BUFX|subhkid),40)|HKWD64_TMASK,   \
	    (ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3), \
			(ulong_t) (d4));                                      \
                                                                              \
        if (((mem_dest) & MT_SYSTEM) && (!(rasrb_trace_syslevel((cb)) < 0))   \
	    && ((short)(level) <= (bufx)->bx_systrclvl)) {                    \
	    __INFREQUENT;                                                     \
           __TRCHOOK(HKWD64_EXPAND((HKWD_BUFX|subhkid),40) | HKWD64_TMASK,    \
	(ulong_t) (bufx), (ulong_t) (d1), (ulong_t) (d2), (ulong_t) (d3),     \
			(ulong_t) (d4));                                      \
	}                                                                     \
     } while (0)


/* Macro to check the error level in a bufx */
#define BUFX_ERR_LEVEL(_bufx)		(_bufx.bx_errlvl)

/* Macro for data byte tracing */
#define BUFX_DBYTE_HOOK(cb,level,mem_dest,subhkid,bufx,len,d1,d2,d3)   \
		BUFX_HOOK4(cb, level, mem_dest, subhkid, bufx, len, d1, d2, d3)

/* Macro to update bx_IO_time */
unsigned long mftbufx(void);
#pragma mc_func mftbufx    { "7c6c42a6" }          /* mfspr r3,TB */
#pragma reg_killed_by mftbufx      gr3
#define BUFX_UPDATE_TIME(_bufx)	(_bufx->bx_IO_time = mftbufx())

unsigned long get_timebase(void);

/* 
 * Structure and defines for register_bufx_region() and unregister_bufx_region()
 */
typedef struct {
	int version;		/* version of the structure */
	uint64_t region;	/* pointer, handle, etc of the bufx region */
	size_t size;		/* size of the bufx region in bytes */
	size_t elem_size; /* size of each element in the bufx region in bytes */
	uint bufx_offset; /* byte offset of bufx within an elem of bufx region*/
	ushort registered_by;	/* who registered the region */
	ushort flags;		/* flags */
} bufx_region_desc_t;

typedef void *	bufx_region_desc_handle_t ;

#define		BUFX_REGION_DESC_VER	0

#define BUFX_REGION_DESC_EYEC		__EYEC4('b','f','r','d') /* bfrd */
#define BUFX_REGION_DESC_EYEC_INVAL	__EYEC4('b','f','r','i') /* bfri */

/* "flags" values */
#define BUFX_REGION_LDATA	0x1	/* bufx region was created with 
					 * ldata_create(). "region" is ldata 
					 * handle */
#define BUFX_REGION_SHORTERM	0x2	/* bufx region is mapped for a short 
					 * period. */

/* Defines for the registered_by field: Values from 0x0000 to 0x999F are
 * reserved for IBM use. Values above 0x999F can be used by third party
 * software.
 */
#define BUFX_REGION_BY_UNKNOWN		0x0000
#define BUFX_REGION_BY_VMM		0x0001
#define BUFX_REGION_BY_LVM_USER		0x0002
#define BUFX_REGION_BY_LVM_VGSA		0x0003
#define BUFX_REGION_BY_DISK		0x0004
#define BUFX_REGION_BY_UPHYSIO		0x0005
#define BUFX_REGION_BY_BUFCACHE		0x0006
#define BUFX_REGION_BY_J2_USER		0x0007
#define BUFX_REGION_BY_J2_METADATA	0x0008
#define BUFX_REGION_BY_J2_SNAPSHOT	0x0009
#define BUFX_REGION_BY_J2_DIO		0x000A
#define BUFX_REGION_BY_AIO		0x000B
#define BUFX_REGION_BY_VSCSI_INIT	0x000C
#define BUFX_REGION_BY_VSCSI_TARGET	0x000D

kerrno_t register_bufx_region(	bufx_region_desc_t desc_in, 
				bufx_region_desc_handle_t *desc_out);
kerrno_t unregister_bufx_region(bufx_region_desc_handle_t desc);
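
/*
 * Illustrative sketch of registering a pinned region of request elements
 * that each embed a bufx.  The struct my_elem type, pool pointer, and NELEM
 * count are assumptions; the descriptor fields and services come from this
 * header.
 *
 *	bufx_region_desc_t desc;
 *	bufx_region_desc_handle_t handle;
 *	kerrno_t rc;
 *
 *	desc.version       = BUFX_REGION_DESC_VER;
 *	desc.region        = (uint64_t)pool;
 *	desc.size          = NELEM * sizeof(struct my_elem);
 *	desc.elem_size     = sizeof(struct my_elem);
 *	desc.bufx_offset   = offsetof(struct my_elem, bx);
 *	desc.registered_by = BUFX_REGION_BY_LVM_USER;
 *	desc.flags         = 0;
 *	rc = register_bufx_region(desc, &handle);
 *
 *  When the region is torn down:
 *	rc = unregister_bufx_region(handle);
 */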

/* kerrno values for the register/unregister_bufx_region() services */ 
#define EINVAL_REGISTER_BUFX_REGION	KERROR(EINVAL,  sysios_BLOCK_00, 1)
#define EEXIST_REGISTER_BUFX_REGION	KERROR(EEXIST,  sysios_BLOCK_00, 2)
#define ENOMEM_REGISTER_BUFX_REGION	KERROR(ENOMEM,  sysios_BLOCK_00, 3)
#define EINVAL_UNREGISTER_BUFX_REGION	KERROR(EINVAL,  sysios_BLOCK_00, 6)
#define EINVAL_RASCHK_TOUCH_FUNCTION_POINTER_ALIGN \
					KERROR(EINVAL,  sysios_BLOCK_00, 10)

#define IS_LBP_BUFX(buf)            (IS_BUFX(buf) && \
                (((struct bufx *)(buf))->bx_flags & BX_FLAGS_LBP_REL) )

/*
 * The following services provide the interface to the buffer cache
 * and block I/O.
 */

#ifdef _KERNEL
#ifndef _NO_PROTO

struct buf * getblk(		/* allocate uninitialized buffer to block */
	dev_t dev,		/* the device containing block */
	daddr_t blkno);		/* the block to be allocated */

struct buf *geteblk(void);	/* allocate uninitialized buffer	*/

struct buf *bread( 		/* allocate buffer to block and read it */
	dev_t dev,		/* the device containing block */
	daddr_t blkno);		/* the block to be allocated */

struct buf *breada( 		/* allocate buffer to block and read it */
	dev_t dev,		/* the device containing block */
	daddr_t blkno,		/* the block to be allocated */
	daddr_t rablkno);	/* read ahead block */

void brelse(	 		/* free buffer; no I/O implied */
	struct buf *bp); 	/* buffer to be released */

int bwrite(	 		/* write buffer; then free it */
	struct buf *bp); 	/* buffer to be written */

void bdwrite(	 		/* mark buffer for delayed write and free it */
	struct buf *bp); 	/* buffer to be written */

int bawrite(	 		/* async write buffer; then free it */
	struct buf *bp); 	/* buffer to be written */

void bflush(	 		/* flush all delayed write blocks */
	dev_t dev);		/* the device containing blocks */

int blkflush(			/* flush the delayed write block */
	dev_t dev,		/* the device containing block */
	daddr_t blkno);		/* the block to be flushed */

void binval(	 		/* invalidate all blocks */
	dev_t dev);		/* the device containing blocks */

int iowait(			/* wait for I/O completion */
	struct buf *bp); 	/* buffer to wait for completion of */

void iodone(	 		/* call the requester's I/O done routine */
	struct buf *bp); 	/* buffer with completed operation */

long bufx_init(			/* Initializes extended buf */
	struct bufx *bufxp,
	int version);

void bufx_invalidate (		/* Invalidates extended buf */
	struct bufx *bufxp);

long buf_copy(			/* copies extended/legacy buf/bufx */
	void *src_bufp,		/* into extended/legacy buf	   */
	void *dest_bufp);

#else

struct buf *getblk();
struct buf *geteblk();
struct buf *bread();
struct buf *breada();
void brelse();
int bwrite();
void bdwrite();
int bawrite();
void bflush();
int blkflush();
void binval();
int iowait();
void iodone();
long bufx_init();
void bufx_invalidate();
long buf_copy();

#endif /* not _NO_PROTO */

void clrbuf(                    /* zero the buffer's memory */
        struct buf *bp);        /* buffer to clear */

int geterror(                   /* get completion status of a buffer */
        struct buf *bp);        /* buffer for which to check status */
                                /* returns 0 or errno value */

void purblk(                    /* Purge a block from the buffer cache */
        dev_t dev,              /* device containing the block */
        daddr_t blkno);         /* block to be purged */
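
/*
 * Illustrative sketch of a read-modify-write through the buffer cache using
 * the services declared above.  The dev and blkno values and the
 * modify_block() helper are assumptions for illustration.
 *
 *	struct buf *bp;
 *
 *	bp = bread(dev, blkno);
 *	if (geterror(bp)) {
 *		brelse(bp);			(release; no I/O implied)
 *	} else {
 *		modify_block(bp->b_baddr);	(update the cached data)
 *		bdwrite(bp);			(delayed write; buffer freed)
 *	}
 */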


#endif /* _KERNEL */

#ifdef __cplusplus
}
#endif

#endif /* _H_BUF */