/* $NetBSD: isp_pci.c,v 1.95 2006/03/29 04:16:50 thorpej Exp $ */
/*
 * This driver, which is contained in NetBSD in the files:
 *
 *	sys/dev/ic/isp.c
 *	sys/dev/ic/isp_inline.h
 *	sys/dev/ic/isp_netbsd.c
 *	sys/dev/ic/isp_netbsd.h
 *	sys/dev/ic/isp_target.c
 *	sys/dev/ic/isp_target.h
 *	sys/dev/ic/isp_tpublic.h
 *	sys/dev/ic/ispmbox.h
 *	sys/dev/ic/ispreg.h
 *	sys/dev/ic/ispvar.h
 *	sys/microcode/isp/asm_sbus.h
 *	sys/microcode/isp/asm_1040.h
 *	sys/microcode/isp/asm_1080.h
 *	sys/microcode/isp/asm_12160.h
 *	sys/microcode/isp/asm_2100.h
 *	sys/microcode/isp/asm_2200.h
 *	sys/pci/isp_pci.c
 *	sys/sbus/isp_sbus.c
 *
 * Is being actively maintained by Matthew Jacob (mjacob@NetBSD.org).
 * This driver also is shared source with FreeBSD, OpenBSD, Linux, Solaris,
 * Linux versions. This tends to be an interesting maintenance problem.
 *
 * Please coordinate with Matthew Jacob on changes you wish to make here.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Additional Copyright (C) 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
2001-11-13 10:48:40 +03:00
|
|
|
#include <sys/cdefs.h>
|
2006-03-29 08:16:44 +04:00
|
|
|
__KERNEL_RCSID(0, "$NetBSD: isp_pci.c,v 1.95 2006/03/29 04:16:50 thorpej Exp $");
|
2001-11-13 10:48:40 +03:00
|
|
|
|
1998-07-15 23:53:57 +04:00
|
|
|
#include <dev/ic/isp_netbsd.h>
|
1997-03-12 23:44:50 +03:00
|
|
|
#include <dev/pci/pcireg.h>
|
|
|
|
#include <dev/pci/pcivar.h>
|
|
|
|
#include <dev/pci/pcidevs.h>
|
2000-12-06 09:33:56 +03:00
|
|
|
#include <uvm/uvm_extern.h>
|
2000-12-30 11:49:11 +03:00
|
|
|
#include <sys/reboot.h>
|
1997-03-13 04:56:06 +03:00
|
|
|
|
2001-03-14 08:47:56 +03:00
|
|
|
static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
|
|
|
|
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
|
2000-07-06 02:12:23 +04:00
|
|
|
#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
|
2001-03-14 08:47:56 +03:00
|
|
|
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
|
|
|
|
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
|
|
|
|
#endif
|
2004-03-11 01:42:47 +03:00
|
|
|
#if !defined(ISP_DISABLE_2100_SUPPORT) && \
|
|
|
|
!defined(ISP_DISABLE_2200_SUPPORT) && \
|
|
|
|
!defined(ISP_DISABLE_1020_SUPPORT) && \
|
|
|
|
!defined(ISP_DISABLE_1080_SUPPORT) && \
|
2005-02-27 03:26:58 +03:00
|
|
|
!defined(ISP_DISABLE_12160_SUPPORT)
|
2001-09-01 11:12:23 +04:00
|
|
|
static int
|
|
|
|
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
|
2004-03-11 01:42:47 +03:00
|
|
|
#endif
|
|
|
|
#if !defined(ISP_DISABLE_2300_SUPPORT)
|
2001-09-01 11:12:23 +04:00
|
|
|
static int
|
|
|
|
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
|
2004-03-11 01:42:47 +03:00
|
|
|
#endif
|
2001-03-14 08:47:56 +03:00
|
|
|
static int isp_pci_mbxdma(struct ispsoftc *);
|
|
|
|
static int isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *,
|
|
|
|
u_int16_t *, u_int16_t);
|
|
|
|
static void isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);
|
|
|
|
static void isp_pci_reset1(struct ispsoftc *);
|
|
|
|
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
|
|
|
|
static int isp_pci_intr(void *);
|
1997-03-12 23:44:50 +03:00
|
|
|
|
2000-07-06 02:12:23 +04:00
|
|
|
/*
 * Firmware images.  For each chip family, either pull in the RISC
 * microcode header and point ISP_xxxx_RISC_CODE at its image, or
 * define the macro as NULL when support for that family is compiled
 * out (the core then relies on firmware already loaded in NVRAM/ROM).
 */
#if defined(ISP_DISABLE_1020_SUPPORT)
#define	ISP_1040_RISC_CODE	NULL
#else
#define	ISP_1040_RISC_CODE	(const u_int16_t *) isp_1040_risc_code
#include <dev/microcode/isp/asm_1040.h>
#endif

#if defined(ISP_DISABLE_1080_SUPPORT)
#define	ISP_1080_RISC_CODE	NULL
#else
#define	ISP_1080_RISC_CODE	(const u_int16_t *) isp_1080_risc_code
#include <dev/microcode/isp/asm_1080.h>
#endif

#if defined(ISP_DISABLE_12160_SUPPORT)
#define	ISP_12160_RISC_CODE	NULL
#else
#define	ISP_12160_RISC_CODE	(const u_int16_t *) isp_12160_risc_code
#include <dev/microcode/isp/asm_12160.h>
#endif

#if defined(ISP_DISABLE_2100_SUPPORT)
#define	ISP_2100_RISC_CODE	NULL
#else
#define	ISP_2100_RISC_CODE	(const u_int16_t *) isp_2100_risc_code
#include <dev/microcode/isp/asm_2100.h>
#endif

#if defined(ISP_DISABLE_2200_SUPPORT)
#define	ISP_2200_RISC_CODE	NULL
#else
#define	ISP_2200_RISC_CODE	(const u_int16_t *) isp_2200_risc_code
#include <dev/microcode/isp/asm_2200.h>
#endif

#if defined(ISP_DISABLE_2300_SUPPORT)
#define	ISP_2300_RISC_CODE	NULL
#else
#define	ISP_2300_RISC_CODE	(const u_int16_t *) isp_2300_risc_code
#include <dev/microcode/isp/asm_2300.h>
#endif
#ifndef ISP_DISABLE_1020_SUPPORT
|
1997-03-12 23:44:50 +03:00
|
|
|
static struct ispmdvec mdvec = {
|
2001-09-01 11:12:23 +04:00
|
|
|
isp_pci_rd_isr,
|
1997-03-12 23:44:50 +03:00
|
|
|
isp_pci_rd_reg,
|
|
|
|
isp_pci_wr_reg,
|
|
|
|
isp_pci_mbxdma,
|
|
|
|
isp_pci_dmasetup,
|
1997-06-08 10:34:52 +04:00
|
|
|
isp_pci_dmateardown,
|
1997-03-12 23:44:50 +03:00
|
|
|
NULL,
|
|
|
|
isp_pci_reset1,
|
1997-08-16 04:28:10 +04:00
|
|
|
isp_pci_dumpregs,
|
1999-12-04 05:54:54 +03:00
|
|
|
ISP_1040_RISC_CODE,
|
2000-08-02 03:55:09 +04:00
|
|
|
BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
|
1997-08-16 04:28:10 +04:00
|
|
|
};
|
1999-03-17 09:16:42 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef ISP_DISABLE_1080_SUPPORT
|
|
|
|
static struct ispmdvec mdvec_1080 = {
|
2001-09-01 11:12:23 +04:00
|
|
|
isp_pci_rd_isr,
|
1999-03-17 09:16:42 +03:00
|
|
|
isp_pci_rd_reg_1080,
|
|
|
|
isp_pci_wr_reg_1080,
|
|
|
|
isp_pci_mbxdma,
|
|
|
|
isp_pci_dmasetup,
|
|
|
|
isp_pci_dmateardown,
|
|
|
|
NULL,
|
|
|
|
isp_pci_reset1,
|
|
|
|
isp_pci_dumpregs,
|
1999-12-04 05:54:54 +03:00
|
|
|
ISP_1080_RISC_CODE,
|
2000-08-02 03:55:09 +04:00
|
|
|
BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
|
1999-03-17 09:16:42 +03:00
|
|
|
};
|
|
|
|
#endif
|
1997-08-16 04:28:10 +04:00
|
|
|
|
2000-02-12 05:22:37 +03:00
|
|
|
#ifndef ISP_DISABLE_12160_SUPPORT
|
|
|
|
static struct ispmdvec mdvec_12160 = {
|
2001-09-01 11:12:23 +04:00
|
|
|
isp_pci_rd_isr,
|
2000-02-12 05:22:37 +03:00
|
|
|
isp_pci_rd_reg_1080,
|
|
|
|
isp_pci_wr_reg_1080,
|
|
|
|
isp_pci_mbxdma,
|
|
|
|
isp_pci_dmasetup,
|
|
|
|
isp_pci_dmateardown,
|
|
|
|
NULL,
|
|
|
|
isp_pci_reset1,
|
|
|
|
isp_pci_dumpregs,
|
|
|
|
ISP_12160_RISC_CODE,
|
2000-08-02 03:55:09 +04:00
|
|
|
BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
|
2000-02-12 05:22:37 +03:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
1999-03-17 09:16:42 +03:00
|
|
|
#ifndef ISP_DISABLE_2100_SUPPORT
|
1997-08-16 04:28:10 +04:00
|
|
|
static struct ispmdvec mdvec_2100 = {
|
2001-09-01 11:12:23 +04:00
|
|
|
isp_pci_rd_isr,
|
1997-08-16 04:28:10 +04:00
|
|
|
isp_pci_rd_reg,
|
|
|
|
isp_pci_wr_reg,
|
|
|
|
isp_pci_mbxdma,
|
|
|
|
isp_pci_dmasetup,
|
|
|
|
isp_pci_dmateardown,
|
|
|
|
NULL,
|
|
|
|
isp_pci_reset1,
|
|
|
|
isp_pci_dumpregs,
|
2000-08-02 03:55:09 +04:00
|
|
|
ISP_2100_RISC_CODE
|
1997-03-12 23:44:50 +03:00
|
|
|
};
|
1999-03-17 09:16:42 +03:00
|
|
|
#endif
|
1997-03-12 23:44:50 +03:00
|
|
|
|
1999-07-06 00:28:11 +04:00
|
|
|
#ifndef ISP_DISABLE_2200_SUPPORT
|
|
|
|
static struct ispmdvec mdvec_2200 = {
|
2001-09-01 11:12:23 +04:00
|
|
|
isp_pci_rd_isr,
|
1999-07-06 00:28:11 +04:00
|
|
|
isp_pci_rd_reg,
|
|
|
|
isp_pci_wr_reg,
|
|
|
|
isp_pci_mbxdma,
|
|
|
|
isp_pci_dmasetup,
|
|
|
|
isp_pci_dmateardown,
|
|
|
|
NULL,
|
|
|
|
isp_pci_reset1,
|
|
|
|
isp_pci_dumpregs,
|
2000-08-02 03:55:09 +04:00
|
|
|
ISP_2200_RISC_CODE
|
1999-07-06 00:28:11 +04:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2001-09-01 11:12:23 +04:00
|
|
|
#ifndef ISP_DISABLE_2300_SUPPORT
|
|
|
|
static struct ispmdvec mdvec_2300 = {
|
|
|
|
isp_pci_rd_isr_2300,
|
|
|
|
isp_pci_rd_reg,
|
|
|
|
isp_pci_wr_reg,
|
|
|
|
isp_pci_mbxdma,
|
|
|
|
isp_pci_dmasetup,
|
|
|
|
isp_pci_dmateardown,
|
|
|
|
NULL,
|
|
|
|
isp_pci_reset1,
|
|
|
|
isp_pci_dumpregs,
|
|
|
|
ISP_2300_RISC_CODE
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
1999-03-17 09:16:42 +03:00
|
|
|
/*
 * PCI vendor/product IDs for all supported QLogic parts.  Each is
 * guarded so that values already provided by <dev/pci/pcidevs.h>
 * take precedence.  The PCI_QLOGIC_* macros combine product (high
 * 16 bits) and vendor (low 16 bits) in pa_id layout for the probe
 * switch.
 */
#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

/* BAR offsets: 0x10 is the I/O space BAR, 0x14 the memory space BAR. */
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14
/* Expansion ROM base address register. */
#define	PCIR_ROMADDR	0x30

/* Defaults programmed into PCI latency timer and cache line size. */
#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10
static int isp_pci_probe(struct device *, struct cfdata *, void *);
|
|
|
|
static void isp_pci_attach(struct device *, struct device *, void *);
|
1997-03-12 23:44:50 +03:00
|
|
|
|
|
|
|
struct isp_pcisoftc {
|
|
|
|
struct ispsoftc pci_isp;
|
1997-08-16 04:28:10 +04:00
|
|
|
pci_chipset_tag_t pci_pc;
|
|
|
|
pcitag_t pci_tag;
|
1997-03-16 03:37:00 +03:00
|
|
|
bus_space_tag_t pci_st;
|
|
|
|
bus_space_handle_t pci_sh;
|
1999-10-17 06:40:26 +04:00
|
|
|
bus_dmamap_t *pci_xfer_dmap;
|
1997-03-12 23:44:50 +03:00
|
|
|
void * pci_ih;
|
1999-03-17 09:16:42 +03:00
|
|
|
int16_t pci_poff[_NREG_BLKS];
|
1997-03-12 23:44:50 +03:00
|
|
|
};
|
|
|
|
|
2002-10-01 00:37:04 +04:00
|
|
|
CFATTACH_DECL(isp_pci, sizeof (struct isp_pcisoftc),
|
2002-10-02 20:51:16 +04:00
|
|
|
isp_pci_probe, isp_pci_attach, NULL, NULL);
|
1997-03-12 23:44:50 +03:00
|
|
|
|
2000-08-03 07:00:04 +04:00
|
|
|
#ifdef DEBUG
|
2005-02-27 03:26:58 +03:00
|
|
|
const char vstring[] =
|
2000-08-02 03:55:09 +04:00
|
|
|
"Qlogic ISP Driver, NetBSD (pci) Platform Version %d.%d Core Version %d.%d";
|
2000-08-03 07:00:04 +04:00
|
|
|
#endif
|
2000-08-02 03:55:09 +04:00
|
|
|
|
1997-03-12 23:44:50 +03:00
|
|
|
static int
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_probe(struct device *parent, struct cfdata *match, void *aux)
|
1999-10-17 05:22:08 +04:00
|
|
|
{
|
|
|
|
struct pci_attach_args *pa = aux;
|
|
|
|
switch (pa->pa_id) {
|
1999-03-17 09:16:42 +03:00
|
|
|
#ifndef ISP_DISABLE_1020_SUPPORT
|
|
|
|
case PCI_QLOGIC_ISP:
|
1997-03-12 23:44:50 +03:00
|
|
|
return (1);
|
1999-03-17 09:16:42 +03:00
|
|
|
#endif
|
|
|
|
#ifndef ISP_DISABLE_1080_SUPPORT
|
|
|
|
case PCI_QLOGIC_ISP1080:
|
1999-05-12 22:59:23 +04:00
|
|
|
case PCI_QLOGIC_ISP1240:
|
1999-12-16 08:35:42 +03:00
|
|
|
case PCI_QLOGIC_ISP1280:
|
1999-03-17 09:16:42 +03:00
|
|
|
return (1);
|
|
|
|
#endif
|
2000-02-12 05:22:37 +03:00
|
|
|
#ifndef ISP_DISABLE_12160_SUPPORT
|
2002-10-19 03:26:15 +04:00
|
|
|
case PCI_QLOGIC_ISP10160:
|
2000-02-12 05:22:37 +03:00
|
|
|
case PCI_QLOGIC_ISP12160:
|
|
|
|
return (1);
|
|
|
|
#endif
|
1999-03-17 09:16:42 +03:00
|
|
|
#ifndef ISP_DISABLE_2100_SUPPORT
|
|
|
|
case PCI_QLOGIC_ISP2100:
|
|
|
|
return (1);
|
1999-07-06 00:28:11 +04:00
|
|
|
#endif
|
|
|
|
#ifndef ISP_DISABLE_2200_SUPPORT
|
|
|
|
case PCI_QLOGIC_ISP2200:
|
|
|
|
return (1);
|
2001-09-01 11:12:23 +04:00
|
|
|
#endif
|
|
|
|
#ifndef ISP_DISABLE_2300_SUPPORT
|
|
|
|
case PCI_QLOGIC_ISP2300:
|
|
|
|
case PCI_QLOGIC_ISP2312:
|
|
|
|
return (1);
|
1999-03-17 09:16:42 +03:00
|
|
|
#endif
|
|
|
|
default:
|
1997-03-12 23:44:50 +03:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1999-10-17 05:22:08 +04:00
|
|
|
static void
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_attach(struct device *parent, struct device *self, void *aux)
|
1997-03-12 23:44:50 +03:00
|
|
|
{
|
1998-09-08 11:16:46 +04:00
|
|
|
#ifdef DEBUG
|
1998-07-31 06:14:40 +04:00
|
|
|
static char oneshot = 1;
|
|
|
|
#endif
|
2002-04-29 01:32:14 +04:00
|
|
|
static const char nomem[] = "\n%s: no mem for sdparam table\n";
|
1999-12-04 05:54:54 +03:00
|
|
|
u_int32_t data, rev, linesz = PCI_DFLT_LNSZ;
|
1997-03-12 23:44:50 +03:00
|
|
|
struct pci_attach_args *pa = aux;
|
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
|
1998-07-15 23:53:57 +04:00
|
|
|
struct ispsoftc *isp = &pcs->pci_isp;
|
1997-04-13 23:48:16 +04:00
|
|
|
bus_space_tag_t st, iot, memt;
|
|
|
|
bus_space_handle_t sh, ioh, memh;
|
1997-03-12 23:44:50 +03:00
|
|
|
pci_intr_handle_t ih;
|
2003-08-07 05:05:56 +04:00
|
|
|
pcireg_t mem_type;
|
2005-05-30 08:35:22 +04:00
|
|
|
const char *dstring;
|
1997-03-12 23:44:50 +03:00
|
|
|
const char *intrstr;
|
2000-08-02 03:55:09 +04:00
|
|
|
int ioh_valid, memh_valid;
|
1997-04-13 23:48:16 +04:00
|
|
|
|
1997-04-14 00:14:20 +04:00
|
|
|
ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
|
1997-04-13 23:48:16 +04:00
|
|
|
PCI_MAPREG_TYPE_IO, 0,
|
|
|
|
&iot, &ioh, NULL, NULL) == 0);
|
2005-02-27 03:26:58 +03:00
|
|
|
|
2003-08-07 05:05:56 +04:00
|
|
|
mem_type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MEM_MAP_REG);
|
|
|
|
if (PCI_MAPREG_TYPE(mem_type) != PCI_MAPREG_TYPE_MEM) {
|
|
|
|
memh_valid = 0;
|
|
|
|
} else if (PCI_MAPREG_MEM_TYPE(mem_type) != PCI_MAPREG_MEM_TYPE_32BIT &&
|
|
|
|
PCI_MAPREG_MEM_TYPE(mem_type) != PCI_MAPREG_MEM_TYPE_64BIT) {
|
|
|
|
memh_valid = 0;
|
|
|
|
} else {
|
|
|
|
memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG, mem_type, 0,
|
|
|
|
&memt, &memh, NULL, NULL) == 0);
|
|
|
|
}
|
1997-04-13 23:48:16 +04:00
|
|
|
if (memh_valid) {
|
|
|
|
st = memt;
|
|
|
|
sh = memh;
|
|
|
|
} else if (ioh_valid) {
|
|
|
|
st = iot;
|
|
|
|
sh = ioh;
|
1997-03-16 03:37:00 +03:00
|
|
|
} else {
|
1997-04-13 23:48:16 +04:00
|
|
|
printf(": unable to map device registers\n");
|
1997-03-29 01:25:01 +03:00
|
|
|
return;
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = "\n";
|
1997-03-12 23:44:50 +03:00
|
|
|
|
1997-03-16 03:37:00 +03:00
|
|
|
pcs->pci_st = st;
|
|
|
|
pcs->pci_sh = sh;
|
1997-08-16 04:28:10 +04:00
|
|
|
pcs->pci_pc = pa->pa_pc;
|
|
|
|
pcs->pci_tag = pa->pa_tag;
|
1999-03-17 09:16:42 +03:00
|
|
|
pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
|
|
|
|
pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
|
|
|
|
pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
|
|
|
|
pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
|
|
|
|
pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
|
1999-12-04 05:54:54 +03:00
|
|
|
rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0xff;
|
1999-03-17 09:16:42 +03:00
|
|
|
|
|
|
|
#ifndef ISP_DISABLE_1020_SUPPORT
|
1997-08-16 04:28:10 +04:00
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP) {
|
2003-08-07 05:05:56 +04:00
|
|
|
dstring = ": QLogic 1020 Fast Wide SCSI HBA\n";
|
1998-07-15 23:53:57 +04:00
|
|
|
isp->isp_mdvec = &mdvec;
|
|
|
|
isp->isp_type = ISP_HA_SCSI_UNKNOWN;
|
|
|
|
isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
1999-05-12 22:59:23 +04:00
|
|
|
printf(nomem, isp->isp_name);
|
1998-07-15 23:53:57 +04:00
|
|
|
return;
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, sizeof (sdparam));
|
1999-03-17 09:16:42 +03:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifndef ISP_DISABLE_1080_SUPPORT
|
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP1080) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic 1080 Ultra-2 Wide SCSI HBA\n";
|
1999-03-17 09:16:42 +03:00
|
|
|
isp->isp_mdvec = &mdvec_1080;
|
|
|
|
isp->isp_type = ISP_HA_SCSI_1080;
|
|
|
|
isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
1999-05-12 22:59:23 +04:00
|
|
|
printf(nomem, isp->isp_name);
|
1999-03-17 09:16:42 +03:00
|
|
|
return;
|
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, sizeof (sdparam));
|
1999-03-17 09:16:42 +03:00
|
|
|
pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
ISP1080_DMA_REGS_OFF;
|
|
|
|
}
|
1999-05-12 22:59:23 +04:00
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP1240) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic Dual Channel Ultra Wide SCSI HBA\n";
|
1999-05-12 22:59:23 +04:00
|
|
|
isp->isp_mdvec = &mdvec_1080;
|
1999-12-16 08:35:42 +03:00
|
|
|
isp->isp_type = ISP_HA_SCSI_1240;
|
|
|
|
isp->isp_param =
|
|
|
|
malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
|
|
|
printf(nomem, isp->isp_name);
|
|
|
|
return;
|
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, 2 * sizeof (sdparam));
|
1999-12-16 08:35:42 +03:00
|
|
|
pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
ISP1080_DMA_REGS_OFF;
|
|
|
|
}
|
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP1280) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic Dual Channel Ultra-2 Wide SCSI HBA\n";
|
1999-12-16 08:35:42 +03:00
|
|
|
isp->isp_mdvec = &mdvec_1080;
|
|
|
|
isp->isp_type = ISP_HA_SCSI_1280;
|
1999-05-12 22:59:23 +04:00
|
|
|
isp->isp_param =
|
|
|
|
malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
|
|
|
printf(nomem, isp->isp_name);
|
|
|
|
return;
|
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, 2 * sizeof (sdparam));
|
1999-05-12 22:59:23 +04:00
|
|
|
pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
ISP1080_DMA_REGS_OFF;
|
|
|
|
}
|
1999-03-17 09:16:42 +03:00
|
|
|
#endif
|
2000-02-12 05:22:37 +03:00
|
|
|
#ifndef ISP_DISABLE_12160_SUPPORT
|
2002-10-19 03:26:15 +04:00
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP10160) {
|
|
|
|
dstring = ": QLogic Ultra-3 Wide SCSI HBA\n";
|
|
|
|
isp->isp_mdvec = &mdvec_12160;
|
|
|
|
isp->isp_type = ISP_HA_SCSI_10160;
|
|
|
|
isp->isp_param = malloc(sizeof (sdparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
|
|
|
printf(nomem, isp->isp_name);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
memset(isp->isp_param, 0, sizeof (sdparam));
|
|
|
|
pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
ISP1080_DMA_REGS_OFF;
|
|
|
|
}
|
2000-02-12 05:22:37 +03:00
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP12160) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic Dual Channel Ultra-3 Wide SCSI HBA\n";
|
2000-02-12 05:22:37 +03:00
|
|
|
isp->isp_mdvec = &mdvec_12160;
|
|
|
|
isp->isp_type = ISP_HA_SCSI_12160;
|
|
|
|
isp->isp_param =
|
|
|
|
malloc(2 * sizeof (sdparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
|
|
|
printf(nomem, isp->isp_name);
|
|
|
|
return;
|
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, 2 * sizeof (sdparam));
|
2000-02-12 05:22:37 +03:00
|
|
|
pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
ISP1080_DMA_REGS_OFF;
|
|
|
|
}
|
|
|
|
#endif
|
1999-03-17 09:16:42 +03:00
|
|
|
#ifndef ISP_DISABLE_2100_SUPPORT
|
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP2100) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic FC-AL HBA\n";
|
1998-07-15 23:53:57 +04:00
|
|
|
isp->isp_mdvec = &mdvec_2100;
|
|
|
|
isp->isp_type = ISP_HA_FC_2100;
|
|
|
|
isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
1999-05-12 22:59:23 +04:00
|
|
|
printf(nomem, isp->isp_name);
|
1998-07-15 23:53:57 +04:00
|
|
|
return;
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, sizeof (fcparam));
|
1999-03-17 09:16:42 +03:00
|
|
|
pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
PCI_MBOX_REGS2100_OFF;
|
1999-12-04 05:54:54 +03:00
|
|
|
if (rev < 3) {
|
1999-04-04 05:14:58 +04:00
|
|
|
/*
|
|
|
|
* XXX: Need to get the actual revision
|
|
|
|
* XXX: number of the 2100 FB. At any rate,
|
|
|
|
* XXX: lower cache line size for early revision
|
|
|
|
* XXX; boards.
|
|
|
|
*/
|
|
|
|
linesz = 1;
|
|
|
|
}
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|
1999-03-17 09:16:42 +03:00
|
|
|
#endif
|
1999-07-06 00:28:11 +04:00
|
|
|
#ifndef ISP_DISABLE_2200_SUPPORT
|
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP2200) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic FC-AL and Fabric HBA\n";
|
1999-07-06 00:28:11 +04:00
|
|
|
isp->isp_mdvec = &mdvec_2200;
|
|
|
|
isp->isp_type = ISP_HA_FC_2200;
|
|
|
|
isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
|
|
|
printf(nomem, isp->isp_name);
|
|
|
|
return;
|
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_param, 0, sizeof (fcparam));
|
1999-07-06 00:28:11 +04:00
|
|
|
pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
PCI_MBOX_REGS2100_OFF;
|
|
|
|
data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
|
|
|
|
}
|
2001-09-01 11:12:23 +04:00
|
|
|
#endif
|
|
|
|
#ifndef ISP_DISABLE_2300_SUPPORT
|
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP2300 ||
|
|
|
|
pa->pa_id == PCI_QLOGIC_ISP2312) {
|
|
|
|
isp->isp_mdvec = &mdvec_2300;
|
2002-02-22 01:32:40 +03:00
|
|
|
if (pa->pa_id == PCI_QLOGIC_ISP2300) {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring = ": QLogic FC-AL and 2Gbps Fabric HBA\n";
|
2002-02-22 01:32:40 +03:00
|
|
|
isp->isp_type = ISP_HA_FC_2300;
|
|
|
|
} else {
|
2002-04-29 01:32:14 +04:00
|
|
|
dstring =
|
|
|
|
": QLogic Dual Port FC-AL and 2Gbps Fabric HBA\n";
|
2002-02-22 01:32:40 +03:00
|
|
|
isp->isp_type = ISP_HA_FC_2312;
|
|
|
|
isp->isp_port = pa->pa_function;
|
|
|
|
}
|
2001-09-01 11:12:23 +04:00
|
|
|
isp->isp_param = malloc(sizeof (fcparam), M_DEVBUF, M_NOWAIT);
|
|
|
|
if (isp->isp_param == NULL) {
|
|
|
|
printf(nomem, isp->isp_name);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
memset(isp->isp_param, 0, sizeof (fcparam));
|
|
|
|
pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
|
|
|
|
PCI_MBOX_REGS2300_OFF;
|
|
|
|
data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
|
|
|
|
}
|
1999-07-06 00:28:11 +04:00
|
|
|
#endif
|
2000-08-02 03:55:09 +04:00
|
|
|
/*
|
|
|
|
* Set up logging levels.
|
|
|
|
*/
|
|
|
|
#ifdef ISP_LOGDEFAULT
|
|
|
|
isp->isp_dblev = ISP_LOGDEFAULT;
|
|
|
|
#else
|
2000-12-30 11:49:11 +03:00
|
|
|
isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
|
|
|
|
if (bootverbose)
|
|
|
|
isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
|
2000-08-02 03:55:09 +04:00
|
|
|
#ifdef SCSIDEBUG
|
2002-04-29 01:32:14 +04:00
|
|
|
isp->isp_dblev |= ISP_LOGDEBUG0|ISP_LOGDEBUG1|ISP_LOGDEBUG2;
|
2000-08-02 03:55:09 +04:00
|
|
|
#endif
|
|
|
|
#endif
|
2002-04-29 01:32:14 +04:00
|
|
|
if (isp->isp_dblev & ISP_LOGCONFIG) {
|
|
|
|
printf("\n");
|
|
|
|
} else {
|
|
|
|
printf(dstring);
|
|
|
|
}
|
2000-10-16 09:12:26 +04:00
|
|
|
|
2000-08-02 21:39:50 +04:00
|
|
|
#ifdef DEBUG
|
2000-08-02 03:55:09 +04:00
|
|
|
if (oneshot) {
|
|
|
|
oneshot = 0;
|
|
|
|
isp_prt(isp, ISP_LOGCONFIG, vstring,
|
|
|
|
ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
|
|
|
|
ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
|
|
|
|
}
|
2000-08-02 21:39:50 +04:00
|
|
|
#endif
|
2000-08-02 03:55:09 +04:00
|
|
|
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
isp->isp_dmatag = pa->pa_dmat;
|
1999-12-04 05:54:54 +03:00
|
|
|
isp->isp_revision = rev;
|
1999-03-17 09:16:42 +03:00
|
|
|
|
1999-02-09 03:35:35 +03:00
|
|
|
/*
|
|
|
|
* Make sure that command register set sanely.
|
|
|
|
*/
|
|
|
|
data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
|
|
|
|
data |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_INVALIDATE_ENABLE;
|
1999-03-17 09:16:42 +03:00
|
|
|
|
1999-02-09 03:35:35 +03:00
|
|
|
/*
|
|
|
|
* Not so sure about these- but I think it's important that they get
|
|
|
|
* enabled......
|
|
|
|
*/
|
|
|
|
data |= PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
|
2002-02-22 01:32:40 +03:00
|
|
|
if (IS_2300(isp)) { /* per QLogic errata */
|
|
|
|
data &= ~PCI_COMMAND_INVALIDATE_ENABLE;
|
|
|
|
}
|
2002-06-15 04:11:36 +04:00
|
|
|
if (IS_23XX(isp)) {
|
|
|
|
isp->isp_touched = 1;
|
|
|
|
}
|
1999-02-09 03:35:35 +03:00
|
|
|
pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, data);
|
1999-03-17 09:16:42 +03:00
|
|
|
|
1999-02-09 03:35:35 +03:00
|
|
|
/*
|
1999-04-04 05:14:58 +04:00
|
|
|
* Make sure that the latency timer, cache line size,
|
|
|
|
* and ROM is disabled.
|
1999-02-09 03:35:35 +03:00
|
|
|
*/
|
|
|
|
data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
|
|
|
|
data &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
|
|
|
|
data &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
|
1999-04-04 05:14:58 +04:00
|
|
|
data |= (PCI_DFLT_LTNCY << PCI_LATTIMER_SHIFT);
|
|
|
|
data |= (linesz << PCI_CACHELINE_SHIFT);
|
1999-02-09 03:35:35 +03:00
|
|
|
pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, data);
|
|
|
|
|
1999-04-04 05:14:58 +04:00
|
|
|
data = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR);
|
|
|
|
data &= ~1;
|
|
|
|
pci_conf_write(pa->pa_pc, pa->pa_tag, PCIR_ROMADDR, data);
|
|
|
|
|
2000-12-29 01:59:06 +03:00
|
|
|
if (pci_intr_map(pa, &ih)) {
|
1998-07-15 23:53:57 +04:00
|
|
|
printf("%s: couldn't map interrupt\n", isp->isp_name);
|
|
|
|
free(isp->isp_param, M_DEVBUF);
|
1997-03-12 23:44:50 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
intrstr = pci_intr_string(pa->pa_pc, ih);
|
|
|
|
if (intrstr == NULL)
|
|
|
|
intrstr = "<I dunno>";
|
1999-10-17 05:22:08 +04:00
|
|
|
pcs->pci_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
|
|
|
|
isp_pci_intr, isp);
|
1997-03-12 23:44:50 +03:00
|
|
|
if (pcs->pci_ih == NULL) {
|
|
|
|
printf("%s: couldn't establish interrupt at %s\n",
|
1998-07-15 23:53:57 +04:00
|
|
|
isp->isp_name, intrstr);
|
1999-03-17 09:16:42 +03:00
|
|
|
free(isp->isp_param, M_DEVBUF);
|
|
|
|
return;
|
|
|
|
}
|
2000-08-02 03:55:09 +04:00
|
|
|
|
1999-03-17 09:16:42 +03:00
|
|
|
printf("%s: interrupting at %s\n", isp->isp_name, intrstr);
|
|
|
|
|
1999-10-14 06:14:35 +04:00
|
|
|
if (IS_FC(isp)) {
|
2002-11-25 05:16:50 +03:00
|
|
|
DEFAULT_NODEWWN(isp) = 0x400000007F000002ULL;
|
|
|
|
DEFAULT_PORTWWN(isp) = 0x400000007F000002ULL;
|
1999-10-14 06:14:35 +04:00
|
|
|
}
|
1999-07-06 00:28:11 +04:00
|
|
|
|
2006-03-29 08:16:44 +04:00
|
|
|
isp->isp_confopts = device_cfdata(self)->cf_flags;
|
2001-02-13 02:25:20 +03:00
|
|
|
isp->isp_role = ISP_DEFAULT_ROLES;
|
1999-03-17 09:16:42 +03:00
|
|
|
ISP_LOCK(isp);
|
2000-08-02 03:55:09 +04:00
|
|
|
isp->isp_osinfo.no_mbox_ints = 1;
|
1999-03-17 09:16:42 +03:00
|
|
|
isp_reset(isp);
|
|
|
|
if (isp->isp_state != ISP_RESETSTATE) {
|
|
|
|
ISP_UNLOCK(isp);
|
|
|
|
free(isp->isp_param, M_DEVBUF);
|
|
|
|
return;
|
|
|
|
}
|
2000-08-02 03:55:09 +04:00
|
|
|
ENABLE_INTS(isp);
|
1999-03-17 09:16:42 +03:00
|
|
|
isp_init(isp);
|
|
|
|
if (isp->isp_state != ISP_INITSTATE) {
|
1998-07-15 23:53:57 +04:00
|
|
|
isp_uninit(isp);
|
1998-07-19 01:02:06 +04:00
|
|
|
ISP_UNLOCK(isp);
|
1998-07-15 23:53:57 +04:00
|
|
|
free(isp->isp_param, M_DEVBUF);
|
1997-03-12 23:44:50 +03:00
|
|
|
return;
|
|
|
|
}
|
1997-06-08 10:34:52 +04:00
|
|
|
/*
|
2000-08-02 03:55:09 +04:00
|
|
|
* Do platform attach.
|
1997-03-12 23:44:50 +03:00
|
|
|
*/
|
2000-08-02 03:55:09 +04:00
|
|
|
ISP_UNLOCK(isp);
|
1998-07-15 23:53:57 +04:00
|
|
|
isp_attach(isp);
|
|
|
|
if (isp->isp_state != ISP_RUNSTATE) {
|
2000-08-02 03:55:09 +04:00
|
|
|
ISP_LOCK(isp);
|
1998-07-15 23:53:57 +04:00
|
|
|
isp_uninit(isp);
|
|
|
|
free(isp->isp_param, M_DEVBUF);
|
2000-08-02 03:55:09 +04:00
|
|
|
ISP_UNLOCK(isp);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2001-09-01 11:12:23 +04:00
|
|
|
/*
 * Translate a driver-virtual register offset into the PCI bus-space
 * offset for this card: pci_poff[] holds the per-chip base of each
 * register block (selected by the block bits of 'x'); the low byte
 * of 'x' is the offset within that block.  'a' must point at (or be
 * castable to) the enclosing isp_pcisoftc.
 */
#define IspVirt2Off(a, x) \
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

/* 16-bit bus-space read/write shorthands for the mapped register window. */
#define BXR2(pcs, off) \
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v) \
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
|
|
|
|
|
|
|
|
|
|
|
|
static INLINE int
|
|
|
|
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
|
|
|
|
{
|
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
|
|
|
|
u_int16_t val0, val1;
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
do {
|
|
|
|
val0 = BXR2(pcs, IspVirt2Off(isp, off));
|
|
|
|
val1 = BXR2(pcs, IspVirt2Off(isp, off));
|
|
|
|
} while (val0 != val1 && ++i < 1000);
|
|
|
|
if (val0 != val1) {
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
*rp = val0;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2004-03-11 01:42:47 +03:00
|
|
|
#if !defined(ISP_DISABLE_2100_SUPPORT) && \
|
|
|
|
!defined(ISP_DISABLE_2200_SUPPORT) && \
|
|
|
|
!defined(ISP_DISABLE_1020_SUPPORT) && \
|
|
|
|
!defined(ISP_DISABLE_1080_SUPPORT) && \
|
2005-02-27 03:26:58 +03:00
|
|
|
!defined(ISP_DISABLE_12160_SUPPORT)
|
2001-09-01 11:12:23 +04:00
|
|
|
static int
|
|
|
|
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
|
|
|
|
u_int16_t *semap, u_int16_t *mbp)
|
|
|
|
{
|
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
|
|
|
|
u_int16_t isr, sema;
|
|
|
|
|
|
|
|
if (IS_2100(isp)) {
|
|
|
|
if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
|
|
|
|
sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
|
|
|
|
}
|
|
|
|
isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
|
|
|
|
isr &= INT_PENDING_MASK(isp);
|
|
|
|
sema &= BIU_SEMA_LOCK;
|
|
|
|
if (isr == 0 && sema == 0) {
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
*isrp = isr;
|
|
|
|
if ((*semap = sema) != 0) {
|
|
|
|
if (IS_2100(isp)) {
|
|
|
|
if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (1);
|
|
|
|
}
|
2004-03-11 01:42:47 +03:00
|
|
|
#endif
|
2001-09-01 11:12:23 +04:00
|
|
|
|
|
|
|
#ifndef ISP_DISABLE_2300_SUPPORT
/*
 * Read the current interrupt state for 2300-family chips, which expose
 * a combined 32-bit RISC-to-host status register instead of separate
 * ISR/semaphore/mailbox reads.
 *
 * Out parameters:
 *	isrp	low 16 bits of the RISC-to-host status
 *	semap	nonzero when *mbox0p carries a valid mailbox/event code
 *	mbox0p	synthesized mailbox-0 value for the decoded event
 *
 * Returns 1 if an interrupt is pending (outputs valid), 0 otherwise.
 */
static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	/* Cheap 16-bit check first: bail if the RISC isn't interrupting. */
	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
		*isrp = 0;
		return (0);
	}
	/*
	 * NOTE(review): 'pcs' is passed here where every other call site
	 * passes 'isp'; harmless since IspVirt2Off casts its first
	 * argument, but inconsistent -- confirm intent.
	 */
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	/* Decode the status subtype into a mailbox-0 style event. */
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		/* Mailbox completion/async event: mbox0 is in the top half. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		/* Reduced-interrupt operation: fake an ASYNC_RIO1 event. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		/* Fast posting: fake a command-complete async event. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		/* Fast-posted CTIO (target mode) completion. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		/* Response queue update only- no mailbox data. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		/* Unrecognized status- treat as no interrupt. */
		return (0);
	}
}
#endif
|
|
|
|
|
1997-03-12 23:44:50 +03:00
|
|
|
static u_int16_t
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
|
1997-03-12 23:44:50 +03:00
|
|
|
{
|
1997-08-16 04:28:10 +04:00
|
|
|
u_int16_t rv;
|
1997-03-12 23:44:50 +03:00
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
|
2001-09-01 11:12:23 +04:00
|
|
|
int oldconf = 0;
|
1999-03-17 09:16:42 +03:00
|
|
|
|
|
|
|
if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
|
1997-03-12 23:44:50 +03:00
|
|
|
/*
|
1997-08-16 04:28:10 +04:00
|
|
|
* We will assume that someone has paused the RISC processor.
|
1997-03-12 23:44:50 +03:00
|
|
|
*/
|
2001-09-01 11:12:23 +04:00
|
|
|
oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
|
|
|
|
BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
|
|
|
|
oldconf | BIU_PCI_CONF1_SXP);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
2001-09-01 11:12:23 +04:00
|
|
|
rv = BXR2(pcs, IspVirt2Off(isp, regoff));
|
1999-03-17 09:16:42 +03:00
|
|
|
if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
|
2001-09-01 11:12:23 +04:00
|
|
|
BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|
|
|
|
return (rv);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
|
1997-03-12 23:44:50 +03:00
|
|
|
{
|
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
|
2001-09-01 11:12:23 +04:00
|
|
|
int oldconf = 0;
|
1999-03-17 09:16:42 +03:00
|
|
|
|
|
|
|
if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
|
1997-03-12 23:44:50 +03:00
|
|
|
/*
|
1997-08-16 04:28:10 +04:00
|
|
|
* We will assume that someone has paused the RISC processor.
|
1997-03-12 23:44:50 +03:00
|
|
|
*/
|
2001-09-01 11:12:23 +04:00
|
|
|
oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
|
|
|
|
BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
|
|
|
|
oldconf | BIU_PCI_CONF1_SXP);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
2001-09-01 11:12:23 +04:00
|
|
|
BXW2(pcs, IspVirt2Off(isp, regoff), val);
|
1999-03-17 09:16:42 +03:00
|
|
|
if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
|
2001-09-01 11:12:23 +04:00
|
|
|
BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
|
2000-07-06 02:12:23 +04:00
|
|
|
#if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
/*
 * Read a 16-bit chip register on 1080/1280/12160 chips.
 *
 * These chips multiplex two SXP banks and a DMA register block behind
 * BIU_CONF1 select bits, so the appropriate bank must be selected (and
 * afterwards deselected) around the access.  'oc' doubles as the
 * "restore needed" flag: it is only nonzero when BIU_CONF1 was changed.
 */
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		/* Select the requested SXP bank; DMA select off. */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* DMA block registers need the DMA select bit. */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		/* Restore the previous BIU_CONF1 selection. */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}
|
|
|
|
|
|
|
|
/*
 * Write a 16-bit chip register on 1080/1280/12160 chips.
 *
 * Mirror image of isp_pci_rd_reg_1080: select the proper SXP bank or
 * the DMA block via BIU_CONF1 around the access, restoring the old
 * selection afterwards ('oc' nonzero means a restore is needed).
 */
static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		/* Select the requested SXP bank; DMA select off. */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* DMA block registers need the DMA select bit. */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		/* Restore the previous BIU_CONF1 selection. */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}
#endif
|
|
|
|
|
1997-06-08 10:34:52 +04:00
|
|
|
static int
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_mbxdma(struct ispsoftc *isp)
|
1997-03-12 23:44:50 +03:00
|
|
|
{
|
2000-08-02 03:55:09 +04:00
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
bus_dma_tag_t dmat = isp->isp_dmatag;
|
2000-08-02 03:55:09 +04:00
|
|
|
bus_dma_segment_t sg;
|
1997-06-08 10:34:52 +04:00
|
|
|
bus_size_t len;
|
1997-08-16 04:28:10 +04:00
|
|
|
fcparam *fcp;
|
2000-08-02 03:55:09 +04:00
|
|
|
int rs, i;
|
1997-03-12 23:44:50 +03:00
|
|
|
|
1999-10-14 06:14:35 +04:00
|
|
|
if (isp->isp_rquest_dma) /* been here before? */
|
|
|
|
return (0);
|
|
|
|
|
2001-06-14 23:54:07 +04:00
|
|
|
len = isp->isp_maxcmds * sizeof (XS_T *);
|
2000-08-02 03:55:09 +04:00
|
|
|
isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
|
1999-10-14 06:14:35 +04:00
|
|
|
if (isp->isp_xflist == NULL) {
|
2000-08-02 03:55:09 +04:00
|
|
|
isp_prt(isp, ISP_LOGERR, "cannot malloc xflist array");
|
1999-10-14 06:14:35 +04:00
|
|
|
return (1);
|
|
|
|
}
|
2001-07-07 20:46:34 +04:00
|
|
|
memset(isp->isp_xflist, 0, len);
|
1999-10-17 06:40:26 +04:00
|
|
|
len = isp->isp_maxcmds * sizeof (bus_dmamap_t);
|
2000-08-02 03:55:09 +04:00
|
|
|
pcs->pci_xfer_dmap = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
|
|
|
|
if (pcs->pci_xfer_dmap == NULL) {
|
|
|
|
free(isp->isp_xflist, M_DEVBUF);
|
|
|
|
isp->isp_xflist = NULL;
|
2003-05-03 22:10:37 +04:00
|
|
|
isp_prt(isp, ISP_LOGERR, "cannot malloc DMA map array");
|
2000-08-02 03:55:09 +04:00
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
for (i = 0; i < isp->isp_maxcmds; i++) {
|
2000-11-14 21:42:55 +03:00
|
|
|
if (bus_dmamap_create(dmat, MAXPHYS, (MAXPHYS / PAGE_SIZE) + 1,
|
2000-08-02 03:55:09 +04:00
|
|
|
MAXPHYS, 0, BUS_DMA_NOWAIT, &pcs->pci_xfer_dmap[i])) {
|
2003-05-03 22:10:37 +04:00
|
|
|
isp_prt(isp, ISP_LOGERR, "cannot create DMA maps");
|
2000-08-02 03:55:09 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (i < isp->isp_maxcmds) {
|
|
|
|
while (--i >= 0) {
|
|
|
|
bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
|
|
|
|
}
|
|
|
|
free(isp->isp_xflist, M_DEVBUF);
|
|
|
|
free(pcs->pci_xfer_dmap, M_DEVBUF);
|
|
|
|
isp->isp_xflist = NULL;
|
|
|
|
pcs->pci_xfer_dmap = NULL;
|
1999-10-17 06:40:26 +04:00
|
|
|
return (1);
|
|
|
|
}
|
1999-10-14 06:14:35 +04:00
|
|
|
|
1997-06-08 10:34:52 +04:00
|
|
|
/*
|
|
|
|
* Allocate and map the request queue.
|
|
|
|
*/
|
2000-08-02 03:55:09 +04:00
|
|
|
len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
|
2000-11-14 21:42:55 +03:00
|
|
|
if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
|
|
|
|
BUS_DMA_NOWAIT) ||
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
bus_dmamem_map(isp->isp_dmatag, &sg, rs, len,
|
2000-08-02 03:55:09 +04:00
|
|
|
(caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
|
|
|
|
goto dmafail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
&isp->isp_rqdmap) || bus_dmamap_load(dmat, isp->isp_rqdmap,
|
|
|
|
(caddr_t)isp->isp_rquest, len, NULL,
|
2000-08-02 03:55:09 +04:00
|
|
|
BUS_DMA_NOWAIT)) {
|
|
|
|
goto dmafail;
|
|
|
|
}
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
isp->isp_rquest_dma = isp->isp_rqdmap->dm_segs[0].ds_addr;
|
1997-03-12 23:44:50 +03:00
|
|
|
|
1997-06-08 10:34:52 +04:00
|
|
|
/*
|
|
|
|
* Allocate and map the result queue.
|
|
|
|
*/
|
2000-08-02 03:55:09 +04:00
|
|
|
len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
|
2000-11-14 21:42:55 +03:00
|
|
|
if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
|
|
|
|
BUS_DMA_NOWAIT) ||
|
2000-08-02 03:55:09 +04:00
|
|
|
bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&isp->isp_result,
|
|
|
|
BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
|
|
|
|
goto dmafail;
|
|
|
|
}
|
|
|
|
if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
&isp->isp_rsdmap) || bus_dmamap_load(isp->isp_dmatag,
|
|
|
|
isp->isp_rsdmap, (caddr_t)isp->isp_result, len, NULL,
|
2000-08-02 03:55:09 +04:00
|
|
|
BUS_DMA_NOWAIT)) {
|
|
|
|
goto dmafail;
|
|
|
|
}
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
isp->isp_result_dma = isp->isp_rsdmap->dm_segs[0].ds_addr;
|
1997-06-08 10:34:52 +04:00
|
|
|
|
1999-07-06 00:28:11 +04:00
|
|
|
if (IS_SCSI(isp)) {
|
1997-08-16 04:28:10 +04:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
fcp = isp->isp_param;
|
|
|
|
len = ISP2100_SCRLEN;
|
2000-11-14 21:42:55 +03:00
|
|
|
if (bus_dmamem_alloc(dmat, len, PAGE_SIZE, 0, &sg, 1, &rs,
|
|
|
|
BUS_DMA_NOWAIT) ||
|
2000-08-02 03:55:09 +04:00
|
|
|
bus_dmamem_map(dmat, &sg, rs, len, (caddr_t *)&fcp->isp_scratch,
|
|
|
|
BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
|
|
|
|
goto dmafail;
|
|
|
|
}
|
|
|
|
if (bus_dmamap_create(dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
&isp->isp_scdmap) || bus_dmamap_load(dmat,
|
|
|
|
isp->isp_scdmap, (caddr_t)fcp->isp_scratch, len, NULL,
|
2000-08-02 03:55:09 +04:00
|
|
|
BUS_DMA_NOWAIT)) {
|
|
|
|
goto dmafail;
|
|
|
|
}
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
fcp->isp_scdma = isp->isp_scdmap->dm_segs[0].ds_addr;
|
1997-06-08 10:34:52 +04:00
|
|
|
return (0);
|
2000-08-02 03:55:09 +04:00
|
|
|
dmafail:
|
2003-05-03 22:10:37 +04:00
|
|
|
isp_prt(isp, ISP_LOGERR, "mailbox DMA setup failure");
|
2000-08-02 03:55:09 +04:00
|
|
|
for (i = 0; i < isp->isp_maxcmds; i++) {
|
|
|
|
bus_dmamap_destroy(dmat, pcs->pci_xfer_dmap[i]);
|
|
|
|
}
|
|
|
|
free(isp->isp_xflist, M_DEVBUF);
|
|
|
|
free(pcs->pci_xfer_dmap, M_DEVBUF);
|
|
|
|
isp->isp_xflist = NULL;
|
|
|
|
pcs->pci_xfer_dmap = NULL;
|
|
|
|
return (1);
|
1997-06-08 10:34:52 +04:00
|
|
|
}
|
1997-03-12 23:44:50 +03:00
|
|
|
|
|
|
|
static int
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_dmasetup(struct ispsoftc *isp, struct scsipi_xfer *xs, ispreq_t *rq,
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
u_int16_t *nxtip, u_int16_t optr)
|
1997-03-12 23:44:50 +03:00
|
|
|
{
|
2000-08-02 03:55:09 +04:00
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
|
2000-07-06 02:12:23 +04:00
|
|
|
bus_dmamap_t dmap;
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
u_int16_t starti = isp->isp_reqidx, nxti = *nxtip;
|
|
|
|
ispreq_t *qep;
|
1997-09-10 06:16:13 +04:00
|
|
|
int segcnt, seg, error, ovseg, seglim, drq;
|
1997-03-12 23:44:50 +03:00
|
|
|
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, starti);
|
2000-08-02 03:55:09 +04:00
|
|
|
dmap = pcs->pci_xfer_dmap[isp_handle_index(rq->req_handle)];
|
1997-03-12 23:44:50 +03:00
|
|
|
if (xs->datalen == 0) {
|
|
|
|
rq->req_seg_count = 1;
|
1998-07-31 06:08:16 +04:00
|
|
|
goto mbxsync;
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
1999-10-01 03:04:39 +04:00
|
|
|
if (xs->xs_control & XS_CTL_DATA_IN) {
|
1997-09-10 06:16:13 +04:00
|
|
|
drq = REQFLAG_DATA_IN;
|
1997-03-12 23:44:50 +03:00
|
|
|
} else {
|
1997-09-10 06:16:13 +04:00
|
|
|
drq = REQFLAG_DATA_OUT;
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
|
1999-07-06 00:28:11 +04:00
|
|
|
if (IS_FC(isp)) {
|
1997-08-16 04:28:10 +04:00
|
|
|
seglim = ISP_RQDSEG_T2;
|
|
|
|
((ispreqt2_t *)rq)->req_totalcnt = xs->datalen;
|
1997-09-10 06:16:13 +04:00
|
|
|
((ispreqt2_t *)rq)->req_flags |= drq;
|
1997-08-16 04:28:10 +04:00
|
|
|
} else {
|
1997-09-10 06:16:13 +04:00
|
|
|
rq->req_flags |= drq;
|
2000-02-19 04:54:42 +03:00
|
|
|
if (XS_CDBLEN(xs) > 12) {
|
|
|
|
seglim = 0;
|
|
|
|
} else {
|
|
|
|
seglim = ISP_RQDSEG;
|
|
|
|
}
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
error = bus_dmamap_load(isp->isp_dmatag, dmap, xs->data, xs->datalen,
|
2001-03-08 02:07:12 +03:00
|
|
|
NULL, ((xs->xs_control & XS_CTL_NOSLEEP) ?
|
2001-07-19 20:36:14 +04:00
|
|
|
BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
|
|
|
|
((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
|
1998-07-15 23:53:57 +04:00
|
|
|
if (error) {
|
2003-05-03 22:10:37 +04:00
|
|
|
isp_prt(isp, ISP_LOGWARN, "unable to load DMA (%d)", error);
|
2002-05-17 23:05:08 +04:00
|
|
|
XS_SETERR(xs, HBA_BOTCH);
|
2002-05-12 20:51:06 +04:00
|
|
|
if (error == EAGAIN || error == ENOMEM)
|
|
|
|
return (CMD_EAGAIN);
|
2002-05-17 23:05:08 +04:00
|
|
|
else
|
|
|
|
return (CMD_COMPLETE);
|
1998-07-15 23:53:57 +04:00
|
|
|
}
|
1997-06-08 10:34:52 +04:00
|
|
|
|
|
|
|
segcnt = dmap->dm_nsegs;
|
|
|
|
|
2000-10-16 09:12:26 +04:00
|
|
|
isp_prt(isp, ISP_LOGDEBUG2, "%d byte %s %p in %d segs",
|
|
|
|
xs->datalen, (xs->xs_control & XS_CTL_DATA_IN)? "read to" :
|
|
|
|
"write from", xs->data, segcnt);
|
|
|
|
|
1997-06-08 10:34:52 +04:00
|
|
|
for (seg = 0, rq->req_seg_count = 0;
|
2000-08-14 10:58:45 +04:00
|
|
|
seglim && seg < segcnt && rq->req_seg_count < seglim;
|
1999-10-17 05:22:08 +04:00
|
|
|
seg++, rq->req_seg_count++) {
|
1999-07-06 00:28:11 +04:00
|
|
|
if (IS_FC(isp)) {
|
1997-08-16 04:28:10 +04:00
|
|
|
ispreqt2_t *rq2 = (ispreqt2_t *)rq;
|
|
|
|
rq2->req_dataseg[rq2->req_seg_count].ds_count =
|
|
|
|
dmap->dm_segs[seg].ds_len;
|
|
|
|
rq2->req_dataseg[rq2->req_seg_count].ds_base =
|
|
|
|
dmap->dm_segs[seg].ds_addr;
|
|
|
|
} else {
|
|
|
|
rq->req_dataseg[rq->req_seg_count].ds_count =
|
|
|
|
dmap->dm_segs[seg].ds_len;
|
|
|
|
rq->req_dataseg[rq->req_seg_count].ds_base =
|
|
|
|
dmap->dm_segs[seg].ds_addr;
|
|
|
|
}
|
2000-12-29 01:23:41 +03:00
|
|
|
isp_prt(isp, ISP_LOGDEBUG2, "seg0.[%d]={0x%lx,%lu}",
|
|
|
|
rq->req_seg_count, (long) dmap->dm_segs[seg].ds_addr,
|
|
|
|
(unsigned long) dmap->dm_segs[seg].ds_len);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
if (seg == segcnt) {
|
1998-07-31 06:08:16 +04:00
|
|
|
goto dmasync;
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexibility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
}
|
1997-06-08 10:34:52 +04:00
|
|
|
|
1997-03-12 23:44:50 +03:00
|
|
|
do {
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexibility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
u_int16_t onxti;
|
|
|
|
ispcontreq_t *crq, *cqe, local;
|
|
|
|
|
|
|
|
crq = &local;
|
|
|
|
|
|
|
|
cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
|
|
|
|
onxti = nxti;
|
|
|
|
nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
|
|
|
|
if (nxti == optr) {
|
2002-08-13 01:33:39 +04:00
|
|
|
isp_prt(isp, /* ISP_LOGDEBUG0 */ ISP_LOGERR, "Request Queue Overflow++");
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
bus_dmamap_unload(isp->isp_dmatag, dmap);
|
1998-07-15 23:53:57 +04:00
|
|
|
XS_SETERR(xs, HBA_BOTCH);
|
2000-07-06 02:12:23 +04:00
|
|
|
return (CMD_EAGAIN);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
rq->req_header.rqs_entry_count++;
|
2001-07-07 20:46:34 +04:00
|
|
|
memset((void *)crq, 0, sizeof (*crq));
|
1997-03-12 23:44:50 +03:00
|
|
|
crq->req_header.rqs_entry_count = 1;
|
|
|
|
crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
|
1997-06-08 10:34:52 +04:00
|
|
|
|
|
|
|
for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
|
|
|
|
rq->req_seg_count++, seg++, ovseg++) {
|
|
|
|
crq->req_dataseg[ovseg].ds_count =
|
|
|
|
dmap->dm_segs[seg].ds_len;
|
|
|
|
crq->req_dataseg[ovseg].ds_base =
|
|
|
|
dmap->dm_segs[seg].ds_addr;
|
2000-12-29 01:23:41 +03:00
|
|
|
isp_prt(isp, ISP_LOGDEBUG2, "seg%d.[%d]={0x%lx,%lu}",
|
2000-10-16 09:12:26 +04:00
|
|
|
rq->req_header.rqs_entry_count - 1,
|
2000-12-29 01:23:41 +03:00
|
|
|
rq->req_seg_count, (long)dmap->dm_segs[seg].ds_addr,
|
|
|
|
(unsigned long) dmap->dm_segs[seg].ds_len);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
isp_put_cont_req(isp, crq, cqe);
|
|
|
|
MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
|
1997-06-08 10:34:52 +04:00
|
|
|
} while (seg < segcnt);
|
|
|
|
|
2000-08-14 10:58:45 +04:00
|
|
|
|
1998-07-31 06:08:16 +04:00
|
|
|
dmasync:
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
|
1999-10-01 03:04:39 +04:00
|
|
|
(xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
|
1998-09-18 03:10:20 +04:00
|
|
|
BUS_DMASYNC_PREWRITE);
|
1998-07-31 06:08:16 +04:00
|
|
|
|
|
|
|
mbxsync:
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
switch (rq->req_header.rqs_entry_type) {
|
|
|
|
case RQSTYPE_REQUEST:
|
|
|
|
isp_put_request(isp, rq, qep);
|
|
|
|
break;
|
|
|
|
case RQSTYPE_CMDONLY:
|
|
|
|
isp_put_extended_request(isp, (ispextreq_t *)rq,
|
|
|
|
(ispextreq_t *)qep);
|
|
|
|
break;
|
|
|
|
case RQSTYPE_T2RQS:
|
|
|
|
isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
*nxtip = nxti;
|
1998-09-18 03:10:20 +04:00
|
|
|
return (CMD_QUEUED);
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
|
|
|
|
1998-07-31 06:08:16 +04:00
|
|
|
static int
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_intr(void *arg)
|
1998-07-31 06:08:16 +04:00
|
|
|
{
|
2001-09-01 11:12:23 +04:00
|
|
|
u_int16_t isr, sema, mbox;
|
|
|
|
struct ispsoftc *isp = arg;
|
|
|
|
|
|
|
|
isp->isp_intcnt++;
|
|
|
|
if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
|
2005-02-27 03:26:58 +03:00
|
|
|
isp->isp_intbogus++;
|
2001-09-01 11:12:23 +04:00
|
|
|
return (0);
|
|
|
|
} else {
|
|
|
|
isp->isp_osinfo.onintstack = 1;
|
|
|
|
isp_intr(isp, isr, sema, mbox);
|
|
|
|
isp->isp_osinfo.onintstack = 0;
|
|
|
|
return (1);
|
|
|
|
}
|
1998-07-31 06:08:16 +04:00
|
|
|
}
|
|
|
|
|
1997-06-08 10:34:52 +04:00
|
|
|
static void
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
|
1997-06-08 10:34:52 +04:00
|
|
|
{
|
2000-08-02 03:55:09 +04:00
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
|
|
|
|
bus_dmamap_t dmap = pcs->pci_xfer_dmap[isp_handle_index(handle)];
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
bus_dmamap_sync(isp->isp_dmatag, dmap, 0, dmap->dm_mapsize,
|
1999-10-01 03:04:39 +04:00
|
|
|
xs->xs_control & XS_CTL_DATA_IN ?
|
1997-06-08 10:34:52 +04:00
|
|
|
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
|
Major restructuring for swizzling to the request queue and unswizzling from
the response queue. Instead of the ad hoc ISP_SWIZZLE_REQUEST, we now have
a complete set of inline functions in isp_inline.h. Each platform is
responsible for providing just one of a set of ISP_IOX_{GET,PUT}{8,16,32}
macros.
The reason this needs to be done is that we need to have a single set of
functions that will work correctly on multiple architectures for both little
and big endian machines. It also needs to work correctly in the case that
we have the request or response queues in memory that has to be treated
specially (e.g., have ddi_dma_sync called on it for Solaris after we update
it or before we read from it).
One thing that falls out of this is that we no longer build requests in the
request queue itself. Instead, we build the request locally (e.g., on the
stack) and then as part of the swizzling operation, copy it to the request
queue entry we've allocated. I thought long and hard about whether this was
too expensive a change to make as it in a lot of cases requires an extra
copy. On balance, the flexbility is worth it. With any luck, the entry that
we build locally stays in a processor writeback cache (after all, it's only
64 bytes) so that the cost of actually flushing it to the memory area that is
the shared queue with the PCI device is not all that expensive. We may examine
this again and try to get clever in the future to try and avoid copies.
Another change that falls out of this is that MEMORYBARRIER should be taken
a lot more seriously. The macro ISP_ADD_REQUEST does a MEMORYBARRIER on the
entry being added. But there had been many other places this had been missing.
It's now very important that it be done.
For NetBSD, it does a ddi_dmamap_sync as appropriate. This gets us out of
the explicit ddi_dmamap_sync on the whole response queue that we did for SBus
cards at each interrupt.
Set things up so that platforms that cannot have an SBus don't get a lot of
the SBus code checks (dead coded out).
Additional changes:
Fix a longstanding buglet of sorts. When we get an entry via isp_getrqentry,
the iptr value that gets returned is the value we intend to eventually plug
into the ISP registers as the entry *one past* the last one we've written-
*not* the current entry we're updating. All along we've been calling sync
functions on the wrong index value. Argh. The 'fix' here is to rename all
'iptr' variables as 'nxti' to remember that this is the 'next' pointer-
not the current pointer.
Devote a single bit to mboxbsy- and set aside bits for output mbox registers
that we need to pick up- we can have at least one command which does not
have any defined output registers (MBOX_EXECUTE_FIRMWARE).
Explicitly decode GetAllNext SNS Response back *as* a GetAllNext response.
Otherwise, we won't unswizzle it correctly.
Nuke some additional __P macros.
2001-12-14 03:13:44 +03:00
|
|
|
bus_dmamap_unload(isp->isp_dmatag, dmap);
|
1997-06-08 10:34:52 +04:00
|
|
|
}
|
|
|
|
|
1997-03-12 23:44:50 +03:00
|
|
|
static void
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_reset1(struct ispsoftc *isp)
|
1997-03-12 23:44:50 +03:00
|
|
|
{
|
|
|
|
/* Make sure the BIOS is disabled */
|
|
|
|
isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
|
2002-02-22 01:32:40 +03:00
|
|
|
if (isp->isp_osinfo.no_mbox_ints == 0) {
|
|
|
|
ENABLE_INTS(isp);
|
|
|
|
}
|
|
|
|
|
1997-03-12 23:44:50 +03:00
|
|
|
}
|
1997-08-16 04:28:10 +04:00
|
|
|
|
|
|
|
static void
|
2001-03-14 08:47:56 +03:00
|
|
|
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
|
1997-08-16 04:28:10 +04:00
|
|
|
{
|
2000-08-02 03:55:09 +04:00
|
|
|
struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
|
|
|
|
if (msg)
|
|
|
|
printf("%s: %s\n", isp->isp_name, msg);
|
|
|
|
if (IS_SCSI(isp))
|
|
|
|
printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
|
|
|
|
else
|
|
|
|
printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
|
|
|
|
printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
|
|
|
|
ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
|
|
|
|
printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
|
|
|
|
|
|
|
|
|
|
|
|
if (IS_SCSI(isp)) {
|
|
|
|
ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
|
|
|
|
printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
|
|
|
|
ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
|
|
|
|
ISP_READ(isp, CDMA_FIFO_STS));
|
|
|
|
printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
|
|
|
|
ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
|
|
|
|
ISP_READ(isp, DDMA_FIFO_STS));
|
|
|
|
printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
|
|
|
|
ISP_READ(isp, SXP_INTERRUPT),
|
|
|
|
ISP_READ(isp, SXP_GROSS_ERR),
|
|
|
|
ISP_READ(isp, SXP_PINS_CTRL));
|
|
|
|
ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
|
|
|
|
}
|
|
|
|
printf(" mbox regs: %x %x %x %x %x\n",
|
|
|
|
ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
|
|
|
|
ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
|
|
|
|
ISP_READ(isp, OUTMAILBOX4));
|
|
|
|
printf(" PCI Status Command/Status=%x\n",
|
|
|
|
pci_conf_read(pcs->pci_pc, pcs->pci_tag, PCI_COMMAND_STATUS_REG));
|
1997-08-16 04:28:10 +04:00
|
|
|
}
|