NetBSD/sys/dev/raidframe/rf_driver.c

899 lines
25 KiB
C
Raw Normal View History

2007-12-05 11:39:46 +03:00
/* $NetBSD: rf_driver.c,v 1.115 2007/12/05 08:39:46 ad Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Greg Oster
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1995 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
* Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
/******************************************************************************
*
* rf_driver.c -- main setup, teardown, and access routines for the RAID driver
*
* all routines are prefixed with rf_ (raidframe), to avoid conficts.
*
******************************************************************************/
2001-11-13 10:11:12 +03:00
#include <sys/cdefs.h>
2007-12-05 11:39:46 +03:00
__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.115 2007/12/05 08:39:46 ad Exp $");
#include "opt_raid_diagnostic.h"
2001-11-13 10:11:12 +03:00
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include "rf_archs.h"
#include "rf_threadstuff.h"
#include <sys/errno.h>
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"
#include <sys/buf.h>
#ifndef RF_ACCESS_DEBUG
#define RF_ACCESS_DEBUG 0
#endif
/* rad == RF_RaidAccessDesc_t */
/* Protects the per-array outstanding-access counts and the shutdown flag. */
RF_DECLARE_MUTEX(rf_rad_lock)
/* Low/high water marks for the RF_RaidAccessDesc_t pool (rf_pools.rad). */
#define RF_MAX_FREE_RAD 128
#define RF_MIN_FREE_RAD 32
/* debug variables */
char rf_panicbuf[2048]; /* a buffer to hold an error msg when we panic */
/* main configuration routines */
static int raidframe_booted = 0; /* set once by rf_BootRaidframe() */
static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);
RF_DECLARE_MUTEX(rf_printf_mutex) /* debug only: avoids interleaved
 * printfs by different stripes */
/* Wake up / sleep on the array's "accesses_suspended" quiescence point. */
#define SIGNAL_QUIESCENT_COND(_raid_) wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
"raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))
static int configureCount = 0; /* number of active configurations */
static int isconfigged = 0; /* is basic raidframe (non per-array)
 * stuff configged */
RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex) /* used to lock the configuration
 * stuff */
static RF_ShutdownList_t *globalShutdown; /* non array-specific
 * stuff */
/* NOTE(review): duplicate of the prototype above — harmless, but one copy
 * could be dropped. */
static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);
static int rf_AllocEmergBuffers(RF_Raid_t *);
static void rf_FreeEmergBuffers(RF_Raid_t *);
/* called at system boot time */
2005-02-27 03:26:58 +03:00
int
rf_BootRaidframe()
{
if (raidframe_booted)
return (EBUSY);
raidframe_booted = 1;
2007-12-05 11:39:46 +03:00
mutex_init(&configureMutex, MUTEX_DEFAULT, IPL_NONE);
configureCount = 0;
isconfigged = 0;
globalShutdown = NULL;
return (0);
}
/*
* Called whenever an array is shutdown
*/
2005-02-27 03:26:58 +03:00
/*
 * rf_UnconfigureArray -- bookkeeping done each time an array goes away.
 *
 * Drops one reference from configureCount; when the last configured
 * array disappears, tears down all the global (non per-array) state
 * via the global shutdown list.
 */
static void
rf_UnconfigureArray(void)
{

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	configureCount--;
	if (configureCount == 0) {
		/* Last array is gone: undo the global configuration. */
		isconfigged = 0;
		rf_ShutdownList(&globalShutdown);
		/*
		 * We must wait until now, because the AllocList module
		 * uses the DebugMem module.
		 */
#if RF_DEBUG_MEM
		if (rf_memDebug)
			rf_print_unfreed();
#endif
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
}
/*
* Called to shut down an array.
*/
2005-02-27 03:26:58 +03:00
/*
 * rf_Shutdown -- shut down a single configured array.
 *
 * Waits for all outstanding accesses and any in-progress parity
 * rewrite to finish, then marks the array invalid and releases its
 * per-array resources.  Returns EINVAL if the array was never valid,
 * EBUSY if a shutdown is already in progress, 0 on success.
 */
int
rf_Shutdown(RF_Raid_t *raidPtr)
{

	if (!raidPtr->valid) {
		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver. Aborting shutdown\n");
		return (EINVAL);
	}
	/*
	 * wait for outstanding IOs to land
	 * As described in rf_raid.h, we use the rad_freelist lock
	 * to protect the per-array info about outstanding descs
	 * since we need to do freelist locking anyway, and this
	 * cuts down on the amount of serialization we've got going
	 * on.
	 */
	RF_LOCK_MUTEX(rf_rad_lock);
	if (raidPtr->waitShutdown) {
		/* Somebody else is already shutting this array down. */
		RF_UNLOCK_MUTEX(rf_rad_lock);
		return (EBUSY);
	}
	raidPtr->waitShutdown = 1;
	/* Block new accesses and drain the ones in flight. */
	while (raidPtr->nAccOutstanding)
		RF_WAIT_COND(raidPtr->outstandingCond, rf_rad_lock);
	RF_UNLOCK_MUTEX(rf_rad_lock);

	/* Wait for any parity re-writes to stop... */
	while (raidPtr->parity_rewrite_in_progress) {
		printf("Waiting for parity re-write to exit...\n");
		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
		       "rfprwshutdown", 0);
	}

	raidPtr->valid = 0;

	/* Push final component labels out, then release everything. */
	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);
	rf_UnconfigureVnodes(raidPtr);
	rf_FreeEmergBuffers(raidPtr);
	rf_ShutdownList(&raidPtr->shutdownList);
	rf_UnconfigureArray();

	return (0);
}
/*
 * DO_INIT_CONFIGURE(f): run one global (non per-array) configuration step.
 * On failure: log the error, tear down the global shutdown list, undo the
 * configureCount bump, release configureMutex, and return the error code.
 * Assumes "rc" is in scope and configureMutex is held by the caller.
 */
#define DO_INIT_CONFIGURE(f) { \
rc = f (&globalShutdown); \
if (rc) { \
RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
rf_ShutdownList(&globalShutdown); \
configureCount--; \
RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
return(rc); \
} \
}
/*
 * DO_RAID_FAIL(): release everything a partially-configured array holds
 * (vnodes, emergency buffers, per-array shutdown list, global refcount).
 * Assumes "raidPtr" is in scope.
 */
#define DO_RAID_FAIL() { \
rf_UnconfigureVnodes(raidPtr); \
rf_FreeEmergBuffers(raidPtr); \
rf_ShutdownList(&raidPtr->shutdownList); \
rf_UnconfigureArray(); \
}
/*
 * DO_RAID_INIT_CONFIGURE(f): run one per-array configuration step; on
 * failure log, clean up via DO_RAID_FAIL(), and return the error code.
 * Assumes "rc", "raidPtr" and "cfgPtr" are in scope.
 */
#define DO_RAID_INIT_CONFIGURE(f) { \
rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
if (rc) { \
RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
DO_RAID_FAIL(); \
return(rc); \
} \
}
/* DO_RAID_MUTEX(_m_): initialize a per-array mutex. */
#define DO_RAID_MUTEX(_m_) { \
rf_mutex_init((_m_)); \
}
2005-02-27 03:26:58 +03:00
/*
 * rf_Configure -- configure a single RAIDframe array.
 *
 * On the first call (isconfigged == 0) this also brings up all the
 * global, non per-array RAIDframe subsystems.  Then the per-array
 * state is built: allocation list, disk/spare configuration (either
 * from the autoconfig data in "ac" or from "cfgPtr"), disk queues,
 * layout, and the emergency I/O buffers.
 *
 * Returns 0 on success or an errno-style error code; on failure all
 * partially-configured per-array state is released via DO_RAID_FAIL().
 */
int
rf_Configure(RF_Raid_t *raidPtr, RF_Config_t *cfgPtr, RF_AutoConfig_t *ac)
{
	RF_RowCol_t col;
	int rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	configureCount++;
	if (isconfigged == 0) {
		rf_mutex_init(&rf_printf_mutex);

		/* initialize globals */
		DO_INIT_CONFIGURE(rf_ConfigureAllocList);

		/*
		 * Yes, this does make debugging general to the whole
		 * system instead of being array specific.  Bummer, drag.
		 */
		rf_ConfigureDebug(cfgPtr);
		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
#if RF_ACC_TRACE > 0
		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
#endif
		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
		DO_INIT_CONFIGURE(rf_ConfigureCallback);
		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
		DO_INIT_CONFIGURE(rf_ConfigurePSStatus);
		isconfigged = 1;
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);

	DO_RAID_MUTEX(&raidPtr->mutex);
	/* set up the cleanup list.  Do this after ConfigureDebug so that
	 * value of memDebug will be set */
	rf_MakeAllocList(raidPtr->cleanupList);
	if (raidPtr->cleanupList == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	rf_ShutdownCreate(&raidPtr->shutdownList,
	    (void (*) (void *)) rf_FreeAllocList,
	    raidPtr->cleanupList);

	raidPtr->numCol = cfgPtr->numCol;
	raidPtr->numSpare = cfgPtr->numSpare;

	raidPtr->status = rf_rs_optimal;
	raidPtr->reconControl = NULL;

	TAILQ_INIT(&(raidPtr->iodone));
	simple_lock_init(&(raidPtr->iodone_lock));

	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

	raidPtr->outstandingCond = 0;
	raidPtr->nAccOutstanding = 0;
	raidPtr->waitShutdown = 0;

	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);

	raidPtr->waitForReconCond = 0;

	if (ac != NULL) {
		/* We have an AutoConfig structure..  Don't do the
		   normal disk configuration... call the auto config
		   stuff */
		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
	} else {
		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
	}
	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
	 * no. is set */
	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

	/* Initialize per-RAID PSS bits */
	rf_InitPSStatus(raidPtr);

#if RF_INCLUDE_CHAINDECLUSTER > 0
	for (col = 0; col < raidPtr->numCol; col++) {
		/*
		 * XXX better distribution
		 */
		raidPtr->hist_diskreq[col] = 0;
	}
#endif
	raidPtr->numNewFailures = 0;
	raidPtr->copyback_in_progress = 0;
	raidPtr->parity_rewrite_in_progress = 0;
	raidPtr->adding_hot_spare = 0;
	raidPtr->recon_in_progress = 0;
	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

	/* autoconfigure and root_partition will actually get filled in
	   after the config is done */
	raidPtr->autoconfigure = 0;
	raidPtr->root_partition = 0;
	raidPtr->last_unit = raidPtr->raidid;
	raidPtr->config_order = 0;

	if (rf_keepAccTotals) {
		raidPtr->keep_acc_totals = 1;
	}

	/* Allocate a bunch of buffers to be used in low-memory conditions */
	raidPtr->iobuf = NULL;

	rc = rf_AllocEmergBuffers(raidPtr);
	if (rc) {
		printf("raid%d: Unable to allocate emergency buffers.\n",
		       raidPtr->raidid);
		DO_RAID_FAIL();
		return (rc);
	}

	raidPtr->valid = 1;

	printf("raid%d: %s\n", raidPtr->raidid,
	       raidPtr->Layout.map->configName);
	printf("raid%d: Components:", raidPtr->raidid);

	for (col = 0; col < raidPtr->numCol; col++) {
		printf(" %s", raidPtr->Disks[col].devname);
		if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
			printf("[**FAILED**]");
		}
	}
	printf("\n");
	printf("raid%d: Total Sectors: %lu (%lu MB)\n",
	       raidPtr->raidid,
	       (unsigned long) raidPtr->totalSectors,
	       (unsigned long) (raidPtr->totalSectors / 1024 *
				(1 << raidPtr->logBytesPerSector) / 1024));

	return (0);
}
/*
Routines to allocate and free the "emergency buffers" for a given
RAID set. These emergency buffers will be used when the kernel runs
2005-02-27 03:26:58 +03:00
out of kernel memory.
*/
2005-02-27 03:26:58 +03:00
/*
 * rf_AllocEmergBuffers -- allocate the "emergency buffers" for an array.
 *
 * These buffers are used by the write paths when the kernel is unable
 * to provide memory on demand.  Two lists are built and hung off the
 * array: raidPtr->iobuf holds per-stripe-unit sized buffers, and
 * raidPtr->stripebuf holds full-stripe sized buffers.
 *
 * Returns 0 on success, 1 if any allocation fails.  On failure the
 * caller is expected to clean up via rf_FreeEmergBuffers() (which
 * DO_RAID_FAIL() does), so partially-built lists are not leaked.
 */
static int
rf_AllocEmergBuffers(RF_Raid_t *raidPtr)
{
	void *tmpbuf;
	RF_VoidPointerListElem_t *vple;
	int i;

	/* XXX next line needs tuning... */
	raidPtr->numEmergencyBuffers = 10 * raidPtr->numCol;
#if DEBUG
	printf("raid%d: allocating %d buffers of %d bytes.\n",
	       raidPtr->raidid,
	       raidPtr->numEmergencyBuffers,
	       (int)(raidPtr->Layout.sectorsPerStripeUnit <<
		     raidPtr->logBytesPerSector));
#endif
	/* One stripe-unit's worth of space per emergency I/O buffer. */
	for (i = 0; i < raidPtr->numEmergencyBuffers; i++) {
		tmpbuf = malloc(raidPtr->Layout.sectorsPerStripeUnit <<
				raidPtr->logBytesPerSector,
				M_RAIDFRAME, M_WAITOK);
		if (tmpbuf) {
			vple = rf_AllocVPListElem();
			vple->p = tmpbuf;
			vple->next = raidPtr->iobuf;
			raidPtr->iobuf = vple;
			raidPtr->iobuf_count++;
		} else {
			printf("raid%d: failed to allocate emergency buffer!\n",
			       raidPtr->raidid);
			return 1;
		}
	}

	/* XXX next line needs tuning too... */
	raidPtr->numEmergencyStripeBuffers = 10;
	/* One full stripe's worth of space per emergency stripe buffer. */
	for (i = 0; i < raidPtr->numEmergencyStripeBuffers; i++) {
		tmpbuf = malloc(raidPtr->numCol *
				(raidPtr->Layout.sectorsPerStripeUnit <<
				 raidPtr->logBytesPerSector),
				M_RAIDFRAME, M_WAITOK);
		if (tmpbuf) {
			vple = rf_AllocVPListElem();
			vple->p = tmpbuf;
			vple->next = raidPtr->stripebuf;
			raidPtr->stripebuf = vple;
			raidPtr->stripebuf_count++;
		} else {
			printf("raid%d: failed to allocate emergency stripe buffer!\n",
			       raidPtr->raidid);
			return 1;
		}
	}

	return (0);
}
These changes complete the effective removal of malloc() from all write paths within RAIDframe. They also resolve the "panics with RAID 5 sets with more than 3 components" issue which was present (briefly) in the commits which were previously supposed to address the malloc() issue. With this new code the 5-component RAID 5 set panics are now gone. It is also now also possible to swap to RAID 5. The changes made are: 1) Introduce rf_AllocStripeBuffer() and rf_FreeStripeBuffer() to allocate/free one stripe's worth of space. rf_AllocStripeBuffer() is used in rf_MapUnaccessedPortionOfStripe() where it is not sufficient to allocate memory using just rf_AllocBuffer(). rf_FreeStripeBuffer() is called from rf_FreeRaidAccDesc(), well after the DAG is finished. 2) Add a set of emergency "stripe buffers" to struct RF_Raid_s. Arrange for their initialization in rf_Configure(). In low-memory situations these buffers will be returned by rf_AllocStripeBuffer() and re-populated by rf_FreeStripeBuffer(). 3) Move RF_VoidPointerListElem_t *iobufs from the dagHeader into into struct RF_RaidAccessDesc_s. This is more consistent with the original code, and will not result in items being freed "too early". 4) Add a RF_RaidAccessDesc_t *desc to RF_DagHeader_s so that we have a way to find desc->iobufs. 5) Arrange for desc in the DagHeader to be initialized in InitHdrNode(). 6) Don't cleanup iobufs in rf_FreeDAG() -- the freeing is now delayed until rf_FreeRaidAccDesc() (which is how the original code handled the allocList, and for which there seem to be some subtle, undocumented assumptions). 7) Rename rf_AllocBuffer2() to be rf_AllocBuffer() and remove the former rf_AllocBuffer(). Fix all callers of rf_AllocBuffer(). (This was how it was *supposed* to be after the last time these changes were made, before they were backed out). 8) Remove RF_IOBufHeader and all references to it. 9) Remove desc->cleanupList and all references to it. Fixes PR#20191
2004-04-10 03:10:16 +04:00
/*
 * rf_FreeEmergBuffers -- release the emergency buffers of an array.
 *
 * Walks and frees both the per-stripe-unit (iobuf) and full-stripe
 * (stripebuf) lists built by rf_AllocEmergBuffers().  Safe to call on
 * partially-built lists (list heads are simply NULL-terminated).
 */
static void
rf_FreeEmergBuffers(RF_Raid_t *raidPtr)
{
	RF_VoidPointerListElem_t *tmp;

	/* Free the emergency IO buffers */
	while (raidPtr->iobuf != NULL) {
		tmp = raidPtr->iobuf;
		raidPtr->iobuf = raidPtr->iobuf->next;
		free(tmp->p, M_RAIDFRAME);
		rf_FreeVPListElem(tmp);
	}

	/* Free the emergency stripe buffers */
	while (raidPtr->stripebuf != NULL) {
		tmp = raidPtr->stripebuf;
		raidPtr->stripebuf = raidPtr->stripebuf->next;
		free(tmp->p, M_RAIDFRAME);
		rf_FreeVPListElem(tmp);
	}
}
2005-02-27 03:26:58 +03:00
/*
 * rf_ShutdownRDFreeList -- shutdown-list hook that destroys the
 * RF_RaidAccessDesc_t pool.  The argument is unused.
 */
static void
rf_ShutdownRDFreeList(void *arg)
{

	pool_destroy(&rf_pools.rad);
}
2005-02-27 03:26:58 +03:00
/*
 * rf_ConfigureRDFreeList -- set up the pool of RF_RaidAccessDesc_t's
 * and the lock that guards the per-array outstanding-access counts,
 * and register the pool's teardown on the shutdown list.
 * Always returns 0.
 */
static int
rf_ConfigureRDFreeList(RF_ShutdownList_t **listp)
{

	simple_lock_init(&rf_rad_lock);
	rf_pool_init(&rf_pools.rad, sizeof(RF_RaidAccessDesc_t),
	    "rf_rad_pl", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
	rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
	return (0);
}
RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(RF_Raid_t *raidPtr, RF_IoType_t type,
RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
void *bufPtr, void *bp, RF_RaidAccessFlags_t flags,
const RF_AccessState_t *states)
{
RF_RaidAccessDesc_t *desc;
desc = pool_get(&rf_pools.rad, PR_WAITOK);
RF_LOCK_MUTEX(rf_rad_lock);
if (raidPtr->waitShutdown) {
/*
* Actually, we're shutting the array down. Free the desc
* and return NULL.
*/
RF_UNLOCK_MUTEX(rf_rad_lock);
pool_put(&rf_pools.rad, desc);
return (NULL);
}
raidPtr->nAccOutstanding++;
RF_UNLOCK_MUTEX(rf_rad_lock);
desc->raidPtr = (void *) raidPtr;
desc->type = type;
desc->raidAddress = raidAddress;
desc->numBlocks = numBlocks;
desc->bufPtr = bufPtr;
desc->bp = bp;
desc->flags = flags;
desc->states = states;
desc->state = 0;
These changes complete the effective removal of malloc() from all write paths within RAIDframe. They also resolve the "panics with RAID 5 sets with more than 3 components" issue which was present (briefly) in the commits which were previously supposed to address the malloc() issue. With this new code the 5-component RAID 5 set panics are now gone. It is also now also possible to swap to RAID 5. The changes made are: 1) Introduce rf_AllocStripeBuffer() and rf_FreeStripeBuffer() to allocate/free one stripe's worth of space. rf_AllocStripeBuffer() is used in rf_MapUnaccessedPortionOfStripe() where it is not sufficient to allocate memory using just rf_AllocBuffer(). rf_FreeStripeBuffer() is called from rf_FreeRaidAccDesc(), well after the DAG is finished. 2) Add a set of emergency "stripe buffers" to struct RF_Raid_s. Arrange for their initialization in rf_Configure(). In low-memory situations these buffers will be returned by rf_AllocStripeBuffer() and re-populated by rf_FreeStripeBuffer(). 3) Move RF_VoidPointerListElem_t *iobufs from the dagHeader into into struct RF_RaidAccessDesc_s. This is more consistent with the original code, and will not result in items being freed "too early". 4) Add a RF_RaidAccessDesc_t *desc to RF_DagHeader_s so that we have a way to find desc->iobufs. 5) Arrange for desc in the DagHeader to be initialized in InitHdrNode(). 6) Don't cleanup iobufs in rf_FreeDAG() -- the freeing is now delayed until rf_FreeRaidAccDesc() (which is how the original code handled the allocList, and for which there seem to be some subtle, undocumented assumptions). 7) Rename rf_AllocBuffer2() to be rf_AllocBuffer() and remove the former rf_AllocBuffer(). Fix all callers of rf_AllocBuffer(). (This was how it was *supposed* to be after the last time these changes were made, before they were backed out). 8) Remove RF_IOBufHeader and all references to it. 9) Remove desc->cleanupList and all references to it. Fixes PR#20191
2004-04-10 03:10:16 +04:00
desc->dagList = NULL;
desc->status = 0;
desc->numRetries = 0;
#if RF_ACC_TRACE > 0
2001-07-18 10:45:33 +04:00
memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
#endif
desc->callbackFunc = NULL;
desc->callbackArg = NULL;
desc->next = NULL;
These changes complete the effective removal of malloc() from all write paths within RAIDframe. They also resolve the "panics with RAID 5 sets with more than 3 components" issue which was present (briefly) in the commits which were previously supposed to address the malloc() issue. With this new code the 5-component RAID 5 set panics are now gone. It is also now also possible to swap to RAID 5. The changes made are: 1) Introduce rf_AllocStripeBuffer() and rf_FreeStripeBuffer() to allocate/free one stripe's worth of space. rf_AllocStripeBuffer() is used in rf_MapUnaccessedPortionOfStripe() where it is not sufficient to allocate memory using just rf_AllocBuffer(). rf_FreeStripeBuffer() is called from rf_FreeRaidAccDesc(), well after the DAG is finished. 2) Add a set of emergency "stripe buffers" to struct RF_Raid_s. Arrange for their initialization in rf_Configure(). In low-memory situations these buffers will be returned by rf_AllocStripeBuffer() and re-populated by rf_FreeStripeBuffer(). 3) Move RF_VoidPointerListElem_t *iobufs from the dagHeader into into struct RF_RaidAccessDesc_s. This is more consistent with the original code, and will not result in items being freed "too early". 4) Add a RF_RaidAccessDesc_t *desc to RF_DagHeader_s so that we have a way to find desc->iobufs. 5) Arrange for desc in the DagHeader to be initialized in InitHdrNode(). 6) Don't cleanup iobufs in rf_FreeDAG() -- the freeing is now delayed until rf_FreeRaidAccDesc() (which is how the original code handled the allocList, and for which there seem to be some subtle, undocumented assumptions). 7) Rename rf_AllocBuffer2() to be rf_AllocBuffer() and remove the former rf_AllocBuffer(). Fix all callers of rf_AllocBuffer(). (This was how it was *supposed* to be after the last time these changes were made, before they were backed out). 8) Remove RF_IOBufHeader and all references to it. 9) Remove desc->cleanupList and all references to it. Fixes PR#20191
2004-04-10 03:10:16 +04:00
desc->iobufs = NULL;
desc->stripebufs = NULL;
return (desc);
}
2005-02-27 03:26:58 +03:00
/*
 * Release an access descriptor and everything still hanging off it:
 * the per-access DAG lists, any emergency I/O buffers, and any stripe
 * buffers.  Freeing of iobufs/stripebufs is deliberately delayed until
 * here (well after the DAGs have completed) rather than done in
 * rf_FreeDAG().  Finally the descriptor goes back to its pool and a
 * shutdown waiter, if any, is signalled.
 */
void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t *desc)
{
	RF_Raid_t *raidPtr = desc->raidPtr;
	RF_DagList_t *dl, *dl_next;
	RF_VoidPointerListElem_t *vple;

	RF_ASSERT(desc);

	/* Release every DAG list built for this access. */
	for (dl = desc->dagList; dl != NULL; dl = dl_next) {
		dl_next = dl->next;
		rf_FreeDAGList(dl);
	}

	/* Return the I/O buffers used by this access. */
	while ((vple = desc->iobufs) != NULL) {
		desc->iobufs = vple->next;
		rf_FreeIOBuffer(raidPtr, vple);
	}

	/* Return any full-stripe buffers used by this access. */
	while ((vple = desc->stripebufs) != NULL) {
		desc->stripebufs = vple->next;
		rf_FreeStripeBuffer(raidPtr, vple);
	}

	pool_put(&rf_pools.rad, desc);

	/* Account for completion; shutdown waits for this to hit zero. */
	RF_LOCK_MUTEX(rf_rad_lock);
	raidPtr->nAccOutstanding--;
	if (raidPtr->waitShutdown)
		RF_SIGNAL_COND(raidPtr->outstandingCond);
	RF_UNLOCK_MUTEX(rf_rad_lock);
}
/*********************************************************************
* Main routine for performing an access.
* Accesses are retried until a DAG can not be selected. This occurs
* when either the DAG library is incomplete or there are too many
* failures in a parity group.
*
* type should be read or write async_flag should be RF_TRUE or
* RF_FALSE bp_in is a buf pointer. void *to facilitate ignoring it
* outside the kernel
********************************************************************/
2005-02-27 03:26:58 +03:00
int
rf_DoAccess(RF_Raid_t * raidPtr, RF_IoType_t type, int async_flag,
    RF_RaidAddr_t raidAddress, RF_SectorCount_t numBlocks,
    void *bufPtr, struct buf *bp, RF_RaidAccessFlags_t flags)
{
	RF_RaidAccessDesc_t *desc;
	void *lbufPtr = bufPtr;

	/* Shift into the (possibly offset) raid address space. */
	raidAddress += rf_raidSectorOffset;

#if RF_ACCESS_DEBUG
	if (rf_accessDebug) {
		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
		    raidPtr->logBytesPerSector,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks));
		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
		    (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
		    (int) numBlocks,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks),
		    (long) bufPtr);
	}
#endif

	/* Build the descriptor that tracks this access to completion. */
	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);
	if (desc == NULL)
		return (ENOMEM);

#if RF_ACC_TRACE > 0
	RF_ETIMER_START(desc->tracerec.tot_timer);
#endif
	desc->async_flag = async_flag;

	/* Kick off the state machine for this access. */
	rf_ContinueRaidAccess(desc);

	return (0);
}
#if 0
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(RF_Raid_t *raidPtr, int col)
{
	/* Only meaningful for distributed-sparing layouts. */
	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		printf("Can't set reconfigured mode in dedicated-spare array\n");
		RF_PANIC();
	}

	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[col].status = rf_ds_dist_spared;
	raidPtr->status = rf_rs_reconfigured;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	/* install spare table only if declustering + distributed sparing
	 * architecture. */
	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
		rf_InstallSpareTable(raidPtr, col);
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	return (0);
}
#endif
2005-02-27 03:26:58 +03:00
/*
 * Mark component fcol of the array as failed, close it, and optionally
 * start reconstruction.  New requests are suspended for the duration of
 * the state change so no DAG is in flight when ci_vp is pulled away.
 */
int
rf_FailDisk(RF_Raid_t *raidPtr, int fcol, int initRecon)
{
	/* need to suspend IO's here -- if there are DAGs in flight
	   and we pull the rug out from under ci_vp, Bad Things
	   can happen.  */

	rf_SuspendNewRequestsAndWait(raidPtr);

	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->Disks[fcol].status != rf_ds_failed) {
		/* must be failing something that is valid, or else it's
		   already marked as failed (in which case we don't
		   want to mark it failed again!) */
		raidPtr->numFailures++;
		raidPtr->Disks[fcol].status = rf_ds_failed;
		raidPtr->status = rf_rs_degraded;
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);

	/* Close the component, so that it's not "locked" if someone
	   else want's to use it! */

	rf_close_component(raidPtr, raidPtr->raid_cinfo[fcol].ci_vp,
	    raidPtr->Disks[fcol].auto_configured);

	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->raid_cinfo[fcol].ci_vp = NULL;

	/* Need to mark the component as not being auto_configured
	   (in case it was previously). */

	raidPtr->Disks[fcol].auto_configured = 0;
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	/* now we can allow IO to continue -- we'll be suspending it
	   again in rf_ReconstructFailedDisk() if we have to.. */

	rf_ResumeNewRequests(raidPtr);

	if (initRecon)
		rf_ReconstructFailedDisk(raidPtr, fcol);
	return (0);
}
/* releases a thread that is waiting for the array to become quiesced.
* access_suspend_mutex should be locked upon calling this
*/
2005-02-27 03:26:58 +03:00
void
rf_SignalQuiescenceLock(RF_Raid_t *raidPtr)
{
#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("raid%d: Signalling quiescence lock\n",
		    raidPtr->raidid);
#endif
	/* Mark the array as released and wake the suspended thread,
	   if there is one. */
	raidPtr->access_suspend_release = 1;

	if (raidPtr->waiting_for_quiescence)
		SIGNAL_QUIESCENT_COND(raidPtr);
}
/* suspends all new requests to the array. No effect on accesses that are in flight. */
2005-02-27 03:26:58 +03:00
int
rf_SuspendNewRequestsAndWait(RF_Raid_t *raidPtr)
{
#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
#endif

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended++;
	/* Already quiescent if nothing is currently in flight. */
	raidPtr->waiting_for_quiescence =
	    (raidPtr->accs_in_flight == 0) ? 0 : 1;

	if (raidPtr->waiting_for_quiescence) {
		raidPtr->access_suspend_release = 0;
		/* Sleep until rf_SignalQuiescenceLock() releases us. */
		while (!raidPtr->access_suspend_release) {
#if RF_DEBUG_QUIESCE
			printf("raid%d: Suspending: Waiting for Quiescence\n",
			    raidPtr->raidid);
#endif
			WAIT_FOR_QUIESCENCE(raidPtr);
			raidPtr->waiting_for_quiescence = 0;
		}
	}
#if RF_DEBUG_QUIESCE
	printf("raid%d: Quiescence reached..\n", raidPtr->raidid);
#endif

	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
	return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
2005-02-27 03:26:58 +03:00
void
rf_ResumeNewRequests(RF_Raid_t *raidPtr)
{
	RF_CallbackDesc_t *cb, *cb_next;

#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("Resuming new reqs\n");
#endif

	/* Grab the wait list only when the last suspension is dropped. */
	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended--;
	cb = (raidPtr->accesses_suspended == 0) ?
	    raidPtr->quiesce_wait_list : NULL;
	raidPtr->quiesce_wait_list = NULL;
	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

	/* Run the callbacks outside the lock, freeing each as we go. */
	for (; cb != NULL; cb = cb_next) {
		cb_next = cb->next;
		(cb->callbackFunc) (cb->callbackArg);
		rf_FreeCallbackDesc(cb);
	}
}
/*****************************************************************************************
*
* debug routines
*
****************************************************************************************/
2005-02-27 03:26:58 +03:00
/*
 * Look up debug variable "name" in the rf_debugNames table and, if
 * found, set it to val; otherwise complain.
 */
static void
set_debug_option(char *name, long val)
{
	RF_DebugName_t *p;

	for (p = rf_debugNames; p->name != NULL; p++) {
		if (strcmp(p->name, name) == 0) {
			*(p->ptr) = val;
			printf("[Set debug variable %s to %ld]\n", name, val);
			return;
		}
	}
	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}
/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
2005-02-27 03:26:58 +03:00
/*
 * Parse the "name value" debug-variable strings out of the config and
 * install each one via set_debug_option().  Values may be decimal or
 * 0x-prefixed hex.
 */
/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(RF_Config_t *cfgPtr)
{
	char *val_p, *name_p, *white_p;
	long val;
	int i;

	rf_ResetDebugOptions();
	/*
	 * Check the index bound BEFORE dereferencing debugVars[i]: the
	 * previous condition order (debugVars[i][0] && i < RF_MAXDBGV)
	 * read one entry past the end of the array whenever all
	 * RF_MAXDBGV slots were populated.
	 */
	for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
		white_p = rf_find_white(name_p);	/* skip to start of 2nd
							 * word */
		val_p = rf_find_non_white(white_p);
		if (*val_p == '0' && *(val_p + 1) == 'x')
			val = rf_htoi(val_p + 2);
		else
			val = rf_atoi(val_p);
		*white_p = '\0';	/* NUL-terminate the name */
		set_debug_option(name_p, val);
	}
}
void
rf_print_panic_message(int line, const char *file)
{
2004-04-22 04:17:10 +04:00
snprintf(rf_panicbuf, sizeof(rf_panicbuf),
"raidframe error at line %d file %s", line, file);
}
#ifdef RAID_DIAGNOSTIC
/* Record the failed assertion in rf_panicbuf for the ensuing panic(). */
void
rf_print_assert_panic_message(int line,	const char *file, const char *condition)
{
	snprintf(rf_panicbuf, sizeof(rf_panicbuf),
	    "raidframe error at line %d file %s (failed asserting %s)\n",
	    line, file, condition);
}
#endif
/* Report a failed mutex initialization; rc is the error code returned. */
void
rf_print_unable_to_init_mutex(const char *file, int line, int rc)
{
	RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		     file, line, rc);
}
/* Report a failure to register a shutdown hook; rc is the error code. */
void
rf_print_unable_to_add_shutdown(const char *file, int line, int rc)
{
	RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		     file, line, rc);
}