/* $NetBSD: rf_netbsdkintf.c,v 1.30 1999/11/17 01:16:37 oster Exp $ */
/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster; Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: cd.c 1.6 90/11/28$
 *
 *      @(#)cd.c        8.2 (Berkeley) 11/16/93
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Mark Holland, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/***********************************************************
 *
 * rf_kintf.c -- the kernel interface routines for RAIDframe
 *
 ***********************************************************/

#include <sys/errno.h>
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/disk.h>
#include <sys/device.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <machine/types.h>
#include <sys/disklabel.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/buf.h>
#include <sys/user.h>

#include "raid.h"
#include "rf_raid.h"
#include "rf_raidframe.h"
#include "rf_dag.h"
#include "rf_dagflags.h"
#include "rf_diskqueue.h"
#include "rf_acctrace.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_debugMem.h"
#include "rf_kintf.h"
#include "rf_options.h"
#include "rf_driver.h"
#include "rf_parityscan.h"
#include "rf_debugprint.h"
#include "rf_threadstuff.h"

int     rf_kdebug_level = 0;

#ifdef DEBUG
#define db0_printf(a) printf a
#define db_printf(a)  if (rf_kdebug_level > 0) printf a
#define db1_printf(a) if (rf_kdebug_level > 0) printf a
#define db2_printf(a) if (rf_kdebug_level > 1) printf a
#define db3_printf(a) if (rf_kdebug_level > 2) printf a
#define db4_printf(a) if (rf_kdebug_level > 3) printf a
#define db5_printf(a) if (rf_kdebug_level > 4) printf a
#else				/* DEBUG */
#define db0_printf(a) printf a
#define db1_printf(a) { }
#define db2_printf(a) { }
#define db3_printf(a) { }
#define db4_printf(a) { }
#define db5_printf(a) { }
#endif				/* DEBUG */

static RF_Raid_t **raidPtrs;	/* global raid device descriptors */

RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex)

static RF_SparetWait_t *rf_sparet_wait_queue;	/* requests to install a
						 * spare table */
static RF_SparetWait_t *rf_sparet_resp_queue;	/* responses from
						 * installation process */

static struct rf_recon_req *recon_queue = NULL;	/* used to communicate
						 * reconstruction
						 * requests */

decl_simple_lock_data(, recon_queue_mutex)
#define LOCK_RECON_Q_MUTEX()   simple_lock(&recon_queue_mutex)
#define UNLOCK_RECON_Q_MUTEX() simple_unlock(&recon_queue_mutex)

/* prototypes */
static void KernelWakeupFunc(struct buf * bp);
static void InitBP(struct buf * bp, struct vnode *, unsigned rw_flag,
    dev_t dev, RF_SectorNum_t startSect,
    RF_SectorCount_t numSect, caddr_t buf,
    void (*cbFunc) (struct buf *), void *cbArg,
    int logBytesPerSector, struct proc * b_proc);

#define Dprintf0(s)         if (rf_queueDebug) \
	rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf1(s,a)       if (rf_queueDebug) \
	rf_debug_printf(s,a,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf2(s,a,b)     if (rf_queueDebug) \
	rf_debug_printf(s,a,b,NULL,NULL,NULL,NULL,NULL,NULL)
#define Dprintf3(s,a,b,c)   if (rf_queueDebug) \
	rf_debug_printf(s,a,b,c,NULL,NULL,NULL,NULL,NULL)

int     raidmarkclean(dev_t dev, struct vnode *b_vp, int);
int     raidmarkdirty(dev_t dev, struct vnode *b_vp, int);

void    raidattach __P((int));
int     raidsize __P((dev_t));

void    rf_DiskIOComplete(RF_DiskQueue_t *, RF_DiskQueueData_t *, int);
void    rf_CopybackReconstructedData(RF_Raid_t * raidPtr);
static int raidinit __P((dev_t, RF_Raid_t *, int));

int     raidopen __P((dev_t, int, int, struct proc *));
int     raidclose __P((dev_t, int, int, struct proc *));
int     raidioctl __P((dev_t, u_long, caddr_t, int, struct proc *));
int     raidwrite __P((dev_t, struct uio *, int));
int     raidread __P((dev_t, struct uio *, int));
void    raidstrategy __P((struct buf *));
int     raiddump __P((dev_t, daddr_t, caddr_t, size_t));

int     raidwrite_component_label(dev_t, struct vnode *, RF_ComponentLabel_t *);
int     raidread_component_label(dev_t, struct vnode *, RF_ComponentLabel_t *);
void    rf_update_component_labels( RF_Raid_t *);

/*
 * Pilfered from ccd.c
 */

struct raidbuf {
	struct buf rf_buf;	/* new I/O buf.  MUST BE FIRST!!! */
	struct buf *rf_obp;	/* ptr. to original I/O buf */
	int     rf_flags;	/* misc. flags */
	RF_DiskQueueData_t *req;/* the request that this was part of.. */
};

#define RAIDGETBUF(rs) pool_get(&(rs)->sc_cbufpool, PR_NOWAIT)
#define RAIDPUTBUF(rs, cbp) pool_put(&(rs)->sc_cbufpool, cbp)
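
/*
 * Editor's sketch (not part of the original driver): a minimal example of
 * how a struct raidbuf is typically obtained from and returned to the
 * per-unit component buffer pool via the macros above.  Only RAIDGETBUF,
 * RAIDPUTBUF, rf_obp and req come from this file; the surrounding wrapper
 * is hypothetical.
 *
 *	struct raidbuf *raidbp;
 *
 *	raidbp = RAIDGETBUF(rs);
 *	if (raidbp == NULL) {
 *		// PR_NOWAIT means the allocation can fail; caller must cope
 *	}
 *	raidbp->rf_obp = bp;	// remember the original I/O buf
 *	raidbp->req = req;	// and the RAIDframe request it serves
 *	// ... issue the component I/O using &raidbp->rf_buf ...
 *	RAIDPUTBUF(rs, raidbp);	// release once the component I/O completes
 */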

/* XXX Not sure if the following should be replacing the raidPtrs above,
   or if it should be used in conjunction with that... */

struct raid_softc {
	int     sc_flags;	/* flags */
	int     sc_cflags;	/* configuration flags */
	size_t  sc_size;	/* size of the raid device */
	dev_t   sc_dev;		/* our device.. */
	char    sc_xname[20];	/* XXX external name */
	struct disk sc_dkdev;	/* generic disk device info */
	struct pool sc_cbufpool;	/* component buffer pool */
};
/* sc_flags */
#define RAIDF_INITED	0x01	/* unit has been initialized */
#define RAIDF_WLABEL	0x02	/* label area is writable */
#define RAIDF_LABELLING	0x04	/* unit is currently being labelled */
#define RAIDF_WANTED	0x40	/* someone is waiting to obtain a lock */
#define RAIDF_LOCKED	0x80	/* unit is locked */

#define	raidunit(x)	DISKUNIT(x)
static int numraid = 0;

/*
 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
 * Be aware that large numbers can allow the driver to consume a lot of
 * kernel memory, especially on writes, and in degraded mode reads.
 *
 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
 * a single 64K write will typically require 64K for the old data,
 * 64K for the old parity, and 64K for the new parity, for a total
 * of 192K (if the parity buffer is not re-used immediately).
 * Even if it is used immediately, that's still 128K, which when multiplied
 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
 *
 * Now in degraded mode, for example, a 64K read on the above setup may
 * require data reconstruction, which will require *all* of the 4 remaining
 * disks to participate -- 4 * 32K/disk == 128K again.
 */

#ifndef RAIDOUTSTANDING
#define RAIDOUTSTANDING   6
#endif
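
/*
 * Editor's note (a rough sketch, not from the original source): the
 * worst-case numbers in the comment above scale roughly as
 *
 *	writes:		outstanding * 128K..192K of working buffers,
 *			plus outstanding * 64K of incoming data
 *			(10 * 128K + 10 * 64K = 1920K in the example)
 *	degraded reads:	outstanding * (ndisks - 1) * 32K
 *			(10 * 4 * 32K = 1280K in the example)
 *
 * which is why the default value above is kept small.
 */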

#define RAIDLABELDEV(dev)	\
	(MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))

/* declared here, and made public, for the benefit of KVM stuff.. */
struct raid_softc *raid_softc;

static void raidgetdefaultlabel __P((RF_Raid_t *, struct raid_softc *,
    struct disklabel *));
static void raidgetdisklabel __P((dev_t));
static void raidmakedisklabel __P((struct raid_softc *));

static int raidlock __P((struct raid_softc *));
static void raidunlock __P((struct raid_softc *));
int raidlookup __P((char *, struct proc * p, struct vnode **));

static void rf_markalldirty __P((RF_Raid_t *));

void
raidattach(num)
	int     num;
{
	int     raidID;
	int     i, rc;

#ifdef DEBUG
	printf("raidattach: Asked for %d units\n", num);
#endif

	if (num <= 0) {
#ifdef DIAGNOSTIC
		panic("raidattach: count <= 0");
#endif
		return;
	}
	/* This is where all the initialization stuff gets done. */

	/* Make some space for requested number of units... */

	RF_Calloc(raidPtrs, num, sizeof(RF_Raid_t *), (RF_Raid_t **));
	if (raidPtrs == NULL) {
		panic("raidPtrs is NULL!!\n");
	}

	rc = rf_mutex_init(&rf_sparet_wait_mutex);
	if (rc) {
		RF_PANIC();
	}

	rf_sparet_wait_queue = rf_sparet_resp_queue = NULL;
	recon_queue = NULL;

	for (i = 0; i < numraid; i++)
		raidPtrs[i] = NULL;
	rc = rf_BootRaidframe();
	if (rc == 0)
		printf("Kernelized RAIDframe activated\n");
	else
		panic("Serious error booting RAID!!\n");

	/* put together some datastructures like the CCD device does.. This
	 * lets us lock the device and what-not when it gets opened. */

	raid_softc = (struct raid_softc *)
	    malloc(num * sizeof(struct raid_softc),
	    M_RAIDFRAME, M_NOWAIT);
	if (raid_softc == NULL) {
		printf("WARNING: no memory for RAIDframe driver\n");
		return;
	}
	numraid = num;
	bzero(raid_softc, num * sizeof(struct raid_softc));

	for (raidID = 0; raidID < num; raidID++) {
		RF_Calloc(raidPtrs[raidID], 1, sizeof(RF_Raid_t),
		    (RF_Raid_t *));
		if (raidPtrs[raidID] == NULL) {
			printf("raidPtrs[%d] is NULL\n", raidID);
		}
	}
}

int
raidsize(dev)
	dev_t   dev;
{
	struct raid_softc *rs;
	struct disklabel *lp;
	int     part, unit, omask, size;

	unit = raidunit(dev);
	if (unit >= numraid)
		return (-1);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (-1);

	part = DISKPART(dev);
	omask = rs->sc_dkdev.dk_openmask & (1 << part);
	lp = rs->sc_dkdev.dk_label;

	if (omask == 0 && raidopen(dev, 0, S_IFBLK, curproc))
		return (-1);

	if (lp->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = lp->d_partitions[part].p_size *
		    (lp->d_secsize / DEV_BSIZE);

	if (omask == 0 && raidclose(dev, 0, S_IFBLK, curproc))
		return (-1);

	return (size);

}

int
raiddump(dev, blkno, va, size)
	dev_t   dev;
	daddr_t blkno;
	caddr_t va;
	size_t  size;
{
	/* Not implemented. */
	return ENXIO;
}
/* ARGSUSED */
int
raidopen(dev, flags, fmt, p)
	dev_t   dev;
	int     flags, fmt;
	struct proc *p;
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	struct disklabel *lp;
	int     part, pmask;
	int     error = 0;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);
	lp = rs->sc_dkdev.dk_label;

	part = DISKPART(dev);
	pmask = (1 << part);

	db1_printf(("Opening raid device number: %d partition: %d\n",
		unit, part));

	if ((rs->sc_flags & RAIDF_INITED) &&
	    (rs->sc_dkdev.dk_openmask == 0))
		raidgetdisklabel(dev);

	/* make sure that this partition exists */

	if (part != RAW_PART) {
		db1_printf(("Not a raw partition..\n"));
		if (((rs->sc_flags & RAIDF_INITED) == 0) ||
		    ((part >= lp->d_npartitions) ||
			(lp->d_partitions[part].p_fstype == FS_UNUSED))) {
			error = ENXIO;
			raidunlock(rs);
			db1_printf(("Bailing out...\n"));
			return (error);
		}
	}
	/* Prevent this unit from being unconfigured while open. */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* First one... mark things as dirty... Note that we *MUST*
		   have done a configure before this.  I DO NOT WANT TO BE
		   SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
		   THAT THEY BELONG TOGETHER!!!!! */
		/* XXX should check to see if we're only open for reading
		   here... If so, we needn't do this, but then need some
		   other way of keeping track of what's happened.. */

		rf_markalldirty( raidPtrs[unit] );
	}

	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	raidunlock(rs);

	return (error);

}
/* ARGSUSED */
int
raidclose(dev, flags, fmt, p)
	dev_t   dev;
	int     flags, fmt;
	struct proc *p;
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	int     error = 0;
	int     part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((error = raidlock(rs)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (fmt) {
	case S_IFCHR:
		rs->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		rs->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	rs->sc_dkdev.dk_openmask =
	    rs->sc_dkdev.dk_copenmask | rs->sc_dkdev.dk_bopenmask;

	if ((rs->sc_dkdev.dk_openmask == 0) &&
	    ((rs->sc_flags & RAIDF_INITED) != 0)) {
		/* Last one... device is not unconfigured yet.
		   Device shutdown has taken care of setting the
		   clean bits if RAIDF_INITED is not set
		   mark things as clean... */
		rf_update_component_labels( raidPtrs[unit] );
	}

	raidunlock(rs);
	return (0);

}

void
raidstrategy(bp)
	register struct buf *bp;
{
	register int s;

	unsigned int raidID = raidunit(bp->b_dev);
	RF_Raid_t *raidPtr;
	struct raid_softc *rs = &raid_softc[raidID];
	struct disklabel *lp;
	int     wlabel;

#if 0
	db1_printf(("Strategy: 0x%x 0x%x\n", bp, bp->b_data));
	db1_printf(("Strategy(2): bp->b_bufsize%d\n", (int) bp->b_bufsize));
	db1_printf(("bp->b_count=%d\n", (int) bp->b_bcount));
	db1_printf(("bp->b_resid=%d\n", (int) bp->b_resid));
	db1_printf(("bp->b_blkno=%d\n", (int) bp->b_blkno));

	if (bp->b_flags & B_READ)
		db1_printf(("READ\n"));
	else
		db1_printf(("WRITE\n"));
#endif
	if ((rs->sc_flags & RAIDF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}
	if (raidID >= numraid || !raidPtrs[raidID]) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}
	raidPtr = raidPtrs[raidID];
	if (!raidPtr->valid) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}
	if (bp->b_bcount == 0) {
		db1_printf(("b_bcount is zero..\n"));
		biodone(bp);
		return;
	}
	lp = rs->sc_dkdev.dk_label;

	/*
	 * Do bounds checking and adjust transfer.  If there's an
	 * error, the bounds check will flag that for us.
	 */

	wlabel = rs->sc_flags & (RAIDF_WLABEL | RAIDF_LABELLING);
	if (DISKPART(bp->b_dev) != RAW_PART)
		if (bounds_check_with_label(bp, lp, wlabel) <= 0) {
			db1_printf(("Bounds check failed!!:%d %d\n",
				(int) bp->b_blkno, (int) wlabel));
			biodone(bp);
			return;
		}
	s = splbio();		/* XXX Needed? */
	db1_printf(("Beginning strategy...\n"));

	bp->b_resid = 0;
	bp->b_error = rf_DoAccessKernel(raidPtrs[raidID], bp,
	    NULL, NULL, NULL);
	if (bp->b_error) {
		bp->b_flags |= B_ERROR;
		db1_printf(("bp->b_flags HAS B_ERROR SET!!!: %d\n",
			bp->b_error));
	}
	splx(s);
#if 0
	db1_printf(("Strategy exiting: 0x%x 0x%x %d %d\n",
		bp, bp->b_data,
		(int) bp->b_bcount, (int) bp->b_resid));
#endif
}
/* ARGSUSED */
int
raidread(dev, uio, flags)
	dev_t   dev;
	struct uio *uio;
	int     flags;
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;
	int     part;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (ENXIO);
	part = DISKPART(dev);

	db1_printf(("raidread: unit: %d partition: %d\n", unit, part));

	return (physio(raidstrategy, NULL, dev, B_READ, minphys, uio));

}
/* ARGSUSED */
int
raidwrite(dev, uio, flags)
	dev_t   dev;
	struct uio *uio;
	int     flags;
{
	int     unit = raidunit(dev);
	struct raid_softc *rs;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	if ((rs->sc_flags & RAIDF_INITED) == 0)
		return (ENXIO);
	db1_printf(("raidwrite\n"));
	return (physio(raidstrategy, NULL, dev, B_WRITE, minphys, uio));

}

int
raidioctl(dev, cmd, data, flag, p)
	dev_t   dev;
	u_long  cmd;
	caddr_t data;
	int     flag;
	struct proc *p;
{
	int     unit = raidunit(dev);
	int     error = 0;
	int     part, pmask;
	struct raid_softc *rs;
	RF_Config_t *k_cfg, *u_cfg;
	u_char *specific_buf;
	int     retcode = 0;
	int     row;
	int     column;
	int     s;
	struct rf_recon_req *rrcopy, *rr;
	RF_ComponentLabel_t *component_label;
	RF_ComponentLabel_t ci_label;
	RF_ComponentLabel_t **c_label_ptr;
	RF_SingleComponent_t *sparePtr, *componentPtr;
	RF_SingleComponent_t hot_spare;
	RF_SingleComponent_t component;

	if (unit >= numraid)
		return (ENXIO);
	rs = &raid_softc[unit];

	db1_printf(("raidioctl: %d %d %d %d\n", (int) dev,
		(int) DISKPART(dev), (int) unit, (int) cmd));

	/* Must be open for writes for these commands... */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	/* Must be initialized for these... */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPART:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case RAIDFRAME_SHUTDOWN:
	case RAIDFRAME_REWRITEPARITY:
	case RAIDFRAME_GET_INFO:
	case RAIDFRAME_RESET_ACCTOTALS:
	case RAIDFRAME_GET_ACCTOTALS:
	case RAIDFRAME_KEEP_ACCTOTALS:
	case RAIDFRAME_GET_SIZE:
	case RAIDFRAME_FAIL_DISK:
	case RAIDFRAME_COPYBACK:
	case RAIDFRAME_CHECKRECON:
	case RAIDFRAME_GET_COMPONENT_LABEL:
	case RAIDFRAME_SET_COMPONENT_LABEL:
	case RAIDFRAME_ADD_HOT_SPARE:
	case RAIDFRAME_REMOVE_HOT_SPARE:
	case RAIDFRAME_INIT_LABELS:
	case RAIDFRAME_REBUILD_IN_PLACE:
	case RAIDFRAME_CHECK_PARITY:
		if ((rs->sc_flags & RAIDF_INITED) == 0)
			return (ENXIO);
	}

	switch (cmd) {

		/* configure the system */
	case RAIDFRAME_CONFIGURE:

		db3_printf(("rf_ioctl: RAIDFRAME_CONFIGURE\n"));
		/* copy-in the configuration information */
		/* data points to a pointer to the configuration structure */
		u_cfg = *((RF_Config_t **) data);
		RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
		if (k_cfg == NULL) {
			db3_printf(("rf_ioctl: ENOMEM for config. Code is %d\n", retcode));
			return (ENOMEM);
		}
		retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg,
		    sizeof(RF_Config_t));
		if (retcode) {
			db3_printf(("rf_ioctl: retcode=%d copyin.1\n",
				retcode));
			return (retcode);
		}
		/* allocate a buffer for the layout-specific data, and copy it
		 * in */
		if (k_cfg->layoutSpecificSize) {
			if (k_cfg->layoutSpecificSize > 10000) {
				/* sanity check */
				db3_printf(("rf_ioctl: EINVAL %d\n", retcode));
				return (EINVAL);
			}
			RF_Malloc(specific_buf, k_cfg->layoutSpecificSize,
			    (u_char *));
			if (specific_buf == NULL) {
				RF_Free(k_cfg, sizeof(RF_Config_t));
				db3_printf(("rf_ioctl: ENOMEM %d\n", retcode));
				return (ENOMEM);
			}
			retcode = copyin(k_cfg->layoutSpecific,
			    (caddr_t) specific_buf,
			    k_cfg->layoutSpecificSize);
			if (retcode) {
				db3_printf(("rf_ioctl: retcode=%d copyin.2\n",
					retcode));
				return (retcode);
			}
		} else
			specific_buf = NULL;
		k_cfg->layoutSpecific = specific_buf;

		/* should do some kind of sanity check on the configuration.
		 * Store the sum of all the bytes in the last byte? */

		/* configure the system */

		raidPtrs[unit]->raidid = unit;

		retcode = rf_Configure(raidPtrs[unit], k_cfg);

		/* allow this many simultaneous IO's to this RAID device */
		raidPtrs[unit]->openings = RAIDOUTSTANDING;

		if (retcode == 0) {
			retcode = raidinit(dev, raidPtrs[unit], unit);
			rf_markalldirty( raidPtrs[unit] );
		}
		/* free the buffers.  No return code here. */
		if (k_cfg->layoutSpecificSize) {
			RF_Free(specific_buf, k_cfg->layoutSpecificSize);
		}
		RF_Free(k_cfg, sizeof(RF_Config_t));

		db3_printf(("rf_ioctl: retcode=%d RAIDFRAME_CONFIGURE\n",
			retcode));

		return (retcode);
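
		/*
		 * Editor's sketch (not part of the original driver): because
		 * "data" carries a *pointer* to the user's RF_Config_t (see
		 * the copyin above), a hypothetical user-level caller would
		 * pass the address of a pointer, e.g.:
		 *
		 *	RF_Config_t cfg;
		 *	void *cfgp = &cfg;
		 *
		 *	memset(&cfg, 0, sizeof(cfg));
		 *	// ... fill in the geometry, component names, layout ...
		 *	if (ioctl(fd, RAIDFRAME_CONFIGURE, &cfgp) == -1)
		 *		err(1, "RAIDFRAME_CONFIGURE");
		 */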

		/* shutdown the system */
	case RAIDFRAME_SHUTDOWN:

		if ((error = raidlock(rs)) != 0)
			return (error);

		/*
		 * If somebody has a partition mounted, we shouldn't
		 * shutdown.
		 */

		part = DISKPART(dev);
		pmask = (1 << part);
		if ((rs->sc_dkdev.dk_openmask & ~pmask) ||
		    ((rs->sc_dkdev.dk_bopenmask & pmask) &&
			(rs->sc_dkdev.dk_copenmask & pmask))) {
			raidunlock(rs);
			return (EBUSY);
		}

		if (rf_debugKernelAccess) {
			printf("call shutdown\n");
		}

		retcode = rf_Shutdown(raidPtrs[unit]);

		db1_printf(("Done main shutdown\n"));

		pool_destroy(&rs->sc_cbufpool);
		db1_printf(("Done freeing component buffer freelist\n"));

		/* It's no longer initialized... */
		rs->sc_flags &= ~RAIDF_INITED;

		/* Detach the disk. */
		disk_detach(&rs->sc_dkdev);

		raidunlock(rs);

		return (retcode);
	case RAIDFRAME_GET_COMPONENT_LABEL:
		c_label_ptr = (RF_ComponentLabel_t **) data;
		/* need to read the component label for the disk indicated
		   by row,column in component_label
		   XXX need to sanity check these values!!!
		 */

		/* For practice, let's get it directly from disk, rather
		   than from the in-core copy */
		RF_Malloc( component_label, sizeof( RF_ComponentLabel_t ),
		    (RF_ComponentLabel_t *));
		if (component_label == NULL)
			return (ENOMEM);

		bzero((char *) component_label, sizeof(RF_ComponentLabel_t));

		retcode = copyin( *c_label_ptr, component_label,
		    sizeof(RF_ComponentLabel_t));

		if (retcode) {
			/* don't leak the label buffer on a copyin error */
			RF_Free( component_label, sizeof(RF_ComponentLabel_t));
			return(retcode);
		}

		row = component_label->row;
		column = component_label->column;

		if ((row < 0) || (row >= raidPtrs[unit]->numRow) ||
		    (column < 0) || (column >= raidPtrs[unit]->numCol)) {
			return(EINVAL);
		}

		raidread_component_label(
		    raidPtrs[unit]->Disks[row][column].dev,
		    raidPtrs[unit]->raid_cinfo[row][column].ci_vp,
		    component_label );

		retcode = copyout((caddr_t) component_label,
		    (caddr_t) *c_label_ptr,
		    sizeof(RF_ComponentLabel_t));
		RF_Free( component_label, sizeof(RF_ComponentLabel_t));
		return (retcode);
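
		/*
		 * Editor's sketch (not from the original source): like
		 * CONFIGURE above, this argument is double-indirect.  A
		 * hypothetical user-level caller selects the component via
		 * row/column and reads back the on-disk label:
		 *
		 *	RF_ComponentLabel_t label;
		 *	RF_ComponentLabel_t *labelp = &label;
		 *
		 *	memset(&label, 0, sizeof(label));
		 *	label.row = 0;
		 *	label.column = 1;
		 *	if (ioctl(fd, RAIDFRAME_GET_COMPONENT_LABEL, &labelp) == -1)
		 *		err(1, "RAIDFRAME_GET_COMPONENT_LABEL");
		 *	printf("serial %d mod_counter %d clean %d\n",
		 *	    label.serial_number, label.mod_counter, label.clean);
		 */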
|
|
|
|
|
|
|
|
case RAIDFRAME_SET_COMPONENT_LABEL:
|
|
|
|
component_label = (RF_ComponentLabel_t *) data;
|
|
|
|
|
|
|
|
/* XXX check the label for valid stuff... */
|
|
|
|
/* Note that some things *should not* get modified --
|
|
|
|
the user should be re-initing the labels instead of
|
|
|
|
trying to patch things.
|
|
|
|
*/
|
|
|
|
|
|
|
|
printf("Got component label:\n");
|
|
|
|
printf("Version: %d\n",component_label->version);
|
|
|
|
printf("Serial Number: %d\n",component_label->serial_number);
|
|
|
|
printf("Mod counter: %d\n",component_label->mod_counter);
|
|
|
|
printf("Row: %d\n", component_label->row);
|
|
|
|
printf("Column: %d\n", component_label->column);
|
|
|
|
printf("Num Rows: %d\n", component_label->num_rows);
|
|
|
|
printf("Num Columns: %d\n", component_label->num_columns);
|
|
|
|
printf("Clean: %d\n", component_label->clean);
|
|
|
|
printf("Status: %d\n", component_label->status);
|
|
|
|
|
|
|
|
row = component_label->row;
|
|
|
|
column = component_label->column;
|
1999-03-02 06:18:48 +03:00
|
|
|
|
1999-08-14 06:41:36 +04:00
|
|
|
if ((row < 0) || (row >= raidPtrs[unit]->numRow) ||
|
|
|
|
(column < 0) || (column >= raidPtrs[unit]->numCol)) {
|
1999-03-02 06:18:48 +03:00
|
|
|
return(EINVAL);
|
1999-02-24 02:57:53 +03:00
|
|
|
}
|
1999-03-02 06:18:48 +03:00
|
|
|
|
|
|
|
/* XXX this isn't allowed to do anything for now :-) */
|
|
|
|
#if 0
|
1999-02-24 02:57:53 +03:00
|
|
|
raidwrite_component_label(
|
|
|
|
raidPtrs[unit]->Disks[row][column].dev,
|
|
|
|
raidPtrs[unit]->raid_cinfo[row][column].ci_vp,
|
|
|
|
component_label );
|
1999-03-02 06:18:48 +03:00
|
|
|
#endif
|
|
|
|
return (0);
|
1999-02-24 02:57:53 +03:00
|
|
|
|
|
|
|
case RAIDFRAME_INIT_LABELS:
|
|
|
|
component_label = (RF_ComponentLabel_t *) data;
|
|
|
|
/*
|
|
|
|
we only want the serial number from
|
|
|
|
the above. We get all the rest of the information
|
|
|
|
from the config that was used to create this RAID
|
|
|
|
set.
|
|
|
|
*/
|
1999-03-02 06:18:48 +03:00
|
|
|
|
|
|
|
raidPtrs[unit]->serial_number = component_label->serial_number;
|
|
|
|
/* current version number */
|
|
|
|
ci_label.version = RF_COMPONENT_LABEL_VERSION;
|
1999-02-24 02:57:53 +03:00
|
|
|
ci_label.serial_number = component_label->serial_number;
|
1999-03-02 06:18:48 +03:00
|
|
|
ci_label.mod_counter = raidPtrs[unit]->mod_counter;
|
1999-02-24 02:57:53 +03:00
|
|
|
ci_label.num_rows = raidPtrs[unit]->numRow;
|
|
|
|
ci_label.num_columns = raidPtrs[unit]->numCol;
|
|
|
|
ci_label.clean = RF_RAID_DIRTY; /* not clean */
|
|
|
|
ci_label.status = rf_ds_optimal; /* "It's good!" */
|
|
|
|
|
|
|
|
for(row=0;row<raidPtrs[unit]->numRow;row++) {
|
|
|
|
ci_label.row = row;
|
|
|
|
for(column=0;column<raidPtrs[unit]->numCol;column++) {
|
|
|
|
ci_label.column = column;
|
|
|
|
raidwrite_component_label(
|
|
|
|
raidPtrs[unit]->Disks[row][column].dev,
|
|
|
|
raidPtrs[unit]->raid_cinfo[row][column].ci_vp,
|
|
|
|
&ci_label );
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (retcode);
|
1999-02-05 03:06:06 +03:00
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
/* initialize all parity */
|
|
|
|
case RAIDFRAME_REWRITEPARITY:
|
|
|
|
|
1999-04-07 18:17:10 +04:00
|
|
|
if (raidPtrs[unit]->Layout.map->faultsTolerated == 0) {
|
|
|
|
/* Parity for RAID 0 is trivially correct */
|
|
|
|
raidPtrs[unit]->parity_good = RF_RAID_CLEAN;
|
|
|
|
return(0);
|
|
|
|
}
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
/* borrow the thread of the requesting process */
|
1999-08-14 07:10:03 +04:00
|
|
|
|
1999-08-10 22:18:30 +04:00
|
|
|
s = splbio();
|
1998-11-13 07:20:26 +03:00
|
|
|
retcode = rf_RewriteParity(raidPtrs[unit]);
|
1999-08-10 22:18:30 +04:00
|
|
|
splx(s);
|
1999-02-05 03:06:06 +03:00
|
|
|
/* return I/O Error if the parity rewrite fails */
|
1998-11-13 07:20:26 +03:00
|
|
|
|
1999-02-24 02:57:53 +03:00
|
|
|
if (retcode) {
|
1999-02-05 03:06:06 +03:00
|
|
|
retcode = EIO;
|
1999-02-24 02:57:53 +03:00
|
|
|
} else {
|
1999-03-02 06:18:48 +03:00
|
|
|
/* set the clean bit! If we shutdown correctly,
|
|
|
|
the clean bit on each component label will get
|
|
|
|
set */
|
|
|
|
raidPtrs[unit]->parity_good = RF_RAID_CLEAN;
|
1999-02-24 02:57:53 +03:00
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
return (retcode);
|
1998-11-13 07:20:26 +03:00
|
|
|
|
1999-02-24 02:57:53 +03:00
|
|
|
|
|
|
|
case RAIDFRAME_ADD_HOT_SPARE:
|
1999-03-02 06:18:48 +03:00
|
|
|
sparePtr = (RF_SingleComponent_t *) data;
|
|
|
|
memcpy( &hot_spare, sparePtr, sizeof(RF_SingleComponent_t));
|
|
|
|
printf("Adding spare\n");
|
|
|
|
retcode = rf_add_hot_spare(raidPtrs[unit], &hot_spare);
|
1999-02-24 02:57:53 +03:00
|
|
|
return(retcode);
|
|
|
|
|
|
|
|
case RAIDFRAME_REMOVE_HOT_SPARE:
|
|
|
|
return(retcode);
|
|
|
|
|
1999-03-02 06:18:48 +03:00
|
|
|
case RAIDFRAME_REBUILD_IN_PLACE:
|
1999-08-11 02:46:59 +04:00
|
|
|
|
|
|
|
if (raidPtrs[unit]->Layout.map->faultsTolerated == 0) {
|
|
|
|
/* Can't do this on a RAID 0!! */
|
|
|
|
return(EINVAL);
|
|
|
|
}
|
|
|
|
|
1999-03-02 06:18:48 +03:00
|
|
|
componentPtr = (RF_SingleComponent_t *) data;
|
|
|
|
memcpy( &component, componentPtr,
|
|
|
|
sizeof(RF_SingleComponent_t));
|
|
|
|
row = component.row;
|
|
|
|
column = component.column;
|
|
|
|
printf("Rebuild: %d %d\n",row, column);
|
1999-08-14 06:41:36 +04:00
|
|
|
if ((row < 0) || (row >= raidPtrs[unit]->numRow) ||
|
|
|
|
(column < 0) || (column >= raidPtrs[unit]->numCol)) {
|
1999-03-02 06:18:48 +03:00
|
|
|
return(EINVAL);
|
|
|
|
}
|
|
|
|
printf("Attempting a rebuild in place\n");
|
1999-07-21 07:15:26 +04:00
|
|
|
s = splbio();
|
1999-03-02 06:18:48 +03:00
|
|
|
retcode = rf_ReconstructInPlace(raidPtrs[unit], row, column);
|
1999-07-21 07:15:26 +04:00
|
|
|
splx(s);
|
1999-03-02 06:18:48 +03:00
|
|
|
return(retcode);
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_GET_INFO:
|
|
|
|
{
|
|
|
|
RF_Raid_t *raid = raidPtrs[unit];
|
|
|
|
RF_DeviceConfig_t *cfg, **ucfgp;
|
1999-02-05 03:06:06 +03:00
|
|
|
int i, j, d;
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
if (!raid->valid)
|
1999-02-05 03:06:06 +03:00
|
|
|
return (ENODEV);
|
|
|
|
ucfgp = (RF_DeviceConfig_t **) data;
|
|
|
|
RF_Malloc(cfg, sizeof(RF_DeviceConfig_t),
|
1999-02-24 02:57:53 +03:00
|
|
|
(RF_DeviceConfig_t *));
|
1998-11-13 07:20:26 +03:00
|
|
|
if (cfg == NULL)
|
1999-02-05 03:06:06 +03:00
|
|
|
return (ENOMEM);
|
|
|
|
bzero((char *) cfg, sizeof(RF_DeviceConfig_t));
|
1998-11-13 07:20:26 +03:00
|
|
|
cfg->rows = raid->numRow;
|
|
|
|
cfg->cols = raid->numCol;
|
|
|
|
cfg->ndevs = raid->numRow * raid->numCol;
|
|
|
|
if (cfg->ndevs >= RF_MAX_DISKS) {
|
|
|
|
cfg->ndevs = 0;
|
1999-02-05 03:06:06 +03:00
|
|
|
return (ENOMEM);
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
|
|
|
cfg->nspares = raid->numSpare;
|
|
|
|
if (cfg->nspares >= RF_MAX_DISKS) {
|
|
|
|
cfg->nspares = 0;
|
1999-02-05 03:06:06 +03:00
|
|
|
return (ENOMEM);
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
|
|
|
cfg->maxqdepth = raid->maxQueueDepth;
|
|
|
|
d = 0;
|
1999-02-05 03:06:06 +03:00
|
|
|
for (i = 0; i < cfg->rows; i++) {
|
|
|
|
for (j = 0; j < cfg->cols; j++) {
|
1998-11-13 07:20:26 +03:00
|
|
|
cfg->devs[d] = raid->Disks[i][j];
|
|
|
|
d++;
|
|
|
|
}
|
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
for (j = cfg->cols, i = 0; i < cfg->nspares; i++, j++) {
|
1998-11-13 07:20:26 +03:00
|
|
|
cfg->spares[i] = raid->Disks[0][j];
|
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
retcode = copyout((caddr_t) cfg, (caddr_t) * ucfgp,
|
1999-02-24 02:57:53 +03:00
|
|
|
sizeof(RF_DeviceConfig_t));
|
1999-02-05 03:06:06 +03:00
|
|
|
RF_Free(cfg, sizeof(RF_DeviceConfig_t));
|
|
|
|
|
|
|
|
return (retcode);
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
break;
|
1999-08-10 22:18:30 +04:00
|
|
|
case RAIDFRAME_CHECK_PARITY:
|
|
|
|
*(int *) data = raidPtrs[unit]->parity_good;
|
|
|
|
return (0);
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_RESET_ACCTOTALS:
|
|
|
|
{
|
|
|
|
RF_Raid_t *raid = raidPtrs[unit];
|
1999-02-05 03:06:06 +03:00
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
bzero(&raid->acc_totals, sizeof(raid->acc_totals));
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
break;
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_GET_ACCTOTALS:
|
|
|
|
{
|
1999-02-05 03:06:06 +03:00
|
|
|
RF_AccTotals_t *totals = (RF_AccTotals_t *) data;
|
1998-11-13 07:20:26 +03:00
|
|
|
RF_Raid_t *raid = raidPtrs[unit];
|
1999-02-05 03:06:06 +03:00
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
*totals = raid->acc_totals;
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
break;
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_KEEP_ACCTOTALS:
|
|
|
|
{
|
|
|
|
RF_Raid_t *raid = raidPtrs[unit];
|
1999-02-05 03:06:06 +03:00
|
|
|
int *keep = (int *) data;
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
raid->keep_acc_totals = *keep;
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
break;
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_GET_SIZE:
|
|
|
|
*(int *) data = raidPtrs[unit]->totalSectors;
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
1998-11-13 07:20:26 +03:00
|
|
|
|
|
|
|
#define RAIDFRAME_RECON 1
|
|
|
|
/* XXX The above should probably be set somewhere else!! GO */
|
|
|
|
#if RAIDFRAME_RECON > 0
|
|
|
|
|
|
|
|
/* fail a disk & optionally start reconstruction */
|
|
|
|
case RAIDFRAME_FAIL_DISK:
|
1999-08-11 02:46:59 +04:00
|
|
|
|
|
|
|
if (raidPtrs[unit]->Layout.map->faultsTolerated == 0) {
|
|
|
|
/* Can't do this on a RAID 0!! */
|
|
|
|
return(EINVAL);
|
|
|
|
}
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
rr = (struct rf_recon_req *) data;
|
1999-02-05 03:06:06 +03:00
|
|
|
|
|
|
|
if (rr->row < 0 || rr->row >= raidPtrs[unit]->numRow
|
1998-11-13 07:20:26 +03:00
|
|
|
|| rr->col < 0 || rr->col >= raidPtrs[unit]->numCol)
|
1999-02-05 03:06:06 +03:00
|
|
|
return (EINVAL);
|
1998-11-13 07:20:26 +03:00
|
|
|
|
1999-03-02 06:18:48 +03:00
|
|
|
printf("raid%d: Failing the disk: row: %d col: %d\n",
|
|
|
|
unit, rr->row, rr->col);
|
1999-02-05 03:06:06 +03:00
|
|
|
|
|
|
|
/* make a copy of the recon request so that we don't rely on
|
|
|
|
* the user's buffer */
|
1998-11-13 07:20:26 +03:00
|
|
|
RF_Malloc(rrcopy, sizeof(*rrcopy), (struct rf_recon_req *));
|
|
|
|
bcopy(rr, rrcopy, sizeof(*rr));
|
|
|
|
rrcopy->raidPtr = (void *) raidPtrs[unit];
|
|
|
|
|
|
|
|
LOCK_RECON_Q_MUTEX();
|
|
|
|
rrcopy->next = recon_queue;
|
|
|
|
recon_queue = rrcopy;
|
|
|
|
wakeup(&recon_queue);
|
|
|
|
UNLOCK_RECON_Q_MUTEX();
|
1999-02-05 03:06:06 +03:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
/* invoke a copyback operation after recon on whatever disk
|
|
|
|
* needs it, if any */
|
|
|
|
case RAIDFRAME_COPYBACK:
|
1999-08-11 02:46:59 +04:00
|
|
|
|
|
|
|
if (raidPtrs[unit]->Layout.map->faultsTolerated == 0) {
|
|
|
|
/* This makes no sense on a RAID 0!! */
|
|
|
|
return(EINVAL);
|
|
|
|
}
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
/* borrow the current thread to get this done */
|
1999-08-14 07:10:03 +04:00
|
|
|
|
1999-07-21 07:15:26 +04:00
|
|
|
s = splbio();
|
1998-11-13 07:20:26 +03:00
|
|
|
rf_CopybackReconstructedData(raidPtrs[unit]);
|
1999-07-21 07:15:26 +04:00
|
|
|
splx(s);
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
/* return the percentage completion of reconstruction */
|
|
|
|
case RAIDFRAME_CHECKRECON:
|
1999-08-11 02:46:59 +04:00
|
|
|
if (raidPtrs[unit]->Layout.map->faultsTolerated == 0) {
|
|
|
|
/* This makes no sense on a RAID 0 */
|
|
|
|
return(EINVAL);
|
|
|
|
}
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
row = *(int *) data;
|
|
|
|
if (row < 0 || row >= raidPtrs[unit]->numRow)
|
1999-02-05 03:06:06 +03:00
|
|
|
return (EINVAL);
|
|
|
|
if (raidPtrs[unit]->status[row] != rf_rs_reconstructing)
|
1998-11-13 07:20:26 +03:00
|
|
|
*(int *) data = 100;
|
1999-02-05 03:06:06 +03:00
|
|
|
else
|
1998-11-13 07:20:26 +03:00
|
|
|
*(int *) data = raidPtrs[unit]->reconControl[row]->percentComplete;
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
/* the sparetable daemon calls this to wait for the kernel to
|
|
|
|
* need a spare table. this ioctl does not return until a
|
|
|
|
* spare table is needed. XXX -- calling mpsleep here in the
|
|
|
|
* ioctl code is almost certainly wrong and evil. -- XXX XXX
|
|
|
|
* -- I should either compute the spare table in the kernel,
|
|
|
|
* or have a different -- XXX XXX -- interface (a different
|
|
|
|
* character device) for delivering the table -- XXX */
|
1998-11-13 07:20:26 +03:00
|
|
|
#if 0
|
|
|
|
case RAIDFRAME_SPARET_WAIT:
|
|
|
|
RF_LOCK_MUTEX(rf_sparet_wait_mutex);
|
1999-02-05 03:06:06 +03:00
|
|
|
while (!rf_sparet_wait_queue)
|
|
|
|
mpsleep(&rf_sparet_wait_queue, (PZERO + 1) | PCATCH, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex), MS_LOCK_SIMPLE);
|
1998-11-13 07:20:26 +03:00
|
|
|
waitreq = rf_sparet_wait_queue;
|
|
|
|
rf_sparet_wait_queue = rf_sparet_wait_queue->next;
|
|
|
|
RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
|
1999-02-05 03:06:06 +03:00
|
|
|
|
|
|
|
*((RF_SparetWait_t *) data) = *waitreq; /* structure assignment */
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
RF_Free(waitreq, sizeof(*waitreq));
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
|
|
|
|
/* wakes up a process waiting on SPARET_WAIT and puts an error
|
|
|
|
* code in it that will cause the dameon to exit */
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_ABORT_SPARET_WAIT:
|
|
|
|
RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
|
|
|
|
waitreq->fcol = -1;
|
|
|
|
RF_LOCK_MUTEX(rf_sparet_wait_mutex);
|
|
|
|
waitreq->next = rf_sparet_wait_queue;
|
|
|
|
rf_sparet_wait_queue = waitreq;
|
|
|
|
RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
|
|
|
|
wakeup(&rf_sparet_wait_queue);
|
1999-02-05 03:06:06 +03:00
|
|
|
return (0);
|
1998-11-13 07:20:26 +03:00
|
|
|
|
1999-02-05 03:06:06 +03:00
|
|
|
/* used by the spare table daemon to deliver a spare table
|
|
|
|
* into the kernel */
|
1998-11-13 07:20:26 +03:00
|
|
|
case RAIDFRAME_SEND_SPARET:
|
1999-02-05 03:06:06 +03:00
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
/* install the spare table */
|
1999-02-05 03:06:06 +03:00
|
|
|
retcode = rf_SetSpareTable(raidPtrs[unit], *(void **) data);
|
|
|
|
|
|
|
|
/* respond to the requestor. the return status of the spare
|
|
|
|
* table installation is passed in the "fcol" field */
|
1998-11-13 07:20:26 +03:00
|
|
|
RF_Malloc(waitreq, sizeof(*waitreq), (RF_SparetWait_t *));
|
|
|
|
waitreq->fcol = retcode;
|
|
|
|
RF_LOCK_MUTEX(rf_sparet_wait_mutex);
|
|
|
|
waitreq->next = rf_sparet_resp_queue;
|
|
|
|
rf_sparet_resp_queue = waitreq;
|
|
|
|
wakeup(&rf_sparet_resp_queue);
|
|
|
|
RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);
|
1999-02-05 03:06:06 +03:00
|
|
|
|
|
|
|
return (retcode);
|
1998-11-13 07:20:26 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
|
1999-02-05 03:06:06 +03:00
|
|
|
#endif /* RAIDFRAME_RECON > 0 */
|
|
|
|
|
|
|
|
default:
|
|
|
|
break; /* fall through to the os-specific code below */
|
1998-11-13 07:20:26 +03:00
|
|
|
|
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
if (!raidPtrs[unit]->valid)
|
1999-02-05 03:06:06 +03:00
|
|
|
return (EINVAL);
|
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
/*
|
|
|
|
* Add support for "regular" device ioctls here.
|
|
|
|
*/
|
1999-02-05 03:06:06 +03:00
|
|
|
|
1998-11-13 07:20:26 +03:00
|
|
|
switch (cmd) {
|
|
|
|
case DIOCGDINFO:
|
1999-02-05 03:06:06 +03:00
|
|
|
db1_printf(("DIOCGDINFO %d %d\n", (int) dev, (int) DISKPART(dev)));
|
|
|
|
*(struct disklabel *) data = *(rs->sc_dkdev.dk_label);
|
1998-11-13 07:20:26 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DIOCGPART:
|
1999-02-05 03:06:06 +03:00
|
|
|
db1_printf(("DIOCGPART: %d %d\n", (int) dev, (int) DISKPART(dev)));
|
|
|
|
((struct partinfo *) data)->disklab = rs->sc_dkdev.dk_label;
|
|
|
|
((struct partinfo *) data)->part =
|
1998-11-13 07:20:26 +03:00
|
|
|
&rs->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DIOCWDINFO:
|
|
|
|
db1_printf(("DIOCWDINFO\n"));
|
|
|
|
case DIOCSDINFO:
|
|
|
|
db1_printf(("DIOCSDINFO\n"));
|
|
|
|
if ((error = raidlock(rs)) != 0)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
rs->sc_flags |= RAIDF_LABELLING;
|
|
|
|
|
|
|
|
error = setdisklabel(rs->sc_dkdev.dk_label,
|
1999-02-05 03:06:06 +03:00
|
|
|
(struct disklabel *) data, 0, rs->sc_dkdev.dk_cpulabel);
|
1998-11-13 07:20:26 +03:00
|
|
|
if (error == 0) {
|
|
|
|
if (cmd == DIOCWDINFO)
|
|
|
|
error = writedisklabel(RAIDLABELDEV(dev),
|
|
|
|
raidstrategy, rs->sc_dkdev.dk_label,
|
|
|
|
rs->sc_dkdev.dk_cpulabel);
|
|
|
|
}
|
|
|
|
rs->sc_flags &= ~RAIDF_LABELLING;
|
|
|
|
|
|
|
|
raidunlock(rs);
|
|
|
|
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DIOCWLABEL:
|
|
|
|
db1_printf(("DIOCWLABEL\n"));
|
1999-02-05 03:06:06 +03:00
|
|
|
if (*(int *) data != 0)
|
1998-11-13 07:20:26 +03:00
|
|
|
rs->sc_flags |= RAIDF_WLABEL;
|
|
|
|
else
|
|
|
|
rs->sc_flags &= ~RAIDF_WLABEL;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DIOCGDEFLABEL:
|
|
|
|
db1_printf(("DIOCGDEFLABEL\n"));
|
|
|
|
raidgetdefaultlabel(raidPtrs[unit], rs,
|
1999-02-05 03:06:06 +03:00
|
|
|
(struct disklabel *) data);
|
1998-11-13 07:20:26 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
1999-02-05 03:06:06 +03:00
|
|
|
retcode = ENOTTY; /* XXXX ?? OR EINVAL ? */
|
1998-11-13 07:20:26 +03:00
|
|
|
}
|
1999-02-05 03:06:06 +03:00
|
|
|
return (retcode);
|
1998-11-13 07:20:26 +03:00
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
1999-02-05 03:06:06 +03:00
|
|
|
/* raidinit -- complete the rest of the initialization for the
   RAIDframe device.  */

static int
raidinit(dev, raidPtr, unit)
	dev_t dev;
	RF_Raid_t *raidPtr;
	int unit;
{
	int retcode;
	/* int ix; */
	/* struct raidbuf *raidbp; */
	struct raid_softc *rs;

	retcode = 0;

	rs = &raid_softc[unit];
	pool_init(&rs->sc_cbufpool, sizeof(struct raidbuf), 0,
	    0, 0, "raidpl", 0, NULL, NULL, M_RAIDFRAME);

	/* XXX should check return code first... */
	rs->sc_flags |= RAIDF_INITED;

	sprintf(rs->sc_xname, "raid%d", unit);	/* XXX doesn't check bounds. */

	rs->sc_dkdev.dk_name = rs->sc_xname;

	/* disk_attach actually creates space for the CPU disklabel, among
	 * other things, so it's critical to call this *BEFORE* we try putzing
	 * with disklabels. */

	disk_attach(&rs->sc_dkdev);

	/* XXX There may be a weird interaction here between this, and
	 * protectedSectors, as used in RAIDframe. */

	rs->sc_size = raidPtr->totalSectors;
	rs->sc_dev = dev;

	return (retcode);
}
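
/*
 * Note: reconstruction requests appear to be queued on recon_queue
 * (under the recon queue mutex) by other parts of this driver; the
 * thread below simply sleeps on &recon_queue until such a request
 * arrives and then services it.
 */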
/*
 * This kernel thread never exits.  It is created once, and persists
 * until the system reboots.
 */

void
rf_ReconKernelThread()
{
	struct rf_recon_req *req;
	int s;

	/* XXX not sure what spl() level we should be at here... probably
	 * splbio() */
	s = splbio();

	while (1) {
		/* grab the next reconstruction request from the queue */
		LOCK_RECON_Q_MUTEX();
		while (!recon_queue) {
			UNLOCK_RECON_Q_MUTEX();
			tsleep(&recon_queue, PRIBIO,
			    "raidframe recon", 0);
			LOCK_RECON_Q_MUTEX();
		}
		req = recon_queue;
		recon_queue = recon_queue->next;
		UNLOCK_RECON_Q_MUTEX();

		/*
		 * If flags specifies that we should start recon, this call
		 * will not return until reconstruction completes, fails,
		 * or is aborted.
		 */
		rf_FailDisk((RF_Raid_t *) req->raidPtr, req->row, req->col,
		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));

		RF_Free(req, sizeof(*req));
	}
}
/* wake up the daemon & tell it to get us a spare table
 * XXX
 * the entries in the queues should be tagged with the raidPtr
 * so that in the extremely rare case that two recons happen at once,
 * we know for which device we're requesting a spare table
 * XXX
 */
int
rf_GetSpareTableFromDaemon(req)
	RF_SparetWait_t *req;
{
	int retcode;

	RF_LOCK_MUTEX(rf_sparet_wait_mutex);
	req->next = rf_sparet_wait_queue;
	rf_sparet_wait_queue = req;
	wakeup(&rf_sparet_wait_queue);

	/* mpsleep unlocks the mutex */
	while (!rf_sparet_resp_queue) {
		tsleep(&rf_sparet_resp_queue, PRIBIO,
		    "raidframe getsparetable", 0);
	}
	req = rf_sparet_resp_queue;
	rf_sparet_resp_queue = req->next;
	RF_UNLOCK_MUTEX(rf_sparet_wait_mutex);

	retcode = req->fcol;
	RF_Free(req, sizeof(*req));	/* this is not the same req as we
					 * alloc'd */
	return (retcode);
}
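
/*
 * Overview of the kernel access path below: bp->b_blkno is translated
 * from a partition-relative block to an address on the RAID set, the
 * request is bounds- and alignment-checked, the caller is throttled on
 * raidPtr->openings, and the access is then handed to rf_DoAccess() as
 * non-blocking I/O with completion delivered through the supplied
 * callback (cbFunc/cbArg).
 */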
/* a wrapper around rf_DoAccess that extracts appropriate info from the
 * bp & passes it down.
 * any calls originating in the kernel must use non-blocking I/O
 * do some extra sanity checking to return "appropriate" error values for
 * certain conditions (to make some standard utilities work)
 */
int
rf_DoAccessKernel(raidPtr, bp, flags, cbFunc, cbArg)
	RF_Raid_t *raidPtr;
	struct buf *bp;
	RF_RaidAccessFlags_t flags;
	void (*cbFunc) (struct buf *);
	void *cbArg;
{
	RF_SectorCount_t num_blocks, pb, sum;
	RF_RaidAddr_t raid_addr;
	int retcode;
	struct partition *pp;
	daddr_t blocknum;
	int unit;
	struct raid_softc *rs;
	int do_async;

	/* XXX The dev_t used here should be for /dev/[r]raid* !!! */

	unit = raidPtr->raidid;
	rs = &raid_softc[unit];

	/* Ok, for the bp we have here, bp->b_blkno is relative to the
	 * partition.. Need to make it absolute to the underlying device.. */

	blocknum = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		pp = &rs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blocknum += pp->p_offset;
		db1_printf(("updated: %d %d\n", DISKPART(bp->b_dev),
		    pp->p_offset));
	} else {
		db1_printf(("Is raw..\n"));
	}
	db1_printf(("Blocks: %d, %d\n", (int) bp->b_blkno, (int) blocknum));

	db1_printf(("bp->b_bcount = %d\n", (int) bp->b_bcount));
	db1_printf(("bp->b_resid = %d\n", (int) bp->b_resid));

	/* *THIS* is where we adjust what block we're going to... but DO NOT
	 * TOUCH bp->b_blkno!!! */
	raid_addr = blocknum;

	num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
	pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
	sum = raid_addr + num_blocks + pb;
	if (1 || rf_debugKernelAccess) {
		db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
		    (int) raid_addr, (int) sum, (int) num_blocks,
		    (int) pb, (int) bp->b_resid));
	}
	if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
	    || (sum < num_blocks) || (sum < pb)) {
		bp->b_error = ENOSPC;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (bp->b_error);
	}
	/*
	 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
	 */

	if (bp->b_bcount & raidPtr->sectorMask) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (bp->b_error);
	}
	db1_printf(("Calling DoAccess..\n"));

	/* Put a throttle on the number of requests we handle simultaneously */

	RF_LOCK_MUTEX(raidPtr->mutex);

	while (raidPtr->openings <= 0) {
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		(void) tsleep(&raidPtr->openings, PRIBIO, "rfdwait", 0);
		RF_LOCK_MUTEX(raidPtr->mutex);
	}
	raidPtr->openings--;

	RF_UNLOCK_MUTEX(raidPtr->mutex);

	/*
	 * Everything is async.
	 */
	do_async = 1;

	/* don't ever condition on bp->b_flags & B_WRITE.  always condition on
	 * B_READ instead */
	retcode = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
	    RF_IO_TYPE_READ : RF_IO_TYPE_WRITE,
	    do_async, raid_addr, num_blocks,
	    bp->b_un.b_addr,
	    bp, NULL, NULL, RF_DAG_NONBLOCKING_IO | flags,
	    NULL, cbFunc, cbArg);
#if 0
	db1_printf(("After call to DoAccess: 0x%x 0x%x %d\n", bp,
	    bp->b_data, (int) bp->b_resid));
#endif

	return (retcode);
}
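
/*
 * The dispatch routine below wraps each RF_DiskQueueData_t request in a
 * struct raidbuf, initializes the embedded buf with InitBP(), and sends
 * it to the underlying component via VOP_STRATEGY(); KernelWakeupFunc()
 * is installed as the iodone callback to unwind all of this when the
 * component I/O completes.
 */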
/* invoke an I/O from kernel mode.  Disk queue should be locked upon entry */

int
rf_DispatchKernelIO(queue, req)
	RF_DiskQueue_t *queue;
	RF_DiskQueueData_t *req;
{
	int op = (req->type == RF_IO_TYPE_READ) ? B_READ : B_WRITE;
	struct buf *bp;
	struct raidbuf *raidbp = NULL;
	struct raid_softc *rs;
	int unit;

	/* XXX along with the vnode, we also need the softc associated with
	 * this device.. */

	req->queue = queue;

	unit = queue->raidPtr->raidid;

	db1_printf(("DispatchKernelIO unit: %d\n", unit));

	if (unit >= numraid) {
		printf("Invalid unit number: %d %d\n", unit, numraid);
		panic("Invalid Unit number in rf_DispatchKernelIO\n");
	}
	rs = &raid_softc[unit];

	/* XXX is this the right place? */
	disk_busy(&rs->sc_dkdev);

	bp = req->bp;
#if 1
	/* XXX when there is a physical disk failure, someone is passing us a
	 * buffer that contains old stuff!! Attempt to deal with this problem
	 * without taking a performance hit... (not sure where the real bug
	 * is.  It's buried in RAIDframe somewhere) :-(  GO ) */

	if (bp->b_flags & B_ERROR) {
		bp->b_flags &= ~B_ERROR;
	}
	if (bp->b_error != 0) {
		bp->b_error = 0;
	}
#endif
	raidbp = RAIDGETBUF(rs);

	raidbp->rf_flags = 0;	/* XXX not really used anywhere... */

	/*
	 * context for raidiodone
	 */
	raidbp->rf_obp = bp;
	raidbp->req = req;

	switch (req->type) {
	case RF_IO_TYPE_NOP:	/* used primarily to unlock a locked queue */
		/* Dprintf2("rf_DispatchKernelIO: NOP to r %d c %d\n",
		 * queue->row, queue->col); */
		/* XXX need to do something extra here.. */
		/* I'm leaving this in, as I've never actually seen it used,
		 * and I'd like folks to report it... GO */
		printf("WAKEUP CALLED\n");
		queue->numOutstanding++;

		/* XXX need to glue the original buffer into this?? */

		KernelWakeupFunc(&raidbp->rf_buf);
		break;

	case RF_IO_TYPE_READ:
	case RF_IO_TYPE_WRITE:

		if (req->tracerec) {
			RF_ETIMER_START(req->tracerec->timer);
		}
		InitBP(&raidbp->rf_buf, queue->rf_cinfo->ci_vp,
		    op | bp->b_flags, queue->rf_cinfo->ci_dev,
		    req->sectorOffset, req->numSector,
		    req->buf, KernelWakeupFunc, (void *) req,
		    queue->raidPtr->logBytesPerSector, req->b_proc);

		if (rf_debugKernelAccess) {
			db1_printf(("dispatch: bp->b_blkno = %ld\n",
			    (long) bp->b_blkno));
		}
		queue->numOutstanding++;
		queue->last_deq_sector = req->sectorOffset;
		/* acc wouldn't have been let in if there were any pending
		 * reqs at any other priority */
		queue->curPriority = req->priority;
		/* Dprintf3("rf_DispatchKernelIO: %c to row %d col %d\n",
		 * req->type, queue->row, queue->col); */

		db1_printf(("Going for %c to unit %d row %d col %d\n",
		    req->type, unit, queue->row, queue->col));
		db1_printf(("sector %d count %d (%d bytes) %d\n",
		    (int) req->sectorOffset, (int) req->numSector,
		    (int) (req->numSector <<
		    queue->raidPtr->logBytesPerSector),
		    (int) queue->raidPtr->logBytesPerSector));
		if ((raidbp->rf_buf.b_flags & B_READ) == 0) {
			raidbp->rf_buf.b_vp->v_numoutput++;
		}
		VOP_STRATEGY(&raidbp->rf_buf);

		break;

	default:
		panic("bad req->type in rf_DispatchKernelIO");
	}
	db1_printf(("Exiting from DispatchKernelIO\n"));
	return (0);
}
/* this is the callback function associated with an I/O invoked from
   kernel code.
 */
static void
KernelWakeupFunc(vbp)
	struct buf *vbp;
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;
	struct raidbuf *raidbp = (struct raidbuf *) vbp;
	struct buf *bp;
	struct raid_softc *rs;
	int unit;
	register int s;

	s = splbio();		/* XXX */
	db1_printf(("recovering the request queue:\n"));
	req = raidbp->req;

	bp = raidbp->rf_obp;
#if 0
	db1_printf(("bp=0x%x\n", bp));
#endif

	queue = (RF_DiskQueue_t *) req->queue;

	if (raidbp->rf_buf.b_flags & B_ERROR) {
#if 0
		printf("Setting bp->b_flags!!! %d\n", raidbp->rf_buf.b_error);
#endif
		bp->b_flags |= B_ERROR;
		bp->b_error = raidbp->rf_buf.b_error ?
		    raidbp->rf_buf.b_error : EIO;
	}
#if 0
	db1_printf(("raidbp->rf_buf.b_bcount=%d\n", (int) raidbp->rf_buf.b_bcount));
	db1_printf(("raidbp->rf_buf.b_bufsize=%d\n", (int) raidbp->rf_buf.b_bufsize));
	db1_printf(("raidbp->rf_buf.b_resid=%d\n", (int) raidbp->rf_buf.b_resid));
	db1_printf(("raidbp->rf_buf.b_data=0x%x\n", raidbp->rf_buf.b_data));
#endif

	/* XXX methinks this could be wrong... */
#if 1
	bp->b_resid = raidbp->rf_buf.b_resid;
#endif

	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
	bp->b_bcount = raidbp->rf_buf.b_bcount;	/* XXXX ?? */

	unit = queue->raidPtr->raidid;	/* *Much* simpler :-> */

	/* XXX Ok, let's get aggressive... If B_ERROR is set, let's go
	 * ballistic, and mark the component as hosed... */
#if 1
	if (bp->b_flags & B_ERROR) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		if (queue->raidPtr->Disks[queue->row][queue->col].status ==
		    rf_ds_optimal) {
			printf("raid%d: IO Error.  Marking %s as failed.\n",
			    unit, queue->raidPtr->Disks[queue->row][queue->col].devname);
			queue->raidPtr->Disks[queue->row][queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status[queue->row] = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			/* XXX here we should bump the version number for each component, and write that data out */
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}

	}
#endif

	rs = &raid_softc[unit];
	RAIDPUTBUF(rs, raidbp);

	if (bp->b_resid == 0) {
		db1_printf(("Disk is no longer busy for this buffer... %d %ld %ld\n",
		    unit, bp->b_resid, bp->b_bcount));
		/* XXX is this the right place for a disk_unbusy()??!??!?!? */
		disk_unbusy(&rs->sc_dkdev, (bp->b_bcount - bp->b_resid));
	} else {
		db1_printf(("b_resid is still %ld\n", bp->b_resid));
	}

	rf_DiskIOComplete(queue, req, (bp->b_flags & B_ERROR) ? 1 : 0);
	(req->CompleteFunc) (req->argument, (bp->b_flags & B_ERROR) ? 1 : 0);
	/* printf("Exiting KernelWakeupFunc\n"); */

	splx(s);		/* XXX */
}
/*
 * initialize a buf structure for doing an I/O in the kernel.
 */
static void
InitBP(
    struct buf * bp,
    struct vnode * b_vp,
    unsigned rw_flag,
    dev_t dev,
    RF_SectorNum_t startSect,
    RF_SectorCount_t numSect,
    caddr_t buf,
    void (*cbFunc) (struct buf *),
    void *cbArg,
    int logBytesPerSector,
    struct proc * b_proc)
{
	/* bp->b_flags = B_PHYS | rw_flag; */
	bp->b_flags = B_CALL | rw_flag;	/* XXX need B_PHYS here too??? */
	bp->b_bcount = numSect << logBytesPerSector;
	bp->b_bufsize = bp->b_bcount;
	bp->b_error = 0;
	bp->b_dev = dev;
	db1_printf(("bp->b_dev is %d\n", dev));
	bp->b_un.b_addr = buf;
#if 0
	db1_printf(("bp->b_data=0x%x\n", bp->b_data));
#endif

	bp->b_blkno = startSect;
	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
	db1_printf(("b_bcount is: %d\n", (int) bp->b_bcount));
	if (bp->b_bcount == 0) {
		panic("bp->b_bcount is zero in InitBP!!\n");
	}
	bp->b_proc = b_proc;
	bp->b_iodone = cbFunc;
	bp->b_vp = b_vp;

}
static void
raidgetdefaultlabel(raidPtr, rs, lp)
	RF_Raid_t *raidPtr;
	struct raid_softc *rs;
	struct disklabel *lp;
{
	db1_printf(("Building a default label...\n"));
	bzero(lp, sizeof(*lp));

	/* fabricate a label... */
	lp->d_secperunit = raidPtr->totalSectors;
	lp->d_secsize = raidPtr->bytesPerSector;
	lp->d_nsectors = 1024 * (1024 / raidPtr->bytesPerSector);
	lp->d_ntracks = 1;
	lp->d_ncylinders = raidPtr->totalSectors / lp->d_nsectors;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
	lp->d_type = DTYPE_RAID;
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);

}
/*
 * Read the disklabel from the raid device.  If one is not present, fake one
 * up.
 */
static void
raidgetdisklabel(dev)
	dev_t dev;
{
	int unit = raidunit(dev);
	struct raid_softc *rs = &raid_softc[unit];
	char *errstring;
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
	RF_Raid_t *raidPtr;

	db1_printf(("Getting the disklabel...\n"));

	bzero(clp, sizeof(*clp));

	raidPtr = raidPtrs[unit];

	raidgetdefaultlabel(raidPtr, rs, lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
	if (errstring)
		raidmakedisklabel(rs);
	else {
		int i;
		struct partition *pp;

		/*
		 * Sanity check whether the found disklabel is valid.
		 *
		 * This is necessary since the total size of the raid device
		 * may vary when an interleave is changed even though exactly
		 * the same components are used, and an old disklabel may be
		 * used if one is found.
		 */
		if (lp->d_secperunit != rs->sc_size)
			printf("WARNING: %s: "
			    "total sector size in disklabel (%d) != "
			    "the size of raid (%ld)\n", rs->sc_xname,
			    lp->d_secperunit, (long) rs->sc_size);
		for (i = 0; i < lp->d_npartitions; i++) {
			pp = &lp->d_partitions[i];
			if (pp->p_offset + pp->p_size > rs->sc_size)
				printf("WARNING: %s: end of partition `%c' "
				    "exceeds the size of raid (%ld)\n",
				    rs->sc_xname, 'a' + i, (long) rs->sc_size);
		}
	}

}
/*
 * Take care of things one might want to take care of in the event
 * that a disklabel isn't present.
 */
static void
raidmakedisklabel(rs)
	struct raid_softc *rs;
{
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	db1_printf(("Making a label..\n"));

	/*
	 * For historical reasons, if there's no disklabel present
	 * the raw partition must be marked FS_BSDFFS.
	 */

	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;

	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));

	lp->d_checksum = dkcksum(lp);
}
/*
 * Lookup the provided name in the filesystem.  If the file exists,
 * is a valid block device, and isn't being used by anyone else,
 * set *vpp to the file's vnode.
 * You'll find the original of this in ccd.c
 */
int
raidlookup(path, p, vpp)
	char *path;
	struct proc *p;
	struct vnode **vpp;	/* result */
{
	struct nameidata nd;
	struct vnode *vp;
	struct vattr va;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, p);
	if ((error = vn_open(&nd, FREAD | FWRITE, 0)) != 0) {
#ifdef DEBUG
		printf("RAIDframe: vn_open returned %d\n", error);
#endif
		return (error);
	}
	vp = nd.ni_vp;
	if (vp->v_usecount > 1) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
		return (EBUSY);
	}
	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) != 0) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
		return (error);
	}
	/* XXX: eventually we should handle VREG, too. */
	if (va.va_type != VBLK) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD | FWRITE, p->p_ucred, p);
		return (ENOTBLK);
	}
	VOP_UNLOCK(vp, 0);
	*vpp = vp;
	return (0);
}
/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 * (Hmm... where have we seen this warning before :->  GO )
 */
static int
raidlock(rs)
	struct raid_softc *rs;
{
	int error;

	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
		rs->sc_flags |= RAIDF_WANTED;
		if ((error =
		    tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
			return (error);
	}
	rs->sc_flags |= RAIDF_LOCKED;
	return (0);
}
/*
 * Unlock and wake up any waiters.
 */
static void
raidunlock(rs)
	struct raid_softc *rs;
{

	rs->sc_flags &= ~RAIDF_LOCKED;
	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
		rs->sc_flags &= ~RAIDF_WANTED;
		wakeup(rs);
	}
}
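
/*
 * On-disk location of the per-component label: it lives
 * RF_COMPONENT_INFO_OFFSET bytes into each component and occupies
 * RF_COMPONENT_INFO_SIZE bytes, as used by the label read/write
 * routines below.
 */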
#define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
#define RF_COMPONENT_INFO_SIZE     1024 /* bytes */

int
raidmarkclean(dev_t dev, struct vnode *b_vp, int mod_counter)
{
	RF_ComponentLabel_t component_label;
	raidread_component_label(dev, b_vp, &component_label);
	component_label.mod_counter = mod_counter;
	component_label.clean = RF_RAID_CLEAN;
	raidwrite_component_label(dev, b_vp, &component_label);
	return(0);
}

int
raidmarkdirty(dev_t dev, struct vnode *b_vp, int mod_counter)
{
	RF_ComponentLabel_t component_label;
	raidread_component_label(dev, b_vp, &component_label);
	component_label.mod_counter = mod_counter;
	component_label.clean = RF_RAID_DIRTY;
	raidwrite_component_label(dev, b_vp, &component_label);
	return(0);
}
/* ARGSUSED */
int
raidread_component_label(dev, b_vp, component_label)
	dev_t dev;
	struct vnode *b_vp;
	RF_ComponentLabel_t *component_label;
{
	struct buf *bp;
	int error;

	/* XXX should probably ensure that we don't try to do this if
	   someone has changed rf_protected_sectors. */

	/* get a block of the appropriate size... */
	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
	bp->b_dev = dev;

	/* get our ducks in a row for the read */
	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
	bp->b_flags = B_BUSY | B_READ;
	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);

	error = biowait(bp);

	if (!error) {
		memcpy(component_label, bp->b_un.b_addr,
		    sizeof(RF_ComponentLabel_t));
#if 0
		printf("raidread_component_label: got component label:\n");
		printf("Version: %d\n",component_label->version);
		printf("Serial Number: %d\n",component_label->serial_number);
		printf("Mod counter: %d\n",component_label->mod_counter);
		printf("Row: %d\n", component_label->row);
		printf("Column: %d\n", component_label->column);
		printf("Num Rows: %d\n", component_label->num_rows);
		printf("Num Columns: %d\n", component_label->num_columns);
		printf("Clean: %d\n", component_label->clean);
		printf("Status: %d\n", component_label->status);
#endif
	} else {
		printf("Failed to read RAID component label!\n");
	}

	bp->b_flags = B_INVAL | B_AGE;
	brelse(bp);
	return(error);
}
/* ARGSUSED */
int
raidwrite_component_label(dev, b_vp, component_label)
	dev_t dev;
	struct vnode *b_vp;
	RF_ComponentLabel_t *component_label;
{
	struct buf *bp;
	int error;

	/* get a block of the appropriate size... */
	bp = geteblk((int)RF_COMPONENT_INFO_SIZE);
	bp->b_dev = dev;

	/* get our ducks in a row for the write */
	bp->b_blkno = RF_COMPONENT_INFO_OFFSET / DEV_BSIZE;
	bp->b_bcount = RF_COMPONENT_INFO_SIZE;
	bp->b_flags = B_BUSY | B_WRITE;
	bp->b_resid = RF_COMPONENT_INFO_SIZE / DEV_BSIZE;

	memset(bp->b_un.b_addr, 0, RF_COMPONENT_INFO_SIZE);

	memcpy(bp->b_un.b_addr, component_label, sizeof(RF_ComponentLabel_t));

	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	error = biowait(bp);
	bp->b_flags = B_INVAL | B_AGE;
	brelse(bp);
	if (error) {
		printf("Failed to write RAID component info!\n");
	}

	return(error);
}
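
/*
 * Component-label bookkeeping: rf_markalldirty() bumps mod_counter and
 * marks every non-failed component dirty, while
 * rf_update_component_labels() rewrites the labels and marks components
 * clean again only when raidPtr->parity_good == RF_RAID_CLEAN.
 */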
void
rf_markalldirty( raidPtr )
	RF_Raid_t *raidPtr;
{
	RF_ComponentLabel_t c_label;
	int r,c;

	raidPtr->mod_counter++;
	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			if (raidPtr->Disks[r][c].status != rf_ds_failed) {
				raidread_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&c_label);
				if (c_label.status == rf_ds_spared) {
					/* XXX do something special...
					   but whatever you do, don't
					   try to access it!! */
				} else {
#if 0
					c_label.status =
						raidPtr->Disks[r][c].status;
					raidwrite_component_label(
						raidPtr->Disks[r][c].dev,
						raidPtr->raid_cinfo[r][c].ci_vp,
						&c_label);
#endif
					raidmarkdirty(
						raidPtr->Disks[r][c].dev,
						raidPtr->raid_cinfo[r][c].ci_vp,
						raidPtr->mod_counter);
				}
			}
		}
	}
	/* printf("Component labels marked dirty.\n"); */
#if 0
	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[r][sparecol].status == rf_ds_used_spare) {
			/*

			   XXX this is where we get fancy and map this spare
			   into its correct spot in the array.

			 */
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			for(i=0;i<raidPtr->numRow;i++) {
				for(j=0;j<raidPtr->numCol;j++) {
					if ((raidPtr->Disks[i][j].spareRow ==
					     r) &&
					    (raidPtr->Disks[i][j].spareCol ==
					     sparecol)) {
						srow = r;
						scol = sparecol;
						break;
					}
				}
			}

			raidread_component_label(
				raidPtr->Disks[r][sparecol].dev,
				raidPtr->raid_cinfo[r][sparecol].ci_vp,
				&c_label);
			/* make sure status is noted */
			c_label.version = RF_COMPONENT_LABEL_VERSION;
			c_label.mod_counter = raidPtr->mod_counter;
			c_label.serial_number = raidPtr->serial_number;
			c_label.row = srow;
			c_label.column = scol;
			c_label.num_rows = raidPtr->numRow;
			c_label.num_columns = raidPtr->numCol;
			c_label.clean = RF_RAID_DIRTY; /* changed in a bit*/
			c_label.status = rf_ds_optimal;
			raidwrite_component_label(
				raidPtr->Disks[r][sparecol].dev,
				raidPtr->raid_cinfo[r][sparecol].ci_vp,
				&c_label);
			raidmarkclean( raidPtr->Disks[r][sparecol].dev,
				raidPtr->raid_cinfo[r][sparecol].ci_vp);
		}
	}

#endif
}
void
rf_update_component_labels( raidPtr )
	RF_Raid_t *raidPtr;
{
	RF_ComponentLabel_t c_label;
	int sparecol;
	int r,c;
	int i,j;
	int srow, scol;

	srow = -1;
	scol = -1;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (r = 0; r < raidPtr->numRow; r++) {
		for (c = 0; c < raidPtr->numCol; c++) {
			if (raidPtr->Disks[r][c].status == rf_ds_optimal) {
				raidread_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&c_label);
				/* make sure status is noted */
				c_label.status = rf_ds_optimal;
				raidwrite_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&c_label);
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(
						raidPtr->Disks[r][c].dev,
						raidPtr->raid_cinfo[r][c].ci_vp,
						raidPtr->mod_counter);
				}
			}
			/* else we don't touch it.. */
#if 0
			else if (raidPtr->Disks[r][c].status !=
				 rf_ds_failed) {
				raidread_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&c_label);
				/* make sure status is noted */
				c_label.status =
					raidPtr->Disks[r][c].status;
				raidwrite_component_label(
					raidPtr->Disks[r][c].dev,
					raidPtr->raid_cinfo[r][c].ci_vp,
					&c_label);
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(
						raidPtr->Disks[r][c].dev,
						raidPtr->raid_cinfo[r][c].ci_vp,
						raidPtr->mod_counter);
				}
			}
#endif
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[0][sparecol].status == rf_ds_used_spare) {
			/*

			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			   We note that too...

			 */

			for(i=0;i<raidPtr->numRow;i++) {
				for(j=0;j<raidPtr->numCol;j++) {
					if ((raidPtr->Disks[i][j].spareRow ==
					     0) &&
					    (raidPtr->Disks[i][j].spareCol ==
					     sparecol)) {
						srow = i;
						scol = j;
						break;
					}
				}
			}

			raidread_component_label(
				raidPtr->Disks[0][sparecol].dev,
				raidPtr->raid_cinfo[0][sparecol].ci_vp,
				&c_label);
			/* make sure status is noted */
			c_label.version = RF_COMPONENT_LABEL_VERSION;
			c_label.mod_counter = raidPtr->mod_counter;
			c_label.serial_number = raidPtr->serial_number;
			c_label.row = srow;
			c_label.column = scol;
			c_label.num_rows = raidPtr->numRow;
			c_label.num_columns = raidPtr->numCol;
			c_label.clean = RF_RAID_DIRTY; /* changed in a bit*/
			c_label.status = rf_ds_optimal;
			raidwrite_component_label(
				raidPtr->Disks[0][sparecol].dev,
				raidPtr->raid_cinfo[0][sparecol].ci_vp,
				&c_label);
			if (raidPtr->parity_good == RF_RAID_CLEAN) {
				raidmarkclean( raidPtr->Disks[0][sparecol].dev,
					raidPtr->raid_cinfo[0][sparecol].ci_vp,
					raidPtr->mod_counter);
			}
		}
	}
	/* printf("Component labels updated\n"); */
}
|