We don't use Aztec C.

mycroft 1994-02-15 14:02:59 +00:00
parent 451fbfb366
commit e69b4e2e08
1 changed file with 229 additions and 229 deletions
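The diff below is a whitespace-only style cleanup of kern_lkm.c: every changed line drops the old spacing convention (a space after the opening parenthesis, comment text run flush against the closing */), presumably a leftover from the Aztec C days, in favor of the standard KNF spacing. An illustrative before/after pair, matching the lkmopen() lines further down:

	/* old style: space inside the parenthesis, comment text flush against the closing delimiter*/
	if( minor( dev) != 0)
		return( ENXIO);		/* bad minor #*/

	/* new style: space only after the keyword or function name, space before the closing delimiter */
	if (minor(dev) != 0)
		return(ENXIO);		/* bad minor # */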

kern_lkm.c

@@ -36,7 +36,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: kern_lkm.c,v 1.12 1994/02/05 02:25:03 cgd Exp $
* $Id: kern_lkm.c,v 1.13 1994/02/15 14:02:59 mycroft Exp $
*/
#include <sys/param.h>
@@ -58,7 +58,7 @@
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#define PAGESIZE 1024 /* kmem_alloc() allocation quantum*/
#define PAGESIZE 1024 /* kmem_alloc() allocation quantum */
#define LKM_ALLOC 0x01
#define LKM_WANT 0x02
@@ -77,11 +77,11 @@ static int lkm_state = LKMS_IDLE;
#define MAXLKMS 20
#endif
static struct lkm_table lkmods[ MAXLKMS]; /* table of loaded modules*/
static struct lkm_table *curp; /* global for in-progress ops*/
static struct lkm_table lkmods[MAXLKMS]; /* table of loaded modules */
static struct lkm_table *curp; /* global for in-progress ops */
/*ARGSUSED*/
lkmopen( dev, flag, devtype, p)
lkmopen(dev, flag, devtype, p)
dev_t dev;
int flag;
int devtype;
@@ -89,28 +89,28 @@ struct proc *p;
{
int error;
if( minor( dev) != 0)
return( ENXIO); /* bad minor #*/
if (minor(dev) != 0)
return(ENXIO); /* bad minor # */
/*
* Use of the loadable kernel module device must be exclusive; we
* may try to remove this restriction later, but it's really no
* hardship.
*/
while( lkm_v & LKM_ALLOC) {
if( flag & FNONBLOCK) /* don't hang*/
return( EBUSY);
while (lkm_v & LKM_ALLOC) {
if (flag & FNONBLOCK) /* don't hang */
return(EBUSY);
lkm_v |= LKM_WANT;
/*
* Sleep pending unlock; we use tsleep() to allow
* an alarm out of the open.
*/
if( error = tsleep( (caddr_t)&lkm_v, TTIPRI|PCATCH, "LKM", 0))
return( error); /* leave LKM_WANT set -- no problem*/
if (error = tsleep((caddr_t)&lkm_v, TTIPRI|PCATCH, "LKM", 0))
return(error); /* leave LKM_WANT set -- no problem */
}
lkm_v |= LKM_ALLOC;
return( 0); /* pseudo-device open*/
return(0); /* pseudo-device open */
}
@@ -124,14 +124,14 @@ struct proc *p;
static int
lkmunreserve()
{
if( lkm_state == LKMS_IDLE)
if (lkm_state == LKMS_IDLE)
return;
/*
* Actually unreserve the memory
*/
if (curp && curp->area) {
kmem_free( kmem_map, curp->area, curp->size);/**/
kmem_free(kmem_map, curp->area, curp->size);/**/
curp->area = 0;
}
@@ -139,38 +139,38 @@ lkmunreserve()
}
lkmclose( dev, flag, mode, p)
lkmclose(dev, flag, mode, p)
dev_t dev;
int flag;
int mode;
struct proc *p;
{
if( !( lkm_v & LKM_ALLOC)) {
if (!(lkm_v & LKM_ALLOC)) {
#ifdef DEBUG
printf( "LKM: close before open!\n");
#endif /* DEBUG*/
return( EBADF);
printf("LKM: close before open!\n");
#endif /* DEBUG */
return(EBADF);
}
/* do this before waking the herd...*/
if( curp && !curp->used) {
/* do this before waking the herd... */
if (curp && !curp->used) {
/*
* If we close before setting used, we have aborted
* by way of error or by way of close-on-exit from
* a premature exit of "modload".
*/
lkmunreserve(); /* coerce state to LKM_IDLE*/
lkmunreserve(); /* coerce state to LKM_IDLE */
}
lkm_v &= ~LKM_ALLOC;
wakeup( (caddr_t)&lkm_v); /* thundering herd "problem" here*/
wakeup((caddr_t)&lkm_v); /* thundering herd "problem" here */
return( 0); /* pseudo-device closed*/
return(0); /* pseudo-device closed */
}
/*ARGSUSED*/
lkmioctl( dev, cmd, data, flag)
lkmioctl(dev, cmd, data, flag)
dev_t dev;
int cmd;
caddr_t data;
@@ -183,10 +183,10 @@ int flag;
struct lmc_unload *unloadp;
struct lmc_stat *statp;
int (*funcp)();
char istr[ MAXLKMNAME];
char istr[MAXLKMNAME];
switch( cmd) {
case LMRESERV: /* reserve pages for a module*/
switch(cmd) {
case LMRESERV: /* reserve pages for a module */
if ((flag & FWRITE) == 0) /* only allow this if writing */
return EPERM;
@@ -195,45 +195,45 @@ int flag;
/*
* Find a free slot.
*/
for( i = 0; i < MAXLKMS; i++) {
if( !lkmods[ i].used)
for (i = 0; i < MAXLKMS; i++) {
if (!lkmods[i].used)
break;
}
if( i == MAXLKMS) {
err = ENOMEM; /* no slots available*/
if (i == MAXLKMS) {
err = ENOMEM; /* no slots available */
break;
}
curp = &lkmods[ i];
curp->id = i; /* self reference slot offset*/
curp = &lkmods[i];
curp->id = i; /* self reference slot offset */
resrvp->slot = i; /* return slot*/
resrvp->slot = i; /* return slot */
/*
* Get memory for module
*/
curp->size = resrvp->size;
curp->area = kmem_alloc( kmem_map, curp->size);/**/
curp->area = kmem_alloc(kmem_map, curp->size);/**/
curp->offset = 0; /* load offset*/
curp->offset = 0; /* load offset */
resrvp->addr = curp->area; /* ret kernel addr*/
resrvp->addr = curp->area; /* ret kernel addr */
#ifdef DEBUG
printf( "LKM: LMRESERV (actual = 0x%08x)\n", curp->area);
printf( "LKM: LMRESERV (adjusted = 0x%08x)\n",
printf("LKM: LMRESERV (actual = 0x%08x)\n", curp->area);
printf("LKM: LMRESERV (adjusted = 0x%08x)\n",
trunc_page(curp->area));
#endif /* DEBUG*/
#endif /* DEBUG */
lkm_state = LKMS_RESERVED;
break;
case LMLOADBUF: /* Copy in; stateful, follows LMRESERV*/
case LMLOADBUF: /* Copy in; stateful, follows LMRESERV */
if ((flag & FWRITE) == 0) /* only allow this if writing */
return EPERM;
loadbufp = (struct lmc_loadbuf *)data;
i = loadbufp->cnt;
if( (lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
if ((lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
|| i < 0
|| i > MODIOBUF
|| i > curp->size - curp->offset) {
@@ -241,91 +241,91 @@ int flag;
break;
}
/* copy in buffer full of data*/
if( err = copyin( (caddr_t)loadbufp->data, (caddr_t)curp->area + curp->offset, i))
/* copy in buffer full of data */
if (err = copyin((caddr_t)loadbufp->data, (caddr_t)curp->area + curp->offset, i))
break;
if( ( curp->offset + i) < curp->size) {
if ((curp->offset + i) < curp->size) {
lkm_state = LKMS_LOADING;
#ifdef DEBUG
printf( "LKM: LMLOADBUF (loading @ %d of %d, i = %d)\n",
printf("LKM: LMLOADBUF (loading @ %d of %d, i = %d)\n",
curp->offset, curp->size, i);
#endif /* DEBUG*/
#endif /* DEBUG */
} else {
lkm_state = LKMS_LOADED;
#ifdef DEBUG
printf( "LKM: LMLOADBUF (loaded)\n");
#endif /* DEBUG*/
printf("LKM: LMLOADBUF (loaded)\n");
#endif /* DEBUG */
}
curp->offset += i;
break;
case LMUNRESRV: /* discard reserved pages for a module*/
case LMUNRESRV: /* discard reserved pages for a module */
if ((flag & FWRITE) == 0) /* only allow this if writing */
return EPERM;
lkmunreserve(); /* coerce state to LKM_IDLE*/
lkmunreserve(); /* coerce state to LKM_IDLE */
#ifdef DEBUG
printf( "LKM: LMUNRESERV\n");
#endif /* DEBUG*/
printf("LKM: LMUNRESERV\n");
#endif /* DEBUG */
break;
case LMREADY: /* module loaded: call entry*/
case LMREADY: /* module loaded: call entry */
if ((flag & FWRITE) == 0) /* only allow this if writing */
return EPERM;
if( lkm_state != LKMS_LOADED) {
if (lkm_state != LKMS_LOADED) {
#ifdef DEBUG
printf( "lkm_state is %02x\n", lkm_state);
#endif /* DEBUG*/
printf("lkm_state is %02x\n", lkm_state);
#endif /* DEBUG */
err = ENXIO;
break;
}
curp->entry = (int (*)()) (*((int *) ( data)));
curp->entry = (int (*)()) (*((int *) (data)));
/* call entry(load)... (assigns "private" portion)*/
if( err = (*(curp->entry))( curp, LKM_E_LOAD, LKM_VERSION)) {
/* call entry(load)... (assigns "private" portion) */
if (err = (*(curp->entry))(curp, LKM_E_LOAD, LKM_VERSION)) {
/*
* Module may refuse loading or may have a
* version mismatch...
*/
lkm_state = LKMS_UNLOADING; /* for lkmunreserve*/
lkmunreserve(); /* free memory*/
curp->used = 0; /* free slot*/
lkm_state = LKMS_UNLOADING; /* for lkmunreserve */
lkmunreserve(); /* free memory */
curp->used = 0; /* free slot */
break;
}
curp->used = 1;
#ifdef DEBUG
printf( "LKM: LMREADY\n");
#endif /* DEBUG*/
printf("LKM: LMREADY\n");
#endif /* DEBUG */
lkm_state = LKMS_IDLE;
break;
case LMUNLOAD: /* unload a module*/
case LMUNLOAD: /* unload a module */
if ((flag & FWRITE) == 0) /* only allow this if writing */
return EPERM;
unloadp = (struct lmc_unload *)data;
if( ( i = unloadp->id) == -1) { /* unload by name*/
if ((i = unloadp->id) == -1) { /* unload by name */
/*
* Copy name and lookup id from all loaded
* modules. May fail.
*/
if (err = copyinstr( unloadp->name, istr, MAXLKMNAME-1, NULL))
if (err = copyinstr(unloadp->name, istr, MAXLKMNAME-1, NULL))
break;
/*
* look up id...
*/
for( i = 0; i < MAXLKMS; i++) {
if( !lkmods[ i].used)
for (i = 0; i < MAXLKMS; i++) {
if (!lkmods[i].used)
continue;
if( !strcmp( istr,
lkmods[ i].private.lkm_any->lkm_name))
if (!strcmp(istr,
lkmods[i].private.lkm_any->lkm_name))
break;
}
}
@@ -333,47 +333,47 @@ int flag;
/*
* Range check the value; on failure, return EINVAL
*/
if( i < 0 || i >= MAXLKMS) {
if (i < 0 || i >= MAXLKMS) {
err = EINVAL;
break;
}
curp = &lkmods[ i];
curp = &lkmods[i];
/* call entry(unload)*/
if( (*(curp->entry))( curp, LKM_E_UNLOAD, LKM_VERSION)) {
/* call entry(unload) */
if ((*(curp->entry))(curp, LKM_E_UNLOAD, LKM_VERSION)) {
err = EBUSY;
break;
}
lkm_state = LKMS_UNLOADING; /* non-idle for lkmunreserve*/
lkmunreserve(); /* free memory*/
curp->used = 0; /* free slot*/
lkm_state = LKMS_UNLOADING; /* non-idle for lkmunreserve */
lkmunreserve(); /* free memory */
curp->used = 0; /* free slot */
break;
case LMSTAT: /* stat a module by id/name*/
case LMSTAT: /* stat a module by id/name */
/* allow readers and writers to stat */
statp = (struct lmc_stat *)data;
if( ( i = statp->id) == -1) { /* stat by name*/
if ((i = statp->id) == -1) { /* stat by name */
/*
* Copy name and lookup id from all loaded
* modules.
*/
copystr( statp->name, istr, MAXLKMNAME-1, NULL);
copystr(statp->name, istr, MAXLKMNAME-1, NULL);
/*
* look up id...
*/
for( i = 0; i < MAXLKMS; i++) {
if( !lkmods[ i].used)
for (i = 0; i < MAXLKMS; i++) {
if (!lkmods[i].used)
continue;
if( !strcmp( istr,
lkmods[ i].private.lkm_any->lkm_name))
if (!strcmp(istr,
lkmods[i].private.lkm_any->lkm_name))
break;
}
if( i == MAXLKMS) { /* Not found*/
if (i == MAXLKMS) { /* Not found */
err = ENOENT;
break;
}
@@ -382,14 +382,14 @@ int flag;
/*
* Range check the value; on failure, return EINVAL
*/
if( i < 0 || i >= MAXLKMS) {
if (i < 0 || i >= MAXLKMS) {
err = EINVAL;
break;
}
curp = &lkmods[ i];
curp = &lkmods[i];
if( !curp->used) { /* Not found*/
if (!curp->used) { /* Not found */
err = ENOENT;
break;
}
@@ -404,14 +404,14 @@ int flag;
statp->size = curp->size / PAGESIZE;
statp->private = (unsigned long)curp->private.lkm_any;
statp->ver = curp->private.lkm_any->lkm_ver;
copystr( curp->private.lkm_any->lkm_name,
copystr(curp->private.lkm_any->lkm_name,
statp->name,
MAXLKMNAME - 2,
NULL);
break;
default: /* bad ioctl()...*/
default: /* bad ioctl()... */
err = ENOTTY;
break;
}
@@ -420,7 +420,7 @@ int flag;
}
/*********************************************************************/
/******************************************************************** */
/*
@@ -433,7 +433,7 @@ int flag;
*/
lkmnosys()
{
return( nosys());
return(nosys());
}
/*
@@ -448,14 +448,14 @@ lkmnosys()
int
lkmenodev()
{
return( enodev());
return(enodev());
}
/*********************************************************************/
int
lkmexists( lkmtp)
lkmexists(lkmtp)
struct lkm_table *lkmtp;
{
int i;
@@ -463,19 +463,19 @@ struct lkm_table *lkmtp;
/*
* see if name exists...
*/
for( i = 0; i < MAXLKMS; i++) {
for (i = 0; i < MAXLKMS; i++) {
/*
* An unused module and the one we are testing are not
* considered.
*/
if( !lkmods[ i].used || &lkmods[ i] == lkmtp)
if (!lkmods[i].used || &lkmods[i] == lkmtp)
continue;
if( !strcmp( lkmtp->private.lkm_any->lkm_name,
lkmods[ i].private.lkm_any->lkm_name))
return( 1); /* already loaded...*/
if (!strcmp(lkmtp->private.lkm_any->lkm_name,
lkmods[i].private.lkm_any->lkm_name))
return(1); /* already loaded... */
}
return( 0); /* module not loaded...*/
return(0); /* module not loaded... */
}
@@ -484,64 +484,64 @@ struct lkm_table *lkmtp;
* by lkmtp, load/unload/stat it depending on the cmd requested.
*/
static int
_lkm_syscall( lkmtp, cmd)
_lkm_syscall(lkmtp, cmd)
struct lkm_table *lkmtp;
int cmd;
{
struct lkm_syscall *args = lkmtp->private.lkm_syscall;
int i;
int err = 0;
extern int nsysent; /* init_sysent.c*/
extern int nsysent; /* init_sysent.c */
switch( cmd) {
switch(cmd) {
case LKM_E_LOAD:
/* don't load twice!*/
if( lkmexists( lkmtp))
return( EEXIST);
if( ( i = args->lkm_offset) == -1) { /* auto*/
/* don't load twice! */
if (lkmexists(lkmtp))
return(EEXIST);
if ((i = args->lkm_offset) == -1) { /* auto */
/*
* Search the table looking for a slot...
*/
for( i = 0; i < nsysent; i++)
if( sysent[ i].sy_call == lkmnosys)
break; /* found it!*/
/* out of allocable slots?*/
if( i == nsysent) {
for (i = 0; i < nsysent; i++)
if (sysent[i].sy_call == lkmnosys)
break; /* found it! */
/* out of allocable slots? */
if (i == nsysent) {
err = ENFILE;
break;
}
} else { /* assign*/
if( i < 0 || i >= nsysent) {
} else { /* assign */
if (i < 0 || i >= nsysent) {
err = EINVAL;
break;
}
}
/* save old*/
bcopy( &sysent[ i], &(args->lkm_oldent), sizeof( struct sysent));
/* save old */
bcopy(&sysent[i], &(args->lkm_oldent), sizeof(struct sysent));
/* replace with new*/
bcopy( args->lkm_sysent, &sysent[ i], sizeof( struct sysent));
/* replace with new */
bcopy(args->lkm_sysent, &sysent[i], sizeof(struct sysent));
/* done!*/
args->lkm_offset = i; /* slot in sysent[]*/
/* done! */
args->lkm_offset = i; /* slot in sysent[] */
break;
case LKM_E_UNLOAD:
/* current slot...*/
/* current slot... */
i = args->lkm_offset;
/* replace current slot contents with old contents*/
bcopy( &(args->lkm_oldent), &sysent[ i], sizeof( struct sysent));
/* replace current slot contents with old contents */
bcopy(&(args->lkm_oldent), &sysent[i], sizeof(struct sysent));
break;
case LKM_E_STAT: /* no special handling...*/
case LKM_E_STAT: /* no special handling... */
break;
}
return( err);
return(err);
}
@@ -550,7 +550,7 @@ int cmd;
* to by lkmtp, load/unload/stat it depending on the cmd requested.
*/
static int
_lkm_vfs( lkmtp, cmd)
_lkm_vfs(lkmtp, cmd)
struct lkm_table *lkmtp;
int cmd;
{
@@ -558,11 +558,11 @@ int cmd;
int i;
int err = 0;
switch( cmd) {
switch(cmd) {
case LKM_E_LOAD:
/* don't load twice!*/
if( lkmexists( lkmtp))
return( EEXIST);
/* don't load twice! */
if (lkmexists(lkmtp))
return(EEXIST);
/*
* Currently, the VFS and mount code in 386BSD is malformed;
* this means that the per volume file system identifier is
@@ -575,12 +575,12 @@ int cmd;
* I will correct the VFS code when I get a chance.
*/
i = args->lkm_offset;
if( i < 0 || i > MOUNT_MAXTYPE) {
if (i < 0 || i > MOUNT_MAXTYPE) {
err = EINVAL;
break;
}
if( vfssw[ i] != (struct vfsops *)0) {
if (vfssw[i] != (struct vfsops *)0) {
err = EEXIST;
break;
}
@@ -588,32 +588,32 @@ int cmd;
/*
* Set up file system
*/
vfssw[ i] = args->lkm_vfsops;
vfssw[i] = args->lkm_vfsops;
/*
* Call init function for this VFS...
*/
( *(vfssw[ i]->vfs_init))( args->lkm_flags);
(*(vfssw[i]->vfs_init))(args->lkm_flags);
/* done!*/
args->lkm_offset = i; /* slot in sysent[]*/
/* done! */
args->lkm_offset = i; /* slot in sysent[] */
break;
case LKM_E_UNLOAD:
/* current slot...*/
/* current slot... */
i = args->lkm_offset;
/* replace current slot contents with old contents*/
vfssw[ i] = (struct vfsops *)0;
/* replace current slot contents with old contents */
vfssw[i] = (struct vfsops *)0;
break;
case LKM_E_STAT: /* no special handling...*/
case LKM_E_STAT: /* no special handling... */
break;
}
return( err);
return(err);
}
@@ -622,80 +622,80 @@ int cmd;
* by lkmtp, load/unload/stat it depending on the cmd requested.
*/
static int
_lkm_dev( lkmtp, cmd)
_lkm_dev(lkmtp, cmd)
struct lkm_table *lkmtp;
int cmd;
{
struct lkm_dev *args = lkmtp->private.lkm_dev;
int i;
int err = 0;
extern int nblkdev; /* i386/i386/conf.c*/
extern int nchrdev; /* i386/i386/conf.c*/
extern int nblkdev; /* i386/i386/conf.c */
extern int nchrdev; /* i386/i386/conf.c */
switch( cmd) {
switch(cmd) {
case LKM_E_LOAD:
/* don't load twice!*/
if( lkmexists( lkmtp))
return( EEXIST);
switch( args->lkm_devtype) {
/* don't load twice! */
if (lkmexists(lkmtp))
return(EEXIST);
switch(args->lkm_devtype) {
case LM_DT_BLOCK:
if( ( i = args->lkm_offset) == -1) { /* auto*/
if ((i = args->lkm_offset) == -1) { /* auto */
/*
* Search the table looking for a slot...
*/
for( i = 0; i < nblkdev; i++)
if( bdevsw[ i].d_open == lkmenodev)
break; /* found it!*/
/* out of allocable slots?*/
if( i == nblkdev) {
for (i = 0; i < nblkdev; i++)
if (bdevsw[i].d_open == lkmenodev)
break; /* found it! */
/* out of allocable slots? */
if (i == nblkdev) {
err = ENFILE;
break;
}
} else { /* assign*/
if( i < 0 || i >= nblkdev) {
} else { /* assign */
if (i < 0 || i >= nblkdev) {
err = EINVAL;
break;
}
}
/* save old*/
bcopy( &bdevsw[ i], &(args->lkm_olddev.bdev), sizeof( struct bdevsw));
/* save old */
bcopy(&bdevsw[i], &(args->lkm_olddev.bdev), sizeof(struct bdevsw));
/* replace with new*/
bcopy( args->lkm_dev.bdev, &bdevsw[ i], sizeof( struct bdevsw));
/* replace with new */
bcopy(args->lkm_dev.bdev, &bdevsw[i], sizeof(struct bdevsw));
/* done!*/
args->lkm_offset = i; /* slot in bdevsw[]*/
/* done! */
args->lkm_offset = i; /* slot in bdevsw[] */
break;
case LM_DT_CHAR:
if( ( i = args->lkm_offset) == -1) { /* auto*/
if ((i = args->lkm_offset) == -1) { /* auto */
/*
* Search the table looking for a slot...
*/
for( i = 0; i < nchrdev; i++)
if( cdevsw[ i].d_open == lkmenodev)
break; /* found it!*/
/* out of allocable slots?*/
if( i == nchrdev) {
for (i = 0; i < nchrdev; i++)
if (cdevsw[i].d_open == lkmenodev)
break; /* found it! */
/* out of allocable slots? */
if (i == nchrdev) {
err = ENFILE;
break;
}
} else { /* assign*/
if( i < 0 || i >= nchrdev) {
} else { /* assign */
if (i < 0 || i >= nchrdev) {
err = EINVAL;
break;
}
}
/* save old*/
bcopy( &cdevsw[ i], &(args->lkm_olddev.cdev), sizeof( struct cdevsw));
/* save old */
bcopy(&cdevsw[i], &(args->lkm_olddev.cdev), sizeof(struct cdevsw));
/* replace with new*/
bcopy( args->lkm_dev.cdev, &cdevsw[ i], sizeof( struct cdevsw));
/* replace with new */
bcopy(args->lkm_dev.cdev, &cdevsw[i], sizeof(struct cdevsw));
/* done!*/
args->lkm_offset = i; /* slot in cdevsw[]*/
/* done! */
args->lkm_offset = i; /* slot in cdevsw[] */
break;
@@ -706,18 +706,18 @@ int cmd;
break;
case LKM_E_UNLOAD:
/* current slot...*/
/* current slot... */
i = args->lkm_offset;
switch( args->lkm_devtype) {
switch(args->lkm_devtype) {
case LM_DT_BLOCK:
/* replace current slot contents with old contents*/
bcopy( &(args->lkm_olddev.bdev), &bdevsw[ i], sizeof( struct bdevsw));
/* replace current slot contents with old contents */
bcopy(&(args->lkm_olddev.bdev), &bdevsw[i], sizeof(struct bdevsw));
break;
case LM_DT_CHAR:
/* replace current slot contents with old contents*/
bcopy( &(args->lkm_olddev.cdev), &cdevsw[ i], sizeof( struct cdevsw));
/* replace current slot contents with old contents */
bcopy(&(args->lkm_olddev.cdev), &cdevsw[i], sizeof(struct cdevsw));
break;
default:
@@ -726,11 +726,11 @@ int cmd;
}
break;
case LKM_E_STAT: /* no special handling...*/
case LKM_E_STAT: /* no special handling... */
break;
}
return( err);
return(err);
}
@@ -740,7 +740,7 @@ int cmd;
* by lkmtp, load/unload/stat it depending on the cmd requested.
*/
static int
_lkm_strmod( lkmtp, cmd)
_lkm_strmod(lkmtp, cmd)
struct lkm_table *lkmtp;
int cmd;
{
@@ -748,30 +748,30 @@ int cmd;
int i;
int err = 0;
switch( cmd) {
switch(cmd) {
case LKM_E_LOAD:
/* don't load twice!*/
if( lkmexists( lkmtp))
return( EEXIST);
/* don't load twice! */
if (lkmexists(lkmtp))
return(EEXIST);
break;
case LKM_E_UNLOAD:
break;
case LKM_E_STAT: /* no special handling...*/
case LKM_E_STAT: /* no special handling... */
break;
}
return( err);
return(err);
}
#endif /* STREAMS*/
#endif /* STREAMS */
/*
* For the loadable execution class described by the structure pointed to
* by lkmtp, load/unload/stat it depending on the cmd requested.
*/
static int
_lkm_exec( lkmtp, cmd)
_lkm_exec(lkmtp, cmd)
struct lkm_table *lkmtp;
int cmd;
{
@@ -779,49 +779,49 @@ int cmd;
int i;
int err = 0;
switch( cmd) {
switch(cmd) {
case LKM_E_LOAD:
/* don't load twice!*/
if( lkmexists( lkmtp))
return( EEXIST);
if( ( i = args->lkm_offset) == -1) { /* auto*/
/* don't load twice! */
if (lkmexists(lkmtp))
return(EEXIST);
if ((i = args->lkm_offset) == -1) { /* auto */
/*
* Search the table looking for a slot...
*/
for( i = 0; i < nexecs; i++)
if( execsw[ i].es_check == NULL)
break; /* found it!*/
/* out of allocable slots?*/
if( i == nexecs) {
for (i = 0; i < nexecs; i++)
if (execsw[i].es_check == NULL)
break; /* found it! */
/* out of allocable slots? */
if (i == nexecs) {
err = ENFILE;
break;
}
} else { /* assign*/
if( i < 0 || i >= nexecs) {
} else { /* assign */
if (i < 0 || i >= nexecs) {
err = EINVAL;
break;
}
}
/* save old*/
/* save old */
bcopy(&execsw[i], &(args->lkm_oldexec), sizeof(struct execsw));
/* replace with new*/
/* replace with new */
bcopy(args->lkm_exec, &execsw[i], sizeof(struct execsw));
/* realize need to recompute max header size */
exec_maxhdrsz = 0;
/* done!*/
args->lkm_offset = i; /* slot in execsw[]*/
/* done! */
args->lkm_offset = i; /* slot in execsw[] */
break;
case LKM_E_UNLOAD:
/* current slot...*/
/* current slot... */
i = args->lkm_offset;
/* replace current slot contents with old contents*/
/* replace current slot contents with old contents */
bcopy(&(args->lkm_oldexec), &execsw[i], sizeof(struct execsw));
/* realize need to recompute max header size */
@@ -829,11 +829,11 @@ int cmd;
break;
case LKM_E_STAT: /* no special handling...*/
case LKM_E_STAT: /* no special handling... */
break;
}
return( err);
return(err);
}
/*
@@ -842,23 +842,23 @@ int cmd;
* is assumed to be done in their entry routines internal to the module
* itself.
*/
lkmdispatch( lkmtp, cmd)
lkmdispatch(lkmtp, cmd)
struct lkm_table *lkmtp;
int cmd;
{
int err = 0; /* default = success*/
int err = 0; /* default = success */
switch( lkmtp->private.lkm_any->lkm_type) {
switch(lkmtp->private.lkm_any->lkm_type) {
case LM_SYSCALL:
err = _lkm_syscall( lkmtp, cmd);
err = _lkm_syscall(lkmtp, cmd);
break;
case LM_VFS:
err = _lkm_vfs( lkmtp, cmd);
err = _lkm_vfs(lkmtp, cmd);
break;
case LM_DEV:
err = _lkm_dev( lkmtp, cmd);
err = _lkm_dev(lkmtp, cmd);
break;
#ifdef STREAMS
@@ -868,19 +868,19 @@ int cmd;
}
break;
#endif /* STREAMS*/
#endif /* STREAMS */
case LM_EXEC:
err = _lkm_exec( lkmtp, cmd);
err = _lkm_exec(lkmtp, cmd);
break;
case LM_MISC: /* ignore content -- no "misc-specific" procedure*/
case LM_MISC: /* ignore content -- no "misc-specific" procedure */
break;
default:
err = ENXIO; /* unknown type*/
err = ENXIO; /* unknown type */
break;
}
return( err);
return(err);
}