If we are using the VBAR to access the system vectors, we can just branch
to the exception routines, avoiding a load.  VBAR only exists on those
processors which implement the Security Extensions.
This commit is contained in:
matt 2013-06-12 07:17:23 +00:00
parent 2889348c0f
commit d8de3b9ec7

View File

@ -1,4 +1,4 @@
/* $NetBSD: vectors.S,v 1.4 2002/08/17 16:36:32 thorpej Exp $ */
/* $NetBSD: vectors.S,v 1.5 2013/06/12 07:17:23 matt Exp $ */
/*
* Copyright (C) 1994-1997 Mark Brinicombe
@ -32,6 +32,7 @@
*/
#include "assym.h"
#include "opt_cputypes.h"
#include <machine/asm.h>
/*
@ -47,6 +48,26 @@
.global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
.global _C_LABEL(fiqvector)
#if defined(CPU_ARMV7) || defined(CPU_ARM11)
/*
 * ARMv[67] processors with the Security Extension have the VBAR
 * which redirects the low vector to any 32-byte aligned address.
 * Since we are in kernel, we can just do a relative branch to the
 * exception code and avoid the intermediate load.
 *
 * Each slot below is one 4-byte branch instruction, so slot N sits at
 * byte offset 4*N from page0rel, matching the architectural vector
 * offsets annotated on each entry.
 */
.global _C_LABEL(page0rel)
.p2align 5	/* 32-byte alignment: low 5 bits of VBAR are not writable */
_C_LABEL(page0rel):
b reset_entry	/* 0x00: reset */
b undefined_entry	/* 0x04: undefined instruction */
b swi_entry	/* 0x08: software interrupt (SWI/SVC) */
b prefetch_abort_entry	/* 0x0c: prefetch abort */
b data_abort_entry	/* 0x10: data abort */
b address_exception_entry	/* 0x14: address exception (legacy 26-bit) */
b irq_entry	/* 0x18: IRQ */
b _C_LABEL(fiqvector)	/* 0x1c: FIQ — branch straight to the FIQ code */
#endif
_C_LABEL(page0):
ldr pc, .Lreset_target
ldr pc, .Lundefined_target