Add an optimized in4_cksum().

Author: eeh
Date:   2001-08-10 20:53:11 +00:00
parent 12a3da3896
commit 0e7789220f

2 changed files with 184 additions and 61 deletions
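
The new interface is int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len): it checksums len bytes of the chain, skipping the first off bytes, and when nxt is non-zero it also folds in the IPv4 pseudo header for protocol nxt (so m must point at the IP header). A minimal sketch of the intended call pattern, assuming hypothetical names (udp_cksum_sketch, iphlen, ulen) that are not part of this commit:

	/*
	 * Illustrative only: computing a UDP checksum with the new
	 * routine.  udp_cksum_sketch(), iphlen and ulen are hypothetical.
	 */
	static u_int16_t
	udp_cksum_sketch(struct mbuf *m, int iphlen, int ulen)
	{
		u_int16_t sum;

		/* pseudo header + UDP header + payload */
		sum = in4_cksum(m, IPPROTO_UDP, iphlen, ulen);
		if (sum == 0)
			sum = 0xffff;	/* on the wire, 0 means "no UDP checksum" */
		return (sum);
	}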

in4_cksum.c (new file)

@@ -0,0 +1,107 @@
/* $NetBSD: in4_cksum.c,v 1.1 2001/08/10 20:53:11 eeh Exp $ */

/*
 * Copyright (c) 2001 Eduardo Horvath.
 * Copyright (c) 1995 Zubin Dittia.
 * Copyright (c) 1995 Matthew R. Green.
 * Copyright (c) 1994, 1998 Charles M. Hannum.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, and its contributors.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)in_cksum.c	8.1 (Berkeley) 6/11/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>

extern int in_cksum_internal __P((struct mbuf *, int len, int offset, int sum));

int
in4_cksum(m, nxt, off, len)
	struct mbuf *m;
	u_int8_t nxt;
	int off, len;
{
	u_char *w;
	u_int sum = 0;
	struct ipovly ipov;
	/*
	 * Declare three temporary registers for use by the asm code.  We
	 * allow the compiler to pick which specific machine registers to
	 * use, instead of hard-coding this in the asm code.
	 */
	u_int tmp1, tmp2, tmp3;

	if (nxt != 0) {
		/* pseudo header */
		memset(&ipov, 0, sizeof(ipov));
		ipov.ih_len = htons(len);
		ipov.ih_pr = nxt;
		ipov.ih_src = mtod(m, struct ip *)->ip_src;
		ipov.ih_dst = mtod(m, struct ip *)->ip_dst;
		w = (u_char *)&ipov;
		/* assumes sizeof(ipov) == 20 */
		__asm __volatile(" lduw [%5 + 0], %1; "
			" lduw [%5 + 4], %2; "
			" lduw [%5 + 8], %3; add %0, %1, %0; "
			" lduw [%5 + 12], %1; add %0, %2, %0; "
			" lduw [%5 + 16], %2; add %0, %3, %0; "
			" mov -1, %3; "
			" add %0, %1, %0; "
			" srl %3, 0, %3; "
			" add %0, %2, %0; "
			" srlx %0, 32, %2; and %0, %3, %1; "
			" add %0, %2, %0; "
			: "=r" (sum), "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
			: "0" (sum), "r" (w));
	}

	/* skip unnecessary part */
	while (m && off > 0) {
		if (m->m_len > off)
			break;
		off -= m->m_len;
		m = m->m_next;
	}

	return (in_cksum_internal(m, len, off, sum));
}
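
For readers who do not speak SPARCv9 assembly, the inline asm above adds the five 32-bit words of the 20-byte pseudo header into sum and then folds the upper half of the 64-bit accumulator back in. A portable sketch of the same computation, under the same sizeof(ipov) == 20 assumption (pseudo_hdr_sum is a hypothetical name, not part of the commit):

	/*
	 * Portable sketch of the pseudo-header summation done by the
	 * inline assembly above.
	 */
	static u_int
	pseudo_hdr_sum(const u_char *w, u_int sum)
	{
		u_int64_t s = sum;
		u_int32_t word;
		int i;

		for (i = 0; i < 20; i += 4) {
			memcpy(&word, w + i, sizeof(word));	/* like lduw */
			s += word;
		}
		s = (s & 0xffffffff) + (s >> 32);	/* fold 64 -> 32 bits */
		return ((u_int)s);
	}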

in_cksum.S

@@ -1,4 +1,4 @@
/* $NetBSD: in_cksum.S,v 1.1 2001/08/08 00:12:37 eeh Exp $ */
/* $NetBSD: in_cksum.S,v 1.2 2001/08/10 20:53:11 eeh Exp $ */
/*
 * Copyright (c) 2001 Eduardo Horvath
@@ -34,6 +34,7 @@
/*
 * int in_cksum(struct mbuf *m, int len)
 * int in_cksum_internal(struct mbuf *m, int len, int offset, int sum)
 *
 * The only fields of the mbuf we really care about
 * are m_next, m_len, and m_data.
@@ -44,9 +45,9 @@
 * %o0 - mbuf
 * %o1 - len
 * %o2 - mlen
 * %o3 - mdata
 * %o3 - sum
 * %o4 - temp
 * %o5 - sum
 * %o5 - mdata
 * %g1 - swapped
 * %g4 - temp
 * %g5 - temp
@@ -55,56 +56,72 @@
#define	IALIGN	.align 32
ENTRY(in_cksum)
	clr	%o3				! sum = 0;
	clr	%o2
_ENTRY(_C_LABEL(in_cksum_internal))
	brz	%o0, Lfinish			! for (; m && len > 0; m->m_next) {
	clr	%o5				! sum = 0;
	brlez	%o1, Lfinish
	clr	%g1				! swapped = 0;
	ba,a,pt	%icc, Lloop
	nop
	brlez	%o1, Lfinish
	mov	%o2, %o4			! Stash this elsewhere for a bit
	lduw	[%o0 + M_LEN], %o2		! Code duplicated at Lloop
	srlx	%o3, 32, %g4			! REDUCE bigtime
	sethi	%hi(0xffff), %g5
	ldx	[%o0 + M_DATA], %o5
	srl	%o3, 0, %o3
	or	%g5, %lo(0xffff), %g5
	sub	%o2, %o4, %o2			! Correct for initial offset
	ba,pt	%icc, 0f
	add	%o5, %o4, %o5
	IALIGN
Lloop:
	lduw	[%o0 + M_LEN], %o2
	srlx	%o3, 32, %g4			! REDUCE bigtime
	sethi	%hi(0xffff), %g5
	ldx	[%o0 + M_DATA], %o3
	ldx	[%o0 + M_DATA], %o5
	srl	%o3, 0, %o3
	or	%g5, %lo(0xffff), %g5
0:
	add	%o3, %g4, %o3
	brz	%o2, Lnext			! if (m->m_len == 0) continue;
	cmp	%o1, %o2			! if (len < mlen)
	movl	%icc, %o1, %o2			! mlen = len;
	btst	3, %o3				! if (!(*w & 3)) {
	btst	3, %o5				! if (!(*w & 3)) {
	bz	Lint_aligned
	sub	%o1, %o2, %o1			! len -= mlen
	srlx	%o5, 16, %o4			! REDUCE {sum = (sum & 0xffff) + (sum >> 16);}
	and	%o5, %g5, %o5
	srlx	%o3, 16, %o4			! REDUCE {sum = (sum & 0xffff) + (sum >> 16);}
	and	%o3, %g5, %o3
	add	%o5, %o4, %o5
	btst	1, %o3				! if (!(*w & 3) &&
	add	%o3, %o4, %o3
	btst	1, %o5				! if (!(*w & 3) &&
	bz	Lshort_aligned
	nop
	deccc	%o2
	bl,a,pn	%icc, Lfinish			! mlen >= 1) {
	bl,a,pn	%icc, Lnext			! mlen >= 1) {
	inc	%o2
	ldub	[%o3], %o4			! ADDBYTE {ROL; sum += *w; byte_swapped ^= 1;}
	sllx	%o5, 8, %o5			! ROL { sum = sum << 8; }
	inc	%o3				! }
	add	%o5, %o4, %o5
	ldub	[%o5], %o4			! ADDBYTE {ROL; sum += *w; byte_swapped ^= 1;}
	sllx	%o3, 8, %o3			! ROL { sum = sum << 8; }
	inc	%o5				! }
	add	%o3, %o4, %o3
	xor	%g1, 1, %g1			! Flip byte_swapped
Lshort_aligned:
	btst	2, %o3				! if (!(*w & 3) &&
	btst	2, %o5				! if (!(*w & 3) &&
	bz	Lint_aligned
	nop
	deccc	2, %o2				! mlen >= 1) {
	bl,a,pn	%icc, Lfinish_byte
	inc	2, %o2
	lduh	[%o3], %o4			! ADDSHORT {sum += *(u_short *)w;}
	inc	2, %o3				! }
	add	%o5, %o4, %o5			! }
	lduh	[%o5], %o4			! ADDSHORT {sum += *(u_short *)w;}
	inc	2, %o5				! }
	add	%o3, %o4, %o3			! }
Lint_aligned:
	deccc	0xc, %o2			! while (mlen >= 12) {
	ble,pn	%icc, Ltoofar
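
The prologue above is the usual in_cksum() alignment dance, written out in the braced pseudo-C of the inline comments: consume one byte if the data pointer is odd (rotating the sum and toggling the swapped flag), then one halfword if the pointer is only 2-byte aligned, so the main loop can run on 4-byte-aligned words. Rendered as a pseudo-C fragment in the same style as those comments (a sketch, not code from this commit):

	/* Sketch: w = mdata, sum/mlen/byte_swapped as in the register comments. */
	if ((uintptr_t)w & 3) {
		if (((uintptr_t)w & 1) && mlen >= 1) {
			sum <<= 8;			/* ROL */
			sum += *w++;			/* ADDBYTE */
			byte_swapped ^= 1;
			mlen--;
		}
		if (((uintptr_t)w & 2) && mlen >= 2) {
			sum += *(u_int16_t *)w;		/* ADDSHORT */
			w += 2;
			mlen -= 2;
		}
	}
	/* w is now 4-byte aligned; the unrolled loop below adds 12 bytes per pass. */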
@@ -113,17 +130,17 @@ Lint_aligned:
	clr	%g4
	IALIGN
0:
	lduw	[%o3 + 0x00], %o4
	add	%o5, %g4, %o5
	lduw	[%o5 + 0x00], %o4
	add	%o3, %g4, %o3
	deccc	0xc, %o2
	lduw	[%o3 + 0x04], %g4
	add	%o5, %g5, %o5
	lduw	[%o3 + 0x08], %g5
	inc	0xc, %o3			! ADVANCE(12) }
	lduw	[%o5 + 0x04], %g4
	add	%o3, %g5, %o3
	lduw	[%o5 + 0x08], %g5
	inc	0xc, %o5			! ADVANCE(12) }
	bg,pt	%icc, 0b
	add	%o5, %o4, %o5
	add	%o5, %g4, %o5
	add	%o5, %g5, %o5
	add	%o3, %o4, %o3
	add	%o3, %g4, %o3
	add	%o3, %g5, %o3
Ltoofar:
	inc	0xc, %o2
@@ -132,62 +149,61 @@ Ldo_int:
	bl,pn	%icc, Lfinish_short
	nop
0:
	lduw	[%o3], %o4
	inc	4, %o3
	lduw	[%o5], %o4
	inc	4, %o5
	deccc	4, %o2
	bge,pt	%icc, 0b
	add	%o5, %o4, %o5
	add	%o3, %o4, %o3
Lfinish_short:
	btst	2, %o2
	bz	Lfinish_byte
	nop
	lduh	[%o3], %o4
	inc	2, %o3
	add	%o5, %o4, %o5
	lduh	[%o5], %o4
	inc	2, %o5
	add	%o3, %o4, %o3
Lfinish_byte:
	btst	1, %o2
	bz	Lnext
	nop
	ldub	[%o3], %o4
	sllx	%o5, 8, %o5			! ROL { sum = sum << 8; }
	add	%o5, %o4, %o5
	inc	%o3
	ldub	[%o5], %o4
	sllx	%o3, 8, %o3			! ROL { sum = sum << 8; }
	inc	%o5
	xor	%g1, 1, %g1			! Flip byte_swapped
	add	%o3, %o4, %o3
Lnext:
	ldx	[%o0 + M_NEXT], %o0
	brnz,pn	%o0, Lloop			! In general there is only one mbuf
	nop
	brnz,pt	%o1, Lloop			! But usually all need to be fully checksummed
	nop
Lfinish:
	srlx	%o5, 32, %o4			! REDUCE
	sethi	%hi(0x0000ffff), %o3		! data ptr not needed any more
	srlx	%o3, 32, %o4			! Reduce to 32 bits
	srl	%o3, 0, %o3
	brz,pt	%o0, 1f				! In general there is only one mbuf
	add	%o3, %o4, %o3
	brgz,pt	%o1, Lloop			! But usually all need to be fully checksummed
	nop
1:
	sethi	%hi(0x0000ffff), %o5		! data ptr not needed any more
	srl	%o5, 0, %o5			! Clear top 32 bits
	or	%o3, %lo(0x0000ffff), %o3
	srlx	%o3, 16, %o4
	or	%o5, %lo(0x0000ffff), %o5
	add	%o4, %o5, %o5
	and	%o3, %o5, %o3
	srl	%o5, 16, %o4
	and	%o5, %o3, %o5
	add	%o5, %o4, %o5
	add	%o3, %o4, %o3
	brz,pt	%g1, 0f				! if (byte_swapped) {
	nop
	sllx	%o5, 8, %o5			! ROL
	sllx	%o3, 8, %o3			! ROL
	srl	%o5, 16, %o4			! REDUCE
	and	%o5, %o3, %o5
	srlx	%o3, 16, %o4			! REDUCE
	and	%o3, %o5, %o3
	add	%o5, %o4, %o5
	add	%o3, %o4, %o3
0:
	subcc	%o5, %o3, %o4			! if (sum > 0xffff)
	movg	%icc, %o4, %o5			! sum -= 0xffff;
	subcc	%o3, %o5, %o4			! if (sum > 0xffff)
	movg	%icc, %o4, %o3			! sum -= 0xffff;
	clr	%g4				! In case we are using EMBEDANY (ick)
	retl
	xor	%o5, %o3, %o0			! return (0xffff ^ sum);
	xor	%o3, %o5, %o0			! return (0xffff ^ sum);
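
Once the chain is exhausted, Lfinish folds the 64-bit accumulator down toward 16 bits, undoes the byte rotation if an odd number of leading bytes was consumed (byte_swapped), and returns the one's complement. The same epilogue as a pseudo-C fragment in the style of the file's comments (a sketch, not code from this commit):

	/* Sketch: the Lfinish fold, with sum held in a 64-bit accumulator. */
	sum = (sum & 0xffffffff) + (sum >> 32);		/* REDUCE: 64 -> 32 bits */
	sum = (sum & 0xffff) + (sum >> 16);		/* REDUCE: fold the carries */
	if (byte_swapped) {
		sum <<= 8;				/* ROL: undo the rotation */
		sum = (sum & 0xffff) + (sum >> 16);	/* REDUCE again */
	}
	if (sum > 0xffff)				/* done with subcc/movg above */
		sum -= 0xffff;
	return (0xffff ^ sum);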