util/bufferiszero: Split out host include files
Split out host/bufferiszero.c.inc for x86, aarch64 and generic in order
to avoid an overlong ifdef ladder.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 6b0ca412e1
commit 2d32a5d2a0
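For orientation: each new host file supplies the same two things, an
accel_table[] of candidate implementations (index 0 is always the integer
fallback buffer_is_zero_int_ge256) and a best_accel() selector, and the
common code includes whichever file the per-host include path resolves.
The minimal shape of that contract, as a sketch (the generic file below
is exactly this minimal form):

/*
 * Shape of the contract each host/bufferiszero.c.inc provides
 * (sketch; names taken from the hunks below).
 */
static biz_accel_fn const accel_table[1] = {
    buffer_is_zero_int_ge256
};
#define best_accel() 0   /* index of the preferred table entry */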
host/include/aarch64/host/bufferiszero.c.inc (new file, 76 lines)
@@ -0,0 +1,76 @@
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * buffer_is_zero acceleration, aarch64 version.
 */

#ifdef __ARM_NEON
#include <arm_neon.h>

/*
 * Helper for preventing the compiler from reassociating
 * chains of binary vector operations.
 */
#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))

static bool buffer_is_zero_simd(const void *buf, size_t len)
{
    uint32x4_t t0, t1, t2, t3;

    /* Align head/tail to 16-byte boundaries. */
    const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
    const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);

    /* Unaligned loads at head/tail. */
    t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);

    /* Collect a partial block at tail end. */
    t1 = e[-7] | e[-6];
    t2 = e[-5] | e[-4];
    t3 = e[-3] | e[-2];
    t0 |= e[-1];
    REASSOC_BARRIER(t0, t1);
    REASSOC_BARRIER(t2, t3);
    t0 |= t1;
    t2 |= t3;
    REASSOC_BARRIER(t0, t2);
    t0 |= t2;

    /*
     * Loop over complete 128-byte blocks.
     * With the head and tail removed, e - p >= 14, so the loop
     * must iterate at least once.
     */
    do {
        /*
         * Reduce via UMAXV. Whatever the actual result,
         * it will only be zero if all input bytes are zero.
         */
        if (unlikely(vmaxvq_u32(t0) != 0)) {
            return false;
        }

        t0 = p[0] | p[1];
        t1 = p[2] | p[3];
        t2 = p[4] | p[5];
        t3 = p[6] | p[7];
        REASSOC_BARRIER(t0, t1);
        REASSOC_BARRIER(t2, t3);
        t0 |= t1;
        t2 |= t3;
        REASSOC_BARRIER(t0, t2);
        t0 |= t2;
        p += 8;
    } while (p < e - 7);

    return vmaxvq_u32(t0) == 0;
}

static biz_accel_fn const accel_table[] = {
    buffer_is_zero_int_ge256,
    buffer_is_zero_simd,
};

#define best_accel() 1
#else
# include "host/include/generic/host/bufferiszero.c.inc"
#endif
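A note on the barrier trick above: the empty asm with "+w" (NEON register)
operands claims to read and rewrite both vectors, so the compiler cannot
reassociate the OR chain into one long serial dependency chain. The same
pattern works for scalars; a stand-alone illustration (hypothetical helper,
not part of this patch):

#include <stdint.h>

/* Hypothetical scalar analogue of REASSOC_BARRIER: the empty asm
 * pins the shape of the expression tree without emitting any code. */
#define REASSOC_BARRIER_U64(a, b) asm("" : "+r"(a), "+r"(b))

static uint64_t or4(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
    uint64_t x = a | b, y = c | d;
    REASSOC_BARRIER_U64(x, y);      /* keep the two halves independent */
    return x | y;
}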
host/include/generic/host/bufferiszero.c.inc (new file, 10 lines)
@@ -0,0 +1,10 @@
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * buffer_is_zero acceleration, generic version.
 */

static biz_accel_fn const accel_table[1] = {
    buffer_is_zero_int_ge256
};

#define best_accel() 0
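The type behind these tables is not visible in this diff; judging from the
call sites, it is presumably the following typedef, living in the unchanged
part of util/bufferiszero.c (my assumption):

/* Assumed signature of a buffer_is_zero accelerator (inferred from
 * accel_table usage; not shown in this diff). */
typedef bool (*biz_accel_fn)(const void *buf, size_t len);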
host/include/i386/host/bufferiszero.c.inc (new file, 124 lines)
@@ -0,0 +1,124 @@
/*
 * SPDX-License-Identifier: GPL-2.0-or-later
 * buffer_is_zero acceleration, x86 version.
 */

#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
#include <immintrin.h>

/* Helper for preventing the compiler from reassociating
   chains of binary vector operations. */
#define SSE_REASSOC_BARRIER(vec0, vec1) asm("" : "+x"(vec0), "+x"(vec1))

/* Note that these vectorized functions may assume len >= 256. */

static bool __attribute__((target("sse2")))
buffer_zero_sse2(const void *buf, size_t len)
{
    /* Unaligned loads at head/tail. */
    __m128i v = *(__m128i_u *)(buf);
    __m128i w = *(__m128i_u *)(buf + len - 16);
    /* Align head/tail to 16-byte boundaries. */
    const __m128i *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
    const __m128i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
    __m128i zero = { 0 };

    /* Collect a partial block at tail end. */
    v |= e[-1]; w |= e[-2];
    SSE_REASSOC_BARRIER(v, w);
    v |= e[-3]; w |= e[-4];
    SSE_REASSOC_BARRIER(v, w);
    v |= e[-5]; w |= e[-6];
    SSE_REASSOC_BARRIER(v, w);
    v |= e[-7]; v |= w;

    /*
     * Loop over complete 128-byte blocks.
     * With the head and tail removed, e - p >= 14, so the loop
     * must iterate at least once.
     */
    do {
        v = _mm_cmpeq_epi8(v, zero);
        if (unlikely(_mm_movemask_epi8(v) != 0xFFFF)) {
            return false;
        }
        v = p[0]; w = p[1];
        SSE_REASSOC_BARRIER(v, w);
        v |= p[2]; w |= p[3];
        SSE_REASSOC_BARRIER(v, w);
        v |= p[4]; w |= p[5];
        SSE_REASSOC_BARRIER(v, w);
        v |= p[6]; w |= p[7];
        SSE_REASSOC_BARRIER(v, w);
        v |= w;
        p += 8;
    } while (p < e - 7);

    return _mm_movemask_epi8(_mm_cmpeq_epi8(v, zero)) == 0xFFFF;
}

#ifdef CONFIG_AVX2_OPT
static bool __attribute__((target("avx2")))
buffer_zero_avx2(const void *buf, size_t len)
{
    /* Unaligned loads at head/tail. */
    __m256i v = *(__m256i_u *)(buf);
    __m256i w = *(__m256i_u *)(buf + len - 32);
    /* Align head/tail to 32-byte boundaries. */
    const __m256i *p = QEMU_ALIGN_PTR_DOWN(buf + 32, 32);
    const __m256i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 32);
    __m256i zero = { 0 };

    /* Collect a partial block at tail end. */
    v |= e[-1]; w |= e[-2];
    SSE_REASSOC_BARRIER(v, w);
    v |= e[-3]; w |= e[-4];
    SSE_REASSOC_BARRIER(v, w);
    v |= e[-5]; w |= e[-6];
    SSE_REASSOC_BARRIER(v, w);
    v |= e[-7]; v |= w;

    /* Loop over complete 256-byte blocks. */
    for (; p < e - 7; p += 8) {
        /* PTEST is not profitable here. */
        v = _mm256_cmpeq_epi8(v, zero);
        if (unlikely(_mm256_movemask_epi8(v) != 0xFFFFFFFF)) {
            return false;
        }
        v = p[0]; w = p[1];
        SSE_REASSOC_BARRIER(v, w);
        v |= p[2]; w |= p[3];
        SSE_REASSOC_BARRIER(v, w);
        v |= p[4]; w |= p[5];
        SSE_REASSOC_BARRIER(v, w);
        v |= p[6]; w |= p[7];
        SSE_REASSOC_BARRIER(v, w);
        v |= w;
    }

    return _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, zero)) == 0xFFFFFFFF;
}
#endif /* CONFIG_AVX2_OPT */

static biz_accel_fn const accel_table[] = {
    buffer_is_zero_int_ge256,
    buffer_zero_sse2,
#ifdef CONFIG_AVX2_OPT
    buffer_zero_avx2,
#endif
};

static unsigned best_accel(void)
{
#ifdef CONFIG_AVX2_OPT
    unsigned info = cpuinfo_init();
    if (info & CPUINFO_AVX2) {
        return 2;
    }
#endif
    return 1;
}

#else
# include "host/include/generic/host/bufferiszero.c.inc"
#endif
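Only on x86 is best_accel() a real function: it returns index 2
(buffer_zero_avx2) when cpuinfo reports AVX2, otherwise index 1
(buffer_zero_sse2). How the common code consumes table and selector is not
part of this diff; a hedged sketch of the assumed wiring, using the names
visible in the final hunk below (the helper name is hypothetical):

/* Assumed consumer-side wiring (not shown in this diff): cache the
 * preferred accelerator once, then dispatch through the pointer. */
static void buffer_is_zero_select_accel(void)
{
    accel_index = best_accel();                 /* macro or function */
    buffer_is_zero_accel = accel_table[accel_index];
}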
host/include/x86_64/host/bufferiszero.c.inc (new file, 1 line)
@@ -0,0 +1 @@
#include "host/include/i386/host/bufferiszero.c.inc"
util/bufferiszero.c
@@ -81,196 +81,7 @@ static bool buffer_is_zero_int_ge256(const void *buf, size_t len)
     return t == 0;
 }
 
-#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
-#include <immintrin.h>
-
-/* Helper for preventing the compiler from reassociating
-   chains of binary vector operations. */
-#define SSE_REASSOC_BARRIER(vec0, vec1) asm("" : "+x"(vec0), "+x"(vec1))
-
-/* Note that these vectorized functions may assume len >= 256. */
-
-static bool __attribute__((target("sse2")))
-buffer_zero_sse2(const void *buf, size_t len)
-{
-    /* Unaligned loads at head/tail. */
-    __m128i v = *(__m128i_u *)(buf);
-    __m128i w = *(__m128i_u *)(buf + len - 16);
-    /* Align head/tail to 16-byte boundaries. */
-    const __m128i *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
-    const __m128i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
-    __m128i zero = { 0 };
-
-    /* Collect a partial block at tail end. */
-    v |= e[-1]; w |= e[-2];
-    SSE_REASSOC_BARRIER(v, w);
-    v |= e[-3]; w |= e[-4];
-    SSE_REASSOC_BARRIER(v, w);
-    v |= e[-5]; w |= e[-6];
-    SSE_REASSOC_BARRIER(v, w);
-    v |= e[-7]; v |= w;
-
-    /*
-     * Loop over complete 128-byte blocks.
-     * With the head and tail removed, e - p >= 14, so the loop
-     * must iterate at least once.
-     */
-    do {
-        v = _mm_cmpeq_epi8(v, zero);
-        if (unlikely(_mm_movemask_epi8(v) != 0xFFFF)) {
-            return false;
-        }
-        v = p[0]; w = p[1];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= p[2]; w |= p[3];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= p[4]; w |= p[5];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= p[6]; w |= p[7];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= w;
-        p += 8;
-    } while (p < e - 7);
-
-    return _mm_movemask_epi8(_mm_cmpeq_epi8(v, zero)) == 0xFFFF;
-}
-
-#ifdef CONFIG_AVX2_OPT
-static bool __attribute__((target("avx2")))
-buffer_zero_avx2(const void *buf, size_t len)
-{
-    /* Unaligned loads at head/tail. */
-    __m256i v = *(__m256i_u *)(buf);
-    __m256i w = *(__m256i_u *)(buf + len - 32);
-    /* Align head/tail to 32-byte boundaries. */
-    const __m256i *p = QEMU_ALIGN_PTR_DOWN(buf + 32, 32);
-    const __m256i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 32);
-    __m256i zero = { 0 };
-
-    /* Collect a partial block at tail end. */
-    v |= e[-1]; w |= e[-2];
-    SSE_REASSOC_BARRIER(v, w);
-    v |= e[-3]; w |= e[-4];
-    SSE_REASSOC_BARRIER(v, w);
-    v |= e[-5]; w |= e[-6];
-    SSE_REASSOC_BARRIER(v, w);
-    v |= e[-7]; v |= w;
-
-    /* Loop over complete 256-byte blocks. */
-    for (; p < e - 7; p += 8) {
-        /* PTEST is not profitable here. */
-        v = _mm256_cmpeq_epi8(v, zero);
-        if (unlikely(_mm256_movemask_epi8(v) != 0xFFFFFFFF)) {
-            return false;
-        }
-        v = p[0]; w = p[1];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= p[2]; w |= p[3];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= p[4]; w |= p[5];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= p[6]; w |= p[7];
-        SSE_REASSOC_BARRIER(v, w);
-        v |= w;
-    }
-
-    return _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, zero)) == 0xFFFFFFFF;
-}
-#endif /* CONFIG_AVX2_OPT */
-
-static biz_accel_fn const accel_table[] = {
-    buffer_is_zero_int_ge256,
-    buffer_zero_sse2,
-#ifdef CONFIG_AVX2_OPT
-    buffer_zero_avx2,
-#endif
-};
-
-static unsigned best_accel(void)
-{
-#ifdef CONFIG_AVX2_OPT
-    unsigned info = cpuinfo_init();
-
-    if (info & CPUINFO_AVX2) {
-        return 2;
-    }
-#endif
-    return 1;
-}
-
-#elif defined(__aarch64__) && defined(__ARM_NEON)
-#include <arm_neon.h>
-
-/*
- * Helper for preventing the compiler from reassociating
- * chains of binary vector operations.
- */
-#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))
-
-static bool buffer_is_zero_simd(const void *buf, size_t len)
-{
-    uint32x4_t t0, t1, t2, t3;
-
-    /* Align head/tail to 16-byte boundaries. */
-    const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
-    const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
-
-    /* Unaligned loads at head/tail. */
-    t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);
-
-    /* Collect a partial block at tail end. */
-    t1 = e[-7] | e[-6];
-    t2 = e[-5] | e[-4];
-    t3 = e[-3] | e[-2];
-    t0 |= e[-1];
-    REASSOC_BARRIER(t0, t1);
-    REASSOC_BARRIER(t2, t3);
-    t0 |= t1;
-    t2 |= t3;
-    REASSOC_BARRIER(t0, t2);
-    t0 |= t2;
-
-    /*
-     * Loop over complete 128-byte blocks.
-     * With the head and tail removed, e - p >= 14, so the loop
-     * must iterate at least once.
-     */
-    do {
-        /*
-         * Reduce via UMAXV. Whatever the actual result,
-         * it will only be zero if all input bytes are zero.
-         */
-        if (unlikely(vmaxvq_u32(t0) != 0)) {
-            return false;
-        }
-
-        t0 = p[0] | p[1];
-        t1 = p[2] | p[3];
-        t2 = p[4] | p[5];
-        t3 = p[6] | p[7];
-        REASSOC_BARRIER(t0, t1);
-        REASSOC_BARRIER(t2, t3);
-        t0 |= t1;
-        t2 |= t3;
-        REASSOC_BARRIER(t0, t2);
-        t0 |= t2;
-        p += 8;
-    } while (p < e - 7);
-
-    return vmaxvq_u32(t0) == 0;
-}
-
-#define best_accel() 1
-static biz_accel_fn const accel_table[] = {
-    buffer_is_zero_int_ge256,
-    buffer_is_zero_simd,
-};
-#else
-#define best_accel() 0
-static biz_accel_fn const accel_table[1] = {
-    buffer_is_zero_int_ge256
-};
-#endif
+#include "host/bufferiszero.c.inc"
 
 static biz_accel_fn buffer_is_zero_accel;
 static unsigned accel_index;
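None of this changes the public entry point: buffer_is_zero() is declared
in include/qemu/cutils.h and presumably dispatches, for large buffers,
through the buffer_is_zero_accel pointer seen above. A hypothetical caller:

#include "qemu/cutils.h"

/* Hypothetical caller: a migration-style check that skips pages
 * which are entirely zero. */
static bool page_needs_send(const uint8_t *page, size_t page_size)
{
    return !buffer_is_zero(page, page_size);
}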