libFLAC/stream_encoder_intrin_*.c: More refactoring

Combine two intrinsic instructions into one line of code.

Patch-from: lvqcl <lvqcl.mail@gmail.com>
This commit is contained in:
Erik de Castro Lopo 2015-11-18 19:24:44 +11:00
parent 3c56688aa2
commit 2319a688ec
2 changed files with 9 additions and 18 deletions

View File

@@ -77,8 +77,7 @@ void FLAC__precompute_partition_info_sums_intrin_avx2(const FLAC__int32 residual
 		}
 		for( ; residual_sample < end; residual_sample++) {
-			__m128i res128 = _mm_cvtsi32_si128(residual[residual_sample]);
-			res128 = _mm_abs_epi32(res128);
+			__m128i res128 = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
 			sum128 = _mm_add_epi32(sum128, res128);
 		}
@@ -102,15 +101,13 @@ void FLAC__precompute_partition_info_sums_intrin_avx2(const FLAC__int32 residual
 		sum128 = _mm_add_epi64(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));
 		for( ; (int)residual_sample < (int)end-1; residual_sample+=2) {
-			__m128i res128 = _mm_loadl_epi64((const __m128i*)(residual+residual_sample));
-			res128 = _mm_abs_epi32(res128);
+			__m128i res128 = _mm_abs_epi32(_mm_loadl_epi64((const __m128i*)(residual+residual_sample)));
 			res128 = _mm_cvtepu32_epi64(res128);
 			sum128 = _mm_add_epi64(sum128, res128);
 		}
 		for( ; residual_sample < end; residual_sample++) {
-			__m128i res128 = _mm_cvtsi32_si128(residual[residual_sample]);
-			res128 = _mm_abs_epi32(res128);
+			__m128i res128 = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
 			sum128 = _mm_add_epi64(sum128, res128);
 		}

View File

@@ -70,20 +70,17 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 			/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
 			for( ; residual_sample < e1; residual_sample++) {
-				__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-				mm_res = _mm_abs_epi32(mm_res);
+				__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample])); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
 				mm_sum = _mm_add_epi32(mm_sum, mm_res);
 			}
 			for( ; residual_sample < e3; residual_sample+=4) {
-				__m128i mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
-				mm_res = _mm_abs_epi32(mm_res);
+				__m128i mm_res = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
 				mm_sum = _mm_add_epi32(mm_sum, mm_res);
 			}
 			for( ; residual_sample < end; residual_sample++) {
-				__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-				mm_res = _mm_abs_epi32(mm_res);
+				__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
 				mm_sum = _mm_add_epi32(mm_sum, mm_res);
 			}
@@ -102,21 +99,18 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 			FLAC__ASSERT(e1 <= end);
 			for( ; residual_sample < e1; residual_sample++) {
-				__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /* 0 0 0 r0 */
-				mm_res = _mm_abs_epi32(mm_res); /* 0 0 0 |r0| == 00 |r0_64| */
+				__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample])); /* 0 0 0 |r0| == 00 |r0_64| */
 				mm_sum = _mm_add_epi64(mm_sum, mm_res);
 			}
 			for( ; residual_sample < e3; residual_sample+=2) {
-				__m128i mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /* 0 0 r1 r0 */
-				mm_res = _mm_abs_epi32(mm_res); /* 0 0 |r1| |r0| */
+				__m128i mm_res = _mm_abs_epi32(_mm_loadl_epi64((const __m128i*)(residual+residual_sample))); /* 0 0 |r1| |r0| */
 				mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0 |r1| 0 |r0| == |r1_64| |r0_64| */
 				mm_sum = _mm_add_epi64(mm_sum, mm_res);
 			}
 			for( ; residual_sample < end; residual_sample++) {
-				__m128i mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-				mm_res = _mm_abs_epi32(mm_res);
+				__m128i mm_res = _mm_abs_epi32(_mm_cvtsi32_si128(residual[residual_sample]));
 				mm_sum = _mm_add_epi64(mm_sum, mm_res);
 			}