Added NEON and SSSE3 YUV conversion optimisations.

Armin Novak 2017-02-13 16:00:12 +01:00
parent 5897d833c8
commit b1e3bab8ef
4 changed files with 807 additions and 119 deletions

View File

@@ -280,47 +280,6 @@ static pstatus_t general_YUV444SplitToYUV420(
return PRIMITIVES_SUCCESS;
}
/**
 * | R |   ( | 256     0    403 | |    Y    | )
 * | G | = ( | 256   -48   -120 | | U - 128 | ) >> 8
 * | B |   ( | 256   475      0 | | V - 128 | )
 */
static INLINE INT32 C(INT32 Y)
{
return (Y) - 0L;
}
static INLINE INT32 D(INT32 U)
{
return (U) - 128L;
}
static INLINE INT32 E(INT32 V)
{
return (V) - 128L;
}
static INLINE BYTE YUV2R(INT32 Y, INT32 U, INT32 V)
{
const INT32 r = (256L * C(Y) + 0L * D(U) + 403L * E(V));
const INT32 r8 = r >> 8L;
return CLIP(r8);
}
static INLINE BYTE YUV2G(INT32 Y, INT32 U, INT32 V)
{
const INT32 g = (256L * C(Y) - 48L * D(U) - 120L * E(V));
const INT32 g8 = g >> 8L;
return CLIP(g8);
}
static INLINE BYTE YUV2B(INT32 Y, INT32 U, INT32 V)
{
const INT32 b = (256L * C(Y) + 475L * D(U) + 0L * E(V));
const INT32 b8 = b >> 8L;
return CLIP(b8);
}
static pstatus_t general_YUV444ToRGB_8u_P3AC4R_general(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst, UINT32 dstStep, UINT32 DstFormat,
@@ -343,8 +302,8 @@ static pstatus_t general_YUV444ToRGB_8u_P3AC4R_general(
for (x = 0; x < nWidth; x++)
{
const BYTE Y = pY[x];
const INT32 U = pU[x];
const INT32 V = pV[x];
const BYTE U = pU[x];
const BYTE V = pV[x];
const BYTE r = YUV2R(Y, U, V);
const BYTE g = YUV2G(Y, U, V);
const BYTE b = YUV2B(Y, U, V);
@@ -376,8 +335,8 @@ static pstatus_t general_YUV444ToRGB_8u_P3AC4R_BGRX(
for (x = 0; x < nWidth; x++)
{
const BYTE Y = pY[x];
const INT32 U = pU[x];
const INT32 V = pV[x];
const BYTE U = pU[x];
const BYTE V = pV[x];
const BYTE r = YUV2R(Y, U, V);
const BYTE g = YUV2G(Y, U, V);
const BYTE b = YUV2B(Y, U, V);
@@ -589,7 +548,7 @@ static INLINE pstatus_t general_RGBToYUV420_BGRX(
const BYTE* pSrc, UINT32 srcStep,
BYTE* pDst[3], UINT32 dstStep[3], const prim_size_t* roi)
{
UINT32 x, y, i, j;
UINT32 x, y, i;
size_t x1 = 0, x2 = 4, x3 = srcStep, x4 = srcStep + 4;
size_t y1 = 0, y2 = 1, y3 = dstStep[0], y4 = dstStep[0] + 1;
UINT32 max_x = roi->width - 1;
@@ -606,7 +565,6 @@ static INLINE pstatus_t general_RGBToYUV420_BGRX(
{
BYTE R, G, B;
INT32 Ra, Ga, Ba;
UINT32 color;
/* row 1, pixel 1 */
Ba = B = *(src + x1 + 0);
Ga = G = *(src + x1 + 1);
@@ -658,7 +616,7 @@ static INLINE pstatus_t general_RGBToYUV420_ANY(
BYTE* pDst[3], UINT32 dstStep[3], const prim_size_t* roi)
{
const UINT32 bpp = GetBytesPerPixel(srcFormat);
UINT32 x, y, i, j;
UINT32 x, y, i;
size_t x1 = 0, x2 = bpp, x3 = srcStep, x4 = srcStep + bpp;
size_t y1 = 0, y2 = 1, y3 = dstStep[0], y4 = dstStep[0] + 1;
UINT32 max_x = roi->width - 1;

View File

@@ -31,14 +31,17 @@
#include "prim_internal.h"
static primitives_t* generic = NULL;
#ifdef WITH_SSE2
#include <emmintrin.h>
#include <tmmintrin.h>
#elif defined(WITH_NEON)
#include <arm_neon.h>
#endif /* WITH_SSE2 else WITH_NEON */
static primitives_t* generic = NULL;
#ifdef WITH_SSE2
/****************************************************************************/
/* SSSE3 YUV420 -> RGB conversion */
/****************************************************************************/
@@ -356,8 +359,115 @@ static pstatus_t ssse3_YUV420ToRGB(
}
}
static pstatus_t ssse3_YUV444ToRGB_8u_P3AC4R_BGRX(
const BYTE** pSrc, const UINT32* srcStep,
BYTE* pDst, UINT32 dstStep,
const prim_size_t* roi)
{
const UINT32 nWidth = roi->width;
const UINT32 nHeight = roi->height;
const __m128i c128 = _mm_set1_epi16(128);
const __m128i mapY = _mm_set_epi32(0x80800380, 0x80800280, 0x80800180, 0x80800080);
const __m128i map = _mm_set_epi32(0x80038002, 0x80018000, 0x80808080, 0x80808080);
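/* Editorial note (not part of this commit): in a pshufb mask a 0x80 byte
 * zeroes the destination byte. mapY therefore expands four Y bytes into
 * 32-bit lanes pre-shifted left by 8 (i.e. Y * 256), while map places four
 * U/V bytes into the upper four 16-bit lanes, which is why the widening
 * below combines _mm_mullo_epi16/_mm_mulhi_epi16 with _mm_unpackhi_epi16. */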
UINT32 y;
for (y = 0; y < nHeight; y++)
{
UINT32 x;
__m128i* dst = (__m128i*)(pDst + dstStep * y);
const BYTE* YData = pSrc[0] + y * srcStep[0];
const BYTE* UData = pSrc[1] + y * srcStep[1];
const BYTE* VData = pSrc[2] + y * srcStep[2];
for (x = 0; x < nWidth; x += 4)
{
__m128i BGRX = _mm_set_epi32(0xFF000000, 0xFF000000, 0xFF000000, 0xFF000000);
{
__m128i C, D, E;
/* Load Y values and expand to 32 bit */
{
const __m128i Yraw = _mm_loadu_si128((__m128i*)YData);
C = _mm_shuffle_epi8(Yraw, mapY); /* Reorder and multiply by 256 */
}
/* Load U values and expand to 32 bit */
{
const __m128i Uraw = _mm_loadu_si128((__m128i*)UData);
const __m128i U = _mm_shuffle_epi8(Uraw, map); /* Reorder dcba */
D = _mm_sub_epi16(U, c128); /* D = U - 128 */
}
/* Load V values and expand to 32 bit */
{
const __m128i Vraw = _mm_loadu_si128((__m128i*)VData);
const __m128i V = _mm_shuffle_epi8(Vraw, map); /* Reorder dcba */
E = _mm_sub_epi16(V, c128); /* E = V - 128 */
}
/* Get the R value */
{
const __m128i c403 = _mm_set1_epi16(403);
const __m128i e403 = _mm_unpackhi_epi16(_mm_mullo_epi16(E, c403), _mm_mulhi_epi16(E, c403));
const __m128i Rs = _mm_add_epi32(C, e403);
const __m128i R32 = _mm_srai_epi32(Rs, 8);
const __m128i R16 = _mm_packs_epi32(R32, _mm_setzero_si128());
const __m128i R = _mm_packus_epi16(R16, _mm_setzero_si128());
const __m128i mask = _mm_set_epi32(0x80038080, 0x80028080, 0x80018080, 0x80008080);
const __m128i packed = _mm_shuffle_epi8(R, mask);
BGRX = _mm_or_si128(BGRX, packed);
}
/* Get the G value */
{
const __m128i c48 = _mm_set1_epi16(48);
const __m128i d48 = _mm_unpackhi_epi16(_mm_mullo_epi16(D, c48), _mm_mulhi_epi16(D, c48));
const __m128i c120 = _mm_set1_epi16(120);
const __m128i e120 = _mm_unpackhi_epi16(_mm_mullo_epi16(E, c120), _mm_mulhi_epi16(E, c120));
const __m128i de = _mm_add_epi32(d48, e120);
const __m128i Gs = _mm_sub_epi32(C, de);
const __m128i G32 = _mm_srai_epi32(Gs, 8);
const __m128i G16 = _mm_packs_epi32(G32, _mm_setzero_si128());
const __m128i G = _mm_packus_epi16(G16, _mm_setzero_si128());
const __m128i mask = _mm_set_epi32(0x80800380, 0x80800280, 0x80800180, 0x80800080);
const __m128i packed = _mm_shuffle_epi8(G, mask);
BGRX = _mm_or_si128(BGRX, packed);
}
/* Get the B value */
{
const __m128i c475 = _mm_set1_epi16(475);
const __m128i d475 = _mm_unpackhi_epi16(_mm_mullo_epi16(D, c475), _mm_mulhi_epi16(D, c475));
const __m128i Bs = _mm_add_epi32(C, d475);
const __m128i B32 = _mm_srai_epi32(Bs, 8);
const __m128i B16 = _mm_packs_epi32(B32, _mm_setzero_si128());
const __m128i B = _mm_packus_epi16(B16, _mm_setzero_si128());
const __m128i mask = _mm_set_epi32(0x80808003, 0x80808002, 0x80808001, 0x80808000);
const __m128i packed = _mm_shuffle_epi8(B, mask);
BGRX = _mm_or_si128(BGRX, packed);
}
}
_mm_storeu_si128(dst++, BGRX);
YData += 4;
UData += 4;
VData += 4;
}
}
return PRIMITIVES_SUCCESS;
}
static pstatus_t ssse3_YUV444ToRGB_8u_P3AC4R(const BYTE** pSrc, const UINT32* srcStep,
BYTE* pDst, UINT32 dstStep, UINT32 DstFormat,
const prim_size_t* roi)
{
if (roi->width % 4 != 0)
return generic->YUV444ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
switch (DstFormat)
{
case PIXEL_FORMAT_BGRX32:
case PIXEL_FORMAT_BGRA32:
return ssse3_YUV444ToRGB_8u_P3AC4R_BGRX(pSrc, srcStep, pDst, dstStep, roi);
default:
return generic->YUV444ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
}
}
/****************************************************************************/
/* SSSE3 RGB -> YUV420 conversion **/
@@ -383,17 +493,21 @@ static pstatus_t ssse3_YUV420ToRGB(
*
*/
PRIM_ALIGN_128 static const BYTE bgrx_y_factors[] = {
PRIM_ALIGN_128 static const BYTE bgrx_y_factors[] =
{
9, 92, 27, 0, 9, 92, 27, 0, 9, 92, 27, 0, 9, 92, 27, 0
};
PRIM_ALIGN_128 static const BYTE bgrx_u_factors[] = {
PRIM_ALIGN_128 static const BYTE bgrx_u_factors[] =
{
64, -49, -15, 0, 64, -49, -15, 0, 64, -49, -15, 0, 64, -49, -15, 0
};
PRIM_ALIGN_128 static const BYTE bgrx_v_factors[] = {
PRIM_ALIGN_128 static const BYTE bgrx_v_factors[] =
{
-6, -58, 64, 0, -6, -58, 64, 0, -6, -58, 64, 0, -6, -58, 64, 0
};
PRIM_ALIGN_128 static const BYTE const_buf_128b[] = {
PRIM_ALIGN_128 static const BYTE const_buf_128b[] =
{
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
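/* Editorial note (not part of this commit): with BGRX byte order these
 * _mm_maddubs_epi16 factor tables compute, per pixel,
 *   Y ~= ( 9*B + 92*G + 27*R) >> 7
 *   U ~= ((64*B - 49*G - 15*R) >> 7) + 128
 *   V ~= ((-6*B - 58*G + 64*R) >> 7) + 128
 * i.e. BT.709-style coefficients in 7-bit fixed point. */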
@@ -424,7 +538,6 @@ static INLINE void ssse3_RGBToYUV420_BGRX_Y(
__m128i y_factors, x0, x1, x2, x3;
const __m128i* argb = (const __m128i*) src;
__m128i* ydst = (__m128i*) dst;
y_factors = _mm_load_si128((__m128i*)bgrx_y_factors);
for (x = 0; x < width; x += 16)
@@ -434,24 +547,19 @@ static INLINE void ssse3_RGBToYUV420_BGRX_Y(
x1 = _mm_load_si128(argb++); // 2nd 4 pixels
x2 = _mm_load_si128(argb++); // 3rd 4 pixels
x3 = _mm_load_si128(argb++); // 4th 4 pixels
/* multiplications and subtotals */
x0 = _mm_maddubs_epi16(x0, y_factors);
x1 = _mm_maddubs_epi16(x1, y_factors);
x2 = _mm_maddubs_epi16(x2, y_factors);
x3 = _mm_maddubs_epi16(x3, y_factors);
/* the total sums */
x0 = _mm_hadd_epi16(x0, x1);
x2 = _mm_hadd_epi16(x2, x3);
/* shift the results */
x0 = _mm_srli_epi16(x0, 7);
x2 = _mm_srli_epi16(x2, 7);
/* pack the 16 words into bytes */
x0 = _mm_packus_epi16(x0, x2);
/* save to y plane */
_mm_storeu_si128(ydst++, x0);
}
@@ -465,13 +573,10 @@ static INLINE void ssse3_RGBToYUV420_BGRX_UV(
{
UINT32 x;
__m128i vector128, u_factors, v_factors, x0, x1, x2, x3, x4, x5;
const __m128i* rgb1 = (const __m128i*)src1;
const __m128i* rgb2 = (const __m128i*)src2;
__m64* udst = (__m64*)dst1;
__m64* vdst = (__m64*)dst2;
vector128 = _mm_load_si128((__m128i*)const_buf_128b);
u_factors = _mm_load_si128((__m128i*)bgrx_u_factors);
v_factors = _mm_load_si128((__m128i*)bgrx_v_factors);
@@ -482,59 +587,44 @@ static INLINE void ssse3_RGBToYUV420_BGRX_UV(
x0 = _mm_load_si128(rgb1++);
x4 = _mm_load_si128(rgb2++);
x0 = _mm_avg_epu8(x0, x4);
x1 = _mm_load_si128(rgb1++);
x4 = _mm_load_si128(rgb2++);
x1 = _mm_avg_epu8(x1, x4);
x2 = _mm_load_si128(rgb1++);
x4 = _mm_load_si128(rgb2++);
x2 = _mm_avg_epu8(x2, x4);
x3 = _mm_load_si128(rgb1++);
x4 = _mm_load_si128(rgb2++);
x3 = _mm_avg_epu8(x3, x4);
/* subsample these 16x1 pixels into 8x1 pixels */
/**
* shuffle controls
* c = a[0],a[2],b[0],b[2] == 10 00 10 00 = 0x88
* c = a[1],a[3],b[1],b[3] == 11 01 11 01 = 0xdd
*/
x4 = _mm_castps_si128( _mm_shuffle_ps(_mm_castsi128_ps(x0), _mm_castsi128_ps(x1), 0x88) );
x0 = _mm_castps_si128( _mm_shuffle_ps(_mm_castsi128_ps(x0), _mm_castsi128_ps(x1), 0xdd) );
x4 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(x0), _mm_castsi128_ps(x1), 0x88));
x0 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(x0), _mm_castsi128_ps(x1), 0xdd));
x0 = _mm_avg_epu8(x0, x4);
x4 = _mm_castps_si128( _mm_shuffle_ps(_mm_castsi128_ps(x2), _mm_castsi128_ps(x3), 0x88) );
x1 = _mm_castps_si128( _mm_shuffle_ps(_mm_castsi128_ps(x2), _mm_castsi128_ps(x3), 0xdd) );
x4 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(x2), _mm_castsi128_ps(x3), 0x88));
x1 = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(x2), _mm_castsi128_ps(x3), 0xdd));
x1 = _mm_avg_epu8(x1, x4);
/* multiplications and subtotals */
x2 = _mm_maddubs_epi16(x0, u_factors);
x3 = _mm_maddubs_epi16(x1, u_factors);
x4 = _mm_maddubs_epi16(x0, v_factors);
x5 = _mm_maddubs_epi16(x1, v_factors);
/* the total sums */
x0 = _mm_hadd_epi16(x2, x3);
x1 = _mm_hadd_epi16(x4, x5);
/* shift the results */
x0 = _mm_srai_epi16(x0, 7);
x1 = _mm_srai_epi16(x1, 7);
/* pack the 16 words into bytes */
x0 = _mm_packs_epi16(x0, x1);
/* add 128 */
x0 = _mm_add_epi8(x0, vector128);
/* the lower 8 bytes go to the u plane */
_mm_storel_pi(udst++, _mm_castsi128_ps(x0));
/* the upper 8 bytes go to the v plane */
_mm_storeh_pi(vdst++, _mm_castsi128_ps(x0));
}
@@ -561,15 +651,13 @@ static pstatus_t ssse3_RGBToYUV420_BGRX(
return generic->RGBToYUV420_8u_P3AC4R(pSrc, srcFormat, srcStep, pDst, dstStep, roi);
}
for (y = 0; y < roi->height-1; y+=2)
for (y = 0; y < roi->height - 1; y += 2)
{
const BYTE* line1 = argb;
const BYTE* line2 = argb + srcStep;
ssse3_RGBToYUV420_BGRX_UV(line1, line2, udst, vdst, roi->width);
ssse3_RGBToYUV420_BGRX_Y(line1, ydst, roi->width);
ssse3_RGBToYUV420_BGRX_Y(line2, ydst + dstStep[0], roi->width);
argb += 2 * srcStep;
ydst += 2 * dstStep[0];
udst += 1 * dstStep[1];
@@ -596,12 +684,560 @@ static pstatus_t ssse3_RGBToYUV420(
case PIXEL_FORMAT_BGRX32:
case PIXEL_FORMAT_BGRA32:
return ssse3_RGBToYUV420_BGRX(pSrc, srcFormat, srcStep, pDst, dstStep, roi);
default:
return generic->RGBToYUV420_8u_P3AC4R(pSrc, srcFormat, srcStep, pDst, dstStep, roi);
}
}
#elif defined(WITH_NEON)
static INLINE uint8x8_t neon_YUV2R(int32x4_t Ch, int32x4_t Cl,
int16x4_t Dh, int16x4_t Dl,
int16x4_t Eh, int16x4_t El)
{
/* R = (256 * Y + 403 * (V - 128)) >> 8 */
const int16x4_t c403 = vdup_n_s16(403);
const int32x4_t CEh = vmlal_s16(Ch, Eh, c403);
const int32x4_t CEl = vmlal_s16(Cl, El, c403);
const int32x4_t Rh = vrshrq_n_s32(CEh, 8);
const int32x4_t Rl = vrshrq_n_s32(CEl, 8);
const int16x8_t R = vcombine_s16(vqmovn_s32(Rl), vqmovn_s32(Rh));
return vqmovun_s16(R);
}
static INLINE uint8x8_t neon_YUV2G(int32x4_t Ch, int32x4_t Cl,
int16x4_t Dh, int16x4_t Dl,
int16x4_t Eh, int16x4_t El)
{
/* G = (256L * Y - 48 * (U - 128) - 120 * (V - 128)) >> 8 */
const int16x4_t c48 = vdup_n_s16(48);
const int16x4_t c120 = vdup_n_s16(120);
const int32x4_t CDh = vmlsl_s16(Ch, Dh, c48);
const int32x4_t CDl = vmlsl_s16(Cl, Dl, c48);
const int32x4_t CDEh = vmlsl_s16(CDh, Eh, c120);
const int32x4_t CDEl = vmlsl_s16(CDl, El, c120);
const int32x4_t Gh = vrshrq_n_s32(CDEh, 8);
const int32x4_t Gl = vrshrq_n_s32(CDEl, 8);
const int16x8_t G = vcombine_s16(vqmovn_s32(Gl), vqmovn_s32(Gh));
return vqmovun_s16(G);
}
static INLINE uint8x8_t neon_YUV2B(int32x4_t Ch, int32x4_t Cl,
int16x4_t Dh, int16x4_t Dl,
int16x4_t Eh, int16x4_t El)
{
/* B = (256L * Y + 475 * (U - 128)) >> 8 */
const int16x4_t c475 = vdup_n_s16(475);
const int32x4_t CDh = vmlal_s16(Ch, Dh, c475);
const int32x4_t CDl = vmlal_s16(Cl, Dl, c475);
const int32x4_t Bh = vrshrq_n_s32(CDh, 8);
const int32x4_t Bl = vrshrq_n_s32(CDl, 8);
const int16x8_t B = vcombine_s16(vqmovn_s32(Bl), vqmovn_s32(Bh));
return vqmovun_s16(B);
}
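/* Editorial note (not part of this commit): vrshrq_n_s32 is a rounding
 * shift, while the scalar YUV2R/YUV2G/YUV2B helpers truncate with >> 8, so
 * the NEON and generic paths may differ by one per channel; this is
 * presumably why the tolerance in TestPrimitivesYUV is widened from 2 to 4
 * further down in this commit. */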
static INLINE pstatus_t neon_YUV420ToX(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst, UINT32 dstStep,
const prim_size_t* roi, const uint8_t rPos, const uint8_t gPos,
const uint8_t bPos, const uint8_t aPos)
{
UINT32 y;
const UINT32 nWidth = roi->width;
const UINT32 nHeight = roi->height;
const int16x8_t c128 = vdupq_n_s16(128);
for (y = 0; y < nHeight; y += 2)
{
const uint8_t* pY1 = pSrc[0] + y * srcStep[0];
const uint8_t* pY2 = pY1 + srcStep[0];
const uint8_t* pU = pSrc[1] + (y / 2) * srcStep[1];
const uint8_t* pV = pSrc[2] + (y / 2) * srcStep[2];
uint8_t* pRGB1 = pDst + y * dstStep;
uint8_t* pRGB2 = pRGB1 + dstStep;
UINT32 x;
const BOOL lastY = y >= nHeight - 1;
for (x = 0; x < nWidth;)
{
const BOOL lastX = (nWidth - x) < 16;
const uint8x8_t Uraw = vld1_u8(pU);
const uint8x8x2_t Uu = vzip_u8(Uraw, Uraw);
const int16x8_t U1 = vreinterpretq_s16_u16(vmovl_u8(Uu.val[0]));
const int16x8_t U2 = vreinterpretq_s16_u16(vmovl_u8(Uu.val[1]));
const uint8x8_t Vraw = vld1_u8(pV);
const uint8x8x2_t Vu = vzip_u8(Vraw, Vraw);
const int16x8_t V1 = vreinterpretq_s16_u16(vmovl_u8(Vu.val[0]));
const int16x8_t V2 = vreinterpretq_s16_u16(vmovl_u8(Vu.val[1]));
const int16x8_t D1 = vsubq_s16(U1, c128);
const int16x4_t D1h = vget_high_s16(D1);
const int16x4_t D1l = vget_low_s16(D1);
const int16x8_t E1 = vsubq_s16(V1, c128);
const int16x4_t E1h = vget_high_s16(E1);
const int16x4_t E1l = vget_low_s16(E1);
const int16x8_t D2 = vsubq_s16(U2, c128);
const int16x4_t D2h = vget_high_s16(D2);
const int16x4_t D2l = vget_low_s16(D2);
const int16x8_t E2 = vsubq_s16(V2, c128);
const int16x4_t E2h = vget_high_s16(E2);
const int16x4_t E2l = vget_low_s16(E2);
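/* Editorial note (not part of this commit): each U/V sample covers a
 * 2x2 pixel block, so vzip_u8(Uraw, Uraw) doubles every chroma byte
 * horizontally (D1/E1 serve the first 8 pixels, D2/E2 the next 8), and
 * reusing the same D/E terms for the second luma row below doubles the
 * chroma vertically. */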
uint8x8x4_t bgrx;
bgrx.val[aPos] = vdup_n_u8(0xFF);
{
const uint8x8_t Y1u = vld1_u8(pY1);
const int16x8_t Y1 = vreinterpretq_s16_u16(vmovl_u8(Y1u));
const int32x4_t Ch = vmulq_n_s32(vmovl_s16(vget_high_s16(Y1)), 256); /* Y * 256 */
const int32x4_t Cl = vmulq_n_s32(vmovl_s16(vget_low_s16(Y1)), 256); /* Y * 256 */
bgrx.val[rPos] = neon_YUV2R(Ch, Cl, D1h, D1l, E1h, E1l);
bgrx.val[gPos] = neon_YUV2G(Ch, Cl, D1h, D1l, E1h, E1l);
bgrx.val[bPos] = neon_YUV2B(Ch, Cl, D1h, D1l, E1h, E1l);
vst4_u8(pRGB1, bgrx);
pRGB1 += 32;
pY1 += 8;
x += 8;
}
if (!lastX)
{
const uint8x8_t Y1u = vld1_u8(pY1);
const int16x8_t Y1 = vreinterpretq_s16_u16(vmovl_u8(Y1u));
const int32x4_t Ch = vmulq_n_s32(vmovl_s16(vget_high_s16(Y1)), 256); /* Y * 256 */
const int32x4_t Cl = vmulq_n_s32(vmovl_s16(vget_low_s16(Y1)), 256); /* Y * 256 */
bgrx.val[rPos] = neon_YUV2R(Ch, Cl, D2h, D2l, E2h, E2l);
bgrx.val[gPos] = neon_YUV2G(Ch, Cl, D2h, D2l, E2h, E2l);
bgrx.val[bPos] = neon_YUV2B(Ch, Cl, D2h, D2l, E2h, E2l);
vst4_u8(pRGB1, bgrx);
pRGB1 += 32;
pY1 += 8;
x += 8;
}
if (!lastY)
{
const uint8x8_t Y2u = vld1_u8(pY2);
const int16x8_t Y2 = vreinterpretq_s16_u16(vmovl_u8(Y2u));
const int32x4_t Ch = vmulq_n_s32(vmovl_s16(vget_high_s16(Y2)), 256); /* Y * 256 */
const int32x4_t Cl = vmulq_n_s32(vmovl_s16(vget_low_s16(Y2)), 256); /* Y * 256 */
bgrx.val[rPos] = neon_YUV2R(Ch, Cl, D1h, D1l, E1h, E1l);
bgrx.val[gPos] = neon_YUV2G(Ch, Cl, D1h, D1l, E1h, E1l);
bgrx.val[bPos] = neon_YUV2B(Ch, Cl, D1h, D1l, E1h, E1l);
vst4_u8(pRGB2, bgrx);
pRGB2 += 32;
pY2 += 8;
if (!lastX)
{
const uint8x8_t Y2u = vld1_u8(pY2);
const int16x8_t Y2 = vreinterpretq_s16_u16(vmovl_u8(Y2u));
const int32x4_t Ch = vmulq_n_s32(vmovl_s16(vget_high_s16(Y2)), 256); /* Y * 256 */
const int32x4_t Cl = vmulq_n_s32(vmovl_s16(vget_low_s16(Y2)), 256); /* Y * 256 */
bgrx.val[rPos] = neon_YUV2R(Ch, Cl, D2h, D2l, E2h, E2l);
bgrx.val[gPos] = neon_YUV2G(Ch, Cl, D2h, D2l, E2h, E2l);
bgrx.val[bPos] = neon_YUV2B(Ch, Cl, D2h, D2l, E2h, E2l);
vst4_u8(pRGB2, bgrx);
pRGB2 += 32;
pY2 += 8;
}
}
pU += 8;
pV += 8;
}
}
return PRIMITIVES_SUCCESS;
}
static pstatus_t neon_YUV420ToRGB_8u_P3AC4R(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst, UINT32 dstStep, UINT32 DstFormat,
const prim_size_t* roi)
{
switch (DstFormat)
{
case PIXEL_FORMAT_BGRA32:
case PIXEL_FORMAT_BGRX32:
return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 2, 1, 0, 3);
case PIXEL_FORMAT_RGBA32:
case PIXEL_FORMAT_RGBX32:
return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 0, 1, 2, 3);
case PIXEL_FORMAT_ARGB32:
case PIXEL_FORMAT_XRGB32:
return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 1, 2, 3, 0);
case PIXEL_FORMAT_ABGR32:
case PIXEL_FORMAT_XBGR32:
return neon_YUV420ToX(pSrc, srcStep, pDst, dstStep, roi, 3, 2, 1, 0);
default:
return generic->YUV420ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
}
}
static INLINE pstatus_t neon_YUV444ToX(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst, UINT32 dstStep,
const prim_size_t* roi, const uint8_t rPos, const uint8_t gPos,
const uint8_t bPos, const uint8_t aPos)
{
UINT32 y;
const UINT32 nWidth = roi->width;
const UINT32 nHeight = roi->height;
const UINT32 yPad = srcStep[0] - roi->width;
const UINT32 uPad = srcStep[1] - roi->width;
const UINT32 vPad = srcStep[2] - roi->width;
const UINT32 dPad = dstStep - roi->width * 4;
const uint8_t* pY = pSrc[0];
const uint8_t* pU = pSrc[1];
const uint8_t* pV = pSrc[2];
uint8_t* pRGB = pDst;
const int16x8_t c128 = vdupq_n_s16(128);
const int16x4_t c48 = vdup_n_s16(48);
const int16x4_t c120 = vdup_n_s16(120);
const int16x4_t c403 = vdup_n_s16(403);
const int16x4_t c475 = vdup_n_s16(475);
const DWORD pad = nWidth % 8;
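/* Editorial note (not part of this commit): the vector loop below consumes
 * 8 pixels per iteration; the remaining nWidth % 8 pixels are finished by
 * the scalar YUV2R/YUV2G/YUV2B tail loop further down. */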
for (y = 0; y < nHeight; y++)
{
UINT32 x;
for (x = 0; x < nWidth - pad; x += 8)
{
const uint8x8_t Yu = vld1_u8(pY);
const int16x8_t Y = vreinterpretq_s16_u16(vmovl_u8(Yu));
const uint8x8_t Uu = vld1_u8(pU);
const int16x8_t U = vreinterpretq_s16_u16(vmovl_u8(Uu));
const uint8x8_t Vu = vld1_u8(pV);
const int16x8_t V = vreinterpretq_s16_u16(vmovl_u8(Vu));
/* Do the calculations on Y in 32 bit width, the result of 255 * 256 does
 * not fit into a signed 16 bit value. */
const int32x4_t Ch = vmulq_n_s32(vmovl_s16(vget_high_s16(Y)), 256); /* Y * 256 */
const int32x4_t Cl = vmulq_n_s32(vmovl_s16(vget_low_s16(Y)), 256); /* Y * 256 */
const int16x8_t D = vsubq_s16(U, c128);
const int16x4_t Dh = vget_high_s16(D);
const int16x4_t Dl = vget_low_s16(D);
const int16x8_t E = vsubq_s16(V, c128);
const int16x4_t Eh = vget_high_s16(E);
const int16x4_t El = vget_low_s16(E);
uint8x8x4_t bgrx;
{
/* B = (256L * Y + 475 * (U - 128)) >> 8 */
const int32x4_t CDh = vmlal_s16(Ch, Dh, c475);
const int32x4_t CDl = vmlal_s16(Cl, Dl, c475);
const int32x4_t Bh = vrshrq_n_s32(CDh, 8);
const int32x4_t Bl = vrshrq_n_s32(CDl, 8);
const int16x8_t B = vcombine_s16(vqmovn_s32(Bl), vqmovn_s32(Bh));
bgrx.val[bPos] = vqmovun_s16(B);
}
{
/* G = (256L * Y - 48 * (U - 128) - 120 * (V - 128)) >> 8 */
const int32x4_t CDh = vmlsl_s16(Ch, Dh, c48);
const int32x4_t CDl = vmlsl_s16(Cl, Dl, c48);
const int32x4_t CDEh = vmlsl_s16(CDh, Eh, c120);
const int32x4_t CDEl = vmlsl_s16(CDl, El, c120);
const int32x4_t Gh = vrshrq_n_s32(CDEh, 8);
const int32x4_t Gl = vrshrq_n_s32(CDEl, 8);
const int16x8_t G = vcombine_s16(vqmovn_s32(Gl), vqmovn_s32(Gh));
bgrx.val[gPos] = vqmovun_s16(G);
}
{
/* R = (256 * Y + 403 * (V - 128)) >> 8 */
const int32x4_t CEh = vmlal_s16(Ch, Eh, c403);
const int32x4_t CEl = vmlal_s16(Cl, El, c403);
const int32x4_t Rh = vrshrq_n_s32(CEh, 8);
const int32x4_t Rl = vrshrq_n_s32(CEl, 8);
const int16x8_t R = vcombine_s16(vqmovn_s32(Rl), vqmovn_s32(Rh));
bgrx.val[rPos] = vqmovun_s16(R);
}
{
/* A */
bgrx.val[aPos] = vdup_n_u8(0xFF);
}
vst4_u8(pRGB, bgrx);
pRGB += 32;
pY += 8;
pU += 8;
pV += 8;
}
for (x = 0; x < pad; x++)
{
const BYTE Y = *pY++;
const BYTE U = *pU++;
const BYTE V = *pV++;
const BYTE r = YUV2R(Y, U, V);
const BYTE g = YUV2G(Y, U, V);
const BYTE b = YUV2B(Y, U, V);
pRGB[aPos] = 0xFF;
pRGB[rPos] = r;
pRGB[gPos] = g;
pRGB[bPos] = b;
pRGB += 4;
}
pRGB += dPad;
pY += yPad;
pU += uPad;
pV += vPad;
}
return PRIMITIVES_SUCCESS;
}
static pstatus_t neon_YUV444ToRGB_8u_P3AC4R(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst, UINT32 dstStep, UINT32 DstFormat,
const prim_size_t* roi)
{
switch (DstFormat)
{
case PIXEL_FORMAT_BGRA32:
case PIXEL_FORMAT_BGRX32:
return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 2, 1, 0, 3);
case PIXEL_FORMAT_RGBA32:
case PIXEL_FORMAT_RGBX32:
return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 0, 1, 2, 3);
case PIXEL_FORMAT_ARGB32:
case PIXEL_FORMAT_XRGB32:
return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 1, 2, 3, 0);
case PIXEL_FORMAT_ABGR32:
case PIXEL_FORMAT_XBGR32:
return neon_YUV444ToX(pSrc, srcStep, pDst, dstStep, roi, 3, 2, 1, 0);
default:
return generic->YUV444ToRGB_8u_P3AC4R(pSrc, srcStep, pDst, dstStep, DstFormat, roi);
}
}
static pstatus_t neon_YUV420CombineToYUV444(
const BYTE* pMainSrc[3], const UINT32 srcMainStep[3],
const BYTE* pAuxSrc[3], const UINT32 srcAuxStep[3],
BYTE* pDst[3], const UINT32 dstStep[3],
const prim_size_t* roi)
{
UINT32 x, y;
const UINT32 nWidth = roi->width;
const UINT32 nHeight = roi->height;
const UINT32 halfWidth = nWidth / 2, halfHeight = nHeight / 2;
const UINT32 oddY = 1;
const UINT32 evenY = 0;
/* The auxiliary frame is aligned to multiples of 16x16.
 * We need the padded height for B4 and B5 conversion. */
const UINT32 padHeight = roi->height + 16 - roi->height % 16;
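/* Editorial sketch (assumption, not stated in this commit): in the AVC444
 * dual-stream layout the main view supplies luma plus the even-row/
 * even-column chroma (B1-B3), while the auxiliary view packs the odd-row
 * chroma into its Y plane (B4/B5) and the odd-column chroma into its U/V
 * planes (B6/B7), matching the copy loops below. */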
if (pMainSrc && pMainSrc[0] && pMainSrc[1] && pMainSrc[2])
{
/* Y data is already here... */
/* B1 */
for (y = 0; y < nHeight; y++)
{
const BYTE* Ym = pMainSrc[0] + srcMainStep[0] * y;
BYTE* pY = pDst[0] + dstStep[0] * y;
memcpy(pY, Ym, nWidth);
}
/* The first half of U, V are already here part of this frame. */
/* B2 and B3 */
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (2 * y + evenY);
const BYTE* Um = pMainSrc[1] + srcMainStep[1] * y;
const BYTE* Vm = pMainSrc[2] + srcMainStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
BYTE* pU1 = pU + dstStep[1];
BYTE* pV1 = pV + dstStep[2];
for (x = 0; x + 16 < halfWidth; x += 16)
{
{
const uint8x16_t u = vld1q_u8(Um);
uint8x16x2_t u2x;
u2x.val[0] = u;
u2x.val[1] = u;
vst2q_u8(pU, u2x);
vst2q_u8(pU1, u2x);
Um += 16;
pU += 32;
pU1 += 32;
}
{
const uint8x16_t v = vld1q_u8(Vm);
uint8x16x2_t v2x;
v2x.val[0] = v;
v2x.val[1] = v;
vst2q_u8(pV, v2x);
vst2q_u8(pV1, v2x);
Vm += 16;
pV += 32;
pV1 += 32;
}
}
for (; x < halfWidth; x++)
{
const BYTE u = *Um++;
const BYTE v = *Vm++;
*pU++ = u;
*pU++ = u;
*pU1++ = u;
*pU1++ = u;
*pV++ = v;
*pV++ = v;
*pV1++ = v;
*pV1++ = v;
}
}
}
if (!pAuxSrc || !pAuxSrc[0] || !pAuxSrc[1] || !pAuxSrc[2])
return PRIMITIVES_SUCCESS;
/* The second half of U and V is a bit more tricky... */
/* B4 and B5 */
for (y = 0; y < padHeight; y += 16)
{
const BYTE* Ya = pAuxSrc[0] + srcAuxStep[0] * y;
UINT32 x;
BYTE* pU = pDst[1] + dstStep[1] * (y + 1);
BYTE* pV = pDst[2] + dstStep[2] * (y + 1);
for (x = 0; x < 8; x++)
{
if (y + x >= nHeight)
continue;
memcpy(pU, Ya, nWidth);
pU += dstStep[1] * 2;
Ya += srcAuxStep[0];
}
for (x = 0; x < 8; x++)
{
if (y + x >= nHeight)
continue;
memcpy(pV, Ya, nWidth);
pV += dstStep[1] * 2;
Ya += srcAuxStep[0];
}
}
/* B6 and B7 */
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const BYTE* Ua = pAuxSrc[1] + srcAuxStep[1] * y;
const BYTE* Va = pAuxSrc[2] + srcAuxStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
for (x = 0; x + 16 < halfWidth; x += 16)
{
{
const uint8x16_t u = vld1q_u8(Ua);
uint8x16x2_t uu = vld2q_u8(pU);
uu.val[1] = u;
vst2q_u8(pU, uu);
Ua += 16;
pU += 32;
}
{
const uint8x16_t v = vld1q_u8(Va);
uint8x16x2_t vv = vld2q_u8(pV);
vv.val[1] = v;
vst2q_u8(pV, vv);
Va += 16;
pV += 32;
}
}
for (; x < halfWidth; x++)
{
pU++;
*pU++ = *Ua++;
pV++;
*pV++ = *Va++;
}
}
/* Filter */
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const UINT32 val2y1 = val2y + oddY;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
BYTE* pU1 = pU + dstStep[1];
BYTE* pV1 = pV + dstStep[2];
if (val2y1 > nHeight)
continue;
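/* Editorial note (not part of this commit): the main view stored the 2x2
 * chroma average at (2x,2y), so 4*avg minus the three neighbouring true
 * samples recovers the original top-left value; the vector code below does
 * exactly this with vshll_n_u8(..., 2) and saturating narrowing. */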
for (x = 0; x + 16 < halfWidth; x += 16)
{
{
/* U = (U2x,2y << 2) - U2x1,2y - U2x,2y1 - U2x1,2y1 */
uint8x8x2_t u = vld2_u8(pU);
const int16x8_t up = vreinterpretq_s16_u16(vshll_n_u8(u.val[0], 2)); /* U2x,2y << 2 */
const uint8x8x2_t u1 = vld2_u8(pU1);
const uint16x8_t usub = vaddl_u8(u1.val[1], u1.val[0]); /* U2x,2y1 + U2x1,2y1 */
const int16x8_t us = vreinterpretq_s16_u16(vaddw_u8(usub,
u.val[1])); /* U2x1,2y + U2x,2y1 + U2x1,2y1 */
const int16x8_t un = vsubq_s16(up, us);
const uint8x8_t u8 = vqmovun_s16(un); /* CLIP(un) */
u.val[0] = u8;
vst2_u8(pU, u);
pU += 16;
pU1 += 16;
}
{
/* V = (V2x,2y << 2) - V2x1,2y - V2x,2y1 - V2x1,2y1 */
uint8x8x2_t v = vld2_u8(pV);
const int16x8_t vp = vreinterpretq_s16_u16(vshll_n_u8(v.val[0], 2)); /* V2x,2y << 2 */
const uint8x8x2_t v1 = vld2_u8(pV1);
const uint16x8_t vsub = vaddl_u8(v1.val[1], v1.val[0]); /* V2x,2y1 + V2x1,2y1 */
const int16x8_t vs = vreinterpretq_s16_u16(vaddw_u8(vsub,
v.val[1])); /* V2x1,2y + V2x,2y1 + V2x1,2y1 */
const int16x8_t vn = vsubq_s16(vp, vs);
const uint8x8_t v8 = vqmovun_s16(vn); /* CLIP(vn) */
v.val[0] = v8;
vst2_u8(pV, v);
pV += 16;
pV1 += 16;
}
}
for (; x < halfWidth; x++)
{
const UINT32 val2x = (x * 2);
const UINT32 val2x1 = val2x + 1;
const INT32 up = pU[val2x] * 4;
const INT32 vp = pV[val2x] * 4;
INT32 u2020;
INT32 v2020;
if (val2x1 > nWidth)
continue;
u2020 = up - pU[val2x1] - pU1[val2x] - pU1[val2x1];
v2020 = vp - pV[val2x1] - pV1[val2x] - pV1[val2x1];
*pU = CLIP(u2020);
*pV = CLIP(v2020);
pU += 2;
pV += 2;
}
}
return PRIMITIVES_SUCCESS;
}
#endif
void primitives_init_YUV_opt(primitives_t* prims)
@@ -615,6 +1251,16 @@ void primitives_init_YUV_opt(primitives_t* prims)
{
prims->RGBToYUV420_8u_P3AC4R = ssse3_RGBToYUV420;
prims->YUV420ToRGB_8u_P3AC4R = ssse3_YUV420ToRGB;
prims->YUV444ToRGB_8u_P3AC4R = ssse3_YUV444ToRGB_8u_P3AC4R;
}
#elif defined(WITH_NEON)
if (IsProcessorFeaturePresent(PF_ARM_NEON_INSTRUCTIONS_AVAILABLE))
{
prims->YUV420ToRGB_8u_P3AC4R = neon_YUV420ToRGB_8u_P3AC4R;
prims->YUV444ToRGB_8u_P3AC4R = neon_YUV444ToRGB_8u_P3AC4R;
prims->YUV420CombineToYUV444 = neon_YUV420CombineToYUV444;
}
#endif

View File

@@ -129,6 +129,47 @@ static INLINE BYTE CLIP(INT32 X)
return X;
}
/**
 * | R |   ( | 256     0    403 | |    Y    | )
 * | G | = ( | 256   -48   -120 | | U - 128 | ) >> 8
 * | B |   ( | 256   475      0 | | V - 128 | )
 */
static INLINE INT32 C(INT32 Y)
{
return (Y) - 0L;
}
static INLINE INT32 D(INT32 U)
{
return (U) - 128L;
}
static INLINE INT32 E(INT32 V)
{
return (V) - 128L;
}
static INLINE BYTE YUV2R(INT32 Y, INT32 U, INT32 V)
{
const INT32 r = (256L * C(Y) + 0L * D(U) + 403L * E(V));
const INT32 r8 = r >> 8L;
return CLIP(r8);
}
static INLINE BYTE YUV2G(INT32 Y, INT32 U, INT32 V)
{
const INT32 g = (256L * C(Y) - 48L * D(U) - 120L * E(V));
const INT32 g8 = g >> 8L;
return CLIP(g8);
}
static INLINE BYTE YUV2B(INT32 Y, INT32 U, INT32 V)
{
const INT32 b = (256L * C(Y) + 475L * D(U) + 0L * E(V));
const INT32 b8 = b >> 8L;
return CLIP(b8);
}
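/* Editorial worked example (not part of this commit): for Y=128, U=160,
 * V=96 we get D(U)=32 and E(V)=-32, hence
 *   R = (256*128 + 403*(-32)) >> 8 = 19872 >> 8 = 77
 *   G = (256*128 - 48*32 - 120*(-32)) >> 8 = 35072 >> 8 = 137
 *   B = (256*128 + 475*32) >> 8 = 47968 >> 8 = 187
 * i.e. a blue-leaning mid tone, as expected for U above and V below 128. */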
/* Function prototypes for all the init/deinit routines. */
FREERDP_LOCAL void primitives_init_copy(primitives_t* prims);
FREERDP_LOCAL void primitives_init_set(primitives_t* prims);

View File

@@ -22,7 +22,7 @@ static BOOL similar(const BYTE* src, const BYTE* dst, size_t size)
{
int diff = src[x] - dst[x];
if (abs(diff) > 2)
if (abs(diff) > 4)
{
fprintf(stderr, "%"PRIuz" %02"PRIX8" : %02"PRIX8" diff=%d\n", x, src[x], dst[x], abs(diff));
return FALSE;
@@ -78,13 +78,14 @@ static BOOL similarRGB(const BYTE* src, const BYTE* dst, size_t size, UINT32 for
return TRUE;
}
static void get_size(UINT32* width, UINT32* height)
static void get_size(BOOL large, UINT32* width, UINT32* height)
{
UINT32 shift = large ? 8 : 1;
winpr_RAND((BYTE*)width, sizeof(*width));
winpr_RAND((BYTE*)height, sizeof(*height));
// TODO: Algorithm only works on even resolutions...
*width = (*width % 64 + 1) << 1;
*height = (*height % 64 + 1) << 1;
*width = (*width % 64 + 1) << shift;
*height = (*height % 64 + 1) << shift;
}
static BOOL check_padding(const BYTE* psrc, size_t size, size_t padding,
@@ -170,7 +171,7 @@ static void free_padding(void* src, size_t padding)
/* Create 2 pseudo YUV420 frames of same size.
* Combine them and check, if the data is at the expected position. */
static BOOL TestPrimitiveYUVCombine(void)
static BOOL TestPrimitiveYUVCombine(primitives_t* prims, prim_size_t roi)
{
UINT32 x, y, i;
UINT32 awidth, aheight;
@@ -186,9 +187,6 @@ static BOOL TestPrimitiveYUVCombine(void)
size_t padding = 10000;
PROFILER_DEFINE(yuvCombine);
PROFILER_DEFINE(yuvSplit);
prim_size_t roi;
primitives_t* prims = primitives_get();
get_size(&roi.width, &roi.height);
awidth = roi.width + 16 - roi.width % 16;
aheight = roi.height + 16 - roi.height % 16;
fprintf(stderr, "Running YUVCombine on frame size %"PRIu32"x%"PRIu32" [%"PRIu32"x%"PRIu32"]\n",
@@ -360,18 +358,16 @@ fail:
return rc;
}
static BOOL TestPrimitiveYUV(BOOL use444)
static BOOL TestPrimitiveYUV(primitives_t* prims, prim_size_t roi, BOOL use444)
{
BOOL rc = FALSE;
UINT32 x, y;
UINT32 awidth, aheight;
BYTE* yuv[3] = {0};
UINT32 yuv_step[3];
prim_size_t roi;
BYTE* rgb = NULL;
BYTE* rgb_dst = NULL;
size_t size;
primitives_t* prims = primitives_get();
size_t uvsize, uvwidth;
size_t padding = 10000;
size_t stride;
@@ -390,7 +386,6 @@ static BOOL TestPrimitiveYUV(BOOL use444)
PROFILER_DEFINE(rgbToYUV444);
PROFILER_DEFINE(yuv420ToRGB);
PROFILER_DEFINE(yuv444ToRGB);
get_size(&roi.width, &roi.height);
/* Buffers need to be 16x16 aligned. */
awidth = roi.width + 16 - roi.width % 16;
aheight = roi.height + 16 - roi.height % 16;
@@ -457,6 +452,7 @@ static BOOL TestPrimitiveYUV(BOOL use444)
for (x = 0; x < sizeof(formats) / sizeof(formats[0]); x++)
{
const UINT32 DstFormat = formats[x];
printf("Testing destination color format %s\n", GetColorFormatName(DstFormat));
if (use444)
{
@@ -561,26 +557,73 @@ fail:
int TestPrimitivesYUV(int argc, char* argv[])
{
BOOL large = (argc > 1);
UINT32 x;
int rc = -1;
prim_test_setup(FALSE);
primitives_t* prims = primitives_get();
primitives_t* generic = primitives_get_generic();
for (x = 0; x < 10; x++)
{
if (!TestPrimitiveYUV(TRUE)) {
prim_size_t roi;
get_size(large, &roi.width, &roi.height);
printf("-------------------- GENERIC ------------------------\n");
if (!TestPrimitiveYUV(generic, roi, TRUE))
{
printf("TestPrimitiveYUV (444) failed.\n");
goto end;
}
if (!TestPrimitiveYUV(FALSE)) {
printf("---------------------- END --------------------------\n");
#if 1
printf("------------------- OPTIMIZED -----------------------\n");
if (!TestPrimitiveYUV(prims, roi, TRUE))
{
printf("TestPrimitiveYUV (444) failed.\n");
goto end;
}
printf("---------------------- END --------------------------\n");
#endif
printf("-------------------- GENERIC ------------------------\n");
if (!TestPrimitiveYUV(generic, roi, FALSE))
{
printf("TestPrimitiveYUV (420) failed.\n");
goto end;
}
if (!TestPrimitiveYUVCombine()) {
printf("---------------------- END --------------------------\n");
printf("------------------- OPTIMIZED -----------------------\n");
if (!TestPrimitiveYUV(prims, roi, FALSE))
{
printf("TestPrimitiveYUV (420) failed.\n");
goto end;
}
printf("---------------------- END --------------------------\n");
printf("-------------------- GENERIC ------------------------\n");
if (!TestPrimitiveYUVCombine(generic, roi))
{
printf("TestPrimitiveYUVCombine failed.\n");
goto end;
}
printf("---------------------- END --------------------------\n");
printf("------------------- OPTIMIZED -----------------------\n");
if (!TestPrimitiveYUVCombine(prims, roi))
{
printf("TestPrimitiveYUVCombine failed.\n");
goto end;
}
printf("---------------------- END --------------------------\n");
}
rc = 0;