FreeRDP/libfreerdp/primitives/prim_set_opt.c


/* FreeRDP: A Remote Desktop Protocol Client
* Optimized routines to set a chunk of memory to a constant.
* vi:ts=4 sw=4:
*
* (c) Copyright 2012 Hewlett-Packard Development Company, L.P.
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
#include <freerdp/config.h>
#include <string.h>
#include <freerdp/types.h>
#include <freerdp/primitives.h>
#include <winpr/sysinfo.h>
#ifdef WITH_SSE2
#include <emmintrin.h>
#endif /* WITH_SSE2 */
#ifdef WITH_IPP
#include <ipps.h>
#endif /* WITH_IPP */
#include "prim_internal.h"
static primitives_t* generic = NULL;
/* ========================================================================= */
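/* SSE2 block fill for 8-bit values: for runs of at least 16 bytes the routine
 * below stores scalar bytes until the destination is 16-byte aligned, then
 * writes 256-byte chunks with sixteen aligned XMM stores, then 16-byte chunks,
 * and finally finishes the tail with scalar stores. Shorter runs are handed
 * back to the generic implementation.
 */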
#ifdef WITH_SSE2
#if !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS)
static pstatus_t sse2_set_8u(BYTE val, BYTE* WINPR_RESTRICT pDst, UINT32 len)
{
    BYTE byte, *dptr;
    __m128i xmm0;
    size_t count;

    if (len < 16)
        return generic->set_8u(val, pDst, len);

    byte = val;
    dptr = (BYTE*)pDst;

    /* Seek 16-byte alignment. */
    while ((ULONG_PTR)dptr & 0x0f)
    {
        *dptr++ = byte;

        if (--len == 0)
            return PRIMITIVES_SUCCESS;
    }

    xmm0 = _mm_set1_epi8(byte);
    /* Cover 256-byte chunks via SSE register stores. */
    count = len >> 8;
    len -= count << 8;

    /* Do 256-byte chunks using one XMM register. */
    while (count--)
    {
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
    }

    /* Cover 16-byte chunks via SSE register stores. */
    count = len >> 4;
    len -= count << 4;

    /* Do 16-byte chunks using one XMM register. */
    while (count--)
    {
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 16;
    }

    /* Do leftover bytes. */
    while (len--)
        *dptr++ = byte;

    return PRIMITIVES_SUCCESS;
}
#endif /* !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS) */
#endif /* WITH_SSE2 */
/* ------------------------------------------------------------------------- */
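/* SSE2 block fill for 32-bit values: same structure as sse2_set_8u, but the
 * chunk arithmetic counts UINT32 elements (64 per 256-byte chunk, 4 per XMM
 * store). A destination that is not even 4-byte aligned can never reach
 * 16-byte alignment through whole-UINT32 stores, so that case is delegated to
 * the generic primitive up front.
 */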
#ifdef WITH_SSE2
#if !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS)
static pstatus_t sse2_set_32u(UINT32 val, UINT32* WINPR_RESTRICT pDst, UINT32 len)
{
    const primitives_t* prim = primitives_get_generic();
    UINT32* dptr = (UINT32*)pDst;
    __m128i xmm0;
    size_t count;

    /* If really short, just do it here. */
    if (len < 32)
    {
        while (len--)
            *dptr++ = val;

        return PRIMITIVES_SUCCESS;
    }

    /* Ensure we can reach 16-byte alignment. */
    if (((ULONG_PTR)dptr & 0x03) != 0)
    {
        return prim->set_32u(val, pDst, len);
    }

    /* Seek 16-byte alignment. */
    while ((ULONG_PTR)dptr & 0x0f)
    {
        *dptr++ = val;

        if (--len == 0)
            return PRIMITIVES_SUCCESS;
    }

    xmm0 = _mm_set1_epi32(val);
    /* Cover 256-byte chunks via SSE register stores. */
    count = len >> 6;
    len -= count << 6;

    /* Do 256-byte chunks using one XMM register. */
    while (count--)
    {
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
    }

    /* Cover 16-byte chunks via SSE register stores. */
    count = len >> 2;
    len -= count << 2;

    /* Do 16-byte chunks using one XMM register. */
    while (count--)
    {
        _mm_store_si128((__m128i*)dptr, xmm0);
        dptr += 4;
    }

    /* Do leftover values. */
    while (len--)
        *dptr++ = val;

    return PRIMITIVES_SUCCESS;
}

/* ------------------------------------------------------------------------- */
static pstatus_t sse2_set_32s(INT32 val, INT32* WINPR_RESTRICT pDst, UINT32 len)
{
    /* Reinterpret the signed value and delegate to the unsigned version. */
    UINT32 uval = *((UINT32*)&val);
    return sse2_set_32u(uval, (UINT32*)pDst, len);
}
#endif /* !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS) */
#endif /* WITH_SSE2 */
#ifdef WITH_IPP
/* ------------------------------------------------------------------------- */
static pstatus_t ipp_wrapper_set_32u(UINT32 val, UINT32* WINPR_RESTRICT pDst, INT32 len)
{
    /* A little type conversion, then use the signed version. */
    INT32 sval = *((INT32*)&val);
    return ippsSet_32s(sval, (INT32*)pDst, len);
}
#endif
/* ------------------------------------------------------------------------- */
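/* Usage sketch (illustrative only): callers normally fetch the active function
 * table with primitives_get() and go through it, e.g.
 *
 *     primitives_t* prims = primitives_get();
 *     BYTE buf[4096];
 *     prims->set_8u(0xFF, buf, sizeof(buf));
 *
 * primitives_init_set_opt() below installs the IPP or SSE2 versions into that
 * table when the corresponding build options and CPU features are available.
 */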
void primitives_init_set_opt(primitives_t* WINPR_RESTRICT prims)
{
    generic = primitives_get_generic();
    primitives_init_set(prims);
    /* Pick tuned versions if possible. */
#ifdef WITH_IPP
    prims->set_8u = (__set_8u_t)ippsSet_8u;
    prims->set_32s = (__set_32s_t)ippsSet_32s;
    prims->set_32u = (__set_32u_t)ipp_wrapper_set_32u;
    prims->zero = (__zero_t)ippsZero_8u;
#elif defined(WITH_SSE2)
    if (IsProcessorFeaturePresent(PF_SSE2_INSTRUCTIONS_AVAILABLE))
    {
        prims->set_8u = sse2_set_8u;
        prims->set_32s = sse2_set_32s;
        prims->set_32u = sse2_set_32u;
    }
#endif
}