/* FreeRDP: A Remote Desktop Protocol Client
 * Optimized routines to set a chunk of memory to a constant.
 * vi:ts=4 sw=4:
 *
 * (c) Copyright 2012 Hewlett-Packard Development Company, L.P.
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 *
 */
#include <freerdp/config.h>

#include <string.h>
#include <freerdp/types.h>
#include <freerdp/primitives.h>
#include <winpr/sysinfo.h>

#ifdef WITH_SSE2
#include <emmintrin.h>
#endif /* WITH_SSE2 */

#ifdef WITH_IPP
#include <ipps.h>
#endif /* WITH_IPP */

#include "prim_internal.h"

static primitives_t* generic = NULL;

/* ========================================================================= */
#ifdef WITH_SSE2
#if !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS)
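/* Fill 'len' bytes at pDst with 'val'. Short runs are delegated to the
 * generic implementation; otherwise the destination is first brought to
 * 16-byte alignment with scalar stores, bulk-filled with aligned XMM stores
 * (unrolled 256-byte chunks, then single 16-byte chunks), and any remaining
 * bytes are written one at a time. */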
static pstatus_t sse2_set_8u(BYTE val, BYTE* pDst, UINT32 len)
{
	BYTE byte, *dptr;
	__m128i xmm0;
	size_t count;

	if (len < 16)
		return generic->set_8u(val, pDst, len);

	byte = val;
	dptr = (BYTE*)pDst;

	/* Seek 16-byte alignment. */
	while ((ULONG_PTR)dptr & 0x0f)
	{
		*dptr++ = byte;

		if (--len == 0)
			return PRIMITIVES_SUCCESS;
	}

	xmm0 = _mm_set1_epi8(byte);
	/* Cover 256-byte chunks via SSE register stores. */
	count = len >> 8;
	len -= count << 8;

	/* Do 256-byte chunks using one XMM register. */
	while (count--)
	{
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
	}

	/* Cover 16-byte chunks via SSE register stores. */
	count = len >> 4;
	len -= count << 4;

	/* Do 16-byte chunks using one XMM register. */
	while (count--)
	{
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 16;
	}

	/* Do leftover bytes. */
	while (len--)
		*dptr++ = byte;

	return PRIMITIVES_SUCCESS;
}
#endif /* !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS) */
#endif /* WITH_SSE2 */

/* ------------------------------------------------------------------------- */
#ifdef WITH_SSE2
#if !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS)
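/* Fill 'len' 32-bit words at pDst with 'val', using the same strategy as
 * sse2_set_8u: scalar stores up to 16-byte alignment, unrolled aligned XMM
 * stores for the bulk, and a scalar tail. Destinations that are not at least
 * 4-byte aligned are handed to the generic implementation. */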
static pstatus_t sse2_set_32u(UINT32 val, UINT32* pDst, UINT32 len)
{
	const primitives_t* prim = primitives_get_generic();
	UINT32* dptr = (UINT32*)pDst;
	__m128i xmm0;
	size_t count;

	/* If really short, just do it here. */
	if (len < 32)
	{
		while (len--)
			*dptr++ = val;

		return PRIMITIVES_SUCCESS;
	}

	/* A pointer that is not 4-byte aligned can never reach 16-byte alignment
	 * by stepping one UINT32 at a time, so fall back to the generic version. */
	if (((ULONG_PTR)dptr & 0x03) != 0)
	{
		return prim->set_32u(val, pDst, len);
	}

	/* Seek 16-byte alignment. */
	while ((ULONG_PTR)dptr & 0x0f)
	{
		*dptr++ = val;

		if (--len == 0)
			return PRIMITIVES_SUCCESS;
	}

	xmm0 = _mm_set1_epi32(val);
	/* Cover 256-byte chunks via SSE register stores. */
	count = len >> 6;
	len -= count << 6;

	/* Do 256-byte chunks using one XMM register. */
	while (count--)
	{
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
	}

	/* Cover 16-byte chunks via SSE register stores. */
	count = len >> 2;
	len -= count << 2;

	/* Do 16-byte chunks using one XMM register. */
	while (count--)
	{
		_mm_store_si128((__m128i*)dptr, xmm0);
		dptr += 4;
	}

	/* Do leftover values. */
	while (len--)
		*dptr++ = val;

	return PRIMITIVES_SUCCESS;
}

/* ------------------------------------------------------------------------- */
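/* Signed variant: reinterpret the value bit-for-bit as unsigned and reuse
 * sse2_set_32u. */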
static pstatus_t sse2_set_32s(INT32 val, INT32* pDst, UINT32 len)
{
	UINT32 uval = *((UINT32*)&val);
	return sse2_set_32u(uval, (UINT32*)pDst, len);
}
#endif /* !defined(WITH_IPP) || defined(ALL_PRIMITIVES_VERSIONS) */
#endif /* WITH_SSE2 */

#ifdef WITH_IPP
/* ------------------------------------------------------------------------- */
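/* Thin wrapper so the unsigned 32-bit entry point can be served by IPP's
 * signed ippsSet_32s: the value is reinterpreted bit-for-bit. */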
static pstatus_t ipp_wrapper_set_32u(UINT32 val, UINT32* pDst, INT32 len)
{
	/* A little type conversion, then use the signed version. */
	INT32 sval = *((INT32*)&val);
	return ippsSet_32s(sval, (INT32*)pDst, len);
}
#endif /* WITH_IPP */

/* ------------------------------------------------------------------------- */
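/* Entry point: install the generic set/zero primitives first, then override
 * them with IPP routines when built WITH_IPP, or with the SSE2 versions above
 * when SSE2 support is detected at runtime. */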
void primitives_init_set_opt(primitives_t* prims)
{
	generic = primitives_get_generic();
	primitives_init_set(prims);
	/* Pick tuned versions if possible. */
#ifdef WITH_IPP
	prims->set_8u = (__set_8u_t)ippsSet_8u;
	prims->set_32s = (__set_32s_t)ippsSet_32s;
	prims->set_32u = (__set_32u_t)ipp_wrapper_set_32u;
	prims->zero = (__zero_t)ippsZero_8u;
#elif defined(WITH_SSE2)

	if (IsProcessorFeaturePresent(PF_SSE2_INSTRUCTIONS_AVAILABLE))
	{
		prims->set_8u = sse2_set_8u;
		prims->set_32s = sse2_set_32s;
		prims->set_32u = sse2_set_32u;
	}

#endif
}
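
/* Usage sketch (for illustration only, not part of this module): callers
 * normally reach these routines through the primitives function table rather
 * than invoking them directly, e.g.
 *
 *   const primitives_t* prims = primitives_get();
 *   prims->set_8u(0xFF, buffer, bufferLen);
 *
 * Here 'buffer' and 'bufferLen' are placeholder names, and primitives_get()
 * is assumed to have been initialized by the library. */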