Added AVC444v2 client support.

Armin Novak 2017-04-10 17:16:57 +02:00
parent 9fd3974817
commit b0d3cfda4d
9 changed files with 302 additions and 190 deletions

View File

@ -276,6 +276,7 @@ UINT rdpgfx_decode(RDPGFX_PLUGIN* gfx, RDPGFX_SURFACE_COMMAND* cmd)
break;
case RDPGFX_CODECID_AVC444:
case RDPGFX_CODECID_AVC444v2:
if ((error = rdpgfx_decode_AVC444(gfx, cmd)))
WLog_ERR(TAG, "rdpgfx_decode_AVC444 failed with error %"PRIu32"", error);

View File

@ -104,6 +104,8 @@ static UINT rdpgfx_send_caps_advertise_pdu(RDPGFX_CHANNEL_CALLBACK* callback)
capsSets[pdu.capsSetCount] = *capsSet;
capsSets[pdu.capsSetCount++].version = RDPGFX_CAPVERSION_102;
capsSets[pdu.capsSetCount] = *capsSet;
capsSets[pdu.capsSetCount++].version = RDPGFX_CAPVERSION_103;
}
header.pduLength = RDPGFX_HEADER_SIZE + 2 + (pdu.capsSetCount *
@ -271,11 +273,13 @@ static UINT rdpgfx_recv_reset_graphics_pdu(RDPGFX_CHANNEL_CALLBACK* callback,
Stream_Seek(s, pad); /* pad (total size is 340 bytes) */
WLog_DBG(TAG, "RecvResetGraphicsPdu: width: %"PRIu32" height: %"PRIu32" count: %"PRIu32"",
pdu.width, pdu.height, pdu.monitorCount);
for (index = 0; index < pdu.monitorCount; index++)
{
monitor = &(pdu.monitorDefArray[index]);
WLog_DBG(TAG, "RecvResetGraphicsPdu: monitor left:%"PRIi32" top:%"PRIi32" right:%"PRIi32" left:%"PRIi32" flags:0x%"PRIx32"",
monitor->left, monitor->top, monitor->right, monitor->bottom, monitor->flags);
WLog_DBG(TAG,
"RecvResetGraphicsPdu: monitor left:%"PRIi32" top:%"PRIi32" right:%"PRIi32" left:%"PRIi32" flags:0x%"PRIx32"",
monitor->left, monitor->top, monitor->right, monitor->bottom, monitor->flags);
}
if (context)
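Note: the caps-advertise hunk above clones the existing capability entry and only bumps the version, so RDPGFX_CAPVERSION_103 (the version that enables AVC444v2) is announced alongside 10.2. Below is a minimal standalone sketch of that cloning pattern; the trimmed-down struct, the helper name and the three-entry array are illustrative, and the real rdpgfx_send_caps_advertise_pdu() also builds the earlier version entries and serializes the PDU.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Capability versions as defined in rdpgfx.h (values taken from the header diff below). */
#define RDPGFX_CAPVERSION_10  0x000A0002
#define RDPGFX_CAPVERSION_102 0x000A0200
#define RDPGFX_CAPVERSION_103 0x000A0301

/* Trimmed-down capability set; the real RDPGFX_CAPSET carries more fields. */
typedef struct
{
	uint32_t version;
	uint32_t flags;
} capset_sketch_t;

/* Mirror of the cloning pattern used by rdpgfx_send_caps_advertise_pdu():
 * reuse the flags of the base entry and only bump the version for every
 * newer capability set the client announces; 10.3 is the one added here. */
static size_t advertise_sketch(capset_sketch_t* capsSets, capset_sketch_t base)
{
	size_t count = 0;
	capsSets[count] = base;
	capsSets[count++].version = RDPGFX_CAPVERSION_10;
	capsSets[count] = base;
	capsSets[count++].version = RDPGFX_CAPVERSION_102;
	capsSets[count] = base;
	capsSets[count++].version = RDPGFX_CAPVERSION_103;
	return count;
}

int main(void)
{
	capset_sketch_t sets[3];
	const capset_sketch_t base = { RDPGFX_CAPVERSION_10, 0 };
	const size_t count = advertise_sketch(sets, base);
	printf("advertising %zu capability sets, newest version 0x%08"PRIX32"\n",
	       count, sets[count - 1].version);
	return 0;
}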

View File

@ -88,6 +88,9 @@ const char* rdpgfx_get_codec_id_string(UINT16 codecId)
case RDPGFX_CODECID_AVC444:
return "RDPGFX_CODECID_AVC444";
case RDPGFX_CODECID_AVC444v2:
return "RDPGFX_CODECID_AVC444v2";
case RDPGFX_CODECID_ALPHA:
return "RDPGFX_CODECID_ALPHA";

View File

@ -93,8 +93,9 @@ typedef struct _RDPGFX_HEADER RDPGFX_HEADER;
#define RDPGFX_CAPVERSION_81 0x00080105 /** [MS-RDPEGFX] 2.2.3.2 */
#define RDPGFX_CAPVERSION_10 0x000A0002 /** [MS-RDPEGFX] 2.2.3.3 */
#define RDPGFX_CAPVERSION_102 0x000A0200 /** [MS-RDPEGFX] 2.2.3.4 */
#define RDPGFX_CAPVERSION_103 0x000A0301 /** [MS-RDPEGFX] 2.2.3.5 */
#define RDPGFX_NUMBER_CAPSETS 4
#define RDPGFX_NUMBER_CAPSETS 5
#define RDPGFX_CAPSET_SIZE 12
struct _RDPGFX_CAPSET
@ -144,6 +145,7 @@ typedef struct _RDPGFX_CAPSET_VERSION10 RDPGFX_CAPSET_VERSION10;
#define RDPGFX_CODECID_AVC420 0x000B
#define RDPGFX_CODECID_ALPHA 0x000C
#define RDPGFX_CODECID_AVC444 0x000E
#define RDPGFX_CODECID_AVC444v2 0x000F
#define RDPGFX_WIRE_TO_SURFACE_PDU_1_SIZE 17

View File

@ -103,7 +103,8 @@ FREERDP_API INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
RECTANGLE_16* auxRegionRects, UINT32 numAuxRegionRect,
const BYTE* pAuxSrcData, UINT32 AuxSrcSize,
BYTE* pDstData, DWORD DstFormat,
UINT32 nDstStep, UINT32 nDstWidth, UINT32 nDstHeight);
UINT32 nDstStep, UINT32 nDstWidth, UINT32 nDstHeight,
UINT32 codecId);
FREERDP_API BOOL h264_context_reset(H264_CONTEXT* h264, UINT32 width, UINT32 height);
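Note: the extra codecId argument is how the caller tells avc444_decompress() whether the auxiliary stream carries AVC444 (v1) or AVC444v2 chroma data. The following is a call-shape sketch, assuming a FreeRDP tree that already contains this change; the wrapper and its parameter names are illustrative and simply mirror the call made from gdi_SurfaceCommand_AVC444() later in this commit.

#include <freerdp/codec/h264.h>

/* codecId is RDPGFX_CODECID_AVC444 or RDPGFX_CODECID_AVC444v2; it is passed
 * straight through so the decoder can pick the matching chroma unpacking. */
INT32 decode_avc444_frame(H264_CONTEXT* h264, BYTE op,
                          RECTANGLE_16* rects, UINT32 nrRects,
                          const BYTE* data, UINT32 size,
                          RECTANGLE_16* auxRects, UINT32 nrAuxRects,
                          const BYTE* auxData, UINT32 auxSize,
                          BYTE* dst, DWORD dstFormat, UINT32 dstStep,
                          UINT32 width, UINT32 height, UINT16 codecId)
{
	if (!h264 || !dst)
		return -1;

	return avc444_decompress(h264, op, rects, nrRects, data, size,
	                         auxRects, nrAuxRects, auxData, auxSize,
	                         dst, dstFormat, dstStep, width, height, codecId);
}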

View File

@ -63,6 +63,13 @@ typedef struct
UINT32 height;
} prim_size_t; /* like IppiSize */
typedef enum
{
AVC444_LUMA,
AVC444_CHROMAv1,
AVC444_CHROMAv2
} avc444_frame_type;
/* Function prototypes for all of the supported primitives. */
typedef pstatus_t (*__copy_t)(
const void* pSrc,
@ -181,10 +188,10 @@ typedef pstatus_t (*__RGBToYUV444_8u_P3AC4R_t)(
BYTE* pDst[3], UINT32 dstStep[3],
const prim_size_t* roi);
typedef pstatus_t (*__YUV420CombineToYUV444_t)(
const BYTE* pMainSrc[3], const UINT32 srcMainStep[3],
const BYTE* pAuxSrc[3], const UINT32 srcAuxStep[3],
avc444_frame_type type,
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst[3], const UINT32 dstStep[3],
const prim_size_t* roi);
const RECTANGLE_16* roi);
typedef pstatus_t (*__YUV444SplitToYUV420_t)(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pMainDst[3], const UINT32 dstMainStep[3],
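Note: YUV420CombineToYUV444 now takes the frame type and a destination rectangle instead of separate main/aux plane pointers. Below is a minimal sketch of driving only the AVC444_LUMA pass through the primitives table, assuming a FreeRDP build that contains this change and that primitives_get() can be used directly here; the 16x16 region, the strides and the buffers are made up for illustration.

#include <freerdp/types.h>
#include <freerdp/primitives.h>

int main(void)
{
	const primitives_t* prims = primitives_get();
	/* Illustrative 16x16 luma plane plus 8x8 chroma planes on the source side,
	 * full-resolution 16x16 planes on the destination side. */
	BYTE srcY[16 * 16] = { 0 }, srcU[8 * 8] = { 0 }, srcV[8 * 8] = { 0 };
	BYTE dstY[16 * 16] = { 0 }, dstU[16 * 16] = { 0 }, dstV[16 * 16] = { 0 };
	const BYTE* pSrc[3] = { srcY, srcU, srcV };
	const UINT32 srcStep[3] = { 16, 8, 8 };
	BYTE* pDst[3] = { dstY, dstU, dstV };
	const UINT32 dstStep[3] = { 16, 16, 16 };
	const RECTANGLE_16 rect = { 0, 0, 16, 16 }; /* left, top, right, bottom */

	if (!prims || !prims->YUV420CombineToYUV444)
		return 1;

	/* Luma pass only; the chroma passes use AVC444_CHROMAv1 or AVC444_CHROMAv2. */
	if (prims->YUV420CombineToYUV444(AVC444_LUMA, pSrc, srcStep,
	                                 pDst, dstStep, &rect) != PRIMITIVES_SUCCESS)
		return 1;

	return 0;
}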

View File

@ -1702,16 +1702,14 @@ static BOOL avc444_process_rects(H264_CONTEXT* h264, const BYTE* pSrcData,
UINT32 SrcSize, BYTE* pDstData, UINT32 DstFormat, UINT32 nDstStep,
UINT32 nDstWidth, UINT32 nDstHeight,
const RECTANGLE_16* rects, UINT32 nrRects,
BOOL main)
avc444_frame_type type)
{
const primitives_t* prims = primitives_get();
UINT32 x;
const BYTE* pYUVPoint[3] = { NULL, NULL, NULL };
BYTE* pYUVDstPoint[3];
UINT32* piDstStride = h264->iYUV444Stride;
BYTE** ppYUVDstData = h264->pYUV444Data;
const UINT32* piStride = h264->iStride;
BYTE** ppYUVData = h264->pYUVData;
const BYTE** ppYUVData = (const BYTE**)h264->pYUVData;
if (h264->subsystem->Decompress(h264, pSrcData, SrcSize) < 0)
return FALSE;
@ -1722,42 +1720,14 @@ static BOOL avc444_process_rects(H264_CONTEXT* h264, const BYTE* pSrcData,
for (x = 0; x < nrRects; x++)
{
const RECTANGLE_16* rect = &rects[x];
prim_size_t roi;
if (!check_rect(h264, rect, nDstWidth, nDstHeight))
continue;
pYUVPoint[0] = ppYUVData[0] + rect->top * piStride[0] +
rect->left;
pYUVPoint[1] = ppYUVData[1] + rect->top / 2 * piStride[1] +
rect->left / 2;
pYUVPoint[2] = ppYUVData[2] + rect->top / 2 * piStride[2] +
rect->left / 2;
pYUVDstPoint[0] = ppYUVDstData[0] + rect->top * piDstStride[0] +
rect->left;
pYUVDstPoint[1] = ppYUVDstData[1] + rect->top * piDstStride[1] +
rect->left;
pYUVDstPoint[2] = ppYUVDstData[2] + rect->top * piDstStride[2] +
rect->left;
roi.width = rect->right - rect->left + 1;
roi.height = rect->bottom - rect->top + 1;
if (main)
{
if (prims->YUV420CombineToYUV444(pYUVPoint, piStride,
NULL, NULL,
pYUVDstPoint, piDstStride,
&roi) != PRIMITIVES_SUCCESS)
return FALSE;
}
else
{
if (prims->YUV420CombineToYUV444(NULL, NULL,
pYUVPoint, piStride,
pYUVDstPoint, piDstStride,
&roi) != PRIMITIVES_SUCCESS)
return FALSE;
}
if (prims->YUV420CombineToYUV444(type, ppYUVData, piStride,
ppYUVDstData, piDstStride,
rect) != PRIMITIVES_SUCCESS)
return FALSE;
}
if (!avc_yuv_to_rgb(h264, rects, nrRects, nDstWidth,
@ -1789,9 +1759,11 @@ INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
RECTANGLE_16* auxRegionRects, UINT32 numAuxRegionRect,
const BYTE* pAuxSrcData, UINT32 AuxSrcSize,
BYTE* pDstData, DWORD DstFormat,
UINT32 nDstStep, UINT32 nDstWidth, UINT32 nDstHeight)
UINT32 nDstStep, UINT32 nDstWidth, UINT32 nDstHeight,
UINT32 codecId)
{
INT32 status = -1;
avc444_frame_type chroma = (codecId == RDPGFX_CODECID_AVC444) ? AVC444_CHROMAv1 : AVC444_CHROMAv2;
if (!h264 || !regionRects ||
!pSrcData || !pDstData)
@ -1803,11 +1775,11 @@ INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
* Chroma420 in stream 2 */
if (!avc444_process_rects(h264, pSrcData, SrcSize, pDstData, DstFormat, nDstStep, nDstWidth,
nDstHeight,
regionRects, numRegionRects, TRUE))
regionRects, numRegionRects, AVC444_LUMA))
status = -1;
else if (!avc444_process_rects(h264, pAuxSrcData, AuxSrcSize, pDstData, DstFormat, nDstStep,
nDstWidth, nDstHeight,
auxRegionRects, numAuxRegionRect, FALSE))
auxRegionRects, numAuxRegionRect, chroma))
status = -1;
else
status = 0;
@ -1817,7 +1789,7 @@ INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
case 2: /* Chroma420 in stream 1 */
if (!avc444_process_rects(h264, pSrcData, SrcSize, pDstData, DstFormat, nDstStep, nDstWidth,
nDstHeight,
regionRects, numRegionRects, FALSE))
regionRects, numRegionRects, chroma))
status = -1;
else
status = 0;
@ -1827,7 +1799,7 @@ INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
case 1: /* YUV420 in stream 1 */
if (!avc444_process_rects(h264, pSrcData, SrcSize, pDstData, DstFormat, nDstStep, nDstWidth,
nDstHeight,
regionRects, numRegionRects, TRUE))
regionRects, numRegionRects, AVC444_LUMA))
status = -1;
else
status = 0;
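Note: in avc444_decompress() the op value selects which of the two encoded streams are present, while the codec ID only decides how the chroma stream is unpacked (AVC444_CHROMAv1 vs. AVC444_CHROMAv2). The following is a standalone mirror of that dispatch; the constants are copied from the headers in this commit, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Local mirror of the frame types added to primitives.h by this commit. */
typedef enum
{
	AVC444_LUMA,
	AVC444_CHROMAv1,
	AVC444_CHROMAv2
} avc444_frame_type;

/* Codec IDs as defined in rdpgfx.h. */
#define RDPGFX_CODECID_AVC444   0x000E
#define RDPGFX_CODECID_AVC444v2 0x000F

/* Which combine passes avc444_decompress() runs for a given op/codecId pair. */
static void describe_avc444_op(uint8_t op, uint16_t codecId)
{
	const avc444_frame_type chroma =
	    (codecId == RDPGFX_CODECID_AVC444) ? AVC444_CHROMAv1 : AVC444_CHROMAv2;
	const char* chromaName =
	    (chroma == AVC444_CHROMAv1) ? "AVC444_CHROMAv1" : "AVC444_CHROMAv2";

	switch (op)
	{
		case 0: /* YUV420 in stream 1, Chroma420 in stream 2 */
			printf("stream 1 -> AVC444_LUMA, stream 2 -> %s\n", chromaName);
			break;
		case 1: /* YUV420 in stream 1 only */
			printf("stream 1 -> AVC444_LUMA\n");
			break;
		case 2: /* Chroma420 in stream 1 only */
			printf("stream 1 -> %s\n", chromaName);
			break;
		default:
			printf("unhandled op %u\n", (unsigned int)op);
			break;
	}
}

int main(void)
{
	describe_avc444_op(0, RDPGFX_CODECID_AVC444v2);
	return 0;
}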

View File

@ -113,7 +113,7 @@ static UINT gdi_OutputUpdate(rdpGdi* gdi, gdiGfxSurface* surface)
&(surface->invalidRegion), &surfaceRect);
if (!(rects = region16_rects(&surface->invalidRegion, &nbRects)) || !nbRects)
return CHANNEL_RC_OK;
return CHANNEL_RC_OK;
update->BeginPaint(gdi->context);
@ -137,7 +137,6 @@ static UINT gdi_OutputUpdate(rdpGdi* gdi, gdiGfxSurface* surface)
}
update->EndPaint(gdi->context);
region16_clear(&(surface->invalidRegion));
return CHANNEL_RC_OK;
}
@ -213,11 +212,12 @@ static UINT gdi_SurfaceCommand_Uncompressed(rdpGdi* gdi,
UINT status = CHANNEL_RC_OK;
gdiGfxSurface* surface;
RECTANGLE_16 invalidRect;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -253,11 +253,12 @@ static UINT gdi_SurfaceCommand_RemoteFX(rdpGdi* gdi,
{
UINT status = CHANNEL_RC_OK;
gdiGfxSurface* surface;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -294,11 +295,12 @@ static UINT gdi_SurfaceCommand_ClearCodec(rdpGdi* gdi,
UINT status = CHANNEL_RC_OK;
gdiGfxSurface* surface;
RECTANGLE_16 invalidRect;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -342,11 +344,12 @@ static UINT gdi_SurfaceCommand_Planar(rdpGdi* gdi, RdpgfxClientContext* context,
BYTE* DstData = NULL;
gdiGfxSurface* surface;
RECTANGLE_16 invalidRect;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -390,11 +393,12 @@ static UINT gdi_SurfaceCommand_AVC420(rdpGdi* gdi,
gdiGfxSurface* surface;
RDPGFX_H264_METABLOCK* meta;
RDPGFX_AVC420_BITMAP_STREAM* bs;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -449,11 +453,12 @@ static UINT gdi_SurfaceCommand_AVC444(rdpGdi* gdi, RdpgfxClientContext* context,
RDPGFX_AVC420_BITMAP_STREAM* avc2;
RDPGFX_H264_METABLOCK* meta2;
RECTANGLE_16* regionRects = NULL;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -473,7 +478,7 @@ static UINT gdi_SurfaceCommand_AVC444(rdpGdi* gdi, RdpgfxClientContext* context,
avc2->data, avc2->length,
surface->data, surface->format,
surface->scanline, surface->width,
surface->height);
surface->height, cmd->codecId);
if (rc < 0)
{
@ -517,11 +522,12 @@ static UINT gdi_SurfaceCommand_Alpha(rdpGdi* gdi, RdpgfxClientContext* context,
UINT32 color;
gdiGfxSurface* surface;
RECTANGLE_16 invalidRect;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -561,17 +567,17 @@ static UINT gdi_SurfaceCommand_Progressive(rdpGdi* gdi,
INT32 rc;
UINT status = CHANNEL_RC_OK;
gdiGfxSurface* surface;
/**
* Note: Since this comes via a Wire-To-Surface-2 PDU the
* cmd's top/left/right/bottom/width/height members are always zero!
* The update region is determined during decompression.
*/
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
{
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__, cmd->surfaceId);
WLog_ERR(TAG, "%s: unable to retrieve surfaceData for surfaceId=%"PRIu32"", __FUNCTION__,
cmd->surfaceId);
return ERROR_NOT_FOUND;
}
@ -649,6 +655,7 @@ static UINT gdi_SurfaceCommand(RdpgfxClientContext* context,
status = gdi_SurfaceCommand_AVC420(gdi, context, cmd);
break;
case RDPGFX_CODECID_AVC444v2:
case RDPGFX_CODECID_AVC444:
status = gdi_SurfaceCommand_AVC444(gdi, context, cmd);
break;
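Note: as in the rdpgfx channel code earlier in this commit, both AVC444 codec IDs fall through to the same GDI handler; the only difference is the cmd->codecId value that gdi_SurfaceCommand_AVC444() now forwards to avc444_decompress(). A standalone mirror of that routing follows; the trimmed-down command struct and the function names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define RDPGFX_CODECID_AVC420   0x000B
#define RDPGFX_CODECID_AVC444   0x000E
#define RDPGFX_CODECID_AVC444v2 0x000F

/* Hypothetical, trimmed-down surface command; the real RDPGFX_SURFACE_COMMAND
 * also carries the surface id, destination rect, format and payload. */
typedef struct
{
	uint16_t codecId;
} surface_cmd_sketch_t;

/* Mirror of the dispatch in gdi_SurfaceCommand(): both AVC444 codec IDs reach
 * the same handler, which hands cmd->codecId down to the decoder so the right
 * chroma layout is selected. */
static int surface_command_sketch(const surface_cmd_sketch_t* cmd)
{
	switch (cmd->codecId)
	{
		case RDPGFX_CODECID_AVC420:
			printf("AVC420 handler\n");
			return 0;
		case RDPGFX_CODECID_AVC444:
		case RDPGFX_CODECID_AVC444v2:
			printf("shared AVC444 handler, codecId=0x%04X forwarded to the decoder\n",
			       (unsigned int)cmd->codecId);
			return 0;
		default:
			return -1;
	}
}

int main(void)
{
	const surface_cmd_sketch_t cmd = { RDPGFX_CODECID_AVC444v2 };
	return surface_command_sketch(&cmd);
}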

View File

@ -30,91 +30,154 @@
#include <freerdp/codec/color.h>
#include "prim_internal.h"
/**
* @brief general_YUV420CombineToYUV444
*
* @param pMainSrc Pointer to luma YUV420 data
* @param srcMainStep Step width in luma YUV420 data
* @param pAuxSrc Pointer to chroma YUV420 data
* @param srcAuxStep Step width in chroma YUV420 data
* @param pDst Pointer to YUV444 data
* @param dstStep Step width in YUV444 data
* @param roi Region of source to combine in destination.
*
* @return PRIMITIVES_SUCCESS on success, an error code otherwise.
*/
static pstatus_t general_YUV420CombineToYUV444(
const BYTE* pMainSrc[3], const UINT32 srcMainStep[3],
const BYTE* pAuxSrc[3], const UINT32 srcAuxStep[3],
BYTE* pDst[3], const UINT32 dstStep[3],
const prim_size_t* roi)
static pstatus_t general_LumaToYUV444(const BYTE* pSrcRaw[3], const UINT32 srcStep[3],
BYTE* pDstRaw[3], const UINT32 dstStep[3],
const RECTANGLE_16* roi)
{
UINT32 x, y;
const UINT32 nWidth = roi->right - roi->left;
const UINT32 nHeight = roi->bottom - roi->top;
const UINT32 halfWidth = (nWidth + 1) / 2;
const UINT32 halfHeight = (nHeight + 1) / 2;
const UINT32 oddY = 1;
const UINT32 evenY = 0;
const UINT32 oddX = 1;
const UINT32 evenX = 0;
const BYTE* pSrc[3] =
{
pSrcRaw[0] + roi->top* srcStep[0] + roi->left,
pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2
};
BYTE* pDst[3] =
{
pDstRaw[0] + roi->top* dstStep[0] + roi->left,
pDstRaw[1] + roi->top* dstStep[1] + roi->left,
pDstRaw[2] + roi->top* dstStep[2] + roi->left
};
/* Y data is already here... */
/* B1 */
for (y = 0; y < nHeight; y++)
{
const BYTE* Ym = pSrc[0] + srcStep[0] * y;
BYTE* pY = pDst[0] + dstStep[0] * y;
memcpy(pY, Ym, nWidth);
}
/* The first half of U, V are already here part of this frame. */
/* B2 and B3 */
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (2 * y + evenY);
const UINT32 val2y1 = val2y + oddY;
const BYTE* Um = pSrc[1] + srcStep[1] * y;
const BYTE* Vm = pSrc[2] + srcStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
BYTE* pU1 = pDst[1] + dstStep[1] * val2y1;
BYTE* pV1 = pDst[2] + dstStep[2] * val2y1;
for (x = 0; x < halfWidth; x++)
{
const UINT32 val2x = 2 * x + evenX;
const UINT32 val2x1 = val2x + oddX;
pU[val2x] = Um[x];
pV[val2x] = Vm[x];
pU[val2x1] = Um[x];
pV[val2x1] = Vm[x];
pU1[val2x] = Um[x];
pV1[val2x] = Vm[x];
pU1[val2x1] = Um[x];
pV1[val2x1] = Vm[x];
}
}
return PRIMITIVES_SUCCESS;
}
static pstatus_t general_ChromaFilter(BYTE* pDst[3], const UINT32 dstStep[3],
const RECTANGLE_16* roi)
{
const UINT32 oddY = 1;
const UINT32 evenY = 0;
const UINT32 nWidth = roi->right - roi->left;
const UINT32 nHeight = roi->bottom - roi->top;
const UINT32 halfHeight = (nHeight + 1) / 2;
const UINT32 halfWidth = (nWidth + 1) / 2;
UINT32 x, y;
/* Filter */
for (y = roi->top; y < halfHeight + roi->top; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const UINT32 val2y1 = val2y + oddY;
BYTE* pU1 = pDst[1] + dstStep[1] * val2y1;
BYTE* pV1 = pDst[2] + dstStep[2] * val2y1;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
if (val2y1 > nHeight)
continue;
for (x = roi->left; x < halfWidth + roi->left; x++)
{
const UINT32 val2x = (x * 2);
const UINT32 val2x1 = val2x + 1;
const INT32 up = pU[val2x] * 4;
const INT32 vp = pV[val2x] * 4;
INT32 u2020;
INT32 v2020;
if (val2x1 > nWidth)
continue;
u2020 = up - pU[val2x1] - pU1[val2x] - pU1[val2x1];
v2020 = vp - pV[val2x1] - pV1[val2x] - pV1[val2x1];
pU[val2x] = CLIP(u2020);
pV[val2x] = CLIP(v2020);
}
}
return PRIMITIVES_SUCCESS;
}
static pstatus_t general_ChromaV1ToYUV444(const BYTE* pSrcRaw[3], const UINT32 srcStep[3],
BYTE* pDstRaw[3], const UINT32 dstStep[3],
const RECTANGLE_16* roi)
{
const UINT32 mod = 16;
UINT32 uY = 0;
UINT32 vY = 0;
UINT32 x, y;
UINT32 nWidth, nHeight;
UINT32 halfWidth, halfHeight;
const UINT32 nWidth = roi->right - roi->left;
const UINT32 nHeight = roi->bottom - roi->top;
const UINT32 halfWidth = (nWidth) / 2;
const UINT32 halfHeight = (nHeight) / 2;
const UINT32 oddY = 1;
const UINT32 evenY = 0;
const UINT32 oddX = 1;
const UINT32 evenX = 0;
/* The auxilary frame is aligned to multiples of 16x16.
* We need the padded height for B4 and B5 conversion. */
const UINT32 padHeigth = roi->height + 16 - roi->height % 16;
nWidth = roi->width;
nHeight = roi->height;
halfWidth = (nWidth) / 2;
halfHeight = (nHeight) / 2;
if (pMainSrc && pMainSrc[0] && pMainSrc[1] && pMainSrc[2])
const UINT32 padHeigth = nHeight + 16 - nHeight % 16;
const BYTE* pSrc[3] =
{
/* Y data is already here... */
/* B1 */
for (y = 0; y < nHeight; y++)
{
const BYTE* Ym = pMainSrc[0] + srcMainStep[0] * y;
BYTE* pY = pDst[0] + dstStep[0] * y;
memcpy(pY, Ym, nWidth);
}
/* The first half of U, V are already here part of this frame. */
/* B2 and B3 */
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (2 * y + evenY);
const UINT32 val2y1 = val2y + oddY;
const BYTE* Um = pMainSrc[1] + srcMainStep[1] * y;
const BYTE* Vm = pMainSrc[2] + srcMainStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
BYTE* pU1 = pDst[1] + dstStep[1] * val2y1;
BYTE* pV1 = pDst[2] + dstStep[2] * val2y1;
for (x = 0; x < halfWidth; x++)
{
const UINT32 val2x = 2 * x + evenX;
const UINT32 val2x1 = val2x + oddX;
pU[val2x] = Um[x];
pV[val2x] = Vm[x];
pU[val2x1] = Um[x];
pV[val2x1] = Vm[x];
pU1[val2x] = Um[x];
pV1[val2x] = Vm[x];
pU1[val2x1] = Um[x];
pV1[val2x1] = Vm[x];
}
}
}
if (!pAuxSrc || !pAuxSrc[0] || !pAuxSrc[1] || !pAuxSrc[2])
return PRIMITIVES_SUCCESS;
pSrcRaw[0] + roi->top* srcStep[0] + roi->left,
pSrcRaw[1] + roi->top / 2 * srcStep[1] + roi->left / 2,
pSrcRaw[2] + roi->top / 2 * srcStep[2] + roi->left / 2
};
BYTE* pDst[3] =
{
pDstRaw[0] + roi->top* dstStep[0] + roi->left,
pDstRaw[1] + roi->top* dstStep[1] + roi->left,
pDstRaw[2] + roi->top* dstStep[2] + roi->left
};
/* The second half of U and V is a bit more tricky... */
/* B4 and B5 */
for (y = 0; y < padHeigth; y++)
{
const BYTE* Ya = pAuxSrc[0] + srcAuxStep[0] * y;
const BYTE* Ya = pSrc[0] + srcStep[0] * y;
BYTE* pX;
if ((y) % mod < (mod + 1) / 2)
@ -143,8 +206,8 @@ static pstatus_t general_YUV420CombineToYUV444(
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const BYTE* Ua = pAuxSrc[1] + srcAuxStep[1] * y;
const BYTE* Va = pAuxSrc[2] + srcAuxStep[2] * y;
const BYTE* Ua = pSrc[1] + srcStep[1] * y;
const BYTE* Va = pSrc[2] + srcStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
@ -157,38 +220,102 @@ static pstatus_t general_YUV420CombineToYUV444(
}
/* Filter */
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const UINT32 val2y1 = val2y + oddY;
BYTE* pU1 = pDst[1] + dstStep[1] * val2y1;
BYTE* pV1 = pDst[2] + dstStep[2] * val2y1;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
return general_ChromaFilter(pDst, dstStep, roi);
}
if (val2y1 > nHeight)
continue;
static pstatus_t general_ChromaV2ToYUV444(const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst[3], const UINT32 dstStep[3],
const RECTANGLE_16* roi)
{
UINT32 x, y;
const UINT32 nWidth = roi->right - roi->left;
const UINT32 nHeight = roi->bottom - roi->top;
const UINT32 halfWidth = (nWidth + 1) / 2;
const UINT32 halfHeight = (nHeight + 1) / 2;
const UINT32 quaterWidth = (nWidth + 3) / 4;
const UINT32 quaterHeight = (nHeight + 3) / 4;
/* B4 and B5: odd UV values for width/2, height */
for (y = 0; y < nHeight; y++)
{
const UINT32 yTop = y + roi->top;
const BYTE* pYaU = pSrc[0] + srcStep[0] * yTop + roi->left / 2;
const BYTE* pYaV = pYaU + srcStep[0] / 2;
BYTE* pU = pDst[1] + dstStep[1] * yTop + roi->left;
BYTE* pV = pDst[2] + dstStep[2] * yTop + roi->left;
for (x = 0; x < halfWidth; x++)
{
const UINT32 val2x = (x * 2);
const UINT32 val2x1 = val2x + 1;
const INT32 up = pU[val2x] * 4;
const INT32 vp = pV[val2x] * 4;
INT32 u2020;
INT32 v2020;
if (val2x1 > nWidth)
continue;
u2020 = up - pU[val2x1] - pU1[val2x] - pU1[val2x1];
v2020 = vp - pV[val2x1] - pV1[val2x] - pV1[val2x1];
pU[val2x] = CLIP(u2020);
pV[val2x] = CLIP(v2020);
const UINT32 odd = 2 * x + 1;
pU[odd] = *pYaU++;
pV[odd] = *pYaV++;
}
}
return PRIMITIVES_SUCCESS;
/* B6 - B9 */
for (y = 0; y < halfHeight; y++)
{
const BYTE* pUaU = pSrc[1] + srcStep[1] * (y + roi->top / 2) + roi->left / 4;
const BYTE* pUaV = pUaU + srcStep[1] / 2;
const BYTE* pVaU = pSrc[2] + srcStep[2] * (y + roi->top / 2) + roi->left / 4;
const BYTE* pVaV = pVaU + srcStep[2] / 2;
BYTE* pU = pDst[1] + dstStep[1] * (2 * y + 1 + roi->top) + roi->left;
BYTE* pV = pDst[2] + dstStep[2] * (2 * y + 1 + roi->top) + roi->left;
for (x = 0; x < quaterWidth; x++)
{
pU[4 * x + 0] = *pUaU++;
pV[4 * x + 0] = *pUaV++;
pU[4 * x + 2] = *pVaU++;
pV[4 * x + 2] = *pVaV++;
}
}
return general_ChromaFilter(pDst, dstStep, roi);
}
/**
* @brief general_YUV420CombineToYUV444
*
* @param type Frame type to combine (AVC444_LUMA, AVC444_CHROMAv1 or AVC444_CHROMAv2)
* @param pSrc Pointer to the YUV420 source data of that frame
* @param srcStep Step width in the YUV420 source data
* @param pDst Pointer to YUV444 data
* @param dstStep Step width in YUV444 data
* @param roi Region of source to combine in destination.
*
* @return PRIMITIVES_SUCCESS on success, an error code otherwise.
*/
static pstatus_t general_YUV420CombineToYUV444(
avc444_frame_type type,
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pDst[3], const UINT32 dstStep[3],
const RECTANGLE_16* roi)
{
if (!pSrc || !pSrc[0] || !pSrc[1] || !pSrc[2])
return -1;
if (!pDst || !pDst[0] || !pDst[1] || !pDst[2])
return -1;
if (!roi)
return -1;
switch (type)
{
case AVC444_LUMA:
return general_LumaToYUV444(pSrc, srcStep, pDst, dstStep, roi);
case AVC444_CHROMAv1:
return general_ChromaV1ToYUV444(pSrc, srcStep, pDst, dstStep, roi);
case AVC444_CHROMAv2:
return general_ChromaV2ToYUV444(pSrc, srcStep, pDst, dstStep, roi);
default:
return -1;
}
}
static pstatus_t general_YUV444SplitToYUV420(
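Note: both chroma paths finish with general_ChromaFilter(), which inverts the encoder-side averaging: assuming the even/even chroma sample delivered with the main YUV420 view holds the filtered average of its 2x2 block, and the other three samples of that block have just been written from the auxiliary view, the original top-left value is recovered as 4 * average minus those three samples, clamped to 0..255. A small numeric illustration of that step with made-up sample values:

#include <stdio.h>

/* Same clamping the primitives apply to the filter result. */
static unsigned char clip(int v)
{
	if (v < 0)
		return 0;
	if (v > 255)
		return 255;
	return (unsigned char)v;
}

int main(void)
{
	/* Made-up full-resolution U samples of one 2x2 block. */
	const int a = 100, b = 104, c = 96, d = 108;
	/* The main (YUV420) view carries the averaged value for the block. */
	const int avg = (a + b + c + d) / 4; /* 102 */
	/* b, c and d were written to the other positions of the 4:4:4 plane by the
	 * chroma pass; the filter restores the top-left sample from the average. */
	const unsigned char restored = clip(4 * avg - b - c - d);
	printf("average stored in the 4:2:0 plane: %d\n", avg);
	printf("restored top-left sample: %u (original was %d)\n", (unsigned int)restored, a);
	return 0;
}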
@ -704,21 +831,19 @@ static pstatus_t general_RGBToYUV420_8u_P3AC4R(
}
static INLINE pstatus_t general_RGBToAVC444YUV_BGRX(
const BYTE* pSrc, UINT32 srcFormat, UINT32 srcStep,
BYTE* pDst1[3], const UINT32 dst1Step[3],
BYTE* pDst2[3], const UINT32 dst2Step[3],
const prim_size_t* roi)
const BYTE* pSrc, UINT32 srcFormat, UINT32 srcStep,
BYTE* pDst1[3], const UINT32 dst1Step[3],
BYTE* pDst2[3], const UINT32 dst2Step[3],
const prim_size_t* roi)
{
/**
* Note:
* Read information in function general_RGBToAVC444YUV_ANY below !
*/
UINT32 x, y, n, numRows, numCols;
BOOL evenRow = TRUE;
BYTE *b1, *b2, *b3, *b4, *b5, *b6, *b7;
BYTE* b1, *b2, *b3, *b4, *b5, *b6, *b7;
const BYTE* pMaxSrc = pSrc + (roi->height - 1) * srcStep;
numRows = (roi->height + 1) & ~1;
numCols = (roi->width + 1) & ~1;
@ -726,7 +851,6 @@ static INLINE pstatus_t general_RGBToAVC444YUV_BGRX(
{
const BYTE* src = y < roi->height ? pSrc + y * srcStep : pMaxSrc;
UINT32 i = y >> 1;
b1 = pDst1[0] + y * dst1Step[0];
if (evenRow)
@ -746,11 +870,9 @@ static INLINE pstatus_t general_RGBToAVC444YUV_BGRX(
for (x = 0; x < numCols; x += 2)
{
BYTE R, G, B, Y1, Y2, U1, U2, V1, V2;
B = src[0];
G = src[1];
R = src[2];
Y1 = Y2 = RGB2Y(R, G, B);
U1 = U2 = RGB2U(R, G, B);
V1 = V2 = RGB2V(R, G, B);
@ -791,10 +913,10 @@ static INLINE pstatus_t general_RGBToAVC444YUV_BGRX(
}
static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
const BYTE* pSrc, UINT32 srcFormat, UINT32 srcStep,
BYTE* pDst1[3], const UINT32 dst1Step[3],
BYTE* pDst2[3], const UINT32 dst2Step[3],
const prim_size_t* roi)
const BYTE* pSrc, UINT32 srcFormat, UINT32 srcStep,
BYTE* pDst1[3], const UINT32 dst1Step[3],
BYTE* pDst2[3], const UINT32 dst2Step[3],
const prim_size_t* roi)
{
/**
* Note: According to [MS-RDPEGFX 2.2.4.4 RFX_AVC420_BITMAP_STREAM] the
@ -803,7 +925,6 @@ static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
* Hence the passed destination YUV420/CHROMA420 buffers must have been
* allocated accordingly !!
*/
/**
* [MS-RDPEGFX 3.3.8.3.2 YUV420p Stream Combination] defines the following "Bx areas":
*
@ -853,13 +974,11 @@ static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
* }
*
*/
const UINT32 bpp = GetBytesPerPixel(srcFormat);
UINT32 x, y, n, numRows, numCols;
BOOL evenRow = TRUE;
BYTE *b1, *b2, *b3, *b4, *b5, *b6, *b7;
BYTE* b1, *b2, *b3, *b4, *b5, *b6, *b7;
const BYTE* pMaxSrc = pSrc + (roi->height - 1) * srcStep;
numRows = (roi->height + 1) & ~1;
numCols = (roi->width + 1) & ~1;
@ -867,7 +986,6 @@ static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
{
const BYTE* src = y < roi->height ? pSrc + y * srcStep : pMaxSrc;
UINT32 i = y >> 1;
b1 = pDst1[0] + y * dst1Step[0];
if (evenRow)
@ -888,10 +1006,8 @@ static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
{
BYTE R, G, B, Y1, Y2, U1, U2, V1, V2;
UINT32 color;
color = ReadColor(src, srcFormat);
SplitColor(color, srcFormat, &R, &G, &B, NULL, NULL);
Y1 = Y2 = RGB2Y(R, G, B);
U1 = U2 = RGB2U(R, G, B);
V1 = V2 = RGB2V(R, G, B);
@ -900,7 +1016,6 @@ static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
{
color = ReadColor(src + bpp, srcFormat);
SplitColor(color, srcFormat, &R, &G, &B, NULL, NULL);
Y2 = RGB2Y(R, G, B);
U2 = RGB2U(R, G, B);
V2 = RGB2V(R, G, B);
@ -932,10 +1047,10 @@ static INLINE pstatus_t general_RGBToAVC444YUV_ANY(
}
static INLINE pstatus_t general_RGBToAVC444YUV(
const BYTE* pSrc, UINT32 srcFormat, UINT32 srcStep,
BYTE* pDst1[3], const UINT32 dst1Step[3],
BYTE* pDst2[3], const UINT32 dst2Step[3],
const prim_size_t* roi)
const BYTE* pSrc, UINT32 srcFormat, UINT32 srcStep,
BYTE* pDst1[3], const UINT32 dst1Step[3],
BYTE* pDst2[3], const UINT32 dst2Step[3],
const prim_size_t* roi)
{
switch (srcFormat)
{