Implemented GFX AVC444 support.

Armin Novak 2016-03-02 15:16:49 +01:00
parent 3309bf8f9c
commit 5bc333c626
16 changed files with 1739 additions and 583 deletions


@ -217,10 +217,6 @@ if(CMAKE_COMPILER_IS_GNUCC)
if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_C_FLAGS_RELEASE "-DNDEBUG")
set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG")
if(NOT OPENBSD)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O2")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")
endif()
CHECK_C_COMPILER_FLAG (-Wno-builtin-macro-redefined Wno-builtin-macro-redefined)
if(Wno-builtin-macro-redefined)


@ -38,7 +38,8 @@
*
* @return 0 on success, otherwise a Win32 error code
*/
static UINT rdpgfx_read_h264_metablock(RDPGFX_PLUGIN* gfx, wStream* s, RDPGFX_H264_METABLOCK* meta)
static UINT rdpgfx_read_h264_metablock(RDPGFX_PLUGIN* gfx, wStream* s,
RDPGFX_H264_METABLOCK* meta)
{
UINT32 index;
RECTANGLE_16* regionRect;
@ -56,7 +57,7 @@ static UINT rdpgfx_read_h264_metablock(RDPGFX_PLUGIN* gfx, wStream* s, RDPGFX_H2
Stream_Read_UINT32(s, meta->numRegionRects); /* numRegionRects (4 bytes) */
if (Stream_GetRemainingLength(s) < (meta->numRegionRects * 8))
if (Stream_GetRemainingLength(s) < (meta->numRegionRects * sizeof(RECTANGLE_16)))
{
WLog_ERR(TAG, "not enough data!");
goto error_out;
@ -128,11 +129,11 @@ error_out:
*
* @return 0 on success, otherwise a Win32 error code
*/
static UINT rdpgfx_decode_h264(RDPGFX_PLUGIN* gfx, RDPGFX_SURFACE_COMMAND* cmd)
static UINT rdpgfx_decode_AVC420(RDPGFX_PLUGIN* gfx, RDPGFX_SURFACE_COMMAND* cmd)
{
UINT error;
wStream* s;
RDPGFX_H264_BITMAP_STREAM h264;
RDPGFX_AVC420_BITMAP_STREAM h264;
RdpgfxClientContext* context = (RdpgfxClientContext*) gfx->iface.pInterface;
s = Stream_New(cmd->data, cmd->length);
@ -169,6 +170,91 @@ static UINT rdpgfx_decode_h264(RDPGFX_PLUGIN* gfx, RDPGFX_SURFACE_COMMAND* cmd)
return error;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
static UINT rdpgfx_decode_AVC444(RDPGFX_PLUGIN* gfx, RDPGFX_SURFACE_COMMAND* cmd)
{
UINT error;
UINT32 tmp;
size_t pos1, pos2;
wStream* s;
RDPGFX_AVC444_BITMAP_STREAM h264;
RdpgfxClientContext* context = (RdpgfxClientContext*) gfx->iface.pInterface;
s = Stream_New(cmd->data, cmd->length);
if (!s)
{
WLog_ERR(TAG, "Stream_New failed!");
return CHANNEL_RC_NO_MEMORY;
}
if (Stream_GetRemainingLength(s) < 4)
return ERROR_INVALID_DATA;
Stream_Read_UINT32(s, tmp);
h264.cbAvc420EncodedBitstream1 = tmp & 0x3FFFFFFFUL;
h264.LC = (tmp >> 30UL) & 0x03UL;
if (h264.LC == 0x03)
return ERROR_INVALID_DATA;
pos1 = Stream_GetPosition(s);
if ((error = rdpgfx_read_h264_metablock(gfx, s, &(h264.bitstream[0].meta))))
{
WLog_ERR(TAG, "rdpgfx_read_h264_metablock failed with error %lu!", error);
return error;
}
pos2 = Stream_GetPosition(s);
h264.bitstream[0].data = Stream_Pointer(s);
if (h264.LC == 0)
{
tmp = h264.cbAvc420EncodedBitstream1 - pos2 + pos1;
if (Stream_GetRemainingLength(s) < tmp)
return ERROR_INVALID_DATA;
h264.bitstream[0].length = tmp;
Stream_Seek(s, tmp);
if ((error = rdpgfx_read_h264_metablock(gfx, s, &(h264.bitstream[1].meta))))
{
WLog_ERR(TAG, "rdpgfx_read_h264_metablock failed with error %lu!", error);
return error;
}
h264.bitstream[1].data = Stream_Pointer(s);
h264.bitstream[1].length = Stream_GetRemainingLength(s);
}
else
{
h264.bitstream[0].length = Stream_GetRemainingLength(s);
memset(&h264.bitstream[1], 0, sizeof(h264.bitstream[1]));
}
Stream_Free(s, FALSE);
cmd->extra = (void*) &h264;
if (context)
{
IFCALLRET(context->SurfaceCommand, error, context, cmd);
if (error)
WLog_ERR(TAG, "context->SurfaceCommand failed with error %lu", error);
}
free(h264.bitstream[0].meta.regionRects);
free(h264.bitstream[0].meta.quantQualityVals);
free(h264.bitstream[1].meta.regionRects);
free(h264.bitstream[1].meta.quantQualityVals);
return error;
}
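For reference, the 4-byte value read at the top of rdpgfx_decode_AVC444() (the variable tmp above) packs the size of the first embedded AVC420 bitstream into its low 30 bits and the LC code into its top 2 bits. A minimal standalone sketch of that unpacking with a made-up example value; illustrative only, not part of this commit:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example wire value: LC = 1 (luma-only stream), length = 0x1234 bytes. */
    uint32_t bitstreamInfo = (1u << 30) | 0x1234u;

    uint32_t cbAvc420EncodedBitstream1 = bitstreamInfo & 0x3FFFFFFFu;
    uint32_t LC = (bitstreamInfo >> 30) & 0x03u;

    /* LC == 3 is invalid and rejected by the parser above. */
    printf("length=%u LC=%u\n", (unsigned) cbAvc420EncodedBitstream1, (unsigned) LC);
    return 0;
}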
/**
* Function description
*
@ -181,10 +267,18 @@ UINT rdpgfx_decode(RDPGFX_PLUGIN* gfx, RDPGFX_SURFACE_COMMAND* cmd)
switch (cmd->codecId)
{
case RDPGFX_CODECID_H264:
if ((error = rdpgfx_decode_h264(gfx, cmd)))
case RDPGFX_CODECID_AVC420:
if ((error = rdpgfx_decode_AVC420(gfx, cmd)))
{
WLog_ERR(TAG, "rdpgfx_decode_h264 failed with error %lu", error);
WLog_ERR(TAG, "rdpgfx_decode_AVC420 failed with error %lu", error);
return error;
}
break;
case RDPGFX_CODECID_AVC444:
if ((error = rdpgfx_decode_AVC444(gfx, cmd)))
{
WLog_ERR(TAG, "rdpgfx_decode_AVC444 failed with error %lu", error);
return error;
}
break;


@ -77,8 +77,10 @@ const char* rdpgfx_get_codec_id_string(UINT16 codecId)
return "RDPGFX_CODECID_CLEARCODEC";
case RDPGFX_CODECID_PLANAR:
return "RDPGFX_CODECID_PLANAR";
case RDPGFX_CODECID_H264:
return "RDPGFX_CODECID_H264";
case RDPGFX_CODECID_AVC420:
return "RDPGFX_CODECID_AVC420";
case RDPGFX_CODECID_AVC444:
return "RDPGFX_CODECID_AVC444";
case RDPGFX_CODECID_ALPHA:
return "RDPGFX_CODECID_ALPHA";
case RDPGFX_CODECID_CAPROGRESSIVE:


@ -58,7 +58,7 @@ static UINT rdpgfx_send_caps_advertise_pdu(RDPGFX_CHANNEL_CALLBACK* callback)
RDPGFX_PLUGIN* gfx;
RDPGFX_HEADER header;
RDPGFX_CAPSET* capsSet;
RDPGFX_CAPSET capsSets[2];
RDPGFX_CAPSET capsSets[3];
RDPGFX_CAPS_ADVERTISE_PDU pdu;
gfx = (RDPGFX_PLUGIN*) callback->plugin;
@ -90,7 +90,17 @@ static UINT rdpgfx_send_caps_advertise_pdu(RDPGFX_CHANNEL_CALLBACK* callback)
capsSet->flags |= RDPGFX_CAPS_FLAG_SMALL_CACHE;
if (gfx->H264)
capsSet->flags |= RDPGFX_CAPS_FLAG_H264ENABLED;
capsSet->flags |= RDPGFX_CAPS_FLAG_AVC420ENABLED;
capsSet = &capsSets[pdu.capsSetCount++];
capsSet->version = RDPGFX_CAPVERSION_10;
capsSet->flags = 0;
if (gfx->SmallCache)
capsSet->flags |= RDPGFX_CAPS_FLAG_SMALL_CACHE;
if (!gfx->H264)
capsSet->flags |= RDPGFX_CAPS_FLAG_AVCDISABLED;
header.pduLength = RDPGFX_HEADER_SIZE + 2 + (pdu.capsSetCount * RDPGFX_CAPSET_SIZE);
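The client now advertises a third capability set (RDPGFX_CAPVERSION_10) next to the existing 8.0 and 8.1 sets, and the H.264 signalling is inverted between versions: 8.1 opts in via AVC420ENABLED, while 10.0 opts out via AVCDISABLED. A hedged sketch of how the receiving side might interpret the two flags; the helper functions are hypothetical, only the constants come from this commit:

/* Hypothetical helpers; RDPGFX_CAPVERSION_* and RDPGFX_CAPS_FLAG_* are defined
 * in the rdpgfx.h changes further down in this commit. */
static BOOL caps_allow_avc420(const RDPGFX_CAPSET* caps)
{
    if (caps->version == RDPGFX_CAPVERSION_81)
        return (caps->flags & RDPGFX_CAPS_FLAG_AVC420ENABLED) != 0;

    if (caps->version >= RDPGFX_CAPVERSION_10)
        return (caps->flags & RDPGFX_CAPS_FLAG_AVCDISABLED) == 0;

    return FALSE; /* version 8.0 has no H.264 support */
}

static BOOL caps_allow_avc444(const RDPGFX_CAPSET* caps)
{
    /* AVC444 only exists from capability version 10.0 on. */
    return (caps->version >= RDPGFX_CAPVERSION_10) &&
           ((caps->flags & RDPGFX_CAPS_FLAG_AVCDISABLED) == 0);
}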


@ -31,7 +31,7 @@
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_ResetGraphics(RdpgfxClientContext* context, RDPGFX_RESET_GRAPHICS_PDU* resetGraphics)
static UINT xf_ResetGraphics(RdpgfxClientContext* context, RDPGFX_RESET_GRAPHICS_PDU* resetGraphics)
{
int index;
UINT16 count;
@ -70,7 +70,7 @@ UINT xf_ResetGraphics(RdpgfxClientContext* context, RDPGFX_RESET_GRAPHICS_PDU* r
return CHANNEL_RC_OK;
}
int xf_OutputUpdate(xfContext* xfc, xfGfxSurface* surface)
static int xf_OutputUpdate(xfContext* xfc, xfGfxSurface* surface)
{
UINT16 width, height;
UINT32 surfaceX, surfaceY;
@ -132,7 +132,7 @@ int xf_OutputUpdate(xfContext* xfc, xfGfxSurface* surface)
return 1;
}
int xf_UpdateSurfaces(xfContext* xfc)
static int xf_UpdateSurfaces(xfContext* xfc)
{
UINT16 count;
int index;
@ -220,7 +220,7 @@ int xf_OutputExpose(xfContext* xfc, int x, int y, int width, int height)
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_StartFrame(RdpgfxClientContext* context, RDPGFX_START_FRAME_PDU* startFrame)
static UINT xf_StartFrame(RdpgfxClientContext* context, RDPGFX_START_FRAME_PDU* startFrame)
{
xfContext* xfc = (xfContext*) context->custom;
@ -234,7 +234,7 @@ UINT xf_StartFrame(RdpgfxClientContext* context, RDPGFX_START_FRAME_PDU* startFr
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_EndFrame(RdpgfxClientContext* context, RDPGFX_END_FRAME_PDU* endFrame)
static UINT xf_EndFrame(RdpgfxClientContext* context, RDPGFX_END_FRAME_PDU* endFrame)
{
xfContext* xfc = (xfContext*) context->custom;
@ -250,7 +250,7 @@ UINT xf_EndFrame(RdpgfxClientContext* context, RDPGFX_END_FRAME_PDU* endFrame)
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_Uncompressed(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_Uncompressed(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
xfGfxSurface* surface;
RECTANGLE_16 invalidRect;
@ -281,7 +281,7 @@ UINT xf_SurfaceCommand_Uncompressed(xfContext* xfc, RdpgfxClientContext* context
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_RemoteFX(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_RemoteFX(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int j;
UINT16 i;
@ -371,7 +371,7 @@ UINT xf_SurfaceCommand_RemoteFX(xfContext* xfc, RdpgfxClientContext* context, RD
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_ClearCodec(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_ClearCodec(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status;
BYTE* DstData = NULL;
@ -415,7 +415,7 @@ UINT xf_SurfaceCommand_ClearCodec(xfContext* xfc, RdpgfxClientContext* context,
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_Planar(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_Planar(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status;
BYTE* DstData = NULL;
@ -453,45 +453,46 @@ UINT xf_SurfaceCommand_Planar(xfContext* xfc, RdpgfxClientContext* context, RDPG
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_H264(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_AVC420(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status;
UINT32 i;
BYTE* DstData = NULL;
xfGfxSurface* surface;
RDPGFX_H264_METABLOCK* meta;
RDPGFX_H264_BITMAP_STREAM* bs;
RDPGFX_AVC420_BITMAP_STREAM* bs;
surface = (xfGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
return ERROR_INTERNAL_ERROR;
if (!freerdp_client_codecs_prepare(surface->codecs, FREERDP_CODEC_H264))
if (!freerdp_client_codecs_prepare(surface->codecs, FREERDP_CODEC_AVC420))
return ERROR_INTERNAL_ERROR;
bs = (RDPGFX_H264_BITMAP_STREAM*) cmd->extra;
bs = (RDPGFX_AVC420_BITMAP_STREAM*) cmd->extra;
if (!bs)
return ERROR_INTERNAL_ERROR;
meta = &(bs->meta);
DstData = surface->data;
status = h264_decompress(surface->codecs->h264, bs->data, bs->length, &DstData,
surface->format, surface->scanline , surface->width,
surface->height, meta->regionRects, meta->numRegionRects);
status = avc420_decompress(surface->codecs->h264, bs->data, bs->length,
surface->data, surface->format,
surface->scanline , surface->width,
surface->height, meta->regionRects,
meta->numRegionRects);
if (status < 0)
{
WLog_WARN(TAG, "h264_decompress failure: %d, ignoring update.", status);
WLog_WARN(TAG, "avc420_decompress failure: %d, ignoring update.", status);
return CHANNEL_RC_OK;
}
for (i = 0; i < meta->numRegionRects; i++)
{
region16_union_rect(&surface->invalidRegion, &surface->invalidRegion, (RECTANGLE_16*) &(meta->regionRects[i]));
region16_union_rect(&surface->invalidRegion,
&surface->invalidRegion,
&(meta->regionRects[i]));
}
if (!xfc->inGfxFrame)
@ -505,7 +506,77 @@ UINT xf_SurfaceCommand_H264(xfContext* xfc, RdpgfxClientContext* context, RDPGFX
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_Alpha(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_AVC444(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status;
UINT32 i;
xfGfxSurface* surface;
RDPGFX_AVC444_BITMAP_STREAM* bs;
RDPGFX_AVC420_BITMAP_STREAM* avc1;
RDPGFX_AVC420_BITMAP_STREAM* avc2;
RDPGFX_H264_METABLOCK* meta1;
RDPGFX_H264_METABLOCK* meta2;
RECTANGLE_16* regionRects = NULL;
surface = (xfGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
return ERROR_INTERNAL_ERROR;
if (!freerdp_client_codecs_prepare(surface->codecs, FREERDP_CODEC_AVC444))
return ERROR_INTERNAL_ERROR;
bs = (RDPGFX_AVC444_BITMAP_STREAM*) cmd->extra;
if (!bs)
return ERROR_INTERNAL_ERROR;
avc1 = &bs->bitstream[0];
avc2 = &bs->bitstream[1];
meta1 = &avc1->meta;
meta2 = &avc2->meta;
status = avc444_decompress(surface->codecs->h264, bs->LC,
meta1->regionRects, meta1->numRegionRects,
avc1->data, avc1->length,
meta2->regionRects, meta2->numRegionRects,
avc2->data, avc2->length, surface->data,
surface->format, surface->scanline,
surface->width, surface->height);
if (status < 0)
{
WLog_WARN(TAG, "avc444_decompress failure: %d, ignoring update.", status);
return CHANNEL_RC_OK;
}
for (i = 0; i < meta1->numRegionRects; i++)
{
region16_union_rect(&surface->invalidRegion,
&surface->invalidRegion,
&(meta1->regionRects[i]));
}
for (i = 0; i < meta2->numRegionRects; i++)
{
region16_union_rect(&surface->invalidRegion,
&surface->invalidRegion,
&(meta2->regionRects[i]));
}
if (!xfc->inGfxFrame)
xf_UpdateSurfaces(xfc);
free (regionRects);
return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
static UINT xf_SurfaceCommand_Alpha(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status = 0;
xfGfxSurface* surface;
@ -543,7 +614,7 @@ UINT xf_SurfaceCommand_Alpha(xfContext* xfc, RdpgfxClientContext* context, RDPGF
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand_Progressive(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand_Progressive(xfContext* xfc, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int i, j;
int status;
@ -645,7 +716,7 @@ UINT xf_SurfaceCommand_Progressive(xfContext* xfc, RdpgfxClientContext* context,
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceCommand(RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT xf_SurfaceCommand(RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
UINT status = CHANNEL_RC_OK;
xfContext* xfc = (xfContext*) context->custom;
@ -668,8 +739,12 @@ UINT xf_SurfaceCommand(RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd
status = xf_SurfaceCommand_Planar(xfc, context, cmd);
break;
case RDPGFX_CODECID_H264:
status = xf_SurfaceCommand_H264(xfc, context, cmd);
case RDPGFX_CODECID_AVC420:
status = xf_SurfaceCommand_AVC420(xfc, context, cmd);
break;
case RDPGFX_CODECID_AVC444:
status = xf_SurfaceCommand_AVC444(xfc, context, cmd);
break;
case RDPGFX_CODECID_ALPHA:
@ -697,7 +772,7 @@ UINT xf_SurfaceCommand(RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_DeleteEncodingContext(RdpgfxClientContext* context, RDPGFX_DELETE_ENCODING_CONTEXT_PDU* deleteEncodingContext)
static UINT xf_DeleteEncodingContext(RdpgfxClientContext* context, RDPGFX_DELETE_ENCODING_CONTEXT_PDU* deleteEncodingContext)
{
return CHANNEL_RC_OK;
}
@ -707,7 +782,7 @@ UINT xf_DeleteEncodingContext(RdpgfxClientContext* context, RDPGFX_DELETE_ENCODI
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_CreateSurface(RdpgfxClientContext* context, RDPGFX_CREATE_SURFACE_PDU* createSurface)
static UINT xf_CreateSurface(RdpgfxClientContext* context, RDPGFX_CREATE_SURFACE_PDU* createSurface)
{
size_t size;
UINT32 bytesPerPixel;
@ -795,7 +870,7 @@ UINT xf_CreateSurface(RdpgfxClientContext* context, RDPGFX_CREATE_SURFACE_PDU* c
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_DeleteSurface(RdpgfxClientContext* context, RDPGFX_DELETE_SURFACE_PDU* deleteSurface)
static UINT xf_DeleteSurface(RdpgfxClientContext* context, RDPGFX_DELETE_SURFACE_PDU* deleteSurface)
{
rdpCodecs* codecs = NULL;
xfGfxSurface* surface = NULL;
@ -827,7 +902,7 @@ UINT xf_DeleteSurface(RdpgfxClientContext* context, RDPGFX_DELETE_SURFACE_PDU* d
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SolidFill(RdpgfxClientContext* context, RDPGFX_SOLID_FILL_PDU* solidFill)
static UINT xf_SolidFill(RdpgfxClientContext* context, RDPGFX_SOLID_FILL_PDU* solidFill)
{
UINT16 index;
UINT32 color;
@ -879,7 +954,7 @@ UINT xf_SolidFill(RdpgfxClientContext* context, RDPGFX_SOLID_FILL_PDU* solidFill
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceToSurface(RdpgfxClientContext* context, RDPGFX_SURFACE_TO_SURFACE_PDU* surfaceToSurface)
static UINT xf_SurfaceToSurface(RdpgfxClientContext* context, RDPGFX_SURFACE_TO_SURFACE_PDU* surfaceToSurface)
{
UINT16 index;
BOOL sameSurface;
@ -944,7 +1019,7 @@ UINT xf_SurfaceToSurface(RdpgfxClientContext* context, RDPGFX_SURFACE_TO_SURFACE
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_SurfaceToCache(RdpgfxClientContext* context, RDPGFX_SURFACE_TO_CACHE_PDU* surfaceToCache)
static UINT xf_SurfaceToCache(RdpgfxClientContext* context, RDPGFX_SURFACE_TO_CACHE_PDU* surfaceToCache)
{
size_t size;
RECTANGLE_16* rect;
@ -997,7 +1072,7 @@ UINT xf_SurfaceToCache(RdpgfxClientContext* context, RDPGFX_SURFACE_TO_CACHE_PDU
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_CacheToSurface(RdpgfxClientContext* context, RDPGFX_CACHE_TO_SURFACE_PDU* cacheToSurface)
static UINT xf_CacheToSurface(RdpgfxClientContext* context, RDPGFX_CACHE_TO_SURFACE_PDU* cacheToSurface)
{
UINT16 index;
RDPGFX_POINT16* destPt;
@ -1039,7 +1114,7 @@ UINT xf_CacheToSurface(RdpgfxClientContext* context, RDPGFX_CACHE_TO_SURFACE_PDU
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_CacheImportReply(RdpgfxClientContext* context, RDPGFX_CACHE_IMPORT_REPLY_PDU* cacheImportReply)
static UINT xf_CacheImportReply(RdpgfxClientContext* context, RDPGFX_CACHE_IMPORT_REPLY_PDU* cacheImportReply)
{
return CHANNEL_RC_OK;
}
@ -1049,7 +1124,7 @@ UINT xf_CacheImportReply(RdpgfxClientContext* context, RDPGFX_CACHE_IMPORT_REPLY
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_EvictCacheEntry(RdpgfxClientContext* context, RDPGFX_EVICT_CACHE_ENTRY_PDU* evictCacheEntry)
static UINT xf_EvictCacheEntry(RdpgfxClientContext* context, RDPGFX_EVICT_CACHE_ENTRY_PDU* evictCacheEntry)
{
xfGfxCacheEntry* cacheEntry;
@ -1071,7 +1146,7 @@ UINT xf_EvictCacheEntry(RdpgfxClientContext* context, RDPGFX_EVICT_CACHE_ENTRY_P
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_MapSurfaceToOutput(RdpgfxClientContext* context, RDPGFX_MAP_SURFACE_TO_OUTPUT_PDU* surfaceToOutput)
static UINT xf_MapSurfaceToOutput(RdpgfxClientContext* context, RDPGFX_MAP_SURFACE_TO_OUTPUT_PDU* surfaceToOutput)
{
xfGfxSurface* surface;
@ -1094,7 +1169,7 @@ UINT xf_MapSurfaceToOutput(RdpgfxClientContext* context, RDPGFX_MAP_SURFACE_TO_O
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT xf_MapSurfaceToWindow(RdpgfxClientContext* context, RDPGFX_MAP_SURFACE_TO_WINDOW_PDU* surfaceToWindow)
static UINT xf_MapSurfaceToWindow(RdpgfxClientContext* context, RDPGFX_MAP_SURFACE_TO_WINDOW_PDU* surfaceToWindow)
{
return CHANNEL_RC_OK;
}


@ -90,6 +90,7 @@ typedef struct _RDPGFX_HEADER RDPGFX_HEADER;
#define RDPGFX_CAPVERSION_8 0x00080004
#define RDPGFX_CAPVERSION_81 0x00080105
#define RDPGFX_CAPVERSION_10 0x000A0002
#define RDPGFX_CAPSET_SIZE 12
@ -102,7 +103,8 @@ typedef struct _RDPGFX_CAPSET RDPGFX_CAPSET;
#define RDPGFX_CAPS_FLAG_THINCLIENT 0x00000001 /* 8.0+ */
#define RDPGFX_CAPS_FLAG_SMALL_CACHE 0x00000002 /* 8.0+ */
#define RDPGFX_CAPS_FLAG_H264ENABLED 0x00000010 /* 8.1+ */
#define RDPGFX_CAPS_FLAG_AVC420ENABLED 0x00000010 /* 8.1+ */
#define RDPGFX_CAPS_FLAG_AVCDISABLED 0x00000020 /* 10.0+ */
struct _RDPGFX_CAPSET_VERSION8
{
@ -120,6 +122,14 @@ struct _RDPGFX_CAPSET_VERSION81
};
typedef struct _RDPGFX_CAPSET_VERSION81 RDPGFX_CAPSET_VERSION81;
struct _RDPGFX_CAPSET_VERSION10
{
UINT32 version;
UINT32 capsDataLength;
UINT32 flags;
};
typedef struct _RDPGFX_CAPSET_VERSION10 RDPGFX_CAPSET_VERSION10;
/**
* Graphics Messages
*/
@ -128,8 +138,9 @@ typedef struct _RDPGFX_CAPSET_VERSION81 RDPGFX_CAPSET_VERSION81;
#define RDPGFX_CODECID_CAVIDEO 0x0003
#define RDPGFX_CODECID_CLEARCODEC 0x0008
#define RDPGFX_CODECID_PLANAR 0x000A
#define RDPGFX_CODECID_H264 0x000B
#define RDPGFX_CODECID_AVC420 0x000B
#define RDPGFX_CODECID_ALPHA 0x000C
#define RDPGFX_CODECID_AVC444 0x000E
struct _RDPGFX_WIRE_TO_SURFACE_PDU_1
{
@ -345,13 +356,22 @@ struct _RDPGFX_H264_METABLOCK
};
typedef struct _RDPGFX_H264_METABLOCK RDPGFX_H264_METABLOCK;
struct _RDPGFX_H264_BITMAP_STREAM
struct _RDPGFX_AVC420_BITMAP_STREAM
{
RDPGFX_H264_METABLOCK meta;
UINT32 length;
BYTE* data;
};
typedef struct _RDPGFX_H264_BITMAP_STREAM RDPGFX_H264_BITMAP_STREAM;
typedef struct _RDPGFX_AVC420_BITMAP_STREAM RDPGFX_AVC420_BITMAP_STREAM;
struct _RDPGFX_AVC444_BITMAP_STREAM
{
UINT32 cbAvc420EncodedBitstream1;
BYTE LC;
RDPGFX_AVC420_BITMAP_STREAM bitstream[2];
};
typedef struct _RDPGFX_AVC444_BITMAP_STREAM RDPGFX_AVC444_BITMAP_STREAM;
#endif /* FREERDP_CHANNEL_RDPGFX_H */
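A short note on the new RDPGFX_AVC444_BITMAP_STREAM type: the LC field decides which of the two embedded AVC420 streams actually carry data, matching the switch statements in rdpgfx_decode_AVC444() and avc444_decompress(). A small hypothetical helper summarizing that convention (not part of this commit):

static UINT32 avc444_stream_count(const RDPGFX_AVC444_BITMAP_STREAM* bs)
{
    switch (bs->LC)
    {
        case 0:  return 2; /* bitstream[0] = YUV420, bitstream[1] = extra chroma */
        case 1:  return 1; /* bitstream[0] = YUV420 only */
        case 2:  return 1; /* bitstream[0] = extra chroma only */
        default: return 0; /* LC == 3 is invalid */
    }
}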


@ -29,8 +29,10 @@ typedef struct _H264_CONTEXT H264_CONTEXT;
typedef BOOL (*pfnH264SubsystemInit)(H264_CONTEXT* h264);
typedef void (*pfnH264SubsystemUninit)(H264_CONTEXT* h264);
typedef int (*pfnH264SubsystemDecompress)(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize);
typedef int (*pfnH264SubsystemCompress)(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize);
typedef int (*pfnH264SubsystemDecompress)(H264_CONTEXT* h264, BYTE* pSrcData,
UINT32 SrcSize, UINT32 plane);
typedef int (*pfnH264SubsystemCompress)(H264_CONTEXT* h264, BYTE** ppDstData,
UINT32* pDstSize, UINT32 plane);
struct _H264_CONTEXT_SUBSYSTEM
{
@ -62,9 +64,14 @@ struct _H264_CONTEXT
UINT32 QP;
UINT32 NumberOfThreads;
int iStride[3];
BYTE* pYUVData[3];
UINT32 iStride[2][3];
BYTE* pYUVData[2][3];
UINT32 iYUV444Size[3];
UINT32 iYUV444Stride[3];
BYTE* pYUV444Data[3];
UINT32 numSystemData;
void* pSystemData;
H264_CONTEXT_SUBSYSTEM* subsystem;
};
@ -73,12 +80,30 @@ struct _H264_CONTEXT
extern "C" {
#endif
FREERDP_API int h264_compress(H264_CONTEXT* h264, BYTE* pSrcData, DWORD SrcFormat,
int nSrcStep, int nSrcWidth, int nSrcHeight, BYTE** ppDstData, UINT32* pDstSize);
FREERDP_API INT32 avc420_compress(H264_CONTEXT* h264, BYTE* pSrcData,
DWORD SrcFormat, UINT32 nSrcStep,
UINT32 nSrcWidth, UINT32 nSrcHeight,
BYTE** ppDstData, UINT32* pDstSize);
FREERDP_API int h264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize,
BYTE** ppDstData, DWORD DstFormat, int nDstStep, int nDstWidth, int nDstHeight,
RECTANGLE_16* regionRects, int numRegionRect);
FREERDP_API INT32 avc420_decompress(H264_CONTEXT* h264, BYTE* pSrcData,
UINT32 SrcSize, BYTE* pDstData,
DWORD DstFormat, UINT32 nDstStep,
UINT32 nDstWidth, UINT32 nDstHeight,
RECTANGLE_16* regionRects, UINT32 numRegionRect);
FREERDP_API INT32 avc444_compress(H264_CONTEXT* h264, BYTE* pSrcData, DWORD SrcFormat,
UINT32 nSrcStep, UINT32 nSrcWidth, UINT32 nSrcHeight,
BYTE* op,
BYTE** pDstData, UINT32* pDstSize,
BYTE** pAuxDstData, UINT32* pAuxDstSize);
FREERDP_API INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
RECTANGLE_16* regionRects, UINT32 numRegionRect,
BYTE* pSrcData, UINT32 SrcSize,
RECTANGLE_16* auxRegionRects, UINT32 numAuxRegionRect,
BYTE* pAuxSrcData, UINT32 AuxSrcSize,
BYTE* pDstData, DWORD DstFormat,
UINT32 nDstStep, UINT32 nDstWidth, UINT32 nDstHeight);
FREERDP_API BOOL h264_context_reset(H264_CONTEXT* h264, UINT32 width, UINT32 height);
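A minimal usage sketch for the renamed decoder entry point, mirroring the callers added in the xfreerdp and gdi code of this commit; decode_one_frame is hypothetical, the destination is assumed to be an XRGB32 framebuffer, and error handling is trimmed:

/* Decode one AVC420 bitstream into a 32bpp surface, repainting the whole area. */
static INT32 decode_one_frame(H264_CONTEXT* h264, BYTE* bitstream, UINT32 size,
                              BYTE* frame, UINT32 stride,
                              UINT32 width, UINT32 height)
{
    RECTANGLE_16 rect;

    rect.left = 0;
    rect.top = 0;
    rect.right = (UINT16) width;
    rect.bottom = (UINT16) height;

    return avc420_decompress(h264, bitstream, size, frame, PIXEL_FORMAT_XRGB32,
                             stride, width, height, &rect, 1);
}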


@ -39,7 +39,8 @@
#define FREERDP_CODEC_CLEARCODEC 0x00000010
#define FREERDP_CODEC_ALPHACODEC 0x00000020
#define FREERDP_CODEC_PROGRESSIVE 0x00000040
#define FREERDP_CODEC_H264 0x00000080
#define FREERDP_CODEC_AVC420 0x00000080
#define FREERDP_CODEC_AVC444 0x00000100
#define FREERDP_CODEC_ALL 0xFFFFFFFF
struct rdp_codecs


@ -189,6 +189,11 @@ typedef pstatus_t (*__YUV420CombineToYUV444_t)(
const BYTE* pAuxSrc[3], const UINT32 srcAuxStep[3],
BYTE* pDst[3], const UINT32 dstStep[3],
const prim_size_t* roi);
typedef pstatus_t (*__YUV444SplitToYUV420_t)(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pMainDst[3], const UINT32 dstMainStep[3],
BYTE* pAuxDst[3], const UINT32 srcAuxStep[3],
const prim_size_t* roi);
typedef pstatus_t (*__andC_32u_t)(
const UINT32 *pSrc,
UINT32 val,
@ -239,6 +244,7 @@ typedef struct
__RGBToYUV420_8u_P3AC4R_t RGBToYUV420_8u_P3AC4R;
__RGBToYUV444_8u_P3AC4R_t RGBToYUV444_8u_P3AC4R;
__YUV420CombineToYUV444_t YUV420CombineToYUV444;
__YUV444SplitToYUV420_t YUV444SplitToYUV420;
__YUV420ToRGB_8u_P3AC4R_t YUV444ToRGB_8u_P3AC4R;
} primitives_t;


@ -234,7 +234,7 @@ endif()
if(WITH_SSE2)
if(CMAKE_COMPILER_IS_GNUCC)
set(OPTIMIZATION "${OPTIMIZATION} -msse2 -mssse3 -O2 -Wdeclaration-after-statement")
set(OPTIMIZATION "${OPTIMIZATION} -msse2 -mssse3 -Wdeclaration-after-statement")
endif()
if(MSVC)
@ -251,12 +251,6 @@ if(DEFINED OPTIMIZATION)
set_source_files_properties(${PRIMITIVES_OPT_SRCS} PROPERTIES COMPILE_FLAGS ${OPTIMIZATION})
endif()
# always compile with optimization
if(CMAKE_COMPILER_IS_GNUCC)
set_source_files_properties(${PRIMITIVES_SRCS} PROPERTIES COMPILE_FLAGS "-O2")
endif()
set(PRIMITIVES_SRCS ${PRIMITIVES_SRCS} ${PRIMITIVES_OPT_SRCS})
freerdp_module_add(${PRIMITIVES_SRCS})


@ -35,7 +35,7 @@
* Dummy subsystem
*/
static int dummy_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
static int dummy_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize, UINT32 plane)
{
return -1;
}
@ -231,7 +231,7 @@ error:
return hr;
}
static int mf_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
static int mf_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize, UINT32 plane)
{
HRESULT hr;
BYTE* pbBuffer = NULL;
@ -243,6 +243,8 @@ static int mf_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
IMFMediaBuffer* outputBuffer = NULL;
MFT_OUTPUT_DATA_BUFFER outputDataBuffer;
H264_CONTEXT_MF* sys = (H264_CONTEXT_MF*) h264->pSystemData;
INT32* iStride = h264->iStride[plane];
BYTE** pYUVData = h264->pYUVData[plane];
hr = sys->MFCreateMemoryBuffer(SrcSize, &inputBuffer);
@ -321,7 +323,7 @@ static int mf_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
if (hr == MF_E_TRANSFORM_STREAM_CHANGE)
{
BYTE* pYUVData;
BYTE* pTmpYUVData;
int offset = 0;
UINT32 stride = 0;
UINT64 frameSize = 0;
@ -376,20 +378,20 @@ static int mf_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
goto error;
}
h264->iStride[0] = stride;
h264->iStride[1] = stride / 2;
h264->iStride[2] = stride / 2;
iStride[0] = stride;
iStride[1] = stride / 2;
iStride[2] = stride / 2;
pYUVData = (BYTE*) calloc(1, 2 * stride * sys->frameHeight);
pTmpYUVData = (BYTE*) calloc(1, 2 * stride * sys->frameHeight);
h264->pYUVData[0] = &pYUVData[offset];
pYUVData += h264->iStride[0] * sys->frameHeight;
pYUVData[0] = &pTmpYUVData[offset];
pTmpYUVData += iStride[0] * sys->frameHeight;
h264->pYUVData[1] = &pYUVData[offset];
pYUVData += h264->iStride[1] * (sys->frameHeight / 2);
pYUVData[1] = &pTmpYUVData[offset];
pTmpYUVData += iStride[1] * (sys->frameHeight / 2);
h264->pYUVData[2] = &pYUVData[offset];
pYUVData += h264->iStride[2] * (sys->frameHeight / 2);
pYUVData[2] = &pTmpYUVData[offset];
pTmpYUVData += iStride[2] * (sys->frameHeight / 2);
h264->width = sys->frameWidth;
h264->height = sys->frameHeight;
@ -435,14 +437,14 @@ static int mf_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
goto error;
}
CopyMemory(h264->pYUVData[0], &buffer[offset], h264->iStride[0] * sys->frameHeight);
offset += h264->iStride[0] * sys->frameHeight;
CopyMemory(pYUVData[0], &buffer[offset], iStride[0] * sys->frameHeight);
offset += iStride[0] * sys->frameHeight;
CopyMemory(h264->pYUVData[1], &buffer[offset], h264->iStride[1] * (sys->frameHeight / 2));
offset += h264->iStride[1] * (sys->frameHeight / 2);
CopyMemory(pYUVData[1], &buffer[offset], iStride[1] * (sys->frameHeight / 2));
offset += iStride[1] * (sys->frameHeight / 2);
CopyMemory(h264->pYUVData[2], &buffer[offset], h264->iStride[2] * (sys->frameHeight / 2));
offset += h264->iStride[2] * (sys->frameHeight / 2);
CopyMemory(pYUVData[2], &buffer[offset], iStride[2] * (sys->frameHeight / 2));
offset += iStride[2] * (sys->frameHeight / 2);
hr = outputBuffer->lpVtbl->Unlock(outputBuffer);
@ -464,7 +466,7 @@ error:
return -1;
}
static int mf_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize)
static int mf_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize, UINT32 plane)
{
H264_CONTEXT_MF* sys = (H264_CONTEXT_MF*) h264->pSystemData;
@ -473,6 +475,7 @@ static int mf_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize)
static void mf_uninit(H264_CONTEXT* h264)
{
UINT32 x;
H264_CONTEXT_MF* sys = (H264_CONTEXT_MF*) h264->pSystemData;
if (sys)
@ -513,9 +516,11 @@ static void mf_uninit(H264_CONTEXT* h264)
sys->mfplat = NULL;
}
free(h264->pYUVData[0]);
h264->pYUVData[0] = h264->pYUVData[1] = h264->pYUVData[2] = NULL;
h264->iStride[0] = h264->iStride[1] = h264->iStride[2] = 0;
for (x=0; x<sizeof(h264->pYUVData) / sizeof(h264->pYUVData[0]); x++)
free (h264->pYUVData[x][0]);
memset(h264->pYUVData, 0, sizeof(h264->pYUVData));
memset(h264->iStride, 0, sizeof(h264->iStride));
sys->MFShutdown();
@ -708,14 +713,14 @@ struct _H264_CONTEXT_X264
};
typedef struct _H264_CONTEXT_X264 H264_CONTEXT_X264;
static int x264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
static int x264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize, UINT32 plane)
{
//H264_CONTEXT_X264* sys = (H264_CONTEXT_X264*) h264->pSystemData;
return 1;
}
static int x264_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize)
static int x264_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize, UINT32 plane)
{
//H264_CONTEXT_X264* sys = (H264_CONTEXT_X264*) h264->pSystemData;
@ -737,7 +742,9 @@ static BOOL x264_init(H264_CONTEXT* h264)
{
H264_CONTEXT_X264* sys;
sys = (H264_CONTEXT_X264*) calloc(1, sizeof(H264_CONTEXT_X264));
h264->numSystemData = 1;
sys = (H264_CONTEXT_X264*) calloc(h264->numSystemData,
sizeof(H264_CONTEXT_X264));
if (!sys)
{
@ -815,12 +822,16 @@ static void openh264_trace_callback(H264_CONTEXT* h264, int level, const char* m
WLog_INFO(TAG, "%d - %s", level, message);
}
static int openh264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
static int openh264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize, UINT32 plane)
{
DECODING_STATE state;
SBufferInfo sBufferInfo;
SSysMEMBuffer* pSystemBuffer;
H264_CONTEXT_OPENH264* sys = (H264_CONTEXT_OPENH264*) h264->pSystemData;
UINT32* iStride = h264->iStride[plane];
BYTE** pYUVData = h264->pYUVData[plane];
sys = &((H264_CONTEXT_OPENH264*) h264->pSystemData)[0];
if (!sys->pDecoder)
return -2001;
@ -829,25 +840,25 @@ static int openh264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSiz
* Decompress the image. The RDP host only seems to send I420 format.
*/
h264->pYUVData[0] = NULL;
h264->pYUVData[1] = NULL;
h264->pYUVData[2] = NULL;
pYUVData[0] = NULL;
pYUVData[1] = NULL;
pYUVData[2] = NULL;
ZeroMemory(&sBufferInfo, sizeof(sBufferInfo));
state = (*sys->pDecoder)->DecodeFrame2(sys->pDecoder, pSrcData, SrcSize, h264->pYUVData, &sBufferInfo);
state = (*sys->pDecoder)->DecodeFrame2(sys->pDecoder, pSrcData, SrcSize, pYUVData, &sBufferInfo);
if (sBufferInfo.iBufferStatus != 1)
{
if (state == dsNoParamSets)
{
/* this happens on the first frame due to missing parameter sets */
state = (*sys->pDecoder)->DecodeFrame2(sys->pDecoder, NULL, 0, h264->pYUVData, &sBufferInfo);
state = (*sys->pDecoder)->DecodeFrame2(sys->pDecoder, NULL, 0, pYUVData, &sBufferInfo);
}
else if (state == dsErrorFree)
{
/* call DecodeFrame2 again to decode without delay */
state = (*sys->pDecoder)->DecodeFrame2(sys->pDecoder, NULL, 0, h264->pYUVData, &sBufferInfo);
state = (*sys->pDecoder)->DecodeFrame2(sys->pDecoder, NULL, 0, pYUVData, &sBufferInfo);
}
else
{
@ -856,6 +867,12 @@ static int openh264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSiz
}
}
pSystemBuffer = &sBufferInfo.UsrData.sSystemBuffer;
iStride[0] = pSystemBuffer->iStride[0];
iStride[1] = pSystemBuffer->iStride[1];
iStride[2] = pSystemBuffer->iStride[1];
if (sBufferInfo.iBufferStatus != 1)
{
WLog_WARN(TAG, "DecodeFrame2 iBufferStatus: %d", sBufferInfo.iBufferStatus);
@ -868,11 +885,9 @@ static int openh264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSiz
return -2003;
}
pSystemBuffer = &sBufferInfo.UsrData.sSystemBuffer;
#if 0
WLog_INFO(TAG, "h264_decompress: state=%u, pYUVData=[%p,%p,%p], bufferStatus=%d, width=%d, height=%d, format=%d, stride=[%d,%d]",
state, h264->pYUVData[0], h264->pYUVData[1], h264->pYUVData[2], sBufferInfo.iBufferStatus,
state, pYUVData[0], pYUVData[1], pYUVData[2], sBufferInfo.iBufferStatus,
pSystemBuffer->iWidth, pSystemBuffer->iHeight, pSystemBuffer->iFormat,
pSystemBuffer->iStride[0], pSystemBuffer->iStride[1]);
#endif
@ -880,32 +895,29 @@ static int openh264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSiz
if (pSystemBuffer->iFormat != videoFormatI420)
return -2004;
if (!h264->pYUVData[0] || !h264->pYUVData[1] || !h264->pYUVData[2])
if (!pYUVData[0] || !pYUVData[1] || !pYUVData[2])
return -2005;
h264->iStride[0] = pSystemBuffer->iStride[0];
h264->iStride[1] = pSystemBuffer->iStride[1];
h264->iStride[2] = pSystemBuffer->iStride[1];
h264->width = pSystemBuffer->iWidth;
h264->height = pSystemBuffer->iHeight;
return 1;
}
static int openh264_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize)
static int openh264_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstSize, UINT32 plane)
{
int i, j;
int status;
SFrameBSInfo info;
SSourcePicture pic;
SBitrateInfo bitrate;
H264_CONTEXT_OPENH264* sys = (H264_CONTEXT_OPENH264*) h264->pSystemData;
H264_CONTEXT_OPENH264* sys;
BYTE** pYUVData = h264->pYUVData[plane];
UINT32* iStride = h264->iStride[plane];
sys = &((H264_CONTEXT_OPENH264*) h264->pSystemData)[0];
if (!sys->pEncoder)
return -1;
if (!h264->pYUVData[0] || !h264->pYUVData[1] || !h264->pYUVData[2])
if (!pYUVData[0] || !pYUVData[1] || !pYUVData[2])
return -1;
if ((sys->EncParamExt.iPicWidth != h264->width) || (sys->EncParamExt.iPicHeight != h264->height))
@ -1027,12 +1039,12 @@ static int openh264_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstS
pic.iPicWidth = h264->width;
pic.iPicHeight = h264->height;
pic.iColorFormat = videoFormatI420;
pic.iStride[0] = h264->iStride[0];
pic.iStride[1] = h264->iStride[1];
pic.iStride[2] = h264->iStride[2];
pic.pData[0] = h264->pYUVData[0];
pic.pData[1] = h264->pYUVData[1];
pic.pData[2] = h264->pYUVData[2];
pic.iStride[0] = iStride[0];
pic.iStride[1] = iStride[1];
pic.iStride[2] = iStride[2];
pic.pData[0] = pYUVData[0];
pic.pData[1] = pYUVData[1];
pic.pData[2] = pYUVData[2];
status = (*sys->pEncoder)->EncodeFrame(sys->pEncoder, &pic, &info);
@ -1058,10 +1070,15 @@ static int openh264_compress(H264_CONTEXT* h264, BYTE** ppDstData, UINT32* pDstS
static void openh264_uninit(H264_CONTEXT* h264)
{
H264_CONTEXT_OPENH264* sys = (H264_CONTEXT_OPENH264*) h264->pSystemData;
UINT32 x;
H264_CONTEXT_OPENH264* sysContexts = (H264_CONTEXT_OPENH264*) h264->pSystemData;
if (sys)
if (sysContexts)
{
for (x=0; x<h264->numSystemData; x++)
{
H264_CONTEXT_OPENH264* sys = &sysContexts[x];
if (sys->pDecoder)
{
(*sys->pDecoder)->Uninitialize(sys->pDecoder);
@ -1075,29 +1092,35 @@ static void openh264_uninit(H264_CONTEXT* h264)
WelsDestroySVCEncoder(sys->pEncoder);
sys->pEncoder = NULL;
}
free(sys);
}
free(h264->pSystemData);
h264->pSystemData = NULL;
}
}
static BOOL openh264_init(H264_CONTEXT* h264)
{
UINT32 x;
long status;
SDecodingParam sDecParam;
H264_CONTEXT_OPENH264* sys;
H264_CONTEXT_OPENH264* sysContexts;
static int traceLevel = WELS_LOG_DEBUG;
static EVideoFormatType videoFormat = videoFormatI420;
static WelsTraceCallback traceCallback = (WelsTraceCallback) openh264_trace_callback;
sys = (H264_CONTEXT_OPENH264*) calloc(1, sizeof(H264_CONTEXT_OPENH264));
h264->numSystemData = 1;
if (!sys)
{
sysContexts = (H264_CONTEXT_OPENH264*) calloc(h264->numSystemData,
sizeof(H264_CONTEXT_OPENH264));
if (!sysContexts)
goto EXCEPTION;
}
h264->pSystemData = (void*) sys;
h264->pSystemData = (void*) sysContexts;
for (x=0; x<h264->numSystemData; x++)
{
H264_CONTEXT_OPENH264* sys = &sysContexts[x];
if (h264->Compressor)
{
@ -1128,38 +1151,57 @@ static BOOL openh264_init(H264_CONTEXT* h264)
if (status != 0)
{
WLog_ERR(TAG, "Failed to initialize OpenH264 decoder (status=%ld)", status);
WLog_ERR(TAG, "Failed to initialize OpenH264 decoder (status=%ld)",
status);
goto EXCEPTION;
}
status = (*sys->pDecoder)->SetOption(sys->pDecoder, DECODER_OPTION_DATAFORMAT, &videoFormat);
status = (*sys->pDecoder)->SetOption(
sys->pDecoder, DECODER_OPTION_DATAFORMAT,
&videoFormat);
if (status != 0)
{
WLog_ERR(TAG, "Failed to set data format option on OpenH264 decoder (status=%ld)", status);
WLog_ERR(TAG, "Failed to set data format option on OpenH264 decoder (status=%ld)",
status);
goto EXCEPTION;
}
if (g_openh264_trace_enabled)
{
status = (*sys->pDecoder)->SetOption(sys->pDecoder, DECODER_OPTION_TRACE_LEVEL, &traceLevel);
status = (*sys->pDecoder)->SetOption(
sys->pDecoder, DECODER_OPTION_TRACE_LEVEL,
&traceLevel);
if (status != 0)
{
WLog_ERR(TAG, "Failed to set trace level option on OpenH264 decoder (status=%ld)", status);
WLog_ERR(TAG, "Failed to set trace level option on OpenH264 decoder (status=%ld)",
status);
goto EXCEPTION;
}
status = (*sys->pDecoder)->SetOption(sys->pDecoder, DECODER_OPTION_TRACE_CALLBACK, &traceCallback);
status = (*sys->pDecoder)->SetOption(
sys->pDecoder, DECODER_OPTION_TRACE_CALLBACK,
&traceCallback);
if (status != 0)
{
WLog_ERR(TAG, "Failed to set trace callback option on OpenH264 decoder (status=%ld)", status);
WLog_ERR(TAG, "Failed to set trace callback option on OpenH264 decoder (status=%ld)",
status);
goto EXCEPTION;
}
status = (*sys->pDecoder)->SetOption(sys->pDecoder, DECODER_OPTION_TRACE_CALLBACK_CONTEXT, &h264);
status = (*sys->pDecoder)->SetOption(
sys->pDecoder,
DECODER_OPTION_TRACE_CALLBACK_CONTEXT,
&h264);
if (status != 0)
{
WLog_ERR(TAG, "Failed to set trace callback context option on OpenH264 decoder (status=%ld)", status);
WLog_ERR(TAG, "Failed to set trace callback context option on OpenH264 decoder (status=%ld)",
status);
goto EXCEPTION;
}
}
}
}
@ -1201,12 +1243,14 @@ struct _H264_CONTEXT_LIBAVCODEC
};
typedef struct _H264_CONTEXT_LIBAVCODEC H264_CONTEXT_LIBAVCODEC;
static int libavcodec_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize)
static int libavcodec_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize, UINT32 plane)
{
int status;
int gotFrame = 0;
AVPacket packet;
H264_CONTEXT_LIBAVCODEC* sys = (H264_CONTEXT_LIBAVCODEC*) h264->pSystemData;
BYTE** pYUVData = h264->pYUVData[plane];
INT32* iStride = h264->iStride[plane];
av_init_packet(&packet);
@ -1231,13 +1275,13 @@ static int libavcodec_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcS
if (gotFrame)
{
h264->pYUVData[0] = sys->videoFrame->data[0];
h264->pYUVData[1] = sys->videoFrame->data[1];
h264->pYUVData[2] = sys->videoFrame->data[2];
pYUVData[0] = sys->videoFrame->data[0];
pYUVData[1] = sys->videoFrame->data[1];
pYUVData[2] = sys->videoFrame->data[2];
h264->iStride[0] = sys->videoFrame->linesize[0];
h264->iStride[1] = sys->videoFrame->linesize[1];
h264->iStride[2] = sys->videoFrame->linesize[2];
iStride[0] = sys->videoFrame->linesize[0];
iStride[1] = sys->videoFrame->linesize[1];
iStride[2] = sys->videoFrame->linesize[2];
h264->width = sys->videoFrame->width;
h264->height = sys->videoFrame->height;
@ -1351,34 +1395,111 @@ static H264_CONTEXT_SUBSYSTEM g_Subsystem_libavcodec =
#endif
int h264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize,
BYTE** ppDstData, DWORD DstFormat, int nDstStep, int nDstWidth,
int nDstHeight, RECTANGLE_16* regionRects, int numRegionRects)
static BOOL check_rect(const H264_CONTEXT* h264, const RECTANGLE_16* rect,
UINT32 nDstWidth, UINT32 nDstHeight)
{
int index;
int status;
int* iStride;
BYTE* pDstData;
/* Check, if the output rectangle is valid in decoded h264 frame. */
if ((rect->right > h264->width) || (rect->left > h264->width))
return FALSE;
if ((rect->top > h264->height) || (rect->bottom > h264->height))
return FALSE;
/* Check, if the output rectangle is valid in destination buffer. */
if ((rect->right > nDstWidth) || (rect->left > nDstWidth))
return FALSE;
if ((rect->bottom > nDstHeight) || (rect->top > nDstHeight))
return FALSE;
return TRUE;
}
static BOOL avc_yuv_to_rgb(H264_CONTEXT* h264, const RECTANGLE_16* regionRects,
UINT32 numRegionRects, UINT32 nDstWidth,
UINT32 nDstHeight, UINT32 nDstStep, BYTE* pDstData,
DWORD DstFormat, BOOL use444)
{
UINT32 x;
BYTE* pDstPoint;
prim_size_t roi;
BYTE** pYUVData;
int width, height;
BYTE* pYUVPoint[3];
RECTANGLE_16* rect;
const BYTE* pYUVPoint[3];
primitives_t* prims = primitives_get();
for (x=0; x<numRegionRects; x++)
{
const RECTANGLE_16* rect = &(regionRects[x]);
const UINT32* iStride;
BYTE** ppYUVData;
if (use444)
{
iStride = h264->iYUV444Stride;
ppYUVData = h264->pYUV444Data;
}
else
{
iStride = h264->iStride[0];
ppYUVData = h264->pYUVData[0];
}
if (!check_rect(h264, rect, nDstWidth, nDstHeight))
return -1003;
width = rect->right - rect->left;
height = rect->bottom - rect->top;
pDstPoint = pDstData + rect->top * nDstStep + rect->left * 4;
pYUVPoint[0] = ppYUVData[0] + rect->top * iStride[0] + rect->left;
pYUVPoint[1] = ppYUVData[1];
pYUVPoint[2] = ppYUVData[2];
if (use444)
{
pYUVPoint[1] += rect->top * iStride[1] + rect->left;
pYUVPoint[2] += rect->top * iStride[2] + rect->left;
}
else
{
pYUVPoint[1] += rect->top/2 * iStride[1] + rect->left/2;
pYUVPoint[2] += rect->top/2 * iStride[2] + rect->left/2;
}
roi.width = width;
roi.height = height;
if (use444)
{
if (prims->YUV444ToRGB_8u_P3AC4R(
pYUVPoint, iStride, pDstPoint,
nDstStep, &roi) != PRIMITIVES_SUCCESS)
{
return FALSE;
}
}
else
{
if (prims->YUV420ToRGB_8u_P3AC4R(pYUVPoint, iStride, pDstPoint,
nDstStep, &roi) != PRIMITIVES_SUCCESS)
return FALSE;
}
}
return TRUE;
}
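The pointer arithmetic above differs between the two paths because YUV420 chroma is subsampled by two in both directions, while the rebuilt YUV444 planes are full resolution. A tiny hypothetical helper restating that indexing rule (illustrative only, not part of this commit):

static const BYTE* chroma_sample(const BYTE* plane, UINT32 stride,
                                 UINT32 x, UINT32 y, BOOL use444)
{
    /* 4:4:4 chroma is full resolution; 4:2:0 chroma is halved in both axes,
     * e.g. pixel (34, 20) maps to (34, 20) in 4:4:4 but (17, 10) in 4:2:0. */
    if (use444)
        return plane + y * stride + x;

    return plane + (y / 2) * stride + (x / 2);
}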
INT32 avc420_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize,
BYTE* pDstData, DWORD DstFormat, UINT32 nDstStep,
UINT32 nDstWidth, UINT32 nDstHeight,
RECTANGLE_16* regionRects, UINT32 numRegionRects)
{
int status;
if (!h264)
return -1001;
#if 0
WLog_INFO(TAG, "h264_decompress: pSrcData=%p, SrcSize=%u, pDstData=%p, nDstStep=%d, nDstHeight=%d, numRegionRects=%d",
pSrcData, SrcSize, *ppDstData, nDstStep, nDstHeight, numRegionRects);
#endif
if (!(pDstData = *ppDstData))
return -1002;
status = h264->subsystem->Decompress(h264, pSrcData, SrcSize);
status = h264->subsystem->Decompress(h264, pSrcData, SrcSize, 0);
if (status == 0)
return 1;
@ -1386,93 +1507,324 @@ int h264_decompress(H264_CONTEXT* h264, BYTE* pSrcData, UINT32 SrcSize,
if (status < 0)
return status;
pYUVData = h264->pYUVData;
iStride = h264->iStride;
for (index = 0; index < numRegionRects; index++)
{
rect = &(regionRects[index]);
/* Check, if the output rectangle is valid in decoded h264 frame. */
if ((rect->right > h264->width) || (rect->left > h264->width))
return -1003;
if ((rect->top > h264->height) || (rect->bottom > h264->height))
return -1004;
/* Check, if the output rectangle is valid in destination buffer. */
if ((rect->right > nDstWidth) || (rect->left > nDstWidth))
return -1005;
if ((rect->bottom > nDstHeight) || (rect->top > nDstHeight))
return -1006;
width = rect->right - rect->left;
height = rect->bottom - rect->top;
pDstPoint = pDstData + rect->top * nDstStep + rect->left * 4;
pYUVPoint[0] = pYUVData[0] + rect->top * iStride[0] + rect->left;
pYUVPoint[1] = pYUVData[1] + rect->top/2 * iStride[1] + rect->left/2;
pYUVPoint[2] = pYUVData[2] + rect->top/2 * iStride[2] + rect->left/2;
#if 0
WLog_INFO(TAG, "regionRect: x: %d y: %d width: %d height: %d",
rect->left, rect->top, width, height);
#endif
roi.width = width;
roi.height = height;
prims->YUV420ToRGB_8u_P3AC4R((const BYTE**) pYUVPoint, iStride, pDstPoint, nDstStep, &roi);
}
if (!avc_yuv_to_rgb(h264, regionRects, numRegionRects, nDstWidth,
nDstHeight, nDstStep, pDstData, DstFormat, FALSE))
return -1002;
return 1;
}
int h264_compress(H264_CONTEXT* h264, BYTE* pSrcData, DWORD SrcFormat,
int nSrcStep, int nSrcWidth, int nSrcHeight, BYTE** ppDstData, UINT32* pDstSize)
INT32 avc420_compress(H264_CONTEXT* h264, BYTE* pSrcData, DWORD SrcFormat,
UINT32 nSrcStep, UINT32 nSrcWidth, UINT32 nSrcHeight,
BYTE** ppDstData, UINT32* pDstSize)
{
int status = -1;
prim_size_t roi;
int nWidth, nHeight;
primitives_t* prims = primitives_get();
UINT32* iStride;
BYTE** pYUVData;
if (!h264)
return -1;
if (!h264->subsystem->Compress)
return -1;
iStride = h264->iStride[0];
pYUVData = h264->pYUVData[0];
nWidth = (nSrcWidth + 1) & ~1;
nHeight = (nSrcHeight + 1) & ~1;
if (!(h264->pYUVData[0] = (BYTE*) malloc(nWidth * nHeight)))
if (!(pYUVData[0] = (BYTE*) malloc(nWidth * nHeight)))
return -1;
h264->iStride[0] = nWidth;
iStride[0] = nWidth;
if (!(h264->pYUVData[1] = (BYTE*) malloc(nWidth * nHeight / 4)))
if (!(pYUVData[1] = (BYTE*) malloc(nWidth * nHeight)))
goto error_1;
h264->iStride[1] = nWidth / 2;
iStride[1] = nWidth / 2;
if (!(h264->pYUVData[2] = (BYTE*) malloc(nWidth * nHeight / 4)))
if (!(pYUVData[2] = (BYTE*) malloc(nWidth * nHeight)))
goto error_2;
h264->iStride[2] = nWidth / 2;
iStride[2] = nWidth / 2;
h264->width = nWidth;
h264->height = nHeight;
roi.width = nSrcWidth;
roi.height = nSrcHeight;
prims->RGBToYUV420_8u_P3AC4R(pSrcData, nSrcStep, h264->pYUVData, h264->iStride, &roi);
prims->RGBToYUV420_8u_P3AC4R(pSrcData, nSrcStep, pYUVData, iStride, &roi);
status = h264->subsystem->Compress(h264, ppDstData, pDstSize);
status = h264->subsystem->Compress(h264, ppDstData, pDstSize, 0);
free(h264->pYUVData[2]);
h264->pYUVData[2] = NULL;
free(pYUVData[2]);
pYUVData[2] = NULL;
error_2:
free(h264->pYUVData[1]);
h264->pYUVData[1] = NULL;
free(pYUVData[1]);
pYUVData[1] = NULL;
error_1:
free(h264->pYUVData[0]);
h264->pYUVData[0] = NULL;
free(pYUVData[0]);
pYUVData[0] = NULL;
return status;
}
INT32 avc444_compress(H264_CONTEXT* h264, BYTE* pSrcData, DWORD SrcFormat,
UINT32 nSrcStep, UINT32 nSrcWidth, UINT32 nSrcHeight,
BYTE* op, BYTE** ppDstData, UINT32* pDstSize,
BYTE** ppAuxDstData, UINT32* pAuxDstSize)
{
return -1;
}
static BOOL avc444_process_rect(H264_CONTEXT* h264,
const RECTANGLE_16* rect,
UINT32 nDstWidth, UINT32 nDstHeight)
{
const primitives_t* prims = primitives_get();
prim_size_t roi;
UINT16 width, height;
const BYTE* pYUVMainPoint[3];
const BYTE* pYUVAuxPoint[3];
BYTE* pYUVDstPoint[3];
UINT32* piDstStride = h264->iYUV444Stride;
BYTE** ppYUVDstData = h264->pYUV444Data;
const UINT32* piAuxStride = h264->iStride[1];
const UINT32* piMainStride = h264->iStride[0];
BYTE** ppYUVAuxData = h264->pYUVData[1];
BYTE** ppYUVMainData = h264->pYUVData[0];
if (!check_rect(h264, rect, nDstWidth, nDstHeight))
return FALSE;
width = rect->right - rect->left;
height = rect->bottom - rect->top;
roi.width = width;
roi.height = height;
pYUVMainPoint[0] = ppYUVMainData[0] + rect->top * piMainStride[0] +
rect->left;
pYUVMainPoint[1] = ppYUVMainData[1] + rect->top/2 * piMainStride[1] +
rect->left/2;
pYUVMainPoint[2] = ppYUVMainData[2] + rect->top/2 * piMainStride[2] +
rect->left/2;
pYUVDstPoint[0] = ppYUVDstData[0] + rect->top * piDstStride[0] +
rect->left;
pYUVDstPoint[1] = ppYUVDstData[1] + rect->top * piDstStride[1] +
rect->left;
pYUVDstPoint[2] = ppYUVDstData[2] + rect->top * piDstStride[2] +
rect->left;
pYUVAuxPoint[0] = ppYUVAuxData[0] + rect->top * piAuxStride[0] +
rect->left;
pYUVAuxPoint[1] = ppYUVAuxData[1] + rect->top/2 * piAuxStride[1] +
rect->left/2;
pYUVAuxPoint[2] = ppYUVAuxData[2] + rect->top/2 * piAuxStride[2] +
rect->left/2;
pYUVDstPoint[0] = ppYUVDstData[0] + rect->top * piDstStride[0] +
rect->left;
pYUVDstPoint[1] = ppYUVDstData[1] + rect->top * piDstStride[1] +
rect->left;
pYUVDstPoint[2] = ppYUVDstData[2] + rect->top * piDstStride[2] +
rect->left;
if (prims->YUV420CombineToYUV444(pYUVMainPoint, piMainStride,
NULL, NULL,
pYUVDstPoint, piDstStride,
&roi) != PRIMITIVES_SUCCESS)
return FALSE;
return TRUE;
}
static void avc444_rectangle_max(RECTANGLE_16* dst, const RECTANGLE_16* add)
{
if (dst->left > add->left)
dst->left = add->left;
if (dst->right < add->right)
dst->right = add->right;
if (dst->top > add->top)
dst->top = add->top;
if (dst->bottom < add->bottom)
dst->bottom = add->bottom;
}
static BOOL avc444_combine_yuv(H264_CONTEXT* h264,
const RECTANGLE_16* mainRegionRects,
UINT32 numMainRegionRect,
const RECTANGLE_16* auxRegionRects,
UINT32 numAuxRegionRect, UINT32 nDstWidth,
DWORD nDstHeight, UINT32 nDstStep)
{
UINT32 x;
RECTANGLE_16 rect;
const UINT32* piMainStride = h264->iStride[0];
UINT32* piDstSize = h264->iYUV444Size;
UINT32* piDstStride = h264->iYUV444Stride;
BYTE** ppYUVDstData = h264->pYUV444Data;
UINT32 padDstHeight = nDstHeight + 16; /* Need alignment to 16x16 blocks */
if ((piMainStride[0] != piDstStride[0]) ||
(piDstSize[0] != piMainStride[0] * padDstHeight))
{
for (x=0; x<3; x++)
{
BYTE* ppYUVTmpData;
piDstStride[x] = piMainStride[0];
piDstSize[x] = piDstStride[x] * padDstHeight;
ppYUVTmpData = realloc(ppYUVDstData[x], piDstSize[x]);
if (!ppYUVTmpData)
goto fail;
ppYUVDstData[x] = ppYUVTmpData;
memset(ppYUVDstData[x], 0, piDstSize[x]);
}
}
for (x=0; x<3; x++)
{
if (!ppYUVDstData[x] || (piDstSize[x] == 0) || (piDstStride[x] == 0))
{
WLog_ERR(TAG, "YUV buffer not initialized! check your decoder settings");
goto fail;
}
}
rect.right = 0;
rect.bottom = 0;
rect.left = 0xFFFF;
rect.top = 0xFFFF;
for (x=0; x<numMainRegionRect; x++)
avc444_rectangle_max(&rect, &mainRegionRects[x]);
for (x=0; x<numAuxRegionRect; x++)
avc444_rectangle_max(&rect, &auxRegionRects[x]);
if (!avc444_process_rect(h264, &rect, nDstWidth, nDstHeight))
goto fail;
return TRUE;
fail:
free (ppYUVDstData[0]);
free (ppYUVDstData[1]);
free (ppYUVDstData[2]);
ppYUVDstData[0] = NULL;
ppYUVDstData[1] = NULL;
ppYUVDstData[2] = NULL;
return FALSE;
}
#if defined(AVC444_FRAME_STAT)
static UINT64 op1 = 0;
static double op1sum = 0;
static UINT64 op2 = 0;
static double op2sum = 0;
static UINT64 op3 = 0;
static double op3sum = 0;
static double avg(UINT64* count, double old, double size)
{
double tmp = size + *count * old;
(*count)++;
tmp = tmp / *count;
return tmp;
}
#endif
INT32 avc444_decompress(H264_CONTEXT* h264, BYTE op,
RECTANGLE_16* regionRects, UINT32 numRegionRects,
BYTE* pSrcData, UINT32 SrcSize,
RECTANGLE_16* auxRegionRects, UINT32 numAuxRegionRect,
BYTE* pAuxSrcData, UINT32 AuxSrcSize,
BYTE* pDstData, DWORD DstFormat,
UINT32 nDstStep, UINT32 nDstWidth, UINT32 nDstHeight)
{
INT32 status = -1;
UINT32 numYuvRects = 0;
RECTANGLE_16* yuvRects = NULL;
UINT32 numChromaRects = 0;
RECTANGLE_16* chromaRects = NULL;
if (!h264 || !regionRects ||
!pSrcData || !pDstData)
return -1001;
switch(op)
{
case 0: /* YUV420 in stream 1
* Chroma420 in stream 2 */
numYuvRects = numRegionRects;
yuvRects = regionRects;
numChromaRects = numAuxRegionRect;
chromaRects = auxRegionRects;
status = h264->subsystem->Decompress(h264, pSrcData, SrcSize, 0);
if (status >= 0)
status = h264->subsystem->Decompress(h264, pAuxSrcData, AuxSrcSize, 1);
break;
case 2: /* Chroma420 in stream 1 */
status = h264->subsystem->Decompress(h264, pSrcData, SrcSize, 1);
numChromaRects = numRegionRects;
chromaRects = regionRects;
break;
case 1: /* YUV420 in stream 1 */
status = h264->subsystem->Decompress(h264, pSrcData, SrcSize, 0);
numYuvRects = numRegionRects;
yuvRects = regionRects;
break;
default: /* WTF? */
break;
}
#if defined(AVC444_FRAME_STAT)
switch(op)
{
case 0:
op1sum = avg(&op1, op1sum, SrcSize + AuxSrcSize);
break;
case 1:
op2sum = avg(&op2, op2sum, SrcSize);
break;
case 2:
op3sum = avg(&op3, op3sum, SrcSize);
break;
default:
break;
}
WLog_INFO(TAG, "luma=%llu [avg=%lf] chroma=%llu [avg=%lf] combined=%llu [avg=%lf]",
op1, op1sum, op2, op2sum, op3, op3sum);
#endif
if (status >= 0)
{
if (!avc444_combine_yuv(h264, yuvRects, numYuvRects,
chromaRects, numChromaRects,
nDstWidth, nDstHeight, nDstStep))
status = -1002;
else
{
if (numYuvRects > 0)
{
if (!avc_yuv_to_rgb(h264, regionRects, numRegionRects, nDstWidth,
nDstHeight, nDstStep, pDstData, DstFormat, TRUE))
status = -1003;
}
if (numChromaRects > 0)
{
if (!avc_yuv_to_rgb(h264, auxRegionRects, numAuxRegionRect,
nDstWidth, nDstHeight, nDstStep, pDstData,
DstFormat, TRUE))
status = -1004;
}
}
}
return status;
}
@ -1560,6 +1912,9 @@ void h264_context_free(H264_CONTEXT* h264)
{
h264->subsystem->Uninit(h264);
free (h264->pYUV444Data[0]);
free (h264->pYUV444Data[1]);
free (h264->pYUV444Data[2]);
free(h264);
}
}


@ -88,7 +88,7 @@ BOOL freerdp_client_codecs_prepare(rdpCodecs* codecs, UINT32 flags)
}
}
if ((flags & FREERDP_CODEC_H264) && !codecs->h264)
if ((flags & (FREERDP_CODEC_AVC420 | FREERDP_CODEC_AVC444)) && !codecs->h264)
{
if (!(codecs->h264 = h264_context_new(FALSE)))
{
@ -161,7 +161,7 @@ BOOL freerdp_client_codecs_reset(rdpCodecs* codecs, UINT32 flags,
}
}
if (flags & FREERDP_CODEC_H264)
if (flags & (FREERDP_CODEC_AVC420 | FREERDP_CODEC_AVC444))
{
if (codecs->h264)
{
@ -179,9 +179,7 @@ rdpCodecs* codecs_new(rdpContext* context)
codecs = (rdpCodecs*) calloc(1, sizeof(rdpCodecs));
if (codecs)
{
codecs->context = context;
}
return codecs;
}


@ -461,39 +461,38 @@ UINT gdi_SurfaceCommand_Planar(rdpGdi* gdi, RdpgfxClientContext* context, RDPGFX
*
* @return 0 on success, otherwise a Win32 error code
*/
UINT gdi_SurfaceCommand_H264(rdpGdi* gdi, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
static UINT gdi_SurfaceCommand_AVC420(rdpGdi* gdi, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status;
UINT32 i;
BYTE* DstData = NULL;
gdiGfxSurface* surface;
RDPGFX_H264_METABLOCK* meta;
RDPGFX_H264_BITMAP_STREAM* bs;
RDPGFX_AVC420_BITMAP_STREAM* bs;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
return ERROR_INTERNAL_ERROR;
if (!freerdp_client_codecs_prepare(surface->codecs, FREERDP_CODEC_H264))
if (!freerdp_client_codecs_prepare(surface->codecs, FREERDP_CODEC_AVC420))
return ERROR_INTERNAL_ERROR;
bs = (RDPGFX_H264_BITMAP_STREAM*) cmd->extra;
bs = (RDPGFX_AVC420_BITMAP_STREAM*) cmd->extra;
if (!bs)
return ERROR_INTERNAL_ERROR;
meta = &(bs->meta);
DstData = surface->data;
status = h264_decompress(surface->codecs->h264, bs->data, bs->length, &DstData,
PIXEL_FORMAT_XRGB32, surface->scanline, surface->width, surface->height,
meta->regionRects, meta->numRegionRects);
status = avc420_decompress(surface->codecs->h264, bs->data, bs->length,
surface->data, PIXEL_FORMAT_XRGB32,
surface->scanline, surface->width,
surface->height, meta->regionRects,
meta->numRegionRects);
if (status < 0)
{
WLog_WARN(TAG, "h264_decompress failure: %d, ignoring update.", status);
WLog_WARN(TAG, "avc420_decompress failure: %d, ignoring update.", status);
return CHANNEL_RC_OK;
}
@ -508,6 +507,77 @@ UINT gdi_SurfaceCommand_H264(rdpGdi* gdi, RdpgfxClientContext* context, RDPGFX_S
return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
static UINT gdi_SurfaceCommand_AVC444(rdpGdi* gdi, RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cmd)
{
int status;
UINT32 i;
gdiGfxSurface* surface;
RDPGFX_AVC444_BITMAP_STREAM* bs;
RDPGFX_AVC420_BITMAP_STREAM* avc1;
RDPGFX_H264_METABLOCK* meta1;
RDPGFX_AVC420_BITMAP_STREAM* avc2;
RDPGFX_H264_METABLOCK* meta2;
RECTANGLE_16* regionRects = NULL;
surface = (gdiGfxSurface*) context->GetSurfaceData(context, cmd->surfaceId);
if (!surface)
return ERROR_INTERNAL_ERROR;
if (!freerdp_client_codecs_prepare(surface->codecs, FREERDP_CODEC_AVC444))
return ERROR_INTERNAL_ERROR;
bs = (RDPGFX_AVC444_BITMAP_STREAM*) cmd->extra;
if (!bs)
return ERROR_INTERNAL_ERROR;
avc1 = &bs->bitstream[0];
avc2 = &bs->bitstream[1];
meta1 = &avc1->meta;
meta2 = &avc2->meta;
status = avc444_decompress(surface->codecs->h264, bs->LC,
meta1->regionRects, meta1->numRegionRects,
avc1->data, avc1->length,
meta2->regionRects, meta2->numRegionRects,
avc2->data, avc2->length,
surface->data, PIXEL_FORMAT_XRGB32,
surface->scanline, surface->width,
surface->height);
if (status < 0)
{
WLog_WARN(TAG, "avc444_decompress failure: %d, ignoring update.", status);
return CHANNEL_RC_OK;
}
for (i = 0; i < meta1->numRegionRects; i++)
{
region16_union_rect(&(surface->invalidRegion),
&(surface->invalidRegion),
&(meta1->regionRects[i]));
}
for (i = 0; i < meta2->numRegionRects; i++)
{
region16_union_rect(&(surface->invalidRegion),
&(surface->invalidRegion),
&(meta2->regionRects[i]));
}
if (!gdi->inGfxFrame)
gdi_UpdateSurfaces(gdi);
free(regionRects);
return CHANNEL_RC_OK;
}
/**
* Function description
*
@ -677,8 +747,12 @@ UINT gdi_SurfaceCommand(RdpgfxClientContext* context, RDPGFX_SURFACE_COMMAND* cm
status = gdi_SurfaceCommand_Planar(gdi, context, cmd);
break;
case RDPGFX_CODECID_H264:
status = gdi_SurfaceCommand_H264(gdi, context, cmd);
case RDPGFX_CODECID_AVC420:
status = gdi_SurfaceCommand_AVC420(gdi, context, cmd);
break;
case RDPGFX_CODECID_AVC444:
status = gdi_SurfaceCommand_AVC444(gdi, context, cmd);
break;
case RDPGFX_CODECID_ALPHA:


@ -26,12 +26,18 @@
#include "prim_YUV.h"
#define CLIP(X) ( (X) > 255 ? 255 : (X) < 0 ? 0 : X)
static INLINE BYTE CLIP(INT32 X)
{
if (X > 255L)
return 255L;
if (X < 0L)
return 0L;
return X;
}
/**
* @brief general_YUV420CombineToYUV444
* U444(2x,2y) = Um(2x,2y) * 4 - Ua(2x+1,2y) - Ua(2x,2y+1) - Ua(2x+1,2y+1)
* V444(2x,2y) = Vm(2x,2y) * 4 - Va(2x+1,2y) - Va(2x,2y+1) - Va(2x+1,2y+1)
*
* @param pSrc Pointer to auxiliary YUV420 data
* @param srcStep Step width in auxiliary YUV420 data
* @param pDst Pointer to main YUV420 data
@ -46,14 +52,25 @@ static pstatus_t general_YUV420CombineToYUV444(
BYTE* pDst[3], const UINT32 dstStep[3],
const prim_size_t* roi)
{
const UINT32 mod = 16;
UINT32 uY = 0;
UINT32 vY = 0;
UINT32 x, y;
UINT32 nWidth, nHeight;
UINT32 halfWidth, halfHeight;
const UINT32 oddY = 1;
const UINT32 evenY = 0;
const UINT32 oddX = 1;
const UINT32 evenX = 0;
/* The auxiliary frame is aligned to multiples of 16x16.
* We need the padded height for the B4 and B5 conversion. */
const UINT32 padHeight = roi->height + 16 - roi->height % 16;
nWidth = roi->width;
nHeight = roi->height;
halfWidth = (nWidth + 1) / 2;
halfHeight = (nHeight + 1) / 2;
halfWidth = (nWidth ) / 2;
halfHeight = (nHeight) / 2;
if (pMainSrc)
{
@ -71,55 +88,194 @@ static pstatus_t general_YUV420CombineToYUV444(
/* B2 and B3 */
for (y=0; y<halfHeight; y++)
{
const UINT32 val2y = (2 * y + evenY);
const UINT32 val2y1 = val2y + oddY;
const BYTE* Um = pMainSrc[1] + srcMainStep[1] * y;
const BYTE* Vm = pMainSrc[2] + srcMainStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * y * 2;
BYTE* pV = pDst[1] + dstStep[2] * y * 2;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
BYTE* pU1 = pDst[1] + dstStep[1] * val2y1;
BYTE* pV1 = pDst[2] + dstStep[2] * val2y1;
for (x=0; x<halfWidth; x++)
{
pU[2*x] = Um[x];
pV[2*x] = Vm[x];
const UINT32 val2x = 2*x + evenX;
const UINT32 val2x1 = val2x+oddX;
pU[val2x] = Um[x];
pV[val2x] = Vm[x];
pU[val2x1] = Um[x];
pV[val2x1] = Vm[x];
pU1[val2x] = Um[x];
pV1[val2x] = Vm[x];
pU1[val2x1] = Um[x];
pV1[val2x1] = Vm[x];
}
}
}
if (!pAuxSrc)
return PRIMITIVES_SUCCESS;
} else if (!pAuxSrc)
return -1;
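/* At this point each half-resolution chroma sample of the main view has been
 * replicated into a full 2x2 block of the 4:4:4 output, i.e. a plain
 * nearest-neighbour upscale.  The auxiliary passes below refine it: B4/B5
 * overwrite the odd rows, B6/B7 overwrite the odd columns of the even rows,
 * and the final filter step recomputes the even/even samples using the
 * formula given in the function comment above. */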
/* The second half of U and V is a bit more tricky... */
/* B4 */
for (y=0; y<halfHeight; y++)
/* B4 and B5 */
for (y=0; y<padHeight; y++)
{
const BYTE* Ya = pAuxSrc[0] + srcAuxStep[0] * y;
BYTE* pU = pDst[1] + dstStep[1] * y * 2 + 1;
BYTE* pX;
for (x=0; x<nWidth; x++)
pU[x] = Ya[x];
if ((y) % mod < (mod + 1)/2)
{
const UINT32 pos = (2 * uY++ + oddY);
if (pos >= nHeight)
continue;
pX = pDst[1] + dstStep[1] * pos;
}
else
{
const UINT32 pos = (2 * vY++ + oddY);
if (pos >= nHeight)
continue;
pX = pDst[2] + dstStep[2] * pos;
}
/* B5 */
for (y=halfHeight; y<nHeight; y++)
{
const BYTE* Ya = pAuxSrc[0] + srcAuxStep[0] * y;
BYTE* pV = pDst[1] + dstStep[2] * (y - halfHeight) * 2 + 1;
for (x=0; x<nWidth; x++)
pV[x] = Ya[x];
memcpy(pX, Ya, nWidth);
}
/* B6 and B7 */
for (y=0; y<halfHeight; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const BYTE* Ua = pAuxSrc[1] + srcAuxStep[1] * y;
const BYTE* Va = pAuxSrc[2] + srcAuxStep[2] * y;
BYTE* pU = pDst[1] + dstStep[1] * y * 2;
BYTE* pV = pDst[2] + dstStep[2] * y * 2;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
for (x=0; x<halfWidth; x++)
{
pU[2*x+1] = Ua[x];
pV[2*x+1] = Va[x];
const UINT32 val2x1 = (x * 2 + oddX);
pU[val2x1] = Ua[x];
pV[val2x1] = Va[x];
}
}
/* Filter */
for (y=0; y<halfHeight; y++)
{
const UINT32 val2y = (y * 2 + evenY);
const UINT32 val2y1 = val2y + oddY;
BYTE* pU1 = pDst[1] + dstStep[1] * val2y1;
BYTE* pV1 = pDst[2] + dstStep[2] * val2y1;
BYTE* pU = pDst[1] + dstStep[1] * val2y;
BYTE* pV = pDst[2] + dstStep[2] * val2y;
if (val2y1 > nHeight)
continue;
for (x=0; x<halfWidth; x++)
{
const UINT32 val2x = (x * 2);
const UINT32 val2x1 = val2x + 1;
const INT32 up = pU[val2x] * 4;
const INT32 vp = pV[val2x] * 4;
INT32 u2020;
INT32 v2020;
if (val2x1 > nWidth)
continue;
u2020 = up - pU[val2x1] - pU1[val2x] - pU1[val2x1];
v2020 = vp - pV[val2x1] - pV1[val2x] - pV1[val2x1];
pU[val2x] = CLIP(u2020);
pV[val2x] = CLIP(v2020);
}
}
return PRIMITIVES_SUCCESS;
}
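/* Worked example for the filter above (illustration only): for the 2x2 chroma
 * block U = { 40, 80, 120, 160 } the encoder stores the average
 * Um = (40 + 80 + 120 + 160) / 4 = 100 in the main view and the three odd
 * samples 80, 120 and 160 in the auxiliary view.  The filter then restores
 * U444(2x,2y) = 4 * 100 - 80 - 120 - 160 = 40, the original value.  If the
 * average truncates (e.g. { 41, 80, 120, 160 } also averages to 100) the
 * reconstruction is off by the truncation remainder (at most 3), which is why
 * the unit tests compare with a small tolerance instead of exact equality. */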
static pstatus_t general_YUV444SplitToYUV420(
const BYTE* pSrc[3], const UINT32 srcStep[3],
BYTE* pMainDst[3], const UINT32 dstMainStep[3],
BYTE* pAuxDst[3], const UINT32 dstAuxStep[3],
const prim_size_t* roi)
{
UINT32 x, y, uY = 0, vY = 0;
UINT32 halfWidth, halfHeight;
/* The auxiliary frame is aligned to multiples of 16x16.
* We need the padded height for the B4 and B5 conversion. */
const UINT32 padHeight = roi->height + 16 - roi->height % 16;
halfWidth = (roi->width + 1) / 2;
halfHeight = (roi->height + 1) / 2;
/* B1 */
for (y=0; y<roi->height; y++)
{
const BYTE* pSrcY = pSrc[0] + y * srcStep[0];
BYTE* pY = pMainDst[0] + y * dstMainStep[0];
memcpy(pY, pSrcY, roi->width);
}
/* B2 and B3 */
for (y=0; y<halfHeight; y++)
{
const BYTE* pSrcU = pSrc[1] + 2 * y * srcStep[1];
const BYTE* pSrcV = pSrc[2] + 2 * y * srcStep[2];
const BYTE* pSrcU1 = pSrc[1] + (2 * y + 1) * srcStep[1];
const BYTE* pSrcV1 = pSrc[2] + (2 * y + 1) * srcStep[2];
BYTE* pU = pMainDst[1] + y * dstMainStep[1];
BYTE* pV = pMainDst[2] + y * dstMainStep[2];
for (x=0; x<halfWidth; x++)
{
/* Filter */
const INT32 u = pSrcU[2*x] + pSrcU[2*x+1] + pSrcU1[2*x]
+ pSrcU1[2*x+1];
const INT32 v = pSrcV[2*x] + pSrcV[2*x+1] + pSrcV1[2*x]
+ pSrcV1[2*x+1];
pU[x] = CLIP(u / 4L);
pV[x] = CLIP(v / 4L);
}
}
/* B4 and B5 */
for (y=0; y<padHeight; y++)
{
BYTE* pY = pAuxDst[0] + y * dstAuxStep[0];
if (y % 16 < 8)
{
const UINT32 pos = (2 * uY++ + 1);
const BYTE* pSrcU = pSrc[1] + pos * srcStep[1];
if (pos >= roi->height)
continue;
memcpy(pY, pSrcU, roi->width);
}
else
{
const UINT32 pos = (2 * vY++ + 1);
const BYTE* pSrcV = pSrc[2] + pos * srcStep[2];
if (pos >= roi->height)
continue;
memcpy(pY, pSrcV, roi->width);
}
}
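/* The auxiliary luma plane is filled in 16-line groups: the first 8 lines of
 * each group receive consecutive odd rows of the full-resolution U plane, the
 * remaining 8 lines receive consecutive odd rows of the V plane.  Source rows
 * beyond roi->height are skipped, but the destination line and the uY/vY
 * counters still advance, which is why the loop runs to padHeight.
 * Equivalently (sketch only), auxiliary line y maps to
 *   plane = (y % 16 < 8) ? U : V
 *   row   = 2 * (8 * (y / 16) + (y % 8)) + 1
 * of the 4:4:4 chroma planes, mirroring the B4/B5 loop of
 * general_YUV420CombineToYUV444() above. */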
/* B6 and B7 */
for (y=0; y<halfHeight; y++)
{
const BYTE* pSrcU = pSrc[1] + 2 * y * srcStep[1];
const BYTE* pSrcV = pSrc[2] + 2 * y * srcStep[2];
BYTE* pU = pAuxDst[1] + y * dstAuxStep[1];
BYTE* pV = pAuxDst[2] + y * dstAuxStep[2];
for (x=0; x<halfWidth; x++)
{
pU[x] = pSrcU[2*x+1];
pV[x] = pSrcV[2*x+1];
}
}
@ -131,13 +287,41 @@ static pstatus_t general_YUV420CombineToYUV444(
* | G | = ( | 256 -48 -120 | | U - 128 | ) >> 8
* | B | ( | 256 475 0 | | V - 128 | )
*/
#define C(Y) ( (Y) - 0 )
#define D(U) ( (U) - 128 )
#define E(V) ( (V) - 128 )
static INLINE INT32 C(INT32 Y)
{
return (Y) - 0L;
}
#define YUV2R(Y, U, V) CLIP(( 256 * C(Y) + 0 * D(U) + 403 * E(V)) >> 8)
#define YUV2G(Y, U, V) CLIP(( 256 * C(Y) - 48 * D(U) - 120 * E(V)) >> 8)
#define YUV2B(Y, U, V) CLIP(( 256 * C(Y) + 475 * D(U) + 0 * E(V)) >> 8)
static INLINE INT32 D(INT32 U)
{
return (U) - 128L;
}
static INLINE INT32 E(INT32 V)
{
return (V) - 128L;
}
static INLINE BYTE YUV2R(INT32 Y, INT32 U, INT32 V)
{
const INT32 r = ( 256L * C(Y) + 0L * D(U) + 403L * E(V));
const INT32 r8 = r >> 8L;
return CLIP(r8);
}
static INLINE BYTE YUV2G(INT32 Y, INT32 U, INT32 V)
{
const INT32 g = ( 256L * C(Y) - 48L * D(U) - 120L * E(V));
const INT32 g8 = g >> 8L;
return CLIP(g8);
}
static INLINE BYTE YUV2B(INT32 Y, INT32 U, INT32 V)
{
const INT32 b = ( 256L * C(Y) + 475L * D(U) + 0L * E(V));
const INT32 b8 = b >> 8L;
return CLIP(b8);
}
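/* Quick sanity check (illustration only): for a neutral grey input
 * Y = U = V = 128 the offsets D(U) and E(V) are 0, so
 *   YUV2R = YUV2G = YUV2B = (256 * 128) >> 8 = 128
 * and the luma value passes through unchanged. */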
static pstatus_t general_YUV444ToRGB_8u_P3AC4R(
const BYTE* pSrc[3], const UINT32 srcStep[3],
@ -159,13 +343,13 @@ static pstatus_t general_YUV444ToRGB_8u_P3AC4R(
for (x = 0; x < nWidth; x++)
{
const BYTE Y = pY[x];
const BYTE U = pU[x];
const BYTE V = pV[x];
const INT32 U = pU[x];
const INT32 V = pV[x];
pRGB[0] = YUV2B(Y, U, V);
pRGB[1] = YUV2G(Y, U, V);
pRGB[2] = YUV2R(Y, U, V);
pRGB[3] = 0xFF;
pRGB[4*x+0] = YUV2B(Y, U, V);
pRGB[4*x+1] = YUV2G(Y, U, V);
pRGB[4*x+2] = YUV2R(Y, U, V);
pRGB[4*x+3] = 0xFF;
}
}
@ -191,10 +375,6 @@ static pstatus_t general_YUV420ToRGB_8u_P3AC4R(
const BYTE* pY;
const BYTE* pU;
const BYTE* pV;
UINT32 R, G, B;
UINT32 Yp, Up, Vp;
UINT32 Up48, Up475;
UINT32 Vp403, Vp120;
BYTE* pRGB = pDst;
UINT32 nWidth, nHeight;
UINT32 lastRow, lastCol;
@ -231,73 +411,22 @@ static pstatus_t general_YUV420ToRGB_8u_P3AC4R(
U = *pU++;
V = *pV++;
Up = U - 128;
Vp = V - 128;
Up48 = 48 * Up;
Up475 = 475 * Up;
Vp403 = Vp * 403;
Vp120 = Vp * 120;
/* 1st pixel */
Y = *pY++;
Yp = Y << 8;
R = (Yp + Vp403) >> 8;
G = (Yp - Up48 - Vp120) >> 8;
B = (Yp + Up475) >> 8;
if (R < 0)
R = 0;
else if (R > 255)
R = 255;
if (G < 0)
G = 0;
else if (G > 255)
G = 255;
if (B < 0)
B = 0;
else if (B > 255)
B = 255;
*pRGB++ = (BYTE) B;
*pRGB++ = (BYTE) G;
*pRGB++ = (BYTE) R;
*pRGB++ = YUV2B(Y, U, V);
*pRGB++ = YUV2G(Y, U, V);
*pRGB++ = YUV2R(Y, U, V);
*pRGB++ = 0xFF;
/* 2nd pixel */
if (!(lastCol & 0x02))
{
Y = *pY++;
Yp = Y << 8;
R = (Yp + Vp403) >> 8;
G = (Yp - Up48 - Vp120) >> 8;
B = (Yp + Up475) >> 8;
if (R < 0)
R = 0;
else if (R > 255)
R = 255;
if (G < 0)
G = 0;
else if (G > 255)
G = 255;
if (B < 0)
B = 0;
else if (B > 255)
B = 255;
*pRGB++ = (BYTE) B;
*pRGB++ = (BYTE) G;
*pRGB++ = (BYTE) R;
*pRGB++ = YUV2B(Y, U, V);
*pRGB++ = YUV2G(Y, U, V);
*pRGB++ = YUV2R(Y, U, V);
*pRGB++ = 0xFF;
}
else
@ -313,6 +442,9 @@ static pstatus_t general_YUV420ToRGB_8u_P3AC4R(
pV -= halfWidth;
pRGB += dstPad;
if (lastRow & 0x02)
break;
for (x = 0; x < halfWidth; )
{
if (++x == halfWidth)
@ -321,73 +453,22 @@ static pstatus_t general_YUV420ToRGB_8u_P3AC4R(
U = *pU++;
V = *pV++;
Up = U - 128;
Vp = V - 128;
Up48 = 48 * Up;
Up475 = 475 * Up;
Vp403 = Vp * 403;
Vp120 = Vp * 120;
/* 3rd pixel */
Y = *pY++;
Yp = Y << 8;
R = (Yp + Vp403) >> 8;
G = (Yp - Up48 - Vp120) >> 8;
B = (Yp + Up475) >> 8;
if (R < 0)
R = 0;
else if (R > 255)
R = 255;
if (G < 0)
G = 0;
else if (G > 255)
G = 255;
if (B < 0)
B = 0;
else if (B > 255)
B = 255;
*pRGB++ = (BYTE) B;
*pRGB++ = (BYTE) G;
*pRGB++ = (BYTE) R;
*pRGB++ = YUV2B(Y, U, V);
*pRGB++ = YUV2G(Y, U, V);
*pRGB++ = YUV2R(Y, U, V);
*pRGB++ = 0xFF;
/* 4th pixel */
if (!(lastCol & 0x02))
{
Y = *pY++;
Yp = Y << 8;
R = (Yp + Vp403) >> 8;
G = (Yp - Up48 - Vp120) >> 8;
B = (Yp + Up475) >> 8;
if (R < 0)
R = 0;
else if (R > 255)
R = 255;
if (G < 0)
G = 0;
else if (G > 255)
G = 255;
if (B < 0)
B = 0;
else if (B > 255)
B = 255;
*pRGB++ = (BYTE) B;
*pRGB++ = (BYTE) G;
*pRGB++ = (BYTE) R;
*pRGB++ = YUV2B(Y, U, V);
*pRGB++ = YUV2G(Y, U, V);
*pRGB++ = YUV2R(Y, U, V);
*pRGB++ = 0xFF;
}
else
@ -412,9 +493,29 @@ static pstatus_t general_YUV420ToRGB_8u_P3AC4R(
* | U | = ( | -29 -99 128 | | G | ) >> 8 + | 128 |
* | V | ( | 128 -116 -12 | | B | ) | 128 |
*/
#define RGB2Y(R, G, B) CLIP(( ( 54 * (R) + 183 * (G) + 18 * (B) + 128) >> 8) + 0)
#define RGB2U(R, G, B) CLIP(( ( -29 * (R) - 99 * (G) + 128 * (B) + 128) >> 8) + 128)
#define RGB2V(R, G, B) CLIP(( ( 128 * (R) - 116 * (G) - 12 * (B) + 128) >> 8) + 128)
static INLINE BYTE RGB2Y(INT32 R, INT32 G, INT32 B)
{
const INT32 y = ( 54L * (R) + 183L * (G) + 18L * (B));
const INT32 y8 = (y >> 8L);
return CLIP(y8);
}
static INLINE BYTE RGB2U(INT32 R, INT32 G, INT32 B)
{
const INT32 u = ( -29L * (R) - 99L * (G) + 128L * (B));
const INT32 u8 = (u >> 8L) + 128L;
return CLIP(u8);
}
static INLINE BYTE RGB2V(INT32 R, INT32 G, INT32 B)
{
const INT32 v = ( 128L * (R) - 116L * (G) - 12L * (B));
const INT32 v8 = (v >> 8L) + 128L;
return CLIP(v8);
}
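/* Worked example (illustration only): pure white R = G = B = 255 gives
 *   RGB2Y = ((54 + 183 + 18) * 255) >> 8 = (255 * 255) >> 8 = 254
 *   RGB2U = RGB2V = 128   (the U and V coefficient rows sum to zero)
 * so converting back with YUV2R/G/B yields 254 instead of 255.  Rounding of
 * this kind means an RGB -> YUV -> RGB round trip is only accurate to within
 * a few values, which is the tolerance the YUV unit test accepts via
 * similar(). */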
static pstatus_t general_RGBToYUV444_8u_P3AC4R(
const BYTE* pSrc, const UINT32 srcStep,
@ -428,18 +529,17 @@ static pstatus_t general_RGBToYUV444_8u_P3AC4R(
for (y=0; y<nHeight; y++)
{
const BYTE* pR = pSrc + y * srcStep * 4;
const BYTE* pG = pSrc + y * srcStep * 4 + 1;
const BYTE* pB = pSrc + y * srcStep * 4 + 2;
const BYTE* pRGB = pSrc + y * srcStep;
BYTE* pY = pDst[0] + y * dstStep[0];
BYTE* pU = pDst[1] + y * dstStep[1];
BYTE* pV = pDst[2] + y * dstStep[2];
for (x=0; x<nWidth; x++)
{
const BYTE R = pR[x];
const BYTE G = pG[x];
const BYTE B = pB[x];
const BYTE B = pRGB[4*x+0];
const BYTE G = pRGB[4*x+1];
const BYTE R = pRGB[4*x+2];
pY[x] = RGB2Y(R, G, B);
pU[x] = RGB2U(R, G, B);
@ -455,98 +555,75 @@ static pstatus_t general_RGBToYUV420_8u_P3AC4R(
BYTE* pDst[3], UINT32 dstStep[3], const prim_size_t* roi)
{
UINT32 x, y;
UINT32 dstPad[3];
UINT32 halfWidth;
UINT32 halfHeight;
BYTE* pY;
BYTE* pU;
BYTE* pV;
UINT32 Y, U, V;
UINT32 R, G, B;
UINT32 Ra, Ga, Ba;
const BYTE* pRGB;
UINT32 nWidth, nHeight;
pU = pDst[1];
pV = pDst[2];
nWidth = roi->width + roi->width % 2;
nHeight = roi->height + roi->height % 2;
nWidth = (roi->width + 1) & ~0x0001;
nHeight = (roi->height + 1) & ~0x0001;
halfWidth = nWidth / 2;
halfHeight = nHeight / 2;
dstPad[0] = (dstStep[0] - nWidth);
dstPad[1] = (dstStep[1] - halfWidth);
dstPad[2] = (dstStep[2] - halfWidth);
halfWidth = (nWidth + nWidth % 2) / 2;
halfHeight = (nHeight + nHeight % 2) / 2;
for (y = 0; y < halfHeight; y++)
{
const UINT32 val2y = (y * 2);
const UINT32 val2y1 = val2y + 1;
const BYTE* pRGB = pSrc + val2y * srcStep;
const BYTE* pRGB1 = pSrc + val2y1 * srcStep;
BYTE* pY = pDst[0] + val2y * dstStep[0];
BYTE* pY1 = pDst[0] + val2y1 * dstStep[0];
BYTE* pU = pDst[1] + y * dstStep[1];
BYTE* pV = pDst[2] + y * dstStep[2];
for (x = 0; x < halfWidth; x++)
{
/* 1st pixel */
pRGB = pSrc + y * 2 * srcStep + x * 2 * 4;
pY = pDst[0] + y * 2 * dstStep[0] + x * 2;
Ba = B = pRGB[0];
Ga = G = pRGB[1];
Ra = R = pRGB[2];
Y = (54 * R + 183 * G + 18 * B) >> 8;
pY[0] = (BYTE) Y;
INT32 R, G, B;
INT32 Ra, Ga, Ba;
const UINT32 val2x = (x * 2);
const UINT32 val2x1 = val2x + 1;
if (x * 2 + 1 < roi->width)
/* 1st pixel */
Ba = B = pRGB[val2x * 4 + 0];
Ga = G = pRGB[val2x * 4 + 1];
Ra = R = pRGB[val2x * 4 + 2];
pY[val2x] = RGB2Y(R, G, B);
if (val2x1 < nWidth)
{
/* 2nd pixel */
Ba += B = pRGB[4];
Ga += G = pRGB[5];
Ra += R = pRGB[6];
Y = (54 * R + 183 * G + 18 * B) >> 8;
pY[1] = (BYTE) Y;
Ba += B = pRGB[val2x * 4 + 4];
Ga += G = pRGB[val2x * 4 + 5];
Ra += R = pRGB[val2x * 4 + 6];
pY[val2x1] = RGB2Y(R, G, B);
}
if (y * 2 + 1 < roi->height)
if (val2y1 < nHeight)
{
/* 3rd pixel */
pRGB += srcStep;
pY += dstStep[0];
Ba += B = pRGB[0];
Ga += G = pRGB[1];
Ra += R = pRGB[2];
Y = (54 * R + 183 * G + 18 * B) >> 8;
pY[0] = (BYTE) Y;
Ba += B = pRGB1[val2x * 4 + 0];
Ga += G = pRGB1[val2x * 4 + 1];
Ra += R = pRGB1[val2x * 4 + 2];
pY1[val2x] = RGB2Y(R, G, B);
if (x * 2 + 1 < roi->width)
if (val2x1 < nWidth)
{
/* 4th pixel */
Ba += B = pRGB[4];
Ga += G = pRGB[5];
Ra += R = pRGB[6];
Y = (54 * R + 183 * G + 18 * B) >> 8;
pY[1] = (BYTE) Y;
Ba += B = pRGB1[val2x * 4 + 4];
Ga += G = pRGB1[val2x * 4 + 5];
Ra += R = pRGB1[val2x * 4 + 6];
pY1[val2x1] = RGB2Y(R, G, B);
}
}
/* U */
Ba >>= 2;
Ga >>= 2;
Ra >>= 2;
U = ((-29 * Ra - 99 * Ga + 128 * Ba) >> 8) + 128;
if (U < 0)
U = 0;
else if (U > 255)
U = 255;
*pU++ = (BYTE) U;
/* V */
V = ((128 * Ra - 116 * Ga - 12 * Ba) >> 8) + 128;
if (V < 0)
V = 0;
else if (V > 255)
V = 255;
*pV++ = (BYTE) V;
pU[x] = RGB2U(Ra, Ga, Ba);
pV[x] = RGB2V(Ra, Ga, Ba);
}
pU += dstPad[1];
pV += dstPad[2];
}
return PRIMITIVES_SUCCESS;
@ -555,10 +632,11 @@ static pstatus_t general_RGBToYUV420_8u_P3AC4R(
void primitives_init_YUV(primitives_t* prims)
{
prims->YUV420ToRGB_8u_P3AC4R = general_YUV420ToRGB_8u_P3AC4R;
prims->YUV444ToRGB_8u_P3AC4R = general_YUV444ToRGB_8u_P3AC4R;
prims->RGBToYUV420_8u_P3AC4R = general_RGBToYUV420_8u_P3AC4R;
prims->RGBToYUV444_8u_P3AC4R = general_RGBToYUV444_8u_P3AC4R;
prims->YUV420CombineToYUV444 = general_YUV420CombineToYUV444;
prims->YUV444ToRGB_8u_P3AC4R = general_YUV444ToRGB_8u_P3AC4R;
prims->YUV444SplitToYUV420 = general_YUV444SplitToYUV420;
primitives_init_YUV_opt(prims);
}

View File

@ -14,6 +14,7 @@ set(${MODULE_PREFIX}_TESTS
TestPrimitivesSet.c
TestPrimitivesShift.c
TestPrimitivesSign.c
TestPrimitivesYUV.c
TestPrimitivesYCbCr.c
TestPrimitivesYCoCg.c)

View File

@ -0,0 +1,427 @@
#include "prim_test.h"
#include <winpr/wlog.h>
#include <winpr/crypto.h>
#include <freerdp/primitives.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#define TAG __FILE__
/* YUV to RGB conversion is lossy, so values that differ
 * by no more than 2 are treated as equal. */
static BOOL similar(const BYTE* src, const BYTE* dst, size_t size)
{
size_t x;
for (x=0; x<size; x++)
{
volatile double val1 = (double)src[x];
volatile double val2 = (double)dst[x];
volatile double diff = val1 - val2;
if (fabs(diff) > 2.0)
{
fprintf(stderr, "%zu %02X : %02X diff=%lf\n",
x, (unsigned) src[x], (unsigned) dst[x], diff);
return FALSE;
}
}
return TRUE;
}
static void get_size(UINT32* width, UINT32* height)
{
winpr_RAND((BYTE*)width, sizeof(*width));
winpr_RAND((BYTE*)height, sizeof(*height));
// TODO: Algorithm only works on even resolutions...
*width = (*width % 4000) << 1;
*height = (*height % 4000) << 1;
}
static BOOL check_padding(const BYTE* psrc, size_t size, size_t padding, const char* buffer)
{
size_t x;
BOOL rc = TRUE;
const BYTE* src;
const BYTE* esrc;
size_t halfPad = (padding+1)/2;
if (!psrc)
return FALSE;
src = psrc - halfPad;
esrc = src + size + halfPad;
for (x=0; x<halfPad; x++)
{
const BYTE s = *src++;
const BYTE d = *esrc++;
if (s != 'A')
{
size_t start = x;
while ((x < halfPad) && (*src++ != 'A'))
x++;
fprintf(stderr, "Buffer underflow detected %02X != %02X %s [%zu-%zu]\n",
s, 'A', buffer, start, x);
return FALSE;
}
if (d != 'A')
{
size_t start = x;
while ((x < halfPad) && (*esrc++ != 'A'))
x++;
fprintf(stderr, "Buffer overflow detected %02X != %02X %s [%zu-%zu]\n",
d, 'A', buffer, start, x);
return FALSE;
}
}
return rc;
}
static void* set_padding(size_t size, size_t padding)
{
size_t halfPad = (padding + 1) / 2;
BYTE* psrc;
BYTE* src = calloc(1, size + 2 * halfPad);
if (!src)
return NULL;
memset(&src[0], 'A', halfPad);
memset(&src[halfPad+size], 'A', halfPad);
psrc = &src[halfPad];
if (!check_padding(psrc, size, padding, "init"))
{
free (src);
return NULL;
}
return psrc;
}
static void free_padding(void* src, size_t padding)
{
BYTE* ptr;
if (!src)
return;
ptr = ((BYTE*)src) - (padding+1)/2;
free(ptr);
}
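/* Typical use of the three helpers above (sketch only):
 *
 *   BYTE* buf = set_padding(size, padding);       // guarded allocation
 *   ...run the primitive under test on buf...
 *   if (!check_padding(buf, size, padding, "buf"))
 *       ...a write into the guard bytes around buf was detected...
 *   free_padding(buf, padding);                   // releases guards + buffer
 *
 * Every buffer in the tests below is allocated and verified this way so that
 * out-of-bounds writes by a primitive are caught. */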
/* Create 2 pseudo YUV420 frames of the same size.
 * Combine them and check whether the data ends up at the expected positions. */
static BOOL TestPrimitiveYUVCombine(void)
{
UINT32 x, y, i;
UINT32 awidth, aheight;
BOOL rc = FALSE;
BYTE* luma[3] = { 0 };
BYTE* chroma[3] = { 0 };
BYTE* yuv[3] = { 0 };
BYTE* pmain[3] = { 0 };
BYTE* paux[3] = { 0 };
UINT32 lumaStride[3];
UINT32 chromaStride[3];
UINT32 yuvStride[3];
size_t padding = 10000;
prim_size_t roi;
primitives_t* prims = primitives_get();
get_size(&roi.width, &roi.height);
awidth = roi.width + 16 - roi.width % 16;
aheight = roi.height + 16 - roi.height % 16;
fprintf(stderr, "Running YUVCombine on frame size %lux%lu [%lux%lu]\n",
roi.width, roi.height, awidth, aheight);
if (!prims || !prims->YUV420CombineToYUV444)
goto fail;
for (x=0; x<3; x++)
{
size_t halfStride = ((x>0)?awidth/2:awidth);
size_t size = aheight * awidth;
size_t halfSize = ((x>0)?halfStride*aheight/2:awidth*aheight);
yuvStride[x] = awidth;
if (!(yuv[x] = set_padding(size, padding)))
goto fail;
lumaStride[x] = halfStride;
if (!(luma[x] = set_padding(halfSize, padding)))
goto fail;
if (!(pmain[x] = set_padding(halfSize, padding)))
goto fail;
chromaStride[x] = halfStride;
if (!(chroma[x] = set_padding(halfSize, padding)))
goto fail;
if (!(paux[x] = set_padding(halfSize, padding)))
goto fail;
memset(luma[x], 0xAB + 3*x, halfSize);
memset(chroma[x], 0x80 + 2*x, halfSize);
if (!check_padding(luma[x], halfSize, padding, "luma"))
goto fail;
if (!check_padding(chroma[x], halfSize, padding, "chroma"))
goto fail;
if (!check_padding(pmain[x], halfSize, padding, "main"))
goto fail;
if (!check_padding(paux[x], halfSize, padding, "aux"))
goto fail;
if (!check_padding(yuv[x], size, padding, "yuv"))
goto fail;
}
if (prims->YUV420CombineToYUV444((const BYTE**)luma, lumaStride,
(const BYTE**) chroma, chromaStride,
yuv, yuvStride, &roi) != PRIMITIVES_SUCCESS)
goto fail;
for (x=0; x<3; x++)
{
size_t halfStride = ((x>0)?awidth/2:awidth);
size_t size = aheight * awidth;
size_t halfSize = ((x>0)?halfStride*aheight/2:awidth*aheight);
if (!check_padding(luma[x], halfSize, padding, "luma"))
goto fail;
if (!check_padding(chroma[x], halfSize, padding, "chroma"))
goto fail;
if (!check_padding(yuv[x], size, padding, "yuv"))
goto fail;
}
if (prims->YUV444SplitToYUV420(yuv, yuvStride, pmain, lumaStride,
paux, chromaStride, &roi) != PRIMITIVES_SUCCESS)
goto fail;
for (x=0; x<3; x++)
{
size_t halfStride = ((x>0)?awidth/2:awidth);
size_t size = aheight * awidth;
size_t halfSize = ((x>0)?halfStride*aheight/2:awidth*aheight);
if (!check_padding(pmain[x], halfSize, padding, "main"))
goto fail;
if (!check_padding(paux[x], halfSize, padding, "aux"))
goto fail;
if (!check_padding(yuv[x], size, padding, "yuv"))
goto fail;
}
for (i=0; i<3; i++)
{
for (y=0; y<roi.height; y++)
{
UINT32 w = roi.width;
UINT32 lstride = lumaStride[i];
UINT32 cstride = chromaStride[i];
if (i > 0)
{
w = (roi.width+3) / 4;
if (y >= (roi.height + 1) / 2)
continue;
}
if (!similar(luma[i] + y * lstride,
pmain[i] + y * lstride,
w))
goto fail;
/* Lines of the destination Y plane need to be ignored
 * if the height is not a multiple of 16, because the
 * U and V planes are packed into 8-line stripes. */
if (i == 0)
{
/* TODO: This check is not perfect; it does not
 * cover the last V lines packed into the Y
 * frame. */
UINT32 rem = roi.height % 16;
if (y > roi.height - rem)
continue;
}
if (!similar(chroma[i] + y * cstride,
paux[i] + y * cstride,
w))
goto fail;
}
}
rc = TRUE;
fail:
for (x=0; x<3; x++)
{
free_padding(yuv[x], padding);
free_padding(luma[x], padding);
free_padding(chroma[x], padding);
free_padding(pmain[x], padding);
free_padding(paux[x], padding);
}
return rc;
}
static BOOL TestPrimitiveYUV(BOOL use444)
{
BOOL rc = FALSE;
UINT32 x, y;
UINT32 awidth, aheight;
BYTE* yuv[3] = {0};
UINT32 yuv_step[3];
prim_size_t roi;
BYTE* rgb = NULL;
BYTE* rgb_dst = NULL;
size_t size;
primitives_t* prims = primitives_get();
size_t uvsize, uvwidth;
size_t padding = 10000;
size_t stride;
get_size(&roi.width, &roi.height);
/* Buffers need to be 16x16 aligned. */
awidth = roi.width + 16 - roi.width % 16;
aheight = roi.height + 16 - roi.height % 16;
stride = awidth * sizeof(UINT32);
size = awidth * aheight;
if (use444)
{
uvwidth = awidth;
uvsize = size;
if (!prims || !prims->RGBToYUV444_8u_P3AC4R || !prims->YUV444ToRGB_8u_P3AC4R)
return FALSE;
}
else
{
uvwidth = (awidth + 1) / 2;
uvsize = (aheight + 1) / 2 * uvwidth;
if (!prims || !prims->RGBToYUV420_8u_P3AC4R || !prims->YUV420ToRGB_8u_P3AC4R)
return FALSE;
}
fprintf(stderr, "Running AVC%s on frame size %lux%lu\n", use444 ? "444" : "420",
roi.width, roi.height);
/* Test RGB to YUV444 conversion and vice versa */
if (!(rgb = set_padding(size * sizeof(UINT32), padding)))
goto fail;
if (!(rgb_dst = set_padding(size * sizeof(UINT32), padding)))
goto fail;
if (!(yuv[0] = set_padding(size, padding)))
goto fail;
if (!(yuv[1] = set_padding(uvsize, padding)))
goto fail;
if (!(yuv[2] = set_padding(uvsize, padding)))
goto fail;
for (y=0; y<roi.height; y++)
{
BYTE* line = &rgb[y*stride];
for (x=0; x<roi.width; x++)
{
line[x*4+0] = 0x81;
line[x*4+1] = 0x33;
line[x*4+2] = 0xAB;
line[x*4+3] = 0xFF;
}
}
yuv_step[0] = awidth;
yuv_step[1] = uvwidth;
yuv_step[2] = uvwidth;
if (use444)
{
if (prims->RGBToYUV444_8u_P3AC4R(rgb, stride, yuv, yuv_step, &roi) != PRIMITIVES_SUCCESS)
goto fail;
}
else if (prims->RGBToYUV420_8u_P3AC4R(rgb, stride, yuv, yuv_step, &roi) != PRIMITIVES_SUCCESS)
goto fail;
if (!check_padding(rgb, size * sizeof(UINT32), padding, "rgb"))
goto fail;
if ((!check_padding(yuv[0], size, padding, "Y")) ||
(!check_padding(yuv[1], uvsize, padding, "U")) ||
(!check_padding(yuv[2], uvsize, padding, "V")))
goto fail;
if (use444)
{
if (prims->YUV444ToRGB_8u_P3AC4R((const BYTE**)yuv, yuv_step, rgb_dst, stride, &roi) != PRIMITIVES_SUCCESS)
goto fail;
}
else if (prims->YUV420ToRGB_8u_P3AC4R((const BYTE**)yuv, yuv_step, rgb_dst, stride, &roi) != PRIMITIVES_SUCCESS)
goto fail;
if (!check_padding(rgb_dst, size * sizeof(UINT32), padding, "rgb dst"))
goto fail;
if ((!check_padding(yuv[0], size, padding, "Y")) ||
(!check_padding(yuv[1], uvsize, padding, "U")) ||
(!check_padding(yuv[2], uvsize, padding, "V")))
goto fail;
for (y=0; y<roi.height; y++)
{
BYTE* srgb = &rgb[y*stride];
BYTE* drgb = &rgb_dst[y*stride];
if (!similar(srgb, drgb, roi.width*sizeof(UINT32)))
goto fail;
}
rc = TRUE;
fail:
free_padding (rgb, padding);
free_padding (rgb_dst, padding);
free_padding (yuv[0], padding);
free_padding (yuv[1], padding);
free_padding (yuv[2], padding);
return rc;
}
int TestPrimitivesYUV(int argc, char* argv[])
{
UINT32 x;
int rc = -1;
primitives_init();
for (x=0; x<10; x++)
{
/* TODO: This test fails on value comparison,
* there seems to be some issue left with encoder / decoder pass.
if (!TestPrimitiveYUV(FALSE))
goto end;
*/
if (!TestPrimitiveYUV(TRUE))
goto end;
if (!TestPrimitiveYUVCombine())
goto end;
}
rc = 0;
end:
primitives_deinit();
return rc;
}