Update avcodec to 20080825

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27553 a95241bf-73f2-0310-859d-f6bbb57e9c96
David McPaul 2008-09-15 14:07:20 +00:00
parent 221fe9f455
commit 0978528eb1
7 changed files with 620 additions and 516 deletions

View File

@ -2,30 +2,30 @@
* Wing Commander/Xan Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
* This library is free software; you can redistribute it and/or
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file xan.c
* Xan video decoder for Wing Commander III & IV computer games
* Xan video decoder for Wing Commander III computer game
* by Mario Brito (mbrito@student.dei.uc.pt)
* and Mike Melanson (melanson@pcisys.net)
*
* The xan_wc3 decoder outputs the following colorspaces natively:
* PAL8 (default), RGB555, RGB565, RGB24, BGR24, RGBA32, YUV444P
* The xan_wc3 decoder outputs PAL8 data.
*/
#include <stdio.h>
@ -33,126 +33,61 @@
#include <string.h>
#include <unistd.h>
#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
#define PALETTE_COUNT 256
#define PALETTE_CONTROL_SIZE ((256 * 3) + 1)
typedef struct XanContext {
AVCodecContext *avctx;
DSPContext dsp;
AVFrame last_frame;
AVFrame current_frame;
unsigned char *buf;
const unsigned char *buf;
int size;
unsigned char palette[PALETTE_COUNT * 4];
/* scratch space */
unsigned char *buffer1;
int buffer1_size;
unsigned char *buffer2;
int buffer2_size;
int frame_size;
} XanContext;
#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
#define LE_32(x) ((((uint8_t*)(x))[3] << 24) | \
(((uint8_t*)(x))[2] << 16) | \
(((uint8_t*)(x))[1] << 8) | \
((uint8_t*)(x))[0])
/* RGB -> YUV conversion stuff */
#define SCALEFACTOR 65536
#define CENTERSAMPLE 128
#define COMPUTE_Y(r, g, b) \
(unsigned char) \
((y_r_table[r] + y_g_table[g] + y_b_table[b]) / SCALEFACTOR)
#define COMPUTE_U(r, g, b) \
(unsigned char) \
((u_r_table[r] + u_g_table[g] + u_b_table[b]) / SCALEFACTOR + CENTERSAMPLE)
#define COMPUTE_V(r, g, b) \
(unsigned char) \
((v_r_table[r] + v_g_table[g] + v_b_table[b]) / SCALEFACTOR + CENTERSAMPLE)
#define Y_R (SCALEFACTOR * 0.29900)
#define Y_G (SCALEFACTOR * 0.58700)
#define Y_B (SCALEFACTOR * 0.11400)
#define U_R (SCALEFACTOR * -0.16874)
#define U_G (SCALEFACTOR * -0.33126)
#define U_B (SCALEFACTOR * 0.50000)
#define V_R (SCALEFACTOR * 0.50000)
#define V_G (SCALEFACTOR * -0.41869)
#define V_B (SCALEFACTOR * -0.08131)
/*
* Precalculate all of the YUV tables since it requires fewer than
* 10 kilobytes to store them.
*/
static int y_r_table[256];
static int y_g_table[256];
static int y_b_table[256];
static int u_r_table[256];
static int u_g_table[256];
static int u_b_table[256];
static int v_r_table[256];
static int v_g_table[256];
static int v_b_table[256];
static int xan_decode_init(AVCodecContext *avctx)
static av_cold int xan_decode_init(AVCodecContext *avctx)
{
XanContext *s = avctx->priv_data;
int i;
s->avctx = avctx;
s->frame_size = 0;
if ((avctx->codec->id == CODEC_ID_XAN_WC3) &&
if ((avctx->codec->id == CODEC_ID_XAN_WC3) &&
(s->avctx->palctrl == NULL)) {
av_log(avctx, AV_LOG_ERROR, " WC3 Xan video: palette expected.\n");
return -1;
}
avctx->pix_fmt = PIX_FMT_PAL8;
avctx->has_b_frames = 0;
dsputil_init(&s->dsp, avctx);
/* initialize the RGB -> YUV tables */
for (i = 0; i < 256; i++) {
y_r_table[i] = Y_R * i;
y_g_table[i] = Y_G * i;
y_b_table[i] = Y_B * i;
if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
return -1;
u_r_table[i] = U_R * i;
u_g_table[i] = U_G * i;
u_b_table[i] = U_B * i;
v_r_table[i] = V_R * i;
v_g_table[i] = V_G * i;
v_b_table[i] = V_B * i;
}
s->buffer1 = av_malloc(avctx->width * avctx->height);
s->buffer2 = av_malloc(avctx->width * avctx->height);
s->buffer1_size = avctx->width * avctx->height;
s->buffer1 = av_malloc(s->buffer1_size);
s->buffer2_size = avctx->width * avctx->height;
s->buffer2 = av_malloc(s->buffer2_size);
if (!s->buffer1 || !s->buffer2)
return -1;
return 0;
}
/* This function is used in lieu of memcpy(). This decoder can not use
/* This function is used in lieu of memcpy(). This decoder cannot use
* memcpy because the memory locations often overlap and
* memcpy doesn't like that; it's not uncommon, for example, for
* dest = src+1, to turn byte A into pattern AAAAAAAA.
* This was originally repz movsb in Intel x86 ASM. */
static inline void bytecopy(unsigned char *dest, unsigned char *src, int count)
static inline void bytecopy(unsigned char *dest, const unsigned char *src, int count)
{
int i;
@ -160,13 +95,15 @@ static inline void bytecopy(unsigned char *dest, unsigned char *src, int count)
dest[i] = src[i];
}
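A minimal standalone sketch of the overlap behaviour the comment above describes (an editor's illustration, not part of the patch; the demo buffer and function name are made up). With dest = src + 1, a strictly forward byte-by-byte copy rereads bytes it has just written, so a single 'A' is smeared across the whole run - exactly the effect the decoder relies on and which memcpy() does not guarantee for overlapping regions.

/* Illustration only -- not part of the FFmpeg sources above. */
#include <stdio.h>

static void bytecopy_demo(unsigned char *dest, const unsigned char *src, int count)
{
    int i;
    for (i = 0; i < count; i++)   /* strictly forward, one byte at a time */
        dest[i] = src[i];
}

int main(void)
{
    unsigned char buf[9] = { 'A', 'x', 'x', 'x', 'x', 'x', 'x', 'x', 0 };

    /* dest = src + 1: every byte copied was itself written one step earlier */
    bytecopy_demo(buf + 1, buf, 7);
    printf("%s\n", buf);          /* prints "AAAAAAAA" */
    return 0;
}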
static int xan_huffman_decode(unsigned char *dest, unsigned char *src)
static int xan_huffman_decode(unsigned char *dest, const unsigned char *src,
int dest_len)
{
unsigned char byte = *src++;
unsigned char ival = byte + 0x16;
unsigned char * ptr = src + byte*2;
const unsigned char * ptr = src + byte*2;
unsigned char val = ival;
int counter = 0;
unsigned char *dest_end = dest + dest_len;
unsigned char bits = *ptr++;
@ -177,6 +114,8 @@ static int xan_huffman_decode(unsigned char *dest, unsigned char *src)
val = src[val - 0x17];
if ( val < 0x16 ) {
if (dest + 1 > dest_end)
return 0;
*dest++ = val;
val = ival;
}
@ -190,12 +129,13 @@ static int xan_huffman_decode(unsigned char *dest, unsigned char *src)
return 0;
}
static void xan_unpack(unsigned char *dest, unsigned char *src)
static void xan_unpack(unsigned char *dest, const unsigned char *src, int dest_len)
{
unsigned char opcode;
int size;
int offset;
int byte1, byte2, byte3;
unsigned char *dest_end = dest + dest_len;
for (;;) {
opcode = *src++;
@ -205,9 +145,13 @@ static void xan_unpack(unsigned char *dest, unsigned char *src)
offset = *src++;
size = opcode & 3;
if (dest + size > dest_end)
return;
bytecopy(dest, src, size); dest += size; src += size;
size = ((opcode & 0x1c) >> 2) + 3;
if (dest + size > dest_end)
return;
bytecopy (dest, dest - (((opcode & 0x60) << 3) + offset + 1), size);
dest += size;
@ -217,9 +161,13 @@ static void xan_unpack(unsigned char *dest, unsigned char *src)
byte2 = *src++;
size = byte1 >> 6;
if (dest + size > dest_end)
return;
bytecopy (dest, src, size); dest += size; src += size;
size = (opcode & 0x3f) + 4;
if (dest + size > dest_end)
return;
bytecopy (dest, dest - (((byte1 & 0x3f) << 8) + byte2 + 1), size);
dest += size;
@ -230,9 +178,13 @@ static void xan_unpack(unsigned char *dest, unsigned char *src)
byte3 = *src++;
size = opcode & 3;
if (dest + size > dest_end)
return;
bytecopy (dest, src, size); dest += size; src += size;
size = byte3 + 5 + ((opcode & 0xc) << 6);
if (dest + size > dest_end)
return;
bytecopy (dest,
dest - ((((opcode & 0x10) >> 4) << 0x10) + 1 + (byte1 << 8) + byte2),
size);
@ -243,6 +195,8 @@ static void xan_unpack(unsigned char *dest, unsigned char *src)
if (size > 0x70)
break;
if (dest + size > dest_end)
return;
bytecopy (dest, src, size); dest += size; src += size;
}
}
@ -251,232 +205,36 @@ static void xan_unpack(unsigned char *dest, unsigned char *src)
bytecopy(dest, src, size); dest += size; src += size;
}
static void inline xan_wc3_build_palette(XanContext *s,
unsigned int *palette_data)
{
int i;
unsigned char r, g, b;
unsigned short *palette16;
unsigned int *palette32;
unsigned int pal_elem;
/* transform the palette passed through the palette control structure
* into the necessary internal format depending on colorspace */
switch (s->avctx->pix_fmt) {
case PIX_FMT_RGB555:
palette16 = (unsigned short *)s->palette;
for (i = 0; i < PALETTE_COUNT; i++) {
pal_elem = palette_data[i];
r = (pal_elem >> 16) & 0xff;
g = (pal_elem >> 8) & 0xff;
b = pal_elem & 0xff;
palette16[i] =
((r >> 3) << 10) |
((g >> 3) << 5) |
((b >> 3) << 0);
}
break;
case PIX_FMT_RGB565:
palette16 = (unsigned short *)s->palette;
for (i = 0; i < PALETTE_COUNT; i++) {
pal_elem = palette_data[i];
r = (pal_elem >> 16) & 0xff;
g = (pal_elem >> 8) & 0xff;
b = pal_elem & 0xff;
palette16[i] =
((r >> 3) << 11) |
((g >> 2) << 5) |
((b >> 3) << 0);
}
break;
case PIX_FMT_RGB24:
for (i = 0; i < PALETTE_COUNT; i++) {
pal_elem = palette_data[i];
r = (pal_elem >> 16) & 0xff;
g = (pal_elem >> 8) & 0xff;
b = pal_elem & 0xff;
s->palette[i * 4 + 0] = r;
s->palette[i * 4 + 1] = g;
s->palette[i * 4 + 2] = b;
}
break;
case PIX_FMT_BGR24:
for (i = 0; i < PALETTE_COUNT; i++) {
pal_elem = palette_data[i];
r = (pal_elem >> 16) & 0xff;
g = (pal_elem >> 8) & 0xff;
b = pal_elem & 0xff;
s->palette[i * 4 + 0] = b;
s->palette[i * 4 + 1] = g;
s->palette[i * 4 + 2] = r;
}
break;
case PIX_FMT_PAL8:
case PIX_FMT_RGBA32:
palette32 = (unsigned int *)s->palette;
memcpy (palette32, palette_data, PALETTE_COUNT * sizeof(unsigned int));
break;
case PIX_FMT_YUV444P:
for (i = 0; i < PALETTE_COUNT; i++) {
pal_elem = palette_data[i];
r = (pal_elem >> 16) & 0xff;
g = (pal_elem >> 8) & 0xff;
b = pal_elem & 0xff;
s->palette[i * 4 + 0] = COMPUTE_Y(r, g, b);
s->palette[i * 4 + 1] = COMPUTE_U(r, g, b);
s->palette[i * 4 + 2] = COMPUTE_V(r, g, b);
}
break;
default:
av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n");
break;
}
}
/* advance current_x variable; reset accounting variables if current_x
* moves beyond width */
#define ADVANCE_CURRENT_X() \
current_x++; \
if (current_x >= width) { \
index += line_inc; \
current_x = 0; \
}
static void inline xan_wc3_output_pixel_run(XanContext *s,
unsigned char *pixel_buffer, int x, int y, int pixel_count)
static inline void xan_wc3_output_pixel_run(XanContext *s,
const unsigned char *pixel_buffer, int x, int y, int pixel_count)
{
int stride;
int line_inc;
int index;
int current_x;
int width = s->avctx->width;
unsigned char pix;
unsigned char *palette_plane;
unsigned char *y_plane;
unsigned char *u_plane;
unsigned char *v_plane;
unsigned char *rgb_plane;
unsigned short *rgb16_plane;
unsigned short *palette16;
unsigned int *rgb32_plane;
unsigned int *palette32;
switch (s->avctx->pix_fmt) {
palette_plane = s->current_frame.data[0];
stride = s->current_frame.linesize[0];
line_inc = stride - width;
index = y * stride + x;
current_x = x;
while((pixel_count--) && (index < s->frame_size)) {
case PIX_FMT_PAL8:
palette_plane = s->current_frame.data[0];
stride = s->current_frame.linesize[0];
line_inc = stride - width;
index = y * stride + x;
current_x = x;
while(pixel_count--) {
/* don't do a memcpy() here; keyframes generally copy an entire
* frame of data and the stride needs to be accounted for */
palette_plane[index++] = *pixel_buffer++;
/* don't do a memcpy() here; keyframes generally copy an entire
* frame of data and the stride needs to be accounted for */
palette_plane[index++] = *pixel_buffer++;
ADVANCE_CURRENT_X();
current_x++;
if (current_x >= width) {
index += line_inc;
current_x = 0;
}
break;
case PIX_FMT_RGB555:
case PIX_FMT_RGB565:
rgb16_plane = (unsigned short *)s->current_frame.data[0];
palette16 = (unsigned short *)s->palette;
stride = s->current_frame.linesize[0] / 2;
line_inc = stride - width;
index = y * stride + x;
current_x = x;
while(pixel_count--) {
rgb16_plane[index++] = palette16[*pixel_buffer++];
ADVANCE_CURRENT_X();
}
break;
case PIX_FMT_RGB24:
case PIX_FMT_BGR24:
rgb_plane = s->current_frame.data[0];
stride = s->current_frame.linesize[0];
line_inc = stride - width * 3;
index = y * stride + x * 3;
current_x = x;
while(pixel_count--) {
pix = *pixel_buffer++;
rgb_plane[index++] = s->palette[pix * 4 + 0];
rgb_plane[index++] = s->palette[pix * 4 + 1];
rgb_plane[index++] = s->palette[pix * 4 + 2];
ADVANCE_CURRENT_X();
}
break;
case PIX_FMT_RGBA32:
rgb32_plane = (unsigned int *)s->current_frame.data[0];
palette32 = (unsigned int *)s->palette;
stride = s->current_frame.linesize[0] / 4;
line_inc = stride - width;
index = y * stride + x;
current_x = x;
while(pixel_count--) {
rgb32_plane[index++] = palette32[*pixel_buffer++];
ADVANCE_CURRENT_X();
}
break;
case PIX_FMT_YUV444P:
y_plane = s->current_frame.data[0];
u_plane = s->current_frame.data[1];
v_plane = s->current_frame.data[2];
stride = s->current_frame.linesize[0];
line_inc = stride - width;
index = y * stride + x;
current_x = x;
while(pixel_count--) {
pix = *pixel_buffer++;
y_plane[index] = s->palette[pix * 4 + 0];
u_plane[index] = s->palette[pix * 4 + 1];
v_plane[index] = s->palette[pix * 4 + 2];
index++;
ADVANCE_CURRENT_X();
}
break;
default:
av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n");
break;
}
}
#define ADVANCE_CURFRAME_X() \
curframe_x++; \
if (curframe_x >= width) { \
curframe_index += line_inc; \
curframe_x = 0; \
}
#define ADVANCE_PREVFRAME_X() \
prevframe_x++; \
if (prevframe_x >= width) { \
prevframe_index += line_inc; \
prevframe_x = 0; \
}
static void inline xan_wc3_copy_pixel_run(XanContext *s,
static inline void xan_wc3_copy_pixel_run(XanContext *s,
int x, int y, int pixel_count, int motion_x, int motion_y)
{
int stride;
@ -485,123 +243,31 @@ static void inline xan_wc3_copy_pixel_run(XanContext *s,
int curframe_x, prevframe_x;
int width = s->avctx->width;
unsigned char *palette_plane, *prev_palette_plane;
unsigned char *y_plane, *u_plane, *v_plane;
unsigned char *prev_y_plane, *prev_u_plane, *prev_v_plane;
unsigned char *rgb_plane, *prev_rgb_plane;
unsigned short *rgb16_plane, *prev_rgb16_plane;
unsigned int *rgb32_plane, *prev_rgb32_plane;
switch (s->avctx->pix_fmt) {
palette_plane = s->current_frame.data[0];
prev_palette_plane = s->last_frame.data[0];
stride = s->current_frame.linesize[0];
line_inc = stride - width;
curframe_index = y * stride + x;
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;
while((pixel_count--) && (curframe_index < s->frame_size)) {
case PIX_FMT_PAL8:
palette_plane = s->current_frame.data[0];
prev_palette_plane = s->last_frame.data[0];
stride = s->current_frame.linesize[0];
line_inc = stride - width;
curframe_index = y * stride + x;
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;
while(pixel_count--) {
palette_plane[curframe_index++] =
prev_palette_plane[prevframe_index++];
palette_plane[curframe_index++] =
prev_palette_plane[prevframe_index++];
ADVANCE_CURFRAME_X();
ADVANCE_PREVFRAME_X();
curframe_x++;
if (curframe_x >= width) {
curframe_index += line_inc;
curframe_x = 0;
}
break;
case PIX_FMT_RGB555:
case PIX_FMT_RGB565:
rgb16_plane = (unsigned short *)s->current_frame.data[0];
prev_rgb16_plane = (unsigned short *)s->last_frame.data[0];
stride = s->current_frame.linesize[0] / 2;
line_inc = stride - width;
curframe_index = y * stride + x;
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;
while(pixel_count--) {
rgb16_plane[curframe_index++] =
prev_rgb16_plane[prevframe_index++];
ADVANCE_CURFRAME_X();
ADVANCE_PREVFRAME_X();
prevframe_x++;
if (prevframe_x >= width) {
prevframe_index += line_inc;
prevframe_x = 0;
}
break;
case PIX_FMT_RGB24:
case PIX_FMT_BGR24:
rgb_plane = s->current_frame.data[0];
prev_rgb_plane = s->last_frame.data[0];
stride = s->current_frame.linesize[0];
line_inc = stride - width * 3;
curframe_index = y * stride + x * 3;
curframe_x = x;
prevframe_index = (y + motion_y) * stride +
(3 * (x + motion_x));
prevframe_x = x + motion_x;
while(pixel_count--) {
rgb_plane[curframe_index++] = prev_rgb_plane[prevframe_index++];
rgb_plane[curframe_index++] = prev_rgb_plane[prevframe_index++];
rgb_plane[curframe_index++] = prev_rgb_plane[prevframe_index++];
ADVANCE_CURFRAME_X();
ADVANCE_PREVFRAME_X();
}
break;
case PIX_FMT_RGBA32:
rgb32_plane = (unsigned int *)s->current_frame.data[0];
prev_rgb32_plane = (unsigned int *)s->last_frame.data[0];
stride = s->current_frame.linesize[0] / 4;
line_inc = stride - width;
curframe_index = y * stride + x;
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;
while(pixel_count--) {
rgb32_plane[curframe_index++] =
prev_rgb32_plane[prevframe_index++];
ADVANCE_CURFRAME_X();
ADVANCE_PREVFRAME_X();
}
break;
case PIX_FMT_YUV444P:
y_plane = s->current_frame.data[0];
u_plane = s->current_frame.data[1];
v_plane = s->current_frame.data[2];
prev_y_plane = s->last_frame.data[0];
prev_u_plane = s->last_frame.data[1];
prev_v_plane = s->last_frame.data[2];
stride = s->current_frame.linesize[0];
line_inc = stride - width;
curframe_index = y * stride + x;
curframe_x = x;
prevframe_index = (y + motion_y) * stride + x + motion_x;
prevframe_x = x + motion_x;
while(pixel_count--) {
y_plane[curframe_index] = prev_y_plane[prevframe_index];
u_plane[curframe_index] = prev_u_plane[prevframe_index];
v_plane[curframe_index] = prev_v_plane[prevframe_index];
curframe_index++;
ADVANCE_CURFRAME_X();
prevframe_index++;
ADVANCE_PREVFRAME_X();
}
break;
default:
av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n");
break;
}
}
@ -617,23 +283,24 @@ static void xan_wc3_decode_frame(XanContext *s) {
int x, y;
unsigned char *opcode_buffer = s->buffer1;
unsigned char *imagedata_buffer = s->buffer2;
int opcode_buffer_size = s->buffer1_size;
const unsigned char *imagedata_buffer = s->buffer2;
/* pointers to segments inside the compressed chunk */
unsigned char *huffman_segment;
unsigned char *size_segment;
unsigned char *vector_segment;
unsigned char *imagedata_segment;
const unsigned char *huffman_segment;
const unsigned char *size_segment;
const unsigned char *vector_segment;
const unsigned char *imagedata_segment;
huffman_segment = s->buf + LE_16(&s->buf[0]);
size_segment = s->buf + LE_16(&s->buf[2]);
vector_segment = s->buf + LE_16(&s->buf[4]);
imagedata_segment = s->buf + LE_16(&s->buf[6]);
huffman_segment = s->buf + AV_RL16(&s->buf[0]);
size_segment = s->buf + AV_RL16(&s->buf[2]);
vector_segment = s->buf + AV_RL16(&s->buf[4]);
imagedata_segment = s->buf + AV_RL16(&s->buf[6]);
xan_huffman_decode(opcode_buffer, huffman_segment);
xan_huffman_decode(opcode_buffer, huffman_segment, opcode_buffer_size);
if (imagedata_segment[0] == 2)
xan_unpack(imagedata_buffer, &imagedata_segment[1]);
xan_unpack(s->buffer2, &imagedata_segment[1], s->buffer2_size);
else
imagedata_buffer = &imagedata_segment[1];
@ -678,14 +345,13 @@ static void xan_wc3_decode_frame(XanContext *s) {
case 10:
case 20:
size = BE_16(&size_segment[0]);
size = AV_RB16(&size_segment[0]);
size_segment += 2;
break;
case 11:
case 21:
size = (size_segment[0] << 16) | (size_segment[1] << 8) |
size_segment[2];
size = AV_RB24(size_segment);
size_segment += 3;
break;
}
@ -731,13 +397,6 @@ static void xan_wc3_decode_frame(XanContext *s) {
}
}
}
/* for PAL8, make the palette available on the way out */
if (s->avctx->pix_fmt == PIX_FMT_PAL8) {
memcpy(s->current_frame.data[1], s->palette, PALETTE_COUNT * 4);
s->current_frame.palette_has_changed = 1;
s->avctx->palctrl->palette_changed = 0;
}
}
static void xan_wc4_decode_frame(XanContext *s) {
@ -745,20 +404,10 @@ static void xan_wc4_decode_frame(XanContext *s) {
static int xan_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
const uint8_t *buf, int buf_size)
{
XanContext *s = avctx->priv_data;
AVPaletteControl *palette_control = avctx->palctrl;
int keyframe = 0;
if (palette_control->palette_changed) {
/* load the new palette and reset the palette control */
xan_wc3_build_palette(s, palette_control->palette);
/* If pal8 we clear flag when we copy palette */
if (s->avctx->pix_fmt != PIX_FMT_PAL8)
palette_control->palette_changed = 0;
keyframe = 1;
}
if (avctx->get_buffer(avctx, &s->current_frame)) {
av_log(s->avctx, AV_LOG_ERROR, " Xan Video: get_buffer() failed\n");
@ -766,6 +415,14 @@ static int xan_decode_frame(AVCodecContext *avctx,
}
s->current_frame.reference = 3;
if (!s->frame_size)
s->frame_size = s->current_frame.linesize[0] * s->avctx->height;
palette_control->palette_changed = 0;
memcpy(s->current_frame.data[1], palette_control->palette,
AVPALETTE_SIZE);
s->current_frame.palette_has_changed = 1;
s->buf = buf;
s->size = buf_size;
@ -778,22 +435,25 @@ static int xan_decode_frame(AVCodecContext *avctx,
if (s->last_frame.data[0])
avctx->release_buffer(avctx, &s->last_frame);
/* shuffle frames */
s->last_frame = s->current_frame;
*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->current_frame;
/* shuffle frames */
FFSWAP(AVFrame, s->current_frame, s->last_frame);
/* always report that the buffer was completely consumed */
return buf_size;
}
static int xan_decode_end(AVCodecContext *avctx)
static av_cold int xan_decode_end(AVCodecContext *avctx)
{
XanContext *s = avctx->priv_data;
/* release the last frame */
avctx->release_buffer(avctx, &s->last_frame);
/* release the frames */
if (s->last_frame.data[0])
avctx->release_buffer(avctx, &s->last_frame);
if (s->current_frame.data[0])
avctx->release_buffer(avctx, &s->current_frame);
av_free(s->buffer1);
av_free(s->buffer2);
@ -811,6 +471,7 @@ AVCodec xan_wc3_decoder = {
xan_decode_end,
xan_decode_frame,
CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
};
/*

View File

@ -0,0 +1,62 @@
/*
* Copyright (C) 2007 FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xiph.h"
int ff_split_xiph_headers(uint8_t *extradata, int extradata_size,
int first_header_size, uint8_t *header_start[3],
int header_len[3])
{
int i;
if (extradata_size >= 6 && AV_RB16(extradata) == first_header_size) {
int overall_len = 6;
for (i=0; i<3; i++) {
header_len[i] = AV_RB16(extradata);
extradata += 2;
header_start[i] = extradata;
extradata += header_len[i];
if (overall_len > extradata_size - header_len[i])
return -1;
overall_len += header_len[i];
}
} else if (extradata_size >= 3 && extradata_size < INT_MAX - 0x1ff && extradata[0] == 2) {
int overall_len = 3;
extradata++;
for (i=0; i<2; i++, extradata++) {
header_len[i] = 0;
for (; overall_len < extradata_size && *extradata==0xff; extradata++) {
header_len[i] += 0xff;
overall_len += 0xff + 1;
}
header_len[i] += *extradata;
overall_len += *extradata;
if (overall_len > extradata_size)
return -1;
}
header_len[2] = extradata_size - overall_len;
header_start[0] = extradata;
header_start[1] = header_start[0] + header_len[0];
header_start[2] = header_start[1] + header_len[1];
} else {
return -1;
}
return 0;
}

View File

@ -0,0 +1,43 @@
/*
* Copyright (C) 2007 FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef FFMPEG_XIPH_H
#define FFMPEG_XIPH_H
#include "common.h"
/**
* Splits a single extradata buffer into the three headers that most
* Xiph codecs use. (e.g. Theora and Vorbis)
* Works both with Matroska's packing and lavc's packing.
*
* @param[in] extradata The single chunk that combines all three headers
* @param[in] extradata_size The size of the extradata buffer
* @param[in] first_header_size The size of the first header, used to
* differentiate between the Matroska packing and lavc packing.
* @param[out] header_start Pointers to the start of the three separate headers.
* @param[out] header_len The sizes of each of the three headers.
* @return On error a negative value is returned, on success zero.
*/
int ff_split_xiph_headers(uint8_t *extradata, int extradata_size,
int first_header_size, uint8_t *header_start[3],
int header_len[3]);
#endif /* FFMPEG_XIPH_H */
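A rough usage sketch of the API documented above (an editor's illustration, not part of the patch: the helper name is hypothetical, error handling is abbreviated, and it assumes a decoder init path where avctx->extradata is already populated). 30 is the size of the Vorbis identification header; a Theora decoder would pass 42.

/* Hypothetical caller, sketched for illustration only. */
#include "avcodec.h"
#include "xiph.h"

static int split_extradata_example(AVCodecContext *avctx,
                                   uint8_t *header_start[3], int header_len[3])
{
    if (ff_split_xiph_headers(avctx->extradata, avctx->extradata_size,
                              30 /* first_header_size */,
                              header_start, header_len) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "extradata corrupt or in an unknown packing\n");
        return -1;
    }
    /* header_start[0..2] now point inside extradata at the identification,
     * comment and setup headers; header_len[] holds their sizes. */
    return 0;
}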

View File

@ -0,0 +1,139 @@
/*
* Miro VideoXL codec
* Copyright (c) 2004 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file xl.c
* Miro VideoXL codec.
*/
#include "avcodec.h"
typedef struct VideoXLContext{
AVCodecContext *avctx;
AVFrame pic;
} VideoXLContext;
static const int xl_table[32] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 12, 15, 20, 25, 34, 46,
64, 82, 94, 103, 108, 113, 116, 119,
120, 121, 122, 123, 124, 125, 126, 127};
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
const uint8_t *buf, int buf_size)
{
VideoXLContext * const a = avctx->priv_data;
AVFrame * const p= (AVFrame*)&a->pic;
uint8_t *Y, *U, *V;
int i, j;
int stride;
uint32_t val;
int y0, y1, y2, y3, c0, c1;
if(p->data[0])
avctx->release_buffer(avctx, p);
p->reference = 0;
if(avctx->get_buffer(avctx, p) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
p->pict_type= FF_I_TYPE;
p->key_frame= 1;
Y = a->pic.data[0];
U = a->pic.data[1];
V = a->pic.data[2];
stride = avctx->width - 4;
for (i = 0; i < avctx->height; i++) {
/* lines are stored in reversed order */
buf += stride;
for (j = 0; j < avctx->width; j += 4) {
/* value is stored in LE dword with word swapped */
val = AV_RL32(buf);
buf -= 4;
val = ((val >> 16) & 0xFFFF) | ((val & 0xFFFF) << 16);
if(!j)
y0 = (val & 0x1F) << 2;
else
y0 = y3 + xl_table[val & 0x1F];
val >>= 5;
y1 = y0 + xl_table[val & 0x1F];
val >>= 5;
y2 = y1 + xl_table[val & 0x1F];
val >>= 6; /* align to word */
y3 = y2 + xl_table[val & 0x1F];
val >>= 5;
if(!j)
c0 = (val & 0x1F) << 2;
else
c0 += xl_table[val & 0x1F];
val >>= 5;
if(!j)
c1 = (val & 0x1F) << 2;
else
c1 += xl_table[val & 0x1F];
Y[j + 0] = y0 << 1;
Y[j + 1] = y1 << 1;
Y[j + 2] = y2 << 1;
Y[j + 3] = y3 << 1;
U[j >> 2] = c0 << 1;
V[j >> 2] = c1 << 1;
}
buf += avctx->width + 4;
Y += a->pic.linesize[0];
U += a->pic.linesize[1];
V += a->pic.linesize[2];
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = a->pic;
return buf_size;
}
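A standalone illustration of how one four-pixel group is laid out once decode_frame() above has swapped the two 16-bit halves (an editor's sketch, not part of the codec; the sample value is arbitrary): the low halfword carries three 5-bit luma deltas plus one pad bit, the high halfword carries the fourth luma delta and the two chroma deltas. Each 5-bit index is then mapped through xl_table and accumulated against the previous pixel.

/* Illustration only: decompose one group the same way decode_frame() does. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t val = 0x12345678;                /* arbitrary sample group */

    /* the stream stores the two 16-bit halves of the dword swapped */
    val = ((val >> 16) & 0xFFFF) | ((val & 0xFFFF) << 16);

    unsigned y0 =  val        & 0x1F;         /* bits  0-4                 */
    unsigned y1 = (val >> 5)  & 0x1F;         /* bits  5-9                 */
    unsigned y2 = (val >> 10) & 0x1F;         /* bits 10-14, bit 15 pads   */
    unsigned y3 = (val >> 16) & 0x1F;         /* bits 16-20                */
    unsigned c0 = (val >> 21) & 0x1F;         /* bits 21-25                */
    unsigned c1 = (val >> 26) & 0x1F;         /* bits 26-30, bit 31 unused */

    printf("luma deltas %u %u %u %u, chroma deltas %u %u\n",
           y0, y1, y2, y3, c0, c1);
    return 0;
}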
static av_cold int decode_init(AVCodecContext *avctx){
// VideoXLContext * const a = avctx->priv_data;
avctx->pix_fmt= PIX_FMT_YUV411P;
return 0;
}
AVCodec xl_decoder = {
"xl",
CODEC_TYPE_VIDEO,
CODEC_ID_VIXL,
sizeof(VideoXLContext),
decode_init,
NULL,
NULL,
decode_frame,
CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Miro VideoXL"),
};

View File

@ -0,0 +1,137 @@
/*
* XSUB subtitle decoder
* Copyright (c) 2007 Reimar Döffinger
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "bitstream.h"
#include "bytestream.h"
static av_cold int decode_init(AVCodecContext *avctx) {
avctx->pix_fmt = PIX_FMT_PAL8;
return 0;
}
static const uint8_t tc_offsets[9] = { 0, 1, 3, 4, 6, 7, 9, 10, 11 };
static const uint8_t tc_muls[9] = { 10, 6, 10, 6, 10, 10, 10, 10, 1 };
static uint64_t parse_timecode(const uint8_t *buf) {
int i;
int64_t ms = 0;
if (buf[2] != ':' || buf[5] != ':' || buf[8] != '.')
return AV_NOPTS_VALUE;
for (i = 0; i < sizeof(tc_offsets); i++) {
uint8_t c = buf[tc_offsets[i]] - '0';
if (c > 9) return AV_NOPTS_VALUE;
ms = (ms + c) * tc_muls[i];
}
return ms;
}
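A worked example of the Horner-style evaluation above (an editor's illustration, not from the patch): for "00:01:23.456" the digits read through tc_offsets are 0, 0, 0, 1, 2, 3, 4, 5, 6, and the running update ms = (ms + digit) * tc_muls[i] produces 0, 0, 0, 6, 80, 830, 8340, 83450 and finally 83456, i.e. 1*60000 + 23*1000 + 456 milliseconds, the unit that start_display_time and end_display_time carry.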
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
const uint8_t *buf, int buf_size) {
AVSubtitle *sub = data;
const uint8_t *buf_end = buf + buf_size;
uint8_t *bitmap;
int w, h, x, y, rlelen, i;
GetBitContext gb;
// check that at least header fits
if (buf_size < 27 + 7 * 2 + 4 * 3) {
av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
return -1;
}
// read start and end time
if (buf[0] != '[' || buf[13] != '-' || buf[26] != ']') {
av_log(avctx, AV_LOG_ERROR, "invalid time code\n");
return -1;
}
sub->start_display_time = parse_timecode(buf + 1);
sub->end_display_time = parse_timecode(buf + 14);
buf += 27;
// read header
w = bytestream_get_le16(&buf);
h = bytestream_get_le16(&buf);
if (avcodec_check_dimensions(avctx, w, h) < 0)
return -1;
x = bytestream_get_le16(&buf);
y = bytestream_get_le16(&buf);
// skip bottom right position, it gives no new information
bytestream_get_le16(&buf);
bytestream_get_le16(&buf);
rlelen = bytestream_get_le16(&buf);
// allocate sub and set values
if (!sub->rects) {
sub->rects = av_mallocz(sizeof(AVSubtitleRect));
sub->num_rects = 1;
}
av_freep(&sub->rects[0].bitmap);
sub->rects[0].x = x; sub->rects[0].y = y;
sub->rects[0].w = w; sub->rects[0].h = h;
sub->rects[0].linesize = w;
sub->rects[0].bitmap = av_malloc(w * h);
sub->rects[0].nb_colors = 4;
sub->rects[0].rgba_palette = av_malloc(sub->rects[0].nb_colors * 4);
// read palette
for (i = 0; i < sub->rects[0].nb_colors; i++)
sub->rects[0].rgba_palette[i] = bytestream_get_be24(&buf);
// make all except background (first entry) non-transparent
for (i = 1; i < sub->rects[0].nb_colors; i++)
sub->rects[0].rgba_palette[i] |= 0xff000000;
// process RLE-compressed data
rlelen = FFMIN(rlelen, buf_end - buf);
init_get_bits(&gb, buf, rlelen * 8);
bitmap = sub->rects[0].bitmap;
for (y = 0; y < h; y++) {
// interlaced: do odd lines
if (y == (h + 1) / 2) bitmap = sub->rects[0].bitmap + w;
for (x = 0; x < w; ) {
int log2 = ff_log2_tab[show_bits(&gb, 8)];
int run = get_bits(&gb, 14 - 4 * (log2 >> 1));
int color = get_bits(&gb, 2);
run = FFMIN(run, w - x);
// run length 0 means till end of row
if (!run) run = w - x;
memset(bitmap, color, run);
bitmap += run;
x += run;
}
// interlaced, skip every second line
bitmap += w;
align_get_bits(&gb);
}
*data_size = 1;
return buf_size;
}
AVCodec xsub_decoder = {
"xsub",
CODEC_TYPE_SUBTITLE,
CODEC_ID_XSUB,
0,
decode_init,
NULL,
NULL,
decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("XSUB"),
};

View File

@ -0,0 +1,75 @@
/*
* Copyright (C) 2003 Ivan Kalvachev
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef FFMPEG_XVMC_RENDER_H
#define FFMPEG_XVMC_RENDER_H
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xatom.h>
#include <X11/extensions/Xv.h>
#include <X11/extensions/Xvlib.h>
#include <X11/extensions/XvMClib.h>
//the surface should be shown, video driver manipulates this
#define MP_XVMC_STATE_DISPLAY_PENDING 1
//the surface is needed for prediction, codec manipulates this
#define MP_XVMC_STATE_PREDICTION 2
//this surface is needed for subpicture rendering
#define MP_XVMC_STATE_OSD_SOURCE 4
// 1337 IDCT MCo
#define MP_XVMC_RENDER_MAGIC 0x1DC711C0
typedef struct{
//these are not changed by the decoder!
int magic;
short * data_blocks;
XvMCMacroBlock * mv_blocks;
int total_number_of_mv_blocks;
int total_number_of_data_blocks;
int mc_type;//XVMC_MPEG1/2/4,XVMC_H263 without XVMC_IDCT
int idct;//Do we use IDCT acceleration?
int chroma_format;//420,422,444
int unsigned_intra;//+-128 for intra pictures after clip
XvMCSurface* p_surface;//pointer to rendered surface, never changed
//these are changed by decoder
//used by XvMCRenderSurface function
XvMCSurface* p_past_surface;//pointer to the past surface
XvMCSurface* p_future_surface;//pointer to the future prediction surface
unsigned int picture_structure;//top/bottom fields or frame!
unsigned int flags;//XVMC_SECOND_FIELD - 1'st or 2'd field in the sequence
unsigned int display_flags; //1,2 or 1+2 fields for XvMCPutSurface,
//these are internal communication ones
int state;//0-free, 1 Waiting to Display, 2 Waiting for prediction
int start_mv_blocks_num;//offset in the array for the current slice, updated by vo
int filled_mv_blocks_num;//processed mv block in this slice, changed by decoder
int next_free_data_block_num;//used in add_mv_block, pointer to next free block
//extensions
void * p_osd_target_surface_render;//pointer to the surface where subpicture is rendered
} xvmc_render_state_t;
#endif /* FFMPEG_XVMC_RENDER_H */

View File

@ -2,19 +2,21 @@
* XVideo Motion Compensation
* Copyright (c) 2003 Ivan Kalvachev
*
* This library is free software; you can redistribute it and/or
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <limits.h>
@ -27,14 +29,8 @@
#undef NDEBUG
#include <assert.h>
#ifdef USE_FASTMEMCPY
#include "fastmemcpy.h"
#endif
#ifdef HAVE_XVMC
//X11 includes are in the xvmc_render.h
//by replacing it with none-X one
//X11 includes are in xvmc_render.h
//by replacing it with non-X one
//XvMC emulation could be performed
#include "xvmc_render.h"
@ -42,47 +38,36 @@
//#include "xvmc_debug.h"
//set s->block
inline void XVMC_init_block(MpegEncContext *s){
void XVMC_init_block(MpegEncContext *s){
xvmc_render_state_t * render;
render = (xvmc_render_state_t*)s->current_picture.data[2];
assert(render != NULL);
if( (render == NULL) || (render->magic != MP_XVMC_RENDER_MAGIC) ){
assert(0);
return;//make sure that this is render packet
return;//make sure that this is a render packet
}
s->block =(DCTELEM *)(render->data_blocks+(render->next_free_data_block_num)*64);
}
void XVMC_pack_pblocks(MpegEncContext *s, int cbp){
int i,j;
#define numblocks 6
const int mb_block_count = 4+(1<<s->chroma_format);
j=0;
for(i=0;i<numblocks;i++){
if(cbp & (1<<(numblocks-1-i)) ){
cbp<<= 12-mb_block_count;
for(i=0; i<mb_block_count; i++){
if(cbp & (1<<11)) {
s->pblocks[i] = (short *)(&s->block[(j++)]);
}else{
s->pblocks[i] = NULL;
}
cbp+=cbp;
// printf("s->pblocks[%d]=%p ,s->block=%p cbp=%d\n",i,s->pblocks[i],s->block,cbp);
}
}
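A rough sketch of the bit walk in XVMC_pack_pblocks() above (an editor's illustration with made-up values): for 4:2:0 content mb_block_count is 6, the shift by 12 - mb_block_count parks the bit for block 0 at position 11, and cbp += cbp then advances each following block's bit into that position, one per iteration.

/* Sketch: walk a 6-bit coded-block pattern MSB-first, as the loop above does. */
#include <stdio.h>

int main(void)
{
    const int mb_block_count = 6;    /* 4:2:0: 4 luma + 2 chroma blocks    */
    int cbp = 0x2A;                  /* 101010b: blocks 0, 2 and 4 coded   */
    int i;

    cbp <<= 12 - mb_block_count;     /* bit for block 0 now at position 11 */
    for (i = 0; i < mb_block_count; i++) {
        printf("block %d: %s\n", i, (cbp & (1 << 11)) ? "coded" : "skipped");
        cbp += cbp;                  /* same as cbp <<= 1 */
    }
    return 0;
}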
static int calc_cbp(MpegEncContext *s, int blocknum){
/* compute cbp */
// for I420 bit_offset=5
int i,cbp = 0;
for(i=0; i<blocknum; i++) {
if(s->block_last_index[i] >= 0)
cbp |= 1 << (5 - i);
}
return cbp;
}
//these functions should be called on every new field or/and frame
//They should be safe if they are called few times for same field!
//These functions should be called on every new field and/or frame.
//They should be safe if they are called a few times for the same field!
int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx){
xvmc_render_state_t * render,* last, * next;
@ -103,9 +88,9 @@ xvmc_render_state_t * render,* last, * next;
render->p_past_surface = NULL;
switch(s->pict_type){
case I_TYPE:
case FF_I_TYPE:
return 0;// no prediction from other frames
case B_TYPE:
case FF_B_TYPE:
next = (xvmc_render_state_t*)s->next_picture.data[2];
assert(next!=NULL);
assert(next->state & MP_XVMC_STATE_PREDICTION);
@ -113,7 +98,7 @@ xvmc_render_state_t * render,* last, * next;
if(next->magic != MP_XVMC_RENDER_MAGIC) return -1;
render->p_future_surface = next->p_surface;
//no return here, going to set forward prediction
case P_TYPE:
case FF_P_TYPE:
last = (xvmc_render_state_t*)s->last_picture.data[2];
if(last == NULL)// && !s->first_field)
last = render;//predict second field from the first
@ -121,7 +106,7 @@ xvmc_render_state_t * render,* last, * next;
assert(last->state & MP_XVMC_STATE_PREDICTION);
render->p_past_surface = last->p_surface;
return 0;
}
}
return -1;
}
@ -146,8 +131,8 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
if(s->encoding){
fprintf(stderr,"XVMC doesn't support encoding!!!\n");
av_abort();
av_log(s->avctx, AV_LOG_ERROR, "XVMC doesn't support encoding!!!\n");
return;
}
//from MPV_decode_mb(),
@ -159,11 +144,11 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
}
//MC doesn't skip blocks
s->mb_skiped = 0;
s->mb_skipped = 0;
// do I need to export quant when I could not perform postprocessing?
// anyway, it doesn't hurrt
// Do I need to export quant when I could not perform postprocessing?
// Anyway, it doesn't hurt.
s->current_picture.qscale_table[mb_xy] = s->qscale;
//START OF XVMC specific code
@ -173,7 +158,7 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
assert(render->mv_blocks);
//take the next free macroblock
mv_block = &render->mv_blocks[render->start_mv_blocks_num +
mv_block = &render->mv_blocks[render->start_mv_blocks_num +
render->filled_mv_blocks_num ];
// memset(mv_block,0,sizeof(XvMCMacroBlock));
@ -181,7 +166,7 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
mv_block->x = s->mb_x;
mv_block->y = s->mb_y;
mv_block->dct_type = s->interlaced_dct;//XVMC_DCT_TYPE_FRAME/FIELD;
// mv_block->motion_type = 0; //zero to silense warnings
// mv_block->motion_type = 0; //zero to silence warnings
if(s->mb_intra){
mv_block->macroblock_type = XVMC_MB_TYPE_INTRA;//no MC, all done
}else{
@ -246,7 +231,7 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
mv_block->motion_vertical_field_select = 0;
//set correct field referenses
//set correct field references
if(s->mv_type == MV_TYPE_FIELD || s->mv_type == MV_TYPE_16X8){
if( s->field_select[0][0] ) mv_block->motion_vertical_field_select|=1;
if( s->field_select[1][0] ) mv_block->motion_vertical_field_select|=2;
@ -256,35 +241,39 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
}//!intra
//time to handle data blocks;
mv_block->index = render->next_free_data_block_num;
blocks_per_mb = 6;
/*
switch( s->chroma_format){
case CHROMA_422:
blocks_per_mb = 8;
break;
case CHROMA_444:
blocks_per_mb = 12;
break;
if( s->chroma_format >= 2){
blocks_per_mb = 4 + (1 << (s->chroma_format));
}
*/
// calculate cbp
cbp = 0;
for(i=0; i<blocks_per_mb; i++) {
cbp+= cbp;
if(s->block_last_index[i] >= 0)
cbp++;
}
if(s->flags & CODEC_FLAG_GRAY){
if(s->mb_intra){//intra frames are alwasy full chroma block
if(s->mb_intra){//intra frames are always full chroma block
for(i=4; i<blocks_per_mb; i++){
memset(s->pblocks[i],0,sizeof(short)*8*8);//so we need to clear them
if(!render->unsigned_intra)
s->pblocks[i][0] = 1<<10;
}
}else
blocks_per_mb = 4;//Luminance blocks only
}else{
cbp&= 0xf << (blocks_per_mb - 4);
blocks_per_mb = 4;//luminance blocks only
}
}
cbp = calc_cbp(s,blocks_per_mb);
mv_block->coded_block_pattern = cbp;
if(cbp == 0)
mv_block->macroblock_type &= ~XVMC_MB_TYPE_PATTERN;
for(i=0; i<blocks_per_mb; i++){
if(s->block_last_index[i] >= 0){
// i do not have unsigned_intra MOCO to test, hope it is OK
// I do not have unsigned_intra MOCO to test, hope it is OK.
if( (s->mb_intra) && ( render->idct || (!render->idct && !render->unsigned_intra)) )
s->pblocks[i][0]-=1<<10;
if(!render->idct){
@ -299,7 +288,7 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* if(s->pblocks[i] != &render->data_blocks[
(render->next_free_data_block_num)*64]){
printf("ERROR mb(%d,%d) s->pblocks[i]=%p data_block[]=%p\n",
s->mb_x,s->mb_y, s->pblocks[i],
s->mb_x,s->mb_y, s->pblocks[i],
&render->data_blocks[(render->next_free_data_block_num)*64]);
}*/
}
@ -319,5 +308,3 @@ const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
// DumpMBlockInfo(mv_block);
}
#endif