add lcl vmdav

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@6463 a95241bf-73f2-0310-859d-f6bbb57e9c96
shatty 2004-02-02 01:05:21 +00:00
parent a96f066c38
commit a3fd835dad
3 changed files with 1442 additions and 0 deletions


@@ -40,6 +40,7 @@ StaticLibrary avcodec :
jfdctfst.c
jfdctint.c
jrevdct.c
lcl.c
mace.c
mdct.c
mem.c
@@ -70,6 +71,7 @@ StaticLibrary avcodec :
truemotion1.c
utils.c
vcr1.c
vmdav.c
vp3.c
vqavideo.c
wmadec.c


@@ -0,0 +1,892 @@
/*
* LCL (LossLess Codec Library) Codec
* Copyright (c) 2002-2004 Roberto Togni
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/**
* @file lcl.c
* LCL (LossLess Codec Library) Video Codec
* Decoder for MSZH and ZLIB codecs
* Experimental encoder for ZLIB RGB24
*
* Fourcc: MSZH, ZLIB
*
* Original Win32 dll:
* Ver2.23 By Kenji Oshima 2000.09.20
* avimszh.dll, avizlib.dll
*
* A description of the decoding algorithm can be found here:
* http://www.pcisys.net/~melanson/codecs
*
* Supports: BGR24 (RGB 24bpp)
*
*/
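/*
* A rough sketch of the 8-byte extradata block this decoder expects, as
* reconstructed from decode_init() below (field meanings are inferred from
* that code, not taken from an official layout description):
*
* byte 0..3  not interpreted by the decoder (the encoder below writes 4,0,0,0)
* byte 4     image type   (IMGTYPE_* constants)
* byte 5     compression  (COMP_* constants / zlib level)
* byte 6     flags        (FLAG_MULTITHREAD, FLAG_NULLFRAME, FLAG_PNGFILTER)
* byte 7     codec id     (CODEC_MSZH or CODEC_ZLIB)
*/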
#include <stdio.h>
#include <stdlib.h>
#include "common.h"
#include "avcodec.h"
#ifdef CONFIG_ZLIB
#include <zlib.h>
#endif
#define BMPTYPE_YUV 1
#define BMPTYPE_RGB 2
#define IMGTYPE_YUV111 0
#define IMGTYPE_YUV422 1
#define IMGTYPE_RGB24 2
#define IMGTYPE_YUV411 3
#define IMGTYPE_YUV211 4
#define IMGTYPE_YUV420 5
#define COMP_MSZH 0
#define COMP_MSZH_NOCOMP 1
#define COMP_ZLIB_HISPEED 1
#define COMP_ZLIB_HICOMP 9
#define COMP_ZLIB_NORMAL -1
#define FLAG_MULTITHREAD 1
#define FLAG_NULLFRAME 2
#define FLAG_PNGFILTER 4
#define FLAGMASK_UNUSED 0xf8
#define CODEC_MSZH 1
#define CODEC_ZLIB 3
#define FOURCC_MSZH mmioFOURCC('M','S','Z','H')
#define FOURCC_ZLIB mmioFOURCC('Z','L','I','B')
/*
* Decoder context
*/
typedef struct LclContext {
AVCodecContext *avctx;
AVFrame pic;
PutBitContext pb;
// Image type
int imgtype;
// Compression type
int compression;
// Flags
int flags;
// Decompressed data size
unsigned int decomp_size;
// Decompression buffer
unsigned char* decomp_buf;
// Maximum compressed data size
unsigned int max_comp_size;
// Compression buffer
unsigned char* comp_buf;
#ifdef CONFIG_ZLIB
z_stream zstream;
#endif
} LclContext;
/*
*
* Helper functions
*
*/
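/*
* The fix()/get_b()/get_g()/get_r() helpers below implement a 20-bit
* fixed-point YCbCr -> RGB conversion. Dividing the constants by 2^20 gives
* roughly the familiar floating-point form (approximate, for orientation only):
*
* B = Y + 1.772 * Cb                (1858076 / 2^20 ~= 1.772)
* G = Y - 0.344 * Cb - 0.714 * Cr   (360857 / 2^20 ~= 0.344, 748830 / 2^20 ~= 0.714)
* R = Y + 1.402 * Cr                (1470103 / 2^20 ~= 1.402)
*
* fix() rounds the 20-bit result (+0x80000) and clamps it to 0..255.
*/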
static inline unsigned char fix (int pix14)
{
int tmp;
tmp = (pix14 + 0x80000) >> 20;
if (tmp < 0)
return 0;
if (tmp > 255)
return 255;
return tmp;
}
static inline unsigned char get_b (unsigned char yq, signed char bq)
{
return fix((yq << 20) + bq * 1858076);
}
static inline unsigned char get_g (unsigned char yq, signed char bq, signed char rq)
{
return fix((yq << 20) - bq * 360857 - rq * 748830);
}
static inline unsigned char get_r (unsigned char yq, signed char rq)
{
return fix((yq << 20) + rq * 1470103);
}
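/*
* mszh_decomp() below expands an LZ77-style stream; the token layout sketched
* here is inferred from the implementation, not from a format document:
*
* - a mask byte supplies 8 flag bits, consumed most significant bit first;
* - a clear flag copies 4 literal bytes from the source;
* - a set flag reads a 2-byte little-endian value: the low 11 bits are a
*   backward offset into the data already written, the top 5 bits are
*   (count - 1), and count * 4 bytes are copied from (dest - offset).
*/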
static int mszh_decomp(unsigned char * srcptr, int srclen, unsigned char * destptr)
{
unsigned char *destptr_bak = destptr;
unsigned char mask = 0;
unsigned char maskbit = 0;
unsigned int ofs, cnt;
while (srclen > 0) {
if (maskbit == 0) {
mask = *(srcptr++);
maskbit = 8;
srclen--;
continue;
}
if ((mask & (1 << (--maskbit))) == 0) {
*(int*)destptr = *(int*)srcptr;
srclen -= 4;
destptr += 4;
srcptr += 4;
} else {
ofs = *(srcptr++);
cnt = *(srcptr++);
ofs += cnt * 256;
cnt = ((cnt >> 3) & 0x1f) + 1;
ofs &= 0x7ff;
srclen -= 2;
cnt *= 4;
for (; cnt > 0; cnt--) {
*(destptr) = *(destptr - ofs);
destptr++;
}
}
}
return (destptr - destptr_bak);
}
/*
*
* Decode a frame
*
*/
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size)
{
LclContext * const c = (LclContext *)avctx->priv_data;
unsigned char *encoded = (unsigned char *)buf;
int pixel_ptr;
int row, col;
unsigned char *outptr;
unsigned int width = avctx->width; // Real image width
unsigned int height = avctx->height; // Real image height
unsigned int mszh_dlen;
unsigned char yq, y1q, uq, vq;
int uqvq;
unsigned int mthread_inlen, mthread_outlen;
#ifdef CONFIG_ZLIB
int zret; // Zlib return code
#endif
int len = buf_size;
/* no supplementary picture */
if (buf_size == 0)
return 0;
if(c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
c->pic.reference = 0;
c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
if(avctx->get_buffer(avctx, &c->pic) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
outptr = c->pic.data[0]; // Output image pointer
/* Decompress frame */
switch (avctx->codec_id) {
case CODEC_ID_MSZH:
switch (c->compression) {
case COMP_MSZH:
if (c->flags & FLAG_MULTITHREAD) {
mthread_inlen = *((unsigned int*)encoded);
mthread_outlen = *((unsigned int*)(encoded+4));
mszh_dlen = mszh_decomp(encoded + 8, mthread_inlen, c->decomp_buf);
if (mthread_outlen != mszh_dlen) {
av_log(avctx, AV_LOG_ERROR, "Mthread1 decoded size differs (%d != %d)\n",
mthread_outlen, mszh_dlen);
}
mszh_dlen = mszh_decomp(encoded + 8 + mthread_inlen, len - mthread_inlen,
c->decomp_buf + mthread_outlen);
if ((c->decomp_size - mthread_outlen) != mszh_dlen) {
av_log(avctx, AV_LOG_ERROR, "Mthread2 decoded size differs (%d != %d)\n",
c->decomp_size - mthread_outlen, mszh_dlen);
}
encoded = c->decomp_buf;
len = c->decomp_size;
} else {
mszh_dlen = mszh_decomp(encoded, len, c->decomp_buf);
if (c->decomp_size != mszh_dlen) {
av_log(avctx, AV_LOG_ERROR, "Decoded size differs (%d != %d)\n",
c->decomp_size, mszh_dlen);
}
encoded = c->decomp_buf;
len = mszh_dlen;
}
break;
case COMP_MSZH_NOCOMP:
break;
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown MSZH compression in frame decoder.\n");
return -1;
}
break;
case CODEC_ID_ZLIB:
#ifdef CONFIG_ZLIB
/* Using the original dll with normal compression (-1) and RGB format
* gives a file with ZLIB fourcc, but frame is really uncompressed.
* To be sure that's true check also frame size */
if ((c->compression == COMP_ZLIB_NORMAL) && (c->imgtype == IMGTYPE_RGB24) &&
(len == width * height * 3))
break;
zret = inflateReset(&(c->zstream));
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
return -1;
}
if (c->flags & FLAG_MULTITHREAD) {
mthread_inlen = *((unsigned int*)encoded);
mthread_outlen = *((unsigned int*)(encoded+4));
c->zstream.next_in = encoded + 8;
c->zstream.avail_in = mthread_inlen;
c->zstream.next_out = c->decomp_buf;
c->zstream.avail_out = mthread_outlen;
zret = inflate(&(c->zstream), Z_FINISH);
if ((zret != Z_OK) && (zret != Z_STREAM_END)) {
av_log(avctx, AV_LOG_ERROR, "Mthread1 inflate error: %d\n", zret);
return -1;
}
if (mthread_outlen != (unsigned int)(c->zstream.total_out)) {
av_log(avctx, AV_LOG_ERROR, "Mthread1 decoded size differs (%u != %lu)\n",
mthread_outlen, c->zstream.total_out);
}
zret = inflateReset(&(c->zstream));
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Mthread2 inflate reset error: %d\n", zret);
return -1;
}
c->zstream.next_in = encoded + 8 + mthread_inlen;
c->zstream.avail_in = len - mthread_inlen;
c->zstream.next_out = c->decomp_buf + mthread_outlen;
c->zstream.avail_out = mthread_outlen;
zret = inflate(&(c->zstream), Z_FINISH);
if ((zret != Z_OK) && (zret != Z_STREAM_END)) {
av_log(avctx, AV_LOG_ERROR, "Mthread2 inflate error: %d\n", zret);
return -1;
}
if ((c->decomp_size - mthread_outlen) != (unsigned int)(c->zstream.total_out)) {
av_log(avctx, AV_LOG_ERROR, "Mthread2 decoded size differs (%d != %lu)\n",
c->decomp_size - mthread_outlen, c->zstream.total_out);
}
} else {
c->zstream.next_in = encoded;
c->zstream.avail_in = len;
c->zstream.next_out = c->decomp_buf;
c->zstream.avail_out = c->decomp_size;
zret = inflate(&(c->zstream), Z_FINISH);
if ((zret != Z_OK) && (zret != Z_STREAM_END)) {
av_log(avctx, AV_LOG_ERROR, "Inflate error: %d\n", zret);
return -1;
}
if (c->decomp_size != (unsigned int)(c->zstream.total_out)) {
av_log(avctx, AV_LOG_ERROR, "Decoded size differs (%d != %lu)\n",
c->decomp_size, c->zstream.total_out);
}
}
encoded = c->decomp_buf;
len = c->decomp_size;
#else
av_log(avctx, AV_LOG_ERROR, "BUG! Zlib support not compiled in frame decoder.\n");
return -1;
#endif
break;
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown codec in frame decoder compression switch.\n");
return -1;
}
/* Apply PNG filter */
if ((avctx->codec_id == CODEC_ID_ZLIB) && (c->flags & FLAG_PNGFILTER)) {
switch (c->imgtype) {
case IMGTYPE_YUV111:
case IMGTYPE_RGB24:
for (row = 0; row < height; row++) {
pixel_ptr = row * width * 3;
yq = encoded[pixel_ptr++];
uqvq = encoded[pixel_ptr++];
uqvq+=(encoded[pixel_ptr++] << 8);
for (col = 1; col < width; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
uqvq -= (encoded[pixel_ptr+1] | (encoded[pixel_ptr+2]<<8));
encoded[pixel_ptr+1] = (uqvq) & 0xff;
encoded[pixel_ptr+2] = ((uqvq)>>8) & 0xff;
pixel_ptr += 3;
}
}
break;
case IMGTYPE_YUV422:
for (row = 0; row < height; row++) {
pixel_ptr = row * width * 2;
yq = uq = vq =0;
for (col = 0; col < width/4; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
encoded[pixel_ptr+1] = yq -= encoded[pixel_ptr+1];
encoded[pixel_ptr+2] = yq -= encoded[pixel_ptr+2];
encoded[pixel_ptr+3] = yq -= encoded[pixel_ptr+3];
encoded[pixel_ptr+4] = uq -= encoded[pixel_ptr+4];
encoded[pixel_ptr+5] = uq -= encoded[pixel_ptr+5];
encoded[pixel_ptr+6] = vq -= encoded[pixel_ptr+6];
encoded[pixel_ptr+7] = vq -= encoded[pixel_ptr+7];
pixel_ptr += 8;
}
}
break;
case IMGTYPE_YUV411:
for (row = 0; row < height; row++) {
pixel_ptr = row * width / 2 * 3;
yq = uq = vq =0;
for (col = 0; col < width/4; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
encoded[pixel_ptr+1] = yq -= encoded[pixel_ptr+1];
encoded[pixel_ptr+2] = yq -= encoded[pixel_ptr+2];
encoded[pixel_ptr+3] = yq -= encoded[pixel_ptr+3];
encoded[pixel_ptr+4] = uq -= encoded[pixel_ptr+4];
encoded[pixel_ptr+5] = vq -= encoded[pixel_ptr+5];
pixel_ptr += 6;
}
}
break;
case IMGTYPE_YUV211:
for (row = 0; row < height; row++) {
pixel_ptr = row * width * 2;
yq = uq = vq =0;
for (col = 0; col < width/2; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
encoded[pixel_ptr+1] = yq -= encoded[pixel_ptr+1];
encoded[pixel_ptr+2] = uq -= encoded[pixel_ptr+2];
encoded[pixel_ptr+3] = vq -= encoded[pixel_ptr+3];
pixel_ptr += 4;
}
}
break;
case IMGTYPE_YUV420:
for (row = 0; row < height/2; row++) {
pixel_ptr = row * width * 3;
yq = y1q = uq = vq =0;
for (col = 0; col < width/2; col++) {
encoded[pixel_ptr] = yq -= encoded[pixel_ptr];
encoded[pixel_ptr+1] = yq -= encoded[pixel_ptr+1];
encoded[pixel_ptr+2] = y1q -= encoded[pixel_ptr+2];
encoded[pixel_ptr+3] = y1q -= encoded[pixel_ptr+3];
encoded[pixel_ptr+4] = uq -= encoded[pixel_ptr+4];
encoded[pixel_ptr+5] = vq -= encoded[pixel_ptr+5];
pixel_ptr += 6;
}
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown imagetype in pngfilter switch.\n");
return -1;
}
}
/* Convert colorspace */
switch (c->imgtype) {
case IMGTYPE_YUV111:
for (row = height - 1; row >= 0; row--) {
pixel_ptr = row * c->pic.linesize[0];
for (col = 0; col < width; col++) {
outptr[pixel_ptr++] = get_b(encoded[0], encoded[1]);
outptr[pixel_ptr++] = get_g(encoded[0], encoded[1], encoded[2]);
outptr[pixel_ptr++] = get_r(encoded[0], encoded[2]);
encoded += 3;
}
}
break;
case IMGTYPE_YUV422:
for (row = height - 1; row >= 0; row--) {
pixel_ptr = row * c->pic.linesize[0];
for (col = 0; col < width/4; col++) {
outptr[pixel_ptr++] = get_b(encoded[0], encoded[4]);
outptr[pixel_ptr++] = get_g(encoded[0], encoded[4], encoded[6]);
outptr[pixel_ptr++] = get_r(encoded[0], encoded[6]);
outptr[pixel_ptr++] = get_b(encoded[1], encoded[4]);
outptr[pixel_ptr++] = get_g(encoded[1], encoded[4], encoded[6]);
outptr[pixel_ptr++] = get_r(encoded[1], encoded[6]);
outptr[pixel_ptr++] = get_b(encoded[2], encoded[5]);
outptr[pixel_ptr++] = get_g(encoded[2], encoded[5], encoded[7]);
outptr[pixel_ptr++] = get_r(encoded[2], encoded[7]);
outptr[pixel_ptr++] = get_b(encoded[3], encoded[5]);
outptr[pixel_ptr++] = get_g(encoded[3], encoded[5], encoded[7]);
outptr[pixel_ptr++] = get_r(encoded[3], encoded[7]);
encoded += 8;
}
}
break;
case IMGTYPE_RGB24:
for (row = height - 1; row >= 0; row--) {
pixel_ptr = row * c->pic.linesize[0];
for (col = 0; col < width; col++) {
outptr[pixel_ptr++] = encoded[0];
outptr[pixel_ptr++] = encoded[1];
outptr[pixel_ptr++] = encoded[2];
encoded += 3;
}
}
break;
case IMGTYPE_YUV411:
for (row = height - 1; row >= 0; row--) {
pixel_ptr = row * c->pic.linesize[0];
for (col = 0; col < width/4; col++) {
outptr[pixel_ptr++] = get_b(encoded[0], encoded[4]);
outptr[pixel_ptr++] = get_g(encoded[0], encoded[4], encoded[5]);
outptr[pixel_ptr++] = get_r(encoded[0], encoded[5]);
outptr[pixel_ptr++] = get_b(encoded[1], encoded[4]);
outptr[pixel_ptr++] = get_g(encoded[1], encoded[4], encoded[5]);
outptr[pixel_ptr++] = get_r(encoded[1], encoded[5]);
outptr[pixel_ptr++] = get_b(encoded[2], encoded[4]);
outptr[pixel_ptr++] = get_g(encoded[2], encoded[4], encoded[5]);
outptr[pixel_ptr++] = get_r(encoded[2], encoded[5]);
outptr[pixel_ptr++] = get_b(encoded[3], encoded[4]);
outptr[pixel_ptr++] = get_g(encoded[3], encoded[4], encoded[5]);
outptr[pixel_ptr++] = get_r(encoded[3], encoded[5]);
encoded += 6;
}
}
break;
case IMGTYPE_YUV211:
for (row = height - 1; row >= 0; row--) {
pixel_ptr = row * c->pic.linesize[0];
for (col = 0; col < width/2; col++) {
outptr[pixel_ptr++] = get_b(encoded[0], encoded[2]);
outptr[pixel_ptr++] = get_g(encoded[0], encoded[2], encoded[3]);
outptr[pixel_ptr++] = get_r(encoded[0], encoded[3]);
outptr[pixel_ptr++] = get_b(encoded[1], encoded[2]);
outptr[pixel_ptr++] = get_g(encoded[1], encoded[2], encoded[3]);
outptr[pixel_ptr++] = get_r(encoded[1], encoded[3]);
encoded += 4;
}
}
break;
case IMGTYPE_YUV420:
for (row = height / 2 - 1; row >= 0; row--) {
pixel_ptr = 2 * row * c->pic.linesize[0];
for (col = 0; col < width/2; col++) {
outptr[pixel_ptr] = get_b(encoded[0], encoded[4]);
outptr[pixel_ptr+1] = get_g(encoded[0], encoded[4], encoded[5]);
outptr[pixel_ptr+2] = get_r(encoded[0], encoded[5]);
outptr[pixel_ptr+3] = get_b(encoded[1], encoded[4]);
outptr[pixel_ptr+4] = get_g(encoded[1], encoded[4], encoded[5]);
outptr[pixel_ptr+5] = get_r(encoded[1], encoded[5]);
outptr[pixel_ptr-c->pic.linesize[0]] = get_b(encoded[2], encoded[4]);
outptr[pixel_ptr-c->pic.linesize[0]+1] = get_g(encoded[2], encoded[4], encoded[5]);
outptr[pixel_ptr-c->pic.linesize[0]+2] = get_r(encoded[2], encoded[5]);
outptr[pixel_ptr-c->pic.linesize[0]+3] = get_b(encoded[3], encoded[4]);
outptr[pixel_ptr-c->pic.linesize[0]+4] = get_g(encoded[3], encoded[4], encoded[5]);
outptr[pixel_ptr-c->pic.linesize[0]+5] = get_r(encoded[3], encoded[5]);
pixel_ptr += 6;
encoded += 6;
}
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown imagetype in image decoder.\n");
return -1;
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->pic;
/* always report that the buffer was completely consumed */
return buf_size;
}
/*
*
* Encode a frame
*
*/
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
LclContext *c = avctx->priv_data;
AVFrame *pict = data;
AVFrame * const p = &c->pic;
int i;
int zret; // Zlib return code
#ifndef CONFIG_ZLIB
av_log(avctx, AV_LOG_ERROR, "Zlib support not compiled in.\n");
return -1;
#else
init_put_bits(&c->pb, buf, buf_size);
*p = *pict;
p->pict_type= FF_I_TYPE;
p->key_frame= 1;
if(avctx->pix_fmt != PIX_FMT_BGR24){
av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
return -1;
}
zret = deflateReset(&(c->zstream));
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Deflate reset error: %d\n", zret);
return -1;
}
c->zstream.next_in = p->data[0];
c->zstream.avail_in = c->decomp_size;
c->zstream.next_out = c->comp_buf;
c->zstream.avail_out = c->max_comp_size;
zret = deflate(&(c->zstream), Z_FINISH);
if ((zret != Z_OK) && (zret != Z_STREAM_END)) {
av_log(avctx, AV_LOG_ERROR, "Deflate error: %d\n", zret);
return -1;
}
for (i = 0; i < c->zstream.total_out; i++)
put_bits(&c->pb, 8, c->comp_buf[i]);
flush_put_bits(&c->pb);
return c->zstream.total_out;
#endif
}
/*
*
* Init lcl decoder
*
*/
static int decode_init(AVCodecContext *avctx)
{
LclContext * const c = (LclContext *)avctx->priv_data;
int basesize = avctx->width * avctx->height;
int zret; // Zlib return code
c->avctx = avctx;
avctx->has_b_frames = 0;
c->pic.data[0] = NULL;
#ifdef CONFIG_ZLIB
// Needed if zlib unused or init aborted before inflateInit
memset(&(c->zstream), 0, sizeof(z_stream));
#endif
if (avctx->extradata_size < 8) {
av_log(avctx, AV_LOG_ERROR, "Extradata size too small.\n");
return 1;
}
/* Check codec type */
if (((avctx->codec_id == CODEC_ID_MSZH) && (*((char *)avctx->extradata + 7) != CODEC_MSZH)) ||
((avctx->codec_id == CODEC_ID_ZLIB) && (*((char *)avctx->extradata + 7) != CODEC_ZLIB))) {
av_log(avctx, AV_LOG_ERROR, "Codec id and codec type mismatch. This should not happen.\n");
}
/* Detect image type */
switch (c->imgtype = *((char *)avctx->extradata + 4)) {
case IMGTYPE_YUV111:
c->decomp_size = basesize * 3;
av_log(avctx, AV_LOG_INFO, "Image type is YUV 1:1:1.\n");
break;
case IMGTYPE_YUV422:
c->decomp_size = basesize * 2;
av_log(avctx, AV_LOG_INFO, "Image type is YUV 4:2:2.\n");
break;
case IMGTYPE_RGB24:
c->decomp_size = basesize * 3;
av_log(avctx, AV_LOG_INFO, "Image type is RGB 24.\n");
break;
case IMGTYPE_YUV411:
c->decomp_size = basesize / 2 * 3;
av_log(avctx, AV_LOG_INFO, "Image type is YUV 4:1:1.\n");
break;
case IMGTYPE_YUV211:
c->decomp_size = basesize * 2;
av_log(avctx, AV_LOG_INFO, "Image type is YUV 2:1:1.\n");
break;
case IMGTYPE_YUV420:
c->decomp_size = basesize / 2 * 3;
av_log(avctx, AV_LOG_INFO, "Image type is YUV 4:2:0.\n");
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported image format %d.\n", c->imgtype);
return 1;
}
/* Detect compression method */
c->compression = *((char *)avctx->extradata + 5);
switch (avctx->codec_id) {
case CODEC_ID_MSZH:
switch (c->compression) {
case COMP_MSZH:
av_log(avctx, AV_LOG_INFO, "Compression enabled.\n");
break;
case COMP_MSZH_NOCOMP:
c->decomp_size = 0;
av_log(avctx, AV_LOG_INFO, "No compression.\n");
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported compression format for MSZH (%d).\n", c->compression);
return 1;
}
break;
case CODEC_ID_ZLIB:
#ifdef CONFIG_ZLIB
switch (c->compression) {
case COMP_ZLIB_HISPEED:
av_log(avctx, AV_LOG_INFO, "High speed compression.\n");
break;
case COMP_ZLIB_HICOMP:
av_log(avctx, AV_LOG_INFO, "High compression.\n");
break;
case COMP_ZLIB_NORMAL:
av_log(avctx, AV_LOG_INFO, "Normal compression.\n");
break;
default:
if ((c->compression < Z_NO_COMPRESSION) || (c->compression > Z_BEST_COMPRESSION)) {
av_log(avctx, AV_LOG_ERROR, "Unsupported compression level for ZLIB: (%d).\n", c->compression);
return 1;
}
av_log(avctx, AV_LOG_INFO, "Compression level for ZLIB: (%d).\n", c->compression);
}
#else
av_log(avctx, AV_LOG_ERROR, "Zlib support not compiled.\n");
return 1;
#endif
break;
default:
av_log(avctx, AV_LOG_ERROR, "BUG! Unknown codec in compression switch.\n");
return 1;
}
/* Allocate decompression buffer */
/* 4*8 max overflow space for mszh decomp algorithm */
if (c->decomp_size) {
if ((c->decomp_buf = av_malloc(c->decomp_size+4*8)) == NULL) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
return 1;
}
}
/* Detect flags */
c->flags = *((char *)avctx->extradata + 6);
if (c->flags & FLAG_MULTITHREAD)
av_log(avctx, AV_LOG_INFO, "Multithread encoder flag set.\n");
if (c->flags & FLAG_NULLFRAME)
av_log(avctx, AV_LOG_INFO, "Nullframe insertion flag set.\n");
if ((avctx->codec_id == CODEC_ID_ZLIB) && (c->flags & FLAG_PNGFILTER))
av_log(avctx, AV_LOG_INFO, "PNG filter flag set.\n");
if (c->flags & FLAGMASK_UNUSED)
av_log(avctx, AV_LOG_ERROR, "Unknown flag set (%d).\n", c->flags);
/* If needed init zlib */
if (avctx->codec_id == CODEC_ID_ZLIB) {
#ifdef CONFIG_ZLIB
c->zstream.zalloc = Z_NULL;
c->zstream.zfree = Z_NULL;
c->zstream.opaque = Z_NULL;
zret = inflateInit(&(c->zstream));
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
return 1;
}
#else
av_log(avctx, AV_LOG_ERROR, "Zlib support not compiled.\n");
return 1;
#endif
}
avctx->pix_fmt = PIX_FMT_BGR24;
return 0;
}
/*
*
* Init lcl encoder
*
*/
static int encode_init(AVCodecContext *avctx)
{
LclContext *c = avctx->priv_data;
int zret; // Zlib return code
#ifndef CONFIG_ZLIB
av_log(avctx, AV_LOG_ERROR, "Zlib support not compiled.\n");
return 1;
#else
c->avctx= avctx;
assert(avctx->width && avctx->height);
avctx->extradata= av_mallocz(8);
avctx->coded_frame= &c->pic;
// Will be user settable someday
c->compression = 6;
c->flags = 0;
switch(avctx->pix_fmt){
case PIX_FMT_BGR24:
c->imgtype = IMGTYPE_RGB24;
c->decomp_size = avctx->width * avctx->height * 3;
avctx->bits_per_sample= 24;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Format %d not supported\n", avctx->pix_fmt);
return -1;
}
((uint8_t*)avctx->extradata)[0]= 4;
((uint8_t*)avctx->extradata)[1]= 0;
((uint8_t*)avctx->extradata)[2]= 0;
((uint8_t*)avctx->extradata)[3]= 0;
((uint8_t*)avctx->extradata)[4]= c->imgtype;
((uint8_t*)avctx->extradata)[5]= c->compression;
((uint8_t*)avctx->extradata)[6]= c->flags;
((uint8_t*)avctx->extradata)[7]= 0;
c->avctx->extradata_size= 8;
c->zstream.zalloc = Z_NULL;
c->zstream.zfree = Z_NULL;
c->zstream.opaque = Z_NULL;
zret = deflateInit(&(c->zstream), c->compression);
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Deflate init error: %d\n", zret);
return 1;
}
/* Conservative upper bound taken from zlib v1.2.1 source */
c->max_comp_size = c->decomp_size + ((c->decomp_size + 7) >> 3) +
((c->decomp_size + 63) >> 6) + 11;
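/* For reference: zlib 1.2.x also exposes compressBound(), so a sketch like
* c->max_comp_size = compressBound(c->decomp_size);
* would be an alternative; the explicit formula above is what this code uses. */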
if ((c->comp_buf = av_malloc(c->max_comp_size)) == NULL) {
av_log(avctx, AV_LOG_ERROR, "Can't allocate compression buffer.\n");
return 1;
}
return 0;
#endif
}
/*
*
* Uninit lcl decoder
*
*/
static int decode_end(AVCodecContext *avctx)
{
LclContext * const c = (LclContext *)avctx->priv_data;
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
#ifdef CONFIG_ZLIB
inflateEnd(&(c->zstream));
#endif
return 0;
}
/*
*
* Uninit lcl encoder
*
*/
static int encode_end(AVCodecContext *avctx)
{
LclContext *c = avctx->priv_data;
av_freep(&avctx->extradata);
av_freep(&c->comp_buf);
#ifdef CONFIG_ZLIB
deflateEnd(&(c->zstream));
#endif
return 0;
}
AVCodec mszh_decoder = {
"mszh",
CODEC_TYPE_VIDEO,
CODEC_ID_MSZH,
sizeof(LclContext),
decode_init,
NULL,
decode_end,
decode_frame,
CODEC_CAP_DR1,
};
AVCodec zlib_decoder = {
"zlib",
CODEC_TYPE_VIDEO,
CODEC_ID_ZLIB,
sizeof(LclContext),
decode_init,
NULL,
decode_end,
decode_frame,
CODEC_CAP_DR1,
};
#ifdef CONFIG_ENCODERS
AVCodec zlib_encoder = {
"zlib",
CODEC_TYPE_VIDEO,
CODEC_ID_ZLIB,
sizeof(LclContext),
encode_init,
encode_frame,
encode_end,
// .options = lcl_options,
};
#endif //CONFIG_ENCODERS


@@ -0,0 +1,548 @@
/*
* Sierra VMD Audio & Video Decoders
* Copyright (C) 2004 the ffmpeg project
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/**
* @file vmdav.c
* Sierra VMD audio & video decoders
* by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
*
* The video decoder outputs PAL8 colorspace data. The decoder expects
* a 0x330-byte VMD file header to be transmitted via extradata during
* codec initialization. Each encoded frame that is sent to this decoder
* is expected to be prepended with the appropriate 16-byte frame
* information record from the VMD file.
*
* The audio decoder, like the video decoder, expects each encoded data
* chunk to be prepended with the appropriate 16-byte frame information
* record from the VMD file. It does not require the 0x330-byte VMD file
* header, but it does need the audio setup parameters passed in through
* normal libavcodec API means.
*/
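/*
* A rough sketch of the 16-byte frame record fields actually consumed below
* (offsets inferred from the code, not from a VMD specification):
*
* video records: bytes 6..13 hold the update rectangle as little-endian
* 16-bit left/top/right/bottom coordinates, and bit 1 of byte 15 signals
* that a new 256-entry palette (after a 2-byte field the code skips)
* precedes the compressed pixel data;
* audio records: byte 6 selects the chunk type (1 = audio, 2 = audio and
* silence interleaved, 3 = silence).
*/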
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256
#define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
#define LE_32(x) ((((uint8_t*)(x))[3] << 24) | \
(((uint8_t*)(x))[2] << 16) | \
(((uint8_t*)(x))[1] << 8) | \
((uint8_t*)(x))[0])
/*
* Video Decoder
*/
typedef struct VmdVideoContext {
AVCodecContext *avctx;
DSPContext dsp;
AVFrame frame;
AVFrame prev_frame;
unsigned char *buf;
int size;
unsigned char palette[PALETTE_COUNT * 4];
unsigned char *unpack_buffer;
} VmdVideoContext;
#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF
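/*
* lz_unpack() below is an LZSS-style expander (summary inferred from the
* implementation): it keeps a QUEUE_SIZE (4 KB) ring buffer of recent output,
* reads a tag byte of 8 flags consumed least significant bit first, copies
* one literal byte for a set flag and an offset/length back reference into
* the ring buffer for a clear flag. The 0x56781234 marker at the start of the
* input selects an alternate initial queue position and escape length.
*/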
static void lz_unpack(unsigned char *src, unsigned char *dest)
{
unsigned char *s;
unsigned char *d;
unsigned char queue[QUEUE_SIZE];
unsigned int qpos;
unsigned int dataleft;
unsigned int chainofs;
unsigned int chainlen;
unsigned int speclen;
unsigned char tag;
unsigned int i, j;
s = src;
d = dest;
dataleft = LE_32(s);
s += 4;
memset(queue, 0x20, QUEUE_SIZE);
if (LE_32(s) == 0x56781234) {
s += 4;
qpos = 0x111;
speclen = 0xF + 3;
} else {
qpos = 0xFEE;
speclen = 100; /* no speclen */
}
while (dataleft > 0) {
tag = *s++;
if ((tag == 0xFF) && (dataleft > 8)) {
for (i = 0; i < 8; i++) {
queue[qpos++] = *d++ = *s++;
qpos &= QUEUE_MASK;
}
dataleft -= 8;
} else {
for (i = 0; i < 8; i++) {
if (dataleft == 0)
break;
if (tag & 0x01) {
queue[qpos++] = *d++ = *s++;
qpos &= QUEUE_MASK;
dataleft--;
} else {
chainofs = *s++;
chainofs |= ((*s & 0xF0) << 4);
chainlen = (*s++ & 0x0F) + 3;
if (chainlen == speclen)
chainlen = *s++ + 0xF + 3;
for (j = 0; j < chainlen; j++) {
*d = queue[chainofs++ & QUEUE_MASK];
queue[qpos++] = *d++;
qpos &= QUEUE_MASK;
}
dataleft -= chainlen;
}
tag >>= 1;
}
}
}
}
static int rle_unpack(unsigned char *src, unsigned char *dest, int len)
{
unsigned char *ps;
unsigned char *pd;
int i, l;
ps = src;
pd = dest;
if (len & 1)
*pd++ = *ps++;
len >>= 1;
i = 0;
do {
l = *ps++;
if (l & 0x80) {
l = (l & 0x7F) * 2;
memcpy(pd, ps, l);
ps += l;
pd += l;
} else {
for (i = 0; i < l; i++) {
*pd++ = ps[0];
*pd++ = ps[1];
}
ps += 2;
}
i += l;
} while (i < len);
return (ps - src);
}
static void vmd_decode(VmdVideoContext *s)
{
int i;
unsigned int *palette32;
unsigned char r, g, b;
/* point to the start of the encoded data */
unsigned char *p = s->buf + 16;
unsigned char *pb;
unsigned char meth;
unsigned char *dp; /* pointer to current frame */
unsigned char *pp; /* pointer to previous frame */
unsigned char len;
int ofs;
int frame_x, frame_y;
int frame_width, frame_height;
frame_x = LE_16(&s->buf[6]);
frame_y = LE_16(&s->buf[8]);
frame_width = LE_16(&s->buf[10]) - frame_x + 1;
frame_height = LE_16(&s->buf[12]) - frame_y + 1;
/* if only a certain region will be updated, copy the entire previous
* frame before the decode */
if (frame_x || frame_y || (frame_width != s->avctx->width) ||
(frame_height != s->avctx->height)) {
memcpy(s->frame.data[0], s->prev_frame.data[0],
s->avctx->height * s->frame.linesize[0]);
}
/* check if there is a new palette */
if (s->buf[15] & 0x02) {
p += 2;
palette32 = (unsigned int *)s->palette;
for (i = 0; i < PALETTE_COUNT; i++) {
r = *p++ * 4;
g = *p++ * 4;
b = *p++ * 4;
palette32[i] = (r << 16) | (g << 8) | (b);
}
s->size -= (256 * 3 + 2);
}
if (s->size >= 0) {
/* originally UnpackFrame in VAG's code */
pb = p;
meth = *pb++;
if (meth & 0x80) {
lz_unpack(pb, s->unpack_buffer);
meth &= 0x7F;
pb = s->unpack_buffer;
}
dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
switch (meth) {
case 1:
for (i = 0; i < frame_height; i++) {
ofs = 0;
do {
len = *pb++;
if (len & 0x80) {
len = (len & 0x7F) + 1;
memcpy(&dp[ofs], pb, len);
pb += len;
ofs += len;
} else {
/* interframe pixel copy */
memcpy(&dp[ofs], &pp[ofs], len + 1);
ofs += len + 1;
}
} while (ofs < frame_width);
if (ofs > frame_width) {
printf (" VMD video: offset > width (%d > %d)\n",
ofs, frame_width);
break;
}
dp += s->frame.linesize[0];
pp += s->prev_frame.linesize[0];
}
break;
case 2:
for (i = 0; i < frame_height; i++) {
memcpy(dp, pb, frame_width);
pb += frame_width;
dp += s->frame.linesize[0];
pp += s->prev_frame.linesize[0];
}
break;
case 3:
for (i = 0; i < frame_height; i++) {
ofs = 0;
do {
len = *pb++;
if (len & 0x80) {
len = (len & 0x7F) + 1;
if (*pb++ == 0xFF)
len = rle_unpack(pb, dp, len);
else
memcpy(&dp[ofs], pb, len);
pb += len;
ofs += len;
} else {
/* interframe pixel copy */
memcpy(&dp[ofs], &pp[ofs], len + 1);
ofs += len + 1;
}
} while (ofs < frame_width);
if (ofs > frame_width) {
printf (" VMD video: offset > width (%d > %d)\n",
ofs, frame_width);
}
dp += s->frame.linesize[0];
pp += s->prev_frame.linesize[0];
}
break;
}
}
}
static int vmdvideo_decode_init(AVCodecContext *avctx)
{
VmdVideoContext *s = (VmdVideoContext *)avctx->priv_data;
int i;
unsigned int *palette32;
int palette_index = 0;
unsigned char r, g, b;
unsigned char *vmd_header;
unsigned char *raw_palette;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
avctx->has_b_frames = 0;
dsputil_init(&s->dsp, avctx);
/* make sure the VMD header made it */
if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
printf(" VMD video: expected extradata size of %d\n",
VMD_HEADER_SIZE);
return -1;
}
vmd_header = (unsigned char *)avctx->extradata;
s->unpack_buffer = av_malloc(LE_32(&vmd_header[800]));
if (!s->unpack_buffer)
return -1;
/* load up the initial palette */
raw_palette = &vmd_header[28];
palette32 = (unsigned int *)s->palette;
for (i = 0; i < PALETTE_COUNT; i++) {
r = raw_palette[palette_index++] * 4;
g = raw_palette[palette_index++] * 4;
b = raw_palette[palette_index++] * 4;
palette32[i] = (r << 16) | (g << 8) | (b);
}
s->frame.data[0] = s->prev_frame.data[0] = NULL;
return 0;
}
static int vmdvideo_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
VmdVideoContext *s = (VmdVideoContext *)avctx->priv_data;
s->buf = buf;
s->size = buf_size;
s->frame.reference = 1;
if (avctx->get_buffer(avctx, &s->frame)) {
printf (" VMD Video: get_buffer() failed\n");
return -1;
}
vmd_decode(s);
/* make the palette available on the way out */
memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
if (s->prev_frame.data[0])
avctx->release_buffer(avctx, &s->prev_frame);
/* shuffle frames */
s->prev_frame = s->frame;
*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame;
/* report that the buffer was completely consumed */
return buf_size;
}
static int vmdvideo_decode_end(AVCodecContext *avctx)
{
VmdVideoContext *s = (VmdVideoContext *)avctx->priv_data;
if (s->prev_frame.data[0])
avctx->release_buffer(avctx, &s->prev_frame);
av_free(s->unpack_buffer);
return 0;
}
/*
* Audio Decoder
*/
typedef struct VmdAudioContext {
int channels;
int bits;
int block_align;
unsigned char steps8[16];
unsigned short steps16[16];
unsigned short steps128[256];
short predictors[2];
} VmdAudioContext;
static int vmdaudio_decode_init(AVCodecContext *avctx)
{
VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data;
int i;
s->channels = avctx->channels;
s->bits = avctx->bits_per_sample;
s->block_align = avctx->block_align;
printf (" %d channels, %d bits/sample, block align = %d\n",
s->channels, s->bits, s->block_align);
/* set up the steps8 and steps16 tables */
for (i = 0; i < 8; i++) {
if (i < 4)
s->steps8[i] = i;
else
s->steps8[i] = s->steps8[i - 1] + i - 1;
if (i == 0)
s->steps16[i] = 0;
else if (i == 1)
s->steps16[i] = 4;
else if (i == 2)
s->steps16[i] = 16;
else
s->steps16[i] = 1 << (i + 4);
}
/* set up the step128 table */
s->steps128[0] = 0;
s->steps128[1] = 8;
for (i = 0x02; i <= 0x20; i++)
s->steps128[i] = (i - 1) << 4;
for (i = 0x21; i <= 0x60; i++)
s->steps128[i] = (i + 0x1F) << 3;
for (i = 0x61; i <= 0x70; i++)
s->steps128[i] = (i - 0x51) << 6;
for (i = 0x71; i <= 0x78; i++)
s->steps128[i] = (i - 0x69) << 8;
for (i = 0x79; i <= 0x7D; i++)
s->steps128[i] = (i - 0x75) << 10;
s->steps128[0x7E] = 0x3000;
s->steps128[0x7F] = 0x4000;
/* set up the negative half of each table */
for (i = 0; i < 8; i++) {
s->steps8[i + 8] = -s->steps8[i];
s->steps16[i + 8] = -s->steps16[i];
}
for (i = 0; i < 128; i++)
s->steps128[i + 128] = -s->steps128[i];
return 0;
}
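/*
* Note on the audio path in this snapshot: vmdaudio_decode_audio() below is
* still an empty stub, so vmdaudio_loadsound() only writes output in its
* stereo silence branches, and the steps8/steps16/steps128 tables built
* above are not yet consumed anywhere in this file.
*/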
static void vmdaudio_decode_audio(VmdAudioContext *s, unsigned char *data,
uint8_t *buf, int ratio) {
}
static void vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
uint8_t *buf, int silence)
{
if (s->channels == 2) {
if ((s->block_align & 0x01) == 0) {
if (silence)
memset(data, 0, s->block_align * 2);
else
vmdaudio_decode_audio(s, data, buf, 1);
} else {
if (silence)
memset(data, 0, s->block_align * 2);
// else
// vmdaudio_decode_audio(s, data, buf, 1);
}
} else {
}
}
static int vmdaudio_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data;
unsigned int sound_flags;
unsigned char *output_samples = (unsigned char *)data;
/* point to the start of the encoded data */
unsigned char *p = buf + 16;
unsigned char *p_end = buf + buf_size;
if (buf[6] == 1) {
/* the chunk contains audio */
vmdaudio_loadsound(s, output_samples, p, 0);
} else if (buf[6] == 2) {
/* the chunk contains audio and silence mixed together */
sound_flags = LE_32(p);
p += 4;
/* do something with extrabufs here? */
while (p < p_end) {
if (sound_flags & 0x01)
/* audio */
vmdaudio_loadsound(s, output_samples, p, 1);
else
/* silence */
vmdaudio_loadsound(s, output_samples, p, 0);
p += s->block_align;
output_samples += (s->block_align * s->bits / 8);
sound_flags >>= 1;
}
} else if (buf[6] == 3) {
/* silent chunk */
vmdaudio_loadsound(s, output_samples, p, 1);
}
// *datasize = ;
return buf_size;
}
/*
* Public Data Structures
*/
AVCodec vmdvideo_decoder = {
"vmdvideo",
CODEC_TYPE_VIDEO,
CODEC_ID_VMDVIDEO,
sizeof(VmdVideoContext),
vmdvideo_decode_init,
NULL,
vmdvideo_decode_end,
vmdvideo_decode_frame,
CODEC_CAP_DR1,
};
AVCodec vmdaudio_decoder = {
"vmdaudio",
CODEC_TYPE_AUDIO,
CODEC_ID_VMDAUDIO,
sizeof(VmdAudioContext),
vmdaudio_decode_init,
NULL,
NULL,
vmdaudio_decode_frame,
};
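/*
* Usage note (a sketch, not part of the committed sources): like the other
* codecs in libavcodec, these AVCodec entries are intended to be registered
* from allcodecs.c, e.g.
*
* register_avcodec(&vmdvideo_decoder);
* register_avcodec(&vmdaudio_decoder);
*
* after which avcodec_find_decoder(CODEC_ID_VMDVIDEO) can find the video
* decoder.
*/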