Start of a video mixer node

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36893 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
David McPaul 2010-05-22 05:27:49 +00:00
parent da4fcd47f5
commit 0fe9fd368f
13 changed files with 2430 additions and 0 deletions

View File

@ -0,0 +1,88 @@
/*
* Copyright (C) 2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
// This class merges buffers together
// Merge is called every time a primary buffer needs to be passed downstream
// This should allow different framerates to be handled by buffering slower
// buffer producers and discarding buffers from faster producers
// TODO ColorConversion
#include "BufferMixer.h"
// Nothing to set up: the buffer map starts out empty.
BufferMixer::BufferMixer() {
}

// NOTE(review): any BBuffers still held in groupedBuffers are not Recycle()d
// here -- confirm the owner drains the mixer before destroying it.
BufferMixer::~BufferMixer() {
}
bool
BufferMixer::isBufferAvailable() {
return groupedBuffers[0] != NULL;
}
// Should only be called after checking with isBufferAvailable
BBuffer *
BufferMixer::GetOutputBuffer() {
	// Do the merging of all buffers in the groupedBuffers map
	// into the primary buffer and return that buffer.
	// The primary buffer is removed from the map (its slot is set to NULL,
	// ready for the next primary frame); caller takes ownership of it.
	BBuffer *outputBuffer = groupedBuffers[0];
	groupedBuffers[0] = NULL;
	map<int32, BBuffer*>::iterator each;
	for (each=groupedBuffers.begin(); each != groupedBuffers.end(); each++) {
		// guard against an entry aliasing the primary buffer, and skip
		// inputs that have not delivered a frame yet (NULL slots)
		if (each->second != outputBuffer) {
			if (each->second != NULL) {
				Merge(each->second, outputBuffer);
			}
		}
	}
	return outputBuffer;
}
#define ALPHABLEND(source, destination, alpha) (((destination) * (256 - (alpha)) + (source) * (alpha)) >> 8)

// Alpha-blends the input buffer over the output buffer in place.
// Currently only deals with RGBA32 (4 bytes per pixel); the blend factor is
// hard-coded to 50% and the source's own alpha channel is ignored.
void
BufferMixer::Merge(BBuffer *input, BBuffer *output) {
	uint8 *source = (uint8 *)input->Data();
	uint8 *destination = (uint8 *)output->Data();
	uint32 size = input->Header()->size_used / 4;
	const uint8 alpha = 128;	// 50% blend for now
	uint8 c1, c2, c3;

	for (uint32 i = 0; i < size; i++) {
		c1 = *source++;
		c2 = *source++;
		c3 = *source++;
		source++;	// skip the source alpha byte
		// Read each destination component before writing it back.  The
		// original `*destination++ = ALPHABLEND(c1, *destination, alpha)`
		// both read and modified `destination` without a sequence point,
		// which is undefined behaviour and breaks on modern compilers.
		destination[0] = ALPHABLEND(c1, destination[0], alpha);
		destination[1] = ALPHABLEND(c2, destination[1], alpha);
		destination[2] = ALPHABLEND(c3, destination[2], alpha);
		destination[3] = 0x00;
		destination += 4;
	}
}
// Queues a buffer for mixing.  Primary buffers are always stored under
// key 0; secondary buffers are keyed by their input id.  A buffer that is
// replaced before it was consumed is recycled back to its producer.
void
BufferMixer::AddBuffer(int32 id, BBuffer *buffer, bool isPrimary) {
	const int32 key = isPrimary ? 0 : id;
	BBuffer *previous = groupedBuffers[key];
	groupedBuffers[key] = buffer;
	if (previous != NULL) {
		previous->Recycle();
	}
}

View File

@ -0,0 +1,27 @@
/*
* Copyright (C) 2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef __BUFFER_MIXER__
#define __BUFFER_MIXER__
#include <media/Buffer.h>
#include <map>
// Merges frames from several producers into the primary producer's buffer.
// The primary input is stored under map key 0; secondary inputs under their
// input id.
class BufferMixer {
public:
	BufferMixer();
	~BufferMixer();

	// true once a primary buffer (key 0) has been queued
	bool isBufferAvailable();
	// blends all queued buffers into the primary one and hands it over;
	// only call after isBufferAvailable() returned true
	BBuffer *GetOutputBuffer();
	// queue a buffer; primary buffers go to key 0, others to `id`
	void AddBuffer(int32 id, BBuffer *buffer, bool isPrimary);
	// alpha-blend `input` over `output` in place (RGBA32 only)
	void Merge(BBuffer *input, BBuffer *output);

private:
	std::map<int32, BBuffer *> groupedBuffers;
};
#endif //__BUFFER_MIXER__

View File

@ -0,0 +1,146 @@
/*
* Copyright (C) 2009 David McPaul
*
* includes code from sysinfo.c which is
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
* Copyright (c) 2002, Carlos Hasan, for Haiku.
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <string.h>
#include <cpu_type.h>
#include "CpuCapabilities.h"
CPUCapabilities::~CPUCapabilities()
{
}

// Probes the host CPU.  Detection only runs on x86; elsewhere no
// capabilities are reported.
CPUCapabilities::CPUCapabilities()
{
	// Start from "no extensions": the member was previously left
	// uninitialised, so PrintCapabilities()/Has*() read garbage on non-x86
	// builds or when cpuid probing bailed out early.
	capabilities = 0;
#ifdef __INTEL__
	setIntelCapabilities();
#endif
	PrintCapabilities();
}
// Uses the cpuid instruction (via the kernel's get_cpuid()) to determine the
// highest SIMD extension level supported by an x86 CPU.
void
CPUCapabilities::setIntelCapabilities()
{
	cpuid_info baseInfo;
	cpuid_info cpuInfo;
	int32 maxStandardFunction, maxExtendedFunction = 0;

	if (get_cpuid(&baseInfo, 0L, 0L) != B_OK) {
		// this CPU doesn't support cpuid
		return;
	}

	maxStandardFunction = baseInfo.eax_0.max_eax;
	if (maxStandardFunction >= 500) {
		maxStandardFunction = 0; /* old Pentium sample chips has cpu signature here */
	}

	/* Extended cpuid */
	get_cpuid(&cpuInfo, 0x80000000, 0L);

	// extended cpuid is only supported if max_eax is greater than the service id
	if (cpuInfo.eax_0.max_eax > 0x80000000) {
		maxExtendedFunction = cpuInfo.eax_0.max_eax & 0xff;
	}
	// NOTE(review): maxExtendedFunction is computed but never used below --
	// confirm whether AMD extended-feature detection was intended here.

	if (maxStandardFunction > 0) {
		get_cpuid(&cpuInfo, 1L, 0L);
		// Each assignment below overwrites the previous one; since the tests
		// run in ascending CAPABILITY_* order, `capabilities` ends up holding
		// the highest supported level.
		if (cpuInfo.eax_1.features & (1UL << 23)) {
			capabilities = CAPABILITY_MMX;
		}
		if (cpuInfo.eax_1.features & (1UL << 25)) {
			capabilities = CAPABILITY_SSE1;
		}
		if (cpuInfo.eax_1.features & (1UL << 26)) {
			capabilities = CAPABILITY_SSE2;
		}
		if (maxStandardFunction >= 1) {
			/* Extended features (the ECX word of standard function 1) */
			if (cpuInfo.eax_1.extended_features & (1UL << 0)) {
				capabilities = CAPABILITY_SSE3;
			}
			if (cpuInfo.eax_1.extended_features & (1UL << 9)) {
				capabilities = CAPABILITY_SSSE3;
			}
			if (cpuInfo.eax_1.extended_features & (1UL << 19)) {
				capabilities = CAPABILITY_SSE41;
			}
			if (cpuInfo.eax_1.extended_features & (1UL << 20)) {
				capabilities = CAPABILITY_SSE42;
			}
		}
	}
}
// The Has*() queries rely on the CAPABILITY_* levels being strictly ordered:
// a CPU detected at level N is assumed to support every level below it.

bool
CPUCapabilities::HasMMX()
{
	return capabilities >= CAPABILITY_MMX;
}

bool
CPUCapabilities::HasSSE1()
{
	return capabilities >= CAPABILITY_SSE1;
}

bool
CPUCapabilities::HasSSE2()
{
	return capabilities >= CAPABILITY_SSE2;
}

bool
CPUCapabilities::HasSSE3()
{
	return capabilities >= CAPABILITY_SSE3;
}

bool
CPUCapabilities::HasSSSE3()
{
	return capabilities >= CAPABILITY_SSSE3;
}

bool
CPUCapabilities::HasSSE41()
{
	return capabilities >= CAPABILITY_SSE41;
}

bool
CPUCapabilities::HasSSE42()
{
	return capabilities >= CAPABILITY_SSE42;
}
void
CPUCapabilities::PrintCapabilities()
{
static const char *CapArray[8] = {
"", "MMX", "SSE1", "SSE2", "SSE3", "SSSE3", "SSE4.1", "SSE4.2"
};
printf("CPU is capable of running ");
if (capabilities) {
for (uint32 i=1;i<=capabilities;i++) {
printf("%s ",CapArray[i]);
}
} else {
printf("no extensions");
}
printf("\n");
}

View File

@ -0,0 +1,40 @@
/*
* Copyright (C) 2009 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef __CPU_CAPABILITIES__
#define __CPU_CAPABILITIES__
// Capability levels, in strictly ascending order: a CPU at level N is assumed
// to also support every level below it (see CPUCapabilities::Has*()).
#define CAPABILITY_MMX 1
#define CAPABILITY_SSE1 2
#define CAPABILITY_SSE2 3
#define CAPABILITY_SSE3 4
#define CAPABILITY_SSSE3 5
#define CAPABILITY_SSE41 6
#define CAPABILITY_SSE42 7

// Detects the SIMD instruction-set extensions of the host CPU.  Detection
// only runs on x86; on other architectures no capabilities are reported.
class CPUCapabilities {
public:
	CPUCapabilities();
	~CPUCapabilities();

	bool HasMMX();
	bool HasSSE1();
	bool HasSSE2();
	bool HasSSE3();
	bool HasSSSE3();
	bool HasSSE41();
	bool HasSSE42();
	// prints the detected extensions to stdout
	void PrintCapabilities();

private:
	uint32 capabilities;	// highest supported CAPABILITY_* level, 0 = none

	void setIntelCapabilities();
};
#endif //__CPU_CAPABILITIES__

View File

@ -0,0 +1,15 @@
# Build rules for the video_mixer media add-on.
SubDir HAIKU_TOP src add-ons media media-add-ons video_mixer ;

UsePrivateHeaders media shared ;

Addon video_mixer.media_addon :
	VideoMixerNode.cpp
	VideoMixerNodeConsumer.cpp
	VideoMixerNodeProducer.cpp
	VideoMixerNodeEventLooper.cpp
	VideoMixerAddOn.cpp
	CpuCapabilities.cpp
	BufferMixer.cpp
	yuvrgb.nasm
	: be media
;

View File

@ -0,0 +1,131 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
* VideoMixerAddOn.cpp
*
* The VideoMixerAddOn class
* makes instances of VideoMixerNode
*/
#include <Debug.h>
#include <MediaDefs.h>
#include <MediaAddOn.h>
#include <Errors.h>
#include "VideoMixerNode.h"
#include "VideoMixerAddOn.h"
#include <limits.h>
#include <stdio.h>
#include <string.h>
#define DEBUG 1
// instantiation function: the C entry point the media server looks up when
// it loads this add-on.
extern "C" _EXPORT BMediaAddOn *make_media_addon(image_id image)
{
	return new VideoMixerAddOn(image);
}

// -------------------------------------------------------- //
// ctor/dtor
// -------------------------------------------------------- //

VideoMixerAddOn::~VideoMixerAddOn()
{
}

VideoMixerAddOn::VideoMixerAddOn(image_id image) :
	BMediaAddOn(image)
{
	PRINT("VideoMixerAddOn::VideoMixerAddOn\n");
	// NOTE(review): refCount is initialised here but never read or updated
	// anywhere else in this file -- confirm it is still needed.
	refCount = 0;
}

// -------------------------------------------------------- //
// BMediaAddOn impl
// -------------------------------------------------------- //

// Always succeeds; this add-on has no external requirements to verify.
status_t VideoMixerAddOn::InitCheck(
	const char **out_failure_text)
{
	PRINT("VideoMixerAddOn::InitCheck\n");
	return B_OK;
}

// A single flavor: the video mixer node itself.
int32 VideoMixerAddOn::CountFlavors()
{
	PRINT("VideoMixerAddOn::CountFlavors\n");
	return 1;
}
// Fills out the flavor description for flavor 0 (the only one).  Per the
// BMediaAddOn contract, the flavor_info array allocated here is handed to
// the caller, which takes ownership.
status_t VideoMixerAddOn::GetFlavorAt(
	int32 n,
	const flavor_info **out_info)
{
	PRINT("VideoMixerAddOn::GetFlavorAt\n");
	if (n != 0) {
		fprintf(stderr,"<- B_BAD_INDEX\n");
		return B_BAD_INDEX;
	}
	flavor_info *infos = new flavor_info[1];
	VideoMixerNode::GetFlavor(&infos[0], n);
	(*out_info) = infos;
	return B_OK;
}

// Creates a new mixer node for the given flavor; reports the node's
// InitCheck() result through out_error.
BMediaNode * VideoMixerAddOn::InstantiateNodeFor(
	const flavor_info * info,
	BMessage * config,
	status_t * out_error)
{
	PRINT("VideoMixerAddOn::InstantiateNodeFor\n");
	VideoMixerNode *node = new VideoMixerNode(info, config, this);
	if (node == NULL) {
		// NOTE(review): plain operator new throws rather than returning NULL
		// on standard compilers, so this branch is effectively dead there.
		*out_error = B_NO_MEMORY;
		fprintf(stderr,"<- B_NO_MEMORY\n");
	} else {
		*out_error = node->InitCheck();
	}
	return node;
}

// Delegates to the node; B_BAD_TYPE if the node is not a VideoMixerNode.
status_t VideoMixerAddOn::GetConfigurationFor(
	BMediaNode * your_node,
	BMessage * into_message)
{
	PRINT("VideoMixerAddOn::GetConfigurationFor\n");
	VideoMixerNode *node = dynamic_cast<VideoMixerNode *>(your_node);
	if (node == NULL) {
		fprintf(stderr,"<- B_BAD_TYPE\n");
		return B_BAD_TYPE;
	}
	return node->GetConfigurationFor(into_message);
}

// Mixer nodes are only instantiated on demand, never auto-started.
bool VideoMixerAddOn::WantsAutoStart()
{
	PRINT("VideoMixerAddOn::WantsAutoStart\n");
	return false;
}

// No-op: never called because WantsAutoStart() returns false.
status_t VideoMixerAddOn::AutoStart(
	int in_count,
	BMediaNode **out_node,
	int32 *out_internal_id,
	bool *out_has_more)
{
	PRINT("VideoMixerAddOn::AutoStart\n");
	return B_OK;
}
// -------------------------------------------------------- //
// main
// -------------------------------------------------------- //

// Media add-ons are shared objects loaded by the media server; running the
// binary directly is an error.  Report it and exit with a failure status
// (the original fell off the end of main, implicitly returning success).
int main(int argc, char *argv[])
{
	fprintf(stderr,"VideoMixerAddOn cannot be run\n");
	return 1;
}

View File

@ -0,0 +1,38 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef _VIDEO_MIXER_ADD_ON_H
#define _VIDEO_MIXER_ADD_ON_H
#include <MediaAddOn.h>
// BMediaAddOn subclass that exposes a single flavor: the VideoMixerNode.
class VideoMixerAddOn : public BMediaAddOn {
public:
	virtual ~VideoMixerAddOn(void);
	explicit VideoMixerAddOn(image_id image);

	// BMediaAddOn hooks
	virtual status_t InitCheck(const char **out_failure_text);
	virtual int32 CountFlavors(void);
	virtual status_t GetFlavorAt(int32 n,
			const flavor_info **out_info);
	virtual BMediaNode* InstantiateNodeFor(const flavor_info *info,
			BMessage *config, status_t *out_error);
	virtual status_t GetConfigurationFor(BMediaNode *your_node,
			BMessage *into_message);
	virtual bool WantsAutoStart(void);
	virtual status_t AutoStart(int in_count, BMediaNode **out_node,
			int32 *out_internal_id,
			bool *out_has_more);

private:
	uint32 refCount;	// NOTE(review): never used -- confirm still needed
};

// The media kit resolves the C symbol "make_media_addon" when loading an
// add-on, and that is the symbol VideoMixerAddOn.cpp actually defines and
// exports.  The previous declaration here ("make_video_mixer_add_on") named
// a function that is defined nowhere.
extern "C" _EXPORT BMediaAddOn *make_media_addon(image_id you);
#endif /* _VIDEO_MIXER_ADD_ON_H */

View File

@ -0,0 +1,351 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
* VideoMixerNode.cpp
*
* The VideoMixerNode class takes in multiple video streams and supplies
* a single stream as the output.
* each stream is converted to the same colourspace and should match
* either the primary input OR the requested colourspace from the output
* destination.
*
* The first input is considered the primary input
* subsequent input framesize should match the primary input framesize
* The output framerate will be the same as the primary input
*
*/
#include <stdio.h>
#include <string.h>
#include "VideoMixerNode.h"
VideoMixerNode::~VideoMixerNode(void)
{
	fprintf(stderr,"VideoMixerNode::~VideoMixerNode\n");
	// Stop the BMediaEventLooper thread
	Quit();
	// NOTE(review): the media_input objects held in fConnectedInputs were
	// allocated with new in CreateInput() and are never deleted -- confirm.
}
// Constructs an unregistered mixer node.  The default arguments live on the
// declaration in VideoMixerNode.h; repeating them on this out-of-line
// definition (as the original did) is ill-formed C++ ([dcl.fct.default]),
// so they are omitted here.
VideoMixerNode::VideoMixerNode(
		const flavor_info *info,
		BMessage *config,
		BMediaAddOn *addOn)
	: BMediaNode("VideoMixerNode"),
	  BBufferConsumer(B_MEDIA_RAW_VIDEO),	// Raw video buffers in
	  BBufferProducer(B_MEDIA_RAW_VIDEO),	// Raw video buffers out
	  BMediaEventLooper()
{
	fprintf(stderr,"VideoMixerNode::VideoMixerNode\n");
	// keep our creator around for AddOn calls later
	fAddOn = addOn;
	// NULL out our latency estimates
	fDownstreamLatency = 0;
	fInternalLatency = 0;
	// Start with 1 input and 1 output
	ClearInput(&fInitialInput);
	strncpy(fOutput.name,"VideoMixer Output", B_MEDIA_NAME_LENGTH-1);
	fOutput.name[B_MEDIA_NAME_LENGTH-1] = '\0';
	// initialize the output
	fOutput.node = media_node::null; // until registration
	fOutput.destination = media_destination::null;
	fOutput.source.port = ControlPort();
	fOutput.source.id = 0;
	GetOutputFormat(&fOutput.format);
	fInitCheckStatus = B_OK;
}
// Called once the media roster has registered this node: binds all inputs
// and the output to the now-valid node/port, then starts the event loop.
void VideoMixerNode::NodeRegistered(void)
{
	fprintf(stderr,"VideoMixerNode::NodeRegistered\n");
	// for every input created so far, bind it to this (now registered) node
	for (uint32 i=0;i<fConnectedInputs.size();i++) {
		fConnectedInputs[i]->node = Node();
		fConnectedInputs[i]->destination.id = i;
		fConnectedInputs[i]->destination.port = ControlPort();
	}
	// the free "initial" input always sits one past the connected ones
	fInitialInput.node = Node();
	fInitialInput.destination.id = fConnectedInputs.size();
	fInitialInput.destination.port = ControlPort();
	GetOutputFormat(&fOutput.format);
	fOutput.node = Node();
	// start the BMediaEventLooper thread
	SetPriority(B_REAL_TIME_PRIORITY);
	Run();
}
// Allocates and initialises a fresh input named after its id.  The caller
// (Connected()) stores the pointer in fConnectedInputs.
media_input *
VideoMixerNode::CreateInput(uint32 inputID) {
	media_input *input = new media_input();
	ClearInput(input);
	// snprintf instead of sprintf: media_input::name is a fixed-size array,
	// so bound the write and guarantee NUL-termination; %lu (with a cast)
	// matches the unsigned id instead of the original signed %ld.
	snprintf(input->name, sizeof(input->name), "VideoMixer Input %lu",
		(unsigned long)inputID);
	return input;
}
// Resets an input to its disconnected state: default name, bound to this
// node, no source/destination, wildcard raw-video format.
void
VideoMixerNode::ClearInput(media_input *input) {
	strcpy(input->name, "VideoMixer Input");
	input->node = Node();
	input->source = media_source::null;
	input->destination = media_destination::null;
	GetInputFormat(&input->format);
}
// Linear scan of the connected inputs for one fed by the given source.
media_input *
VideoMixerNode::GetInput(const media_source &source) {
	for (uint32 i = 0; i < fConnectedInputs.size(); i++) {
		if (fConnectedInputs[i]->source == source)
			return fConnectedInputs[i];
	}
	return NULL;
}

// Linear scan for the input connected at the given destination.
media_input *
VideoMixerNode::GetInput(const media_destination &destination) {
	for (uint32 i = 0; i < fConnectedInputs.size(); i++) {
		if (fConnectedInputs[i]->destination == destination)
			return fConnectedInputs[i];
	}
	return NULL;
}

// Linear scan for the input whose destination carries the given id.
media_input *
VideoMixerNode::GetInput(const int32 id) {
	for (uint32 i = 0; i < fConnectedInputs.size(); i++) {
		if (fConnectedInputs[i]->destination.id == id)
			return fConnectedInputs[i];
	}
	return NULL;
}
// Returns the status recorded by the constructor.
status_t VideoMixerNode::InitCheck(void) const
{
	fprintf(stderr,"VideoMixerNode::InitCheck\n");
	return fInitCheckStatus;
}

// No configurable state yet, so there is nothing to put in the message.
status_t VideoMixerNode::GetConfigurationFor(
	BMessage *into_message)
{
	fprintf(stderr,"VideoMixerNode::GetConfigurationFor\n");
	return B_OK;
}
// -------------------------------------------------------- //
// implementation of BMediaNode
// -------------------------------------------------------- //

// Reports the add-on that instantiated this node (NULL for an app-created
// node) and, on request, the flavor's internal id.
BMediaAddOn *VideoMixerNode::AddOn(
	int32 *internal_id) const
{
	fprintf(stderr,"VideoMixerNode::AddOn\n");
	// BeBook says this only gets called if we were in an add-on.
	if (fAddOn != NULL) {
		// If we get a null pointer then we just won't write.
		if (internal_id != NULL) {
			// The original assigned the pointer itself (`internal_id = 0`),
			// leaving the caller's variable untouched; write through it.
			*internal_id = 0;
		}
	}
	return fAddOn;
}
// The transport hooks below just log the call and forward to the
// BMediaEventLooper / BMediaNode base implementation.

void VideoMixerNode::Start(bigtime_t performance_time)
{
	fprintf(stderr,"VideoMixerNode::Start(pt=%lld)\n", performance_time);
	BMediaEventLooper::Start(performance_time);
}

void VideoMixerNode::Stop(
	bigtime_t performance_time,
	bool immediate)
{
	if (immediate) {
		fprintf(stderr,"VideoMixerNode::Stop(pt=%lld,<immediate>)\n", performance_time);
	} else {
		fprintf(stderr,"VideoMixerNode::Stop(pt=%lld,<scheduled>)\n", performance_time);
	}
	BMediaEventLooper::Stop(performance_time, immediate);
}

void VideoMixerNode::Seek(
	bigtime_t media_time,
	bigtime_t performance_time)
{
	fprintf(stderr,"VideoMixerNode::Seek(mt=%lld,pt=%lld)\n", media_time,performance_time);
	BMediaEventLooper::Seek(media_time, performance_time);
}

void VideoMixerNode::SetRunMode(run_mode mode)
{
	fprintf(stderr,"VideoMixerNode::SetRunMode(%i)\n", mode);
	BMediaEventLooper::SetRunMode(mode);
}

void VideoMixerNode::TimeWarp(
	bigtime_t at_real_time,
	bigtime_t to_performance_time)
{
	fprintf(stderr,"VideoMixerNode::TimeWarp(rt=%lld,pt=%lld)\n", at_real_time, to_performance_time);
	BMediaEventLooper::TimeWarp(at_real_time, to_performance_time);
}

void VideoMixerNode::Preroll(void)
{
	fprintf(stderr,"VideoMixerNode::Preroll\n");
	// XXX:Performance opportunity
	BMediaNode::Preroll();
}

void VideoMixerNode::SetTimeSource(BTimeSource *time_source)
{
	fprintf(stderr,"VideoMixerNode::SetTimeSource\n");
	BMediaNode::SetTimeSource(time_source);
}
// Dispatches incoming control-port messages: tries the consumer side, then
// the producer side, then the plain node handler; anything still unhandled
// is reported via HandleBadMessage().
status_t VideoMixerNode::HandleMessage(
	int32 message,
	const void *data,
	size_t size)
{
	fprintf(stderr,"VideoMixerNode::HandleMessage\n");
	status_t status = B_OK;

	switch (message) {
		// no special messages for now
		default:
			status = BBufferConsumer::HandleMessage(message, data, size);
			if (status == B_OK) {
				break;
			}
			status = BBufferProducer::HandleMessage(message, data, size);
			if (status == B_OK) {
				break;
			}
			status = BMediaNode::HandleMessage(message, data, size);
			if (status == B_OK) {
				break;
			}
			BMediaNode::HandleBadMessage(message, data, size);
			status = B_ERROR;
			break;
	}
	return status;
}
// The reserved hooks below just log and defer to the base class.

status_t VideoMixerNode::RequestCompleted(const media_request_info &info)
{
	fprintf(stderr,"VideoMixerNode::RequestCompleted\n");
	return BMediaNode::RequestCompleted(info);
}

status_t VideoMixerNode::DeleteHook(BMediaNode *node)
{
	fprintf(stderr,"VideoMixerNode::DeleteHook\n");
	return BMediaEventLooper::DeleteHook(node);
}

status_t VideoMixerNode::GetNodeAttributes(
	media_node_attribute *outAttributes,
	size_t inMaxCount)
{
	fprintf(stderr,"VideoMixerNode::GetNodeAttributes\n");
	return BMediaNode::GetNodeAttributes(outAttributes, inMaxCount);
}

status_t VideoMixerNode::AddTimer(
	bigtime_t at_performance_time,
	int32 cookie)
{
	fprintf(stderr,"VideoMixerNode::AddTimer\n");
	return BMediaEventLooper::AddTimer(at_performance_time, cookie);
}
// -------------------------------------------------------- //
// VideoMixerNode specific functions
// -------------------------------------------------------- //

// public:

// Fills out the flavor description used by VideoMixerAddOn::GetFlavorAt().
// The media_format arrays allocated here are attached to the flavor_info;
// its consumer takes ownership of them.
void VideoMixerNode::GetFlavor(flavor_info *outInfo, int32 id)
{
	fprintf(stderr,"VideoMixerNode::GetFlavor\n");
	if (outInfo != NULL) {
		outInfo->internal_id = id;
		outInfo->name = "Haiku VideoMixer";
		outInfo->info = "A VideoMixerNode node mixes multiple video streams into a single stream.";
		outInfo->kinds = B_BUFFER_CONSUMER | B_BUFFER_PRODUCER;
		outInfo->flavor_flags = B_FLAVOR_IS_LOCAL;
		outInfo->possible_count = INT_MAX;	// no limit
		outInfo->in_format_count = 1;
		media_format *inFormats = new media_format[outInfo->in_format_count];
		GetInputFormat(&inFormats[0]);
		outInfo->in_formats = inFormats;
		outInfo->out_format_count = 1; // single output
		media_format *outFormats = new media_format[outInfo->out_format_count];
		GetOutputFormat(&outFormats[0]);
		outInfo->out_formats = outFormats;
	}
}
// Both directions currently advertise a fully wildcarded raw-video format.

void VideoMixerNode::GetInputFormat(media_format *outFormat)
{
	fprintf(stderr,"VideoMixerNode::GetInputFormat\n");
	if (outFormat == NULL)
		return;
	outFormat->type = B_MEDIA_RAW_VIDEO;
	outFormat->require_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	outFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	outFormat->u.raw_video = media_raw_video_format::wildcard;
}

void VideoMixerNode::GetOutputFormat(media_format *outFormat)
{
	fprintf(stderr,"VideoMixerNode::GetOutputFormat\n");
	if (outFormat == NULL)
		return;
	outFormat->type = B_MEDIA_RAW_VIDEO;
	outFormat->require_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	outFormat->deny_flags = B_MEDIA_MAUI_UNDEFINED_FLAGS;
	outFormat->u.raw_video = media_raw_video_format::wildcard;
}
// protected:

// Hook for narrowing a proposed format to this node's requirements;
// currently accepts anything unchanged.
status_t VideoMixerNode::AddRequirements(media_format *format)
{
	fprintf(stderr,"VideoMixerNode::AddRequirements\n");
	return B_OK;
}

View File

@ -0,0 +1,330 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
*/
#ifndef _VIDEO_MIXER_NODE_H
#define _VIDEO_MIXER_NODE_H
#include <Buffer.h>
#include <BufferConsumer.h>
#include <BufferGroup.h>
#include <BufferProducer.h>
#include <MediaAddOn.h>
#include <MediaDefs.h>
#include <MediaEventLooper.h>
#include <MediaNode.h>
#include <TimeSource.h>
#include <vector>
#include "BufferMixer.h"
// VideoMixerNode: simultaneously a BBufferConsumer (many raw-video inputs)
// and a BBufferProducer (one raw-video output), driven by a
// BMediaEventLooper control thread.  The first connected input is the
// primary one; frames are merged via the embedded BufferMixer.
class VideoMixerNode :
	public BBufferConsumer,
	public BBufferProducer,
	public BMediaEventLooper
{
protected:
	virtual ~VideoMixerNode(void);

public:
	explicit VideoMixerNode(
			const flavor_info * info = 0,
			BMessage *config = 0,
			BMediaAddOn *addOn = 0);

	virtual status_t InitCheck(void) const;

	// see BMediaAddOn::GetConfigurationFor
	virtual status_t GetConfigurationFor(
			BMessage *into_message);

public:
//	/* this port is what a media node listens to for commands */
//	virtual port_id ControlPort(void) const;

	virtual BMediaAddOn* AddOn(
			int32 *internal_id) const;	/* Who instantiated you -- or NULL for app class */

protected:
	/* These don't return errors; instead, they use the global error condition reporter. */
	/* A node is required to have a queue of at least one pending command (plus TimeWarp) */
	/* and is recommended to allow for at least one pending command of each type. */
	/* Allowing an arbitrary number of outstanding commands might be nice, but apps */
	/* cannot depend on that happening. */
	virtual void Start(
			bigtime_t performance_time);
	virtual void Stop(
			bigtime_t performance_time,
			bool immediate);
	virtual void Seek(
			bigtime_t media_time,
			bigtime_t performance_time);
	virtual void SetRunMode(
			run_mode mode);
	virtual void TimeWarp(
			bigtime_t at_real_time,
			bigtime_t to_performance_time);
	virtual void Preroll(void);
	virtual void SetTimeSource(BTimeSource *time_source);

public:
	virtual status_t HandleMessage(
			int32 message,
			const void *data,
			size_t size);

protected:
	/* Called when requests have completed, or failed. */
	virtual status_t RequestCompleted(	/* reserved 0 */
			const media_request_info &info);

protected:
	virtual status_t DeleteHook(BMediaNode *node);	/* reserved 1 */
	virtual void NodeRegistered(void);	/* reserved 2 */

public:
	/* fill out your attributes in the provided array, returning however many you have. */
	virtual status_t GetNodeAttributes(	/* reserved 3 */
			media_node_attribute *outAttributes,
			size_t inMaxCount);
	virtual status_t AddTimer(
			bigtime_t at_performance_time,
			int32 cookie);

	// ----- BBufferConsumer interface (see VideoMixerNodeConsumer.cpp) -----

	/* Someone, probably the producer, is asking you about this format. Give */
	/* your honest opinion, possibly modifying *format. Do not ask upstream */
	/* producer about the format, since he's synchronously waiting for your */
	/* reply. */
	virtual status_t AcceptFormat(
			const media_destination &dest,
			media_format *format);
	virtual status_t GetNextInput(
			int32 * cookie,
			media_input *out_input);
	virtual void DisposeInputCookie(int32 cookie);
	virtual void BufferReceived(BBuffer *buffer);
	virtual void ProducerDataStatus(
			const media_destination &for_whom,
			int32 status,
			bigtime_t at_performance_time);
	virtual status_t GetLatencyFor(
			const media_destination &for_whom,
			bigtime_t *out_latency,
			media_node_id *out_timesource);
	virtual status_t Connected(
			const media_source &producer,	/* here's a good place to request buffer group usage */
			const media_destination &where,
			const media_format &with_format,
			media_input *out_input);
	virtual void Disconnected(
			const media_source &producer,
			const media_destination &where);
	/* The notification comes from the upstream producer, so he's already cool with */
	/* the format; you should not ask him about it in here. */
	virtual status_t FormatChanged(
			const media_source &producer,
			const media_destination &consumer,
			int32 change_tag,
			const media_format &format);
	/* Given a performance time of some previous buffer, retrieve the remembered tag */
	/* of the closest (previous or exact) performance time. Set *out_flags to 0; the */
	/* idea being that flags can be added later, and the understood flags returned in */
	/* *out_flags. */
	virtual status_t SeekTagRequested(
			const media_destination &destination,
			bigtime_t in_target_time,
			uint32 in_flags,
			media_seek_tag *out_seek_tag,
			bigtime_t *out_tagged_time,
			uint32 *out_flags);

protected:
	/* functionality of BBufferProducer (see VideoMixerNodeProducer.cpp) */
	virtual status_t FormatSuggestionRequested(
			media_type type,
			int32 quality,
			media_format *format);
	virtual status_t FormatProposal(
			const media_source &output,
			media_format *format);
	/* If the format isn't good, put a good format into *io_format and return error */
	/* If format has wildcard, specialize to what you can do (and change). */
	/* If you can change the format, return OK. */
	/* The request comes from your destination sychronously, so you cannot ask it */
	/* whether it likes it -- you should assume it will since it asked. */
	virtual status_t FormatChangeRequested(
			const media_source &source,
			const media_destination &destination,
			media_format *io_format,
			int32 *_deprecated_);
	virtual status_t GetNextOutput(	/* cookie starts as 0 */
			int32 *cookie,
			media_output *out_output);
	virtual status_t DisposeOutputCookie(
			int32 cookie);
	/* In this function, you should either pass on the group to your upstream guy, */
	/* or delete your current group and hang on to this group. Deleting the previous */
	/* group (unless you passed it on with the reclaim flag set to false) is very */
	/* important, else you will 1) leak memory and 2) block someone who may want */
	/* to reclaim the buffers living in that group. */
	virtual status_t SetBufferGroup(
			const media_source &for_source,
			BBufferGroup * group);
	/* Format of clipping is (as int16-s): <from line> <npairs> <startclip> <endclip>. */
	/* Repeat for each line where the clipping is different from the previous line. */
	/* If <npairs> is negative, use the data from line -<npairs> (there are 0 pairs after */
	/* a negative <npairs>. Yes, we only support 32k*32k frame buffers for clipping. */
	/* Any non-0 field of 'display' means that that field changed, and if you don't support */
	/* that change, you should return an error and ignore the request. Note that the buffer */
	/* offset values do not have wildcards; 0 (or -1, or whatever) are real values and must */
	/* be adhered to. */
	virtual status_t VideoClippingChanged(
			const media_source &for_source,
			int16 num_shorts,
			int16 *clip_data,
			const media_video_display_info &display,
			int32 *_deprecated_);
	/* Iterates over all outputs and maxes the latency found */
	virtual status_t GetLatency(bigtime_t *out_latency);
	virtual status_t PrepareToConnect(
			const media_source &what,
			const media_destination &where,
			media_format *format,
			media_source *out_source,
			char *out_name);
	virtual void Connect(
			status_t error,
			const media_source &source,
			const media_destination &destination,
			const media_format &format,
			char *io_name);
	virtual void Disconnect(
			const media_source &what,
			const media_destination &where);
	virtual void LateNoticeReceived(
			const media_source &what,
			bigtime_t how_much,
			bigtime_t performance_time);
	virtual void EnableOutput(
			const media_source &what,
			bool enabled,
			int32 *_deprecated_);
	virtual status_t SetPlayRate(
			int32 numer,
			int32 denom);
	virtual void AdditionalBufferRequested(	// used to be Reserved 0
			const media_source & source,
			media_buffer_id prev_buffer,
			bigtime_t prev_time,
			const media_seek_tag *prev_tag);	// may be NULL
	virtual void LatencyChanged(	// used to be Reserved 1
			const media_source & source,
			const media_destination & destination,
			bigtime_t new_latency,
			uint32 flags);

protected:
	/* you must override to handle your events! */
	/* you should not call HandleEvent directly */
	virtual void HandleEvent(	const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	/* override to clean up custom events you have added to your queue */
	virtual void CleanUpEvent(const media_timed_event *event);
	/* called from Offline mode to determine the current time of the node */
	/* update your internal information whenever it changes */
	virtual bigtime_t OfflineTime();
	/* override only if you know what you are doing! */
	/* otherwise much badness could occur */
	/* the actual control loop function: */
	/* waits for messages, Pops events off the queue and calls DispatchEvent */
	virtual void ControlLoop();

protected:
	// per-event-type handlers dispatched from HandleEvent
	virtual status_t HandleStart(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	virtual status_t HandleSeek(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	virtual status_t HandleWarp(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	virtual status_t HandleStop(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	virtual status_t HandleBuffer(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	virtual status_t HandleDataStatus(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);
	virtual status_t HandleParameter(
			const media_timed_event *event,
			bigtime_t lateness,
			bool realTimeEvent = false);

protected:
	//void CreateBufferGroup(MediaOutputInfo *output_info);
	void ComputeInternalLatency();

public:
	static void GetFlavor(flavor_info *outInfo, int32 id);

private:
	// input bookkeeping helpers (inputs are heap-allocated media_inputs)
	media_input *CreateInput(uint32 inputID);
	void ClearInput(media_input *input);
	media_input *GetInput(const media_source &source);
	media_input *GetInput(const media_destination &destination);
	media_input *GetInput(const int32 id);

	static void GetInputFormat(media_format *outFormat);
	static void GetOutputFormat(media_format *outFormat);

protected:
	virtual status_t AddRequirements(media_format *format);

private:
	status_t fInitCheckStatus;	// set by the constructor

	BMediaAddOn *fAddOn;	// creator, NULL for app-instantiated nodes

	media_input fInitialInput;	// the free input offered for connection
	std::vector<media_input *> fConnectedInputs;	// owned; see CreateInput
	media_output fOutput;

	bigtime_t fDownstreamLatency;
	bigtime_t fInternalLatency;

	BufferMixer bufferMixer;	// merges queued frames into the primary one
};
#endif /* _VIDEO_MIXER_NODE_H */

View File

@ -0,0 +1,251 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
* VideoMixerNode.cpp
*
* The VideoMixerNode class
* takes in multiple video streams and supplies
* a single stream as the output.
* each stream is converted to the same colourspace
*/
#include "VideoMixerNode.h"
// -------------------------------------------------------- //
// implementation of BBufferConsumer
// -------------------------------------------------------- //
// Check to make sure the format is okay, then remove
// any wildcards corresponding to our requirements.
status_t VideoMixerNode::AcceptFormat(
	const media_destination &dest,
	media_format *format)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::AcceptFormat\n");
	// only the free "initial" input may be negotiated
	if (fInitialInput.destination != dest) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION");
		return B_MEDIA_BAD_DESTINATION;	// none of our inputs matched the dest
	}
	// NOTE(review): myFormat is fetched but never compared against *format --
	// confirm whether a compatibility check was intended here.
	media_format myFormat;
	GetInputFormat(&myFormat);
	AddRequirements(format);
	return B_OK;
}
// Iterates the node's inputs for the media roster.  Cookies 0..N-1 address
// the connected inputs; cookie N (== fConnectedInputs.size()) is the free
// "initial" input still available for connection.
status_t VideoMixerNode::GetNextInput(
	int32 *cookie,
	media_input *out_input)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::GetNextInput (%ld)\n",*cookie);
	if (*cookie == (int32)fConnectedInputs.size()) {
		*out_input = fInitialInput;
	} else {
		media_input *input = GetInput(*cookie);
		if (input == NULL) {
			fprintf(stderr,"<- B_ERROR (no more inputs)\n");
			return B_ERROR;
		}
		// The original did `out_input = GetInput(*cookie)`, overwriting the
		// local pointer and leaving the caller's struct untouched; copy the
		// input into the caller-provided storage instead.
		*out_input = *input;
	}

	// so next time they won't get the same input again
	(*cookie)++;
	return B_OK;
}
void VideoMixerNode::DisposeInputCookie(
	int32 cookie)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::DisposeInputCookie\n");
	// nothing to do since our cookies are just integers
}

// Queues an incoming raw-video buffer onto the event loop; the buffer is
// recycled immediately on any failure so the producer gets it back.
void VideoMixerNode::BufferReceived(BBuffer *buffer)
{
	switch (buffer->Header()->type) {
//		case B_MEDIA_PARAMETERS:
//			{
//			status_t status = ApplyParameterData(buffer->Data(),buffer->SizeUsed());
//			if (status != B_OK) {
//				fprintf(stderr,"ApplyParameterData in MediaDemultiplexerNode::BufferReceived failed\n");
//			}
//			buffer->Recycle();
//			}
//			break;
		case B_MEDIA_RAW_VIDEO:
			if (buffer->Flags() & BBuffer::B_SMALL_BUFFER) {
				fprintf(stderr,"NOT IMPLEMENTED: B_SMALL_BUFFER in VideoMixerNode::BufferReceived\n");
				// XXX: implement this part
				buffer->Recycle();
			} else {
				// hand the buffer to the event loop; it will be recycled
				// automatically once the B_HANDLE_BUFFER event is consumed
				media_timed_event event(buffer->Header()->start_time, BTimedEventQueue::B_HANDLE_BUFFER,
						buffer, BTimedEventQueue::B_RECYCLE_BUFFER);
				status_t status = EventQueue()->AddEvent(event);
				if (status != B_OK) {
					fprintf(stderr,"EventQueue()->AddEvent(event) in VideoMixerNode::BufferReceived failed\n");
					buffer->Recycle();
				}
			}
			break;
		default:
			fprintf(stderr,"unexpected buffer type in VideoMixerNode::BufferReceived\n");
			buffer->Recycle();
			break;
	}
}
// Forwards an upstream producer's data-status change to the event loop,
// tagged with the affected input.
void VideoMixerNode::ProducerDataStatus(
	const media_destination &for_whom,
	int32 status,
	bigtime_t at_performance_time)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::ProducerDataStatus\n");

	media_input *input = GetInput(for_whom);
	if (input == NULL) {
		fprintf(stderr,"invalid destination received in VideoMixerNode::ProducerDataStatus\n");
		return;
	}

	// Pass the media_input pointer itself as the event's pointer payload.
	// The original passed `&input` -- the address of this stack-local
	// pointer variable -- which is both the wrong type (media_input **) and
	// dangling by the time the event is handled.
	media_timed_event event(at_performance_time, BTimedEventQueue::B_DATA_STATUS,
			input, BTimedEventQueue::B_NO_CLEANUP, status, 0, NULL);
	EventQueue()->AddEvent(event);
}
status_t VideoMixerNode::GetLatencyFor(
				const media_destination &for_whom,
				bigtime_t *out_latency,
				media_node_id *out_timesource)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::GetLatencyFor\n");

	// Both output parameters must be valid pointers.
	if (out_latency == NULL || out_timesource == NULL) {
		fprintf(stderr,"<- B_BAD_VALUE\n");
		return B_BAD_VALUE;
	}

	// The destination has to name one of our inputs.
	if (GetInput(for_whom) == NULL) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return B_MEDIA_BAD_DESTINATION;
	}

	// Report our event latency and which time source we slave to.
	*out_latency = EventLatency();
	*out_timesource = TimeSource()->ID();
	return B_OK;
}
status_t VideoMixerNode::Connected(
				const media_source &producer, /* here's a good place to request buffer group usage */
				const media_destination &where,
				const media_format &with_format,
				media_input *out_input)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::Connected\n");

	// Only the free (connecting) input may be the target of a new connection.
	if (fInitialInput.destination != where) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return B_MEDIA_BAD_DESTINATION;
	}

	// Promote the connecting input to a fully connected one.
	media_input *newInput = CreateInput(fConnectedInputs.size());
	fConnectedInputs.push_back(newInput);

	// Specialise the output?

	// No real measurement yet — guess the processing latency.
	fInternalLatency = 500;
	fprintf(stderr,"	internal latency guessed = %lld\n", fInternalLatency);
	SetEventLatency(fInternalLatency);

	// Record the agreed upon connection values and report them back.
	newInput->source = producer;
	newInput->destination = where;
	newInput->format = with_format;
	*out_input = *newInput;

	// Prepare a fresh initial input for the next connection attempt.
	ClearInput(&fInitialInput);
	fInitialInput.destination.id = fConnectedInputs.size();
	fInitialInput.destination.port = ControlPort();

	return B_OK;
}
void VideoMixerNode::Disconnected(
				const media_source &producer,
				const media_destination &where)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::Disconnected\n");

	media_input *input = GetInput(where);
	if (input == NULL) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
	} else if (input->source != producer) {
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
	} else {
		// Keep the input object alive (important) — only mark it as
		// disconnected and restore our preferred input format.
		input->source = media_source::null;
		GetInputFormat(&input->format);
	}
}
/* The notification comes from the upstream producer, so he's already cool with */
/* the format; you should not ask him about it in here. */
status_t VideoMixerNode::FormatChanged(
				const media_source & producer,
				const media_destination & consumer,
				int32 change_tag,
				const media_format & format)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::FormatChanged\n");

	// Locate the input fed by this producer and check that it really is
	// connected to the given consumer destination.
	media_input *input = GetInput(producer);
	if (input == NULL)
		return B_MEDIA_BAD_SOURCE;
	if (input->destination != consumer)
		return B_MEDIA_BAD_DESTINATION;

	// The producer already settled on this format; just record it.
	input->format = format;
	return B_OK;
}
/* Given a performance time of some previous buffer, retrieve the remembered tag */
/* of the closest (previous or exact) performance time. Set *out_flags to 0; the */
/* idea being that flags can be added later, and the understood flags returned in */
/* *out_flags. */
status_t VideoMixerNode::SeekTagRequested(
				const media_destination & destination,
				bigtime_t in_target_time,
				uint32 in_flags,
				media_seek_tag * out_seek_tag,
				bigtime_t * out_tagged_time,
				uint32 * out_flags)
{
	fprintf(stderr,"VideoMixerNode(BBufferConsumer)::SeekTagRequested\n");

	// XXX: implement this — for now defer to the default implementation.
	return BBufferConsumer::SeekTagRequested(destination, in_target_time,
			in_flags, out_seek_tag, out_tagged_time, out_flags);
}

View File

@ -0,0 +1,182 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
* VideoMixerNode.cpp
*
* The VideoMixerNode class
* takes in multiple video streams and supplies
* a single stream as the output.
* each stream is converted to the same colourspace
*/
#include "VideoMixerNode.h"
// -------------------------------------------------------- //
// implementation for BMediaEventLooper
// -------------------------------------------------------- //
void VideoMixerNode::HandleEvent(
const media_timed_event *event,
bigtime_t lateness,
bool realTimeEvent = false)
{
switch (event->type) {
case BTimedEventQueue::B_START:
HandleStart(event,lateness,realTimeEvent);
break;
case BTimedEventQueue::B_SEEK:
HandleSeek(event,lateness,realTimeEvent);
break;
case BTimedEventQueue::B_WARP:
HandleWarp(event,lateness,realTimeEvent);
break;
case BTimedEventQueue::B_STOP:
HandleStop(event,lateness,realTimeEvent);
break;
case BTimedEventQueue::B_HANDLE_BUFFER:
if (RunState() == BMediaEventLooper::B_STARTED) {
HandleBuffer(event,lateness,realTimeEvent);
}
break;
case BTimedEventQueue::B_DATA_STATUS:
HandleDataStatus(event, lateness, realTimeEvent);
break;
case BTimedEventQueue::B_PARAMETER:
HandleParameter(event,lateness,realTimeEvent);
break;
default:
fprintf(stderr," unknown event type: %ld\n",event->type);
break;
}
}
/* override to clean up custom events you have added to your queue */
void VideoMixerNode::CleanUpEvent(
				const media_timed_event *event)
{
	// No custom event types are queued by this node, so the base class
	// clean-up is sufficient.
	BMediaEventLooper::CleanUpEvent(event);
}
/* called from Offline mode to determine the current time of the node */
/* update your internal information whenever it changes */
bigtime_t VideoMixerNode::OfflineTime()
{
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::OfflineTime\n");

	// XXX: do something else?  For now the default offline time is used.
	bigtime_t offlineTime = BMediaEventLooper::OfflineTime();
	return offlineTime;
}
/* override only if you know what you are doing! */
/* otherwise much badness could occur */
/* the actual control loop function: */
/* waits for messages, Pops events off the queue and calls DispatchEvent */
void VideoMixerNode::ControlLoop() {
	// Default control loop: wait for messages, pop events off the queue
	// and call DispatchEvent for each.
	BMediaEventLooper::ControlLoop();
}
// protected:
status_t VideoMixerNode::HandleStart(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::HandleStart()\n");
	if (RunState() != B_STARTED) {
		// NOTE(review): this synthetic B_HANDLE_BUFFER event carries no
		// buffer pointer, and it is both dispatched immediately AND queued —
		// confirm the double dispatch (and the NULL pointer) is intentional.
		media_timed_event firstBufferEvent(event->event_time, BTimedEventQueue::B_HANDLE_BUFFER);
		HandleEvent(&firstBufferEvent, 0, false);
		EventQueue()->AddEvent(firstBufferEvent);
	}
	return B_OK;
}
status_t VideoMixerNode::HandleSeek(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	// Seeking is not implemented; just trace the request parameters.
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::HandleSeek(t=%lld,d=%ld,bd=%lld)\n",event->event_time, event->data, event->bigdata);
	return B_OK;
}
status_t VideoMixerNode::HandleWarp(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	// Time-warp events require no action in this node.
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::HandleWarp\n");
	return B_OK;
}
status_t VideoMixerNode::HandleStop(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::HandleStop\n");
	// flush the queue so downstreamers don't get any more
	// (drops every pending B_HANDLE_BUFFER event; recycling is done by the
	// queue because those events carry the B_RECYCLE_BUFFER cleanup flag)
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, BTimedEventQueue::B_HANDLE_BUFFER);
	return B_OK;
}
status_t VideoMixerNode::HandleBuffer(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	// Feed one incoming buffer to the mixer. The first connected input
	// drives the output: whenever one of its buffers arrives, the
	// previously mixed frame (if any) is sent downstream first.
	if (event->type != BTimedEventQueue::B_HANDLE_BUFFER) {
		fprintf(stderr,"HandleBuffer called on non buffer event type\n");
		return B_BAD_VALUE;
	}
	BBuffer *buffer = const_cast<BBuffer*>((BBuffer*)event->pointer);
	if (buffer == NULL) {
		fprintf(stderr,"NO BUFFER PASSED\n");
		return B_BAD_VALUE;
	}
	// Route the buffer to the input it was addressed to.
	media_input *input = GetInput(buffer->Header()->destination);
	if (input == NULL) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return B_MEDIA_BAD_DESTINATION;
	}
	// A wildcard output format means no downstream connection yet.
	if (fOutput.format.u.raw_video == media_raw_video_format::wildcard) {
		fprintf(stderr,"<- B_MEDIA_NOT_CONNECTED\n");
		return B_MEDIA_NOT_CONNECTED;
	}
	status_t status = B_OK;
	if (input == *fConnectedInputs.begin()) {
		// Primary input: ship the completed mix downstream, then start the
		// next frame with this buffer. The third argument presumably marks
		// it as the primary buffer — confirm against BufferMixer::AddBuffer.
		if (bufferMixer.isBufferAvailable()) {
			status = SendBuffer(bufferMixer.GetOutputBuffer(), fOutput.source, fOutput.destination);
		}
		bufferMixer.AddBuffer(input->destination.id, buffer, true);
	} else {
		// Secondary inputs are accumulated into the pending frame.
		bufferMixer.AddBuffer(input->destination.id, buffer, false);
	}
	return status;
}
status_t VideoMixerNode::HandleDataStatus(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	// Forward a producer's data-status notification to our downstream
	// consumer (event->data carries the status code).
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::HandleDataStatus");
	SendDataStatus(event->data, fOutput.destination, event->event_time);
	return B_OK;
}
status_t VideoMixerNode::HandleParameter(
				const media_timed_event *event,
				bigtime_t lateness,
				bool realTimeEvent = false)
{
	// Parameter changes are not supported yet; just trace the call.
	fprintf(stderr,"VideoMixerNode(BMediaEventLooper)::HandleParameter");
	return B_OK;
}

View File

@ -0,0 +1,370 @@
/*
* Copyright (C) 2009-2010 David McPaul
*
* All rights reserved. Distributed under the terms of the MIT License.
* VideoMixerNode.cpp
*
* The VideoMixerNode class
* takes in multiple video streams and supplies
* a single stream as the output.
* each stream is converted to the same colourspace
*/
#include "VideoMixerNode.h"
// -------------------------------------------------------- //
// implemention of BBufferProducer
// -------------------------------------------------------- //
// They are asking us to make the first offering.
// So, we get a fresh format and then add requirements
status_t VideoMixerNode::FormatSuggestionRequested(
				media_type type,
				int32 quality,
				media_format * format)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::FormatSuggestionRequested\n");

	// Robustness: guard against a NULL output parameter before writing to it.
	if (format == NULL) {
		fprintf(stderr,"<- B_BAD_VALUE\n");
		return B_BAD_VALUE;
	}

	// An unset type means the caller accepts anything; we only do raw video.
	if (format->type == B_MEDIA_NO_TYPE) {
		format->type = B_MEDIA_RAW_VIDEO;
	}
	if (format->type != B_MEDIA_RAW_VIDEO) {
		return B_MEDIA_BAD_FORMAT;
	}

	// Fill in our preferred output format (wildcards where we don't care).
	GetOutputFormat(format);
	return B_OK;
}
// They made an offer to us. We should make sure that the offer is
// acceptable, and then we can add any requirements we have on top of
// that. We leave wildcards for anything that we don't care about.
status_t VideoMixerNode::FormatProposal(
				const media_source &output_source,
				media_format *format)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::FormatProposal\n");
	// NOTE(review): output_source is adopted without being checked against
	// fOutput.source — confirm that rejecting foreign sources isn't needed.
	fOutput.source = output_source;
	// If we have an input then set our output as the same except for color_space
	if (fConnectedInputs.size() > 0) {
		if (fOutput.format.u.raw_video == media_raw_video_format::wildcard) {
			// First proposal
			fOutput.format = fConnectedInputs[0]->format;
			fOutput.format.u.raw_video.display.format = B_NO_COLOR_SPACE;
		} else {
			// Second proposal
			fOutput.format = fConnectedInputs[0]->format;
			fOutput.format.u.raw_video.display.format = B_RGBA32;
		}
	}
	*format = fOutput.format;
	return B_OK;
}
// Presumably we have already agreed with them that this format is
// okay. But just in case, we check the offer. (and complain if it
// is invalid) Then as the last thing we do, we get rid of any
// remaining wilcards.
status_t VideoMixerNode::FormatChangeRequested(
				const media_source &source,
				const media_destination &destination,
				media_format *io_format,
				int32 * _deprecated_)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::FormatChangeRequested\n");

	// The request must refer to our single output.
	if (fOutput.source != source) {
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// Accept the format that was agreed upon and remember the destination.
	fOutput.destination = destination;
	fOutput.format = *io_format;
	return B_OK;
}
status_t VideoMixerNode::GetNextOutput( /* cookie starts as 0 */
				int32 *cookie,
				media_output *out_output)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::GetNextOutput (%ld)\n",*cookie);

	// There is exactly one output; it is handed out for cookie 0 only.
	if (*cookie != 0) {
		fprintf(stderr,"<- B_ERROR (no more outputs)\n");
		return B_ERROR;
	}

	*out_output = fOutput;
	*cookie = 1;
	return B_OK;
}
status_t VideoMixerNode::DisposeOutputCookie(int32 cookie)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::DisposeOutputCookie\n");
	// nothing to do since our cookies are part of the vector iterator
	// BUGFIX: the function is declared status_t but had no return statement,
	// which is undefined behaviour — report success explicitly.
	return B_OK;
}
status_t VideoMixerNode::SetBufferGroup(
				const media_source & for_source,
				BBufferGroup * group)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::SetBufferGroup\n");

	// Reject sources that aren't our single output.
	if (fOutput.source != for_source) {
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// The supplied group is currently accepted but not used.
	return B_OK;
}
/* Format of clipping is (as int16-s): <from line> <npairs> <startclip> <endclip>. */
/* Repeat for each line where the clipping is different from the previous line. */
/* If <npairs> is negative, use the data from line -<npairs> (there are 0 pairs after */
/* a negative <npairs>. Yes, we only support 32k*32k frame buffers for clipping. */
/* Any non-0 field of 'display' means that that field changed, and if you don't support */
/* that change, you should return an error and ignore the request. Note that the buffer */
/* offset values do not have wildcards; 0 (or -1, or whatever) are real values and must */
/* be adhered to. */
status_t VideoMixerNode::VideoClippingChanged(
				const media_source & for_source,
				int16 num_shorts,
				int16 * clip_data,
				const media_video_display_info & display,
				int32 * _deprecated_)
{
	// Clipping is not handled here; defer to the default implementation.
	return BBufferProducer::VideoClippingChanged(for_source, num_shorts, clip_data, display, _deprecated_);
}
status_t VideoMixerNode::GetLatency(
				bigtime_t *out_latency)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::GetLatency\n");

	if (out_latency == NULL) {
		fprintf(stderr,"<- B_BAD_VALUE\n");
		return B_BAD_VALUE;
	}

	// Total latency is our event latency plus the scheduling latency.
	*out_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}
status_t VideoMixerNode::PrepareToConnect(
				const media_source &what,
				const media_destination &where,
				media_format *format,
				media_source *out_source,
				char *out_name)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::PrepareToConnect\n");

	// Only our single output can be connected.
	if (fOutput.source != what) {
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// Reserve the connection: remember where the consumer lives and report
	// our source and name back; Connect() completes the handshake later.
	fOutput.destination = where;
	*out_source = fOutput.source;
	strcpy(out_name, fOutput.name);
	return B_OK;
}
void VideoMixerNode::Connect(
				status_t error,
				const media_source &source,
				const media_destination &destination,
				const media_format &format,
				char *io_name)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::Connect\n");
	if (fOutput.source != source) {
		// we don't have that output
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return;
	}
	if (error != B_OK) {
		// The consumer rejected the connection: forget the destination and
		// fall back to a wildcard output format.
		fprintf(stderr,"<- error already\n");
		fOutput.destination = media_destination::null;
		fOutput.format.u.raw_video = media_raw_video_format::wildcard;
		return;
	}
	// calculate the downstream latency
	// must happen before itr->Connect
	// NOTE(review): this queries fOutput.destination, which is only assigned
	// from `destination` below (PrepareToConnect set it earlier) — confirm
	// the ordering is intentional.
	bigtime_t downstreamLatency;
	media_node_id id;
	FindLatencyFor(fOutput.destination, &downstreamLatency, &id);
	// record the agreed upon values
	fOutput.format = format;
	fOutput.destination = destination;
	strcpy(io_name, fOutput.name);
	// compute the internal latency
	// must happen after itr->Connect
	if (fInternalLatency == 0) {
		fInternalLatency = 100; // temporary until we finish computing it
		ComputeInternalLatency();
	}
	// If the downstream latency for this output is larger
	// than our current downstream latency, we have to increase
	// our current downstream latency to be the larger value.
	// NOTE(review): fDownstreamLatency itself is never updated here — verify
	// whether it should be set to downstreamLatency before SetEventLatency.
	if (downstreamLatency > fDownstreamLatency) {
		SetEventLatency(fDownstreamLatency + fInternalLatency);
	}
}
void VideoMixerNode::ComputeInternalLatency() {
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::ComputeInternalLatency\n");

	// No real measurement is performed yet; use a fixed guess.
	fInternalLatency = 100;
	fprintf(stderr,"	internal latency guessed = %lld\n",fInternalLatency);
}
void VideoMixerNode::Disconnect(
				const media_source & what,
				const media_destination & where)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::Disconnect\n");

	// Both ends must match our current connection.
	if (fOutput.source != what) {
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return;
	}
	if (fOutput.destination != where) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return;
	}

	// Drop the connection and restore our preferred (wildcard) format.
	fOutput.destination = media_destination::null;
	GetOutputFormat(&fOutput.format);
}
void VideoMixerNode::LateNoticeReceived(
				const media_source & what,
				bigtime_t how_much,
				bigtime_t performance_time)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::LateNoticeReceived\n");

	if (fOutput.source != what) {
		// we don't have that output
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return;
	}

	switch (RunMode()) {
		case B_OFFLINE:
		case B_RECORDING:
			// nothing to do in these modes
			break;
		case B_INCREASE_LATENCY:
			// Absorb the reported lateness into our internal latency.
			fInternalLatency += how_much;
			SetEventLatency(fDownstreamLatency + fInternalLatency);
			break;
		case B_DECREASE_PRECISION:
			// XXX: try to catch up by producing buffers faster
			break;
		case B_DROP_DATA:
			// XXX: should we really drop buffers? just for that output?
			break;
		default:
			fprintf(stderr,"VideoMixerNode::LateNoticeReceived with unexpected run mode.\n");
			break;
	}
}
void VideoMixerNode::EnableOutput(
				const media_source &what,
				bool enabled,
				int32 *_deprecated_)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::EnableOutput\n");

	if (fOutput.source != what) {
		// we don't have that output
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return;
	}

	// BUGFIX: removed the dead "if (status != B_OK)" branch — status was
	// unconditionally initialised to B_OK, so it could never fire.
	// XXX: actually honour `enabled` for this output.
}
status_t VideoMixerNode::SetPlayRate(
			int32 numer,
			int32 denom)
{
	// Accept any play rate; the default implementation does the work.
	BBufferProducer::SetPlayRate(numer, denom); // XXX: do something intelligent later
	return B_OK;
}
void VideoMixerNode::AdditionalBufferRequested( // used to be Reserved 0
				const media_source & source,
				media_buffer_id prev_buffer,
				bigtime_t prev_time,
				const media_seek_tag * prev_tag)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::AdditionalBufferRequested\n");

	if (fOutput.source != source) {
		// we don't have that output
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return;
	}

	// XXX: not implemented — no extra buffers are produced on request.
}
void VideoMixerNode::LatencyChanged(
				const media_source & source,
				const media_destination & destination,
				bigtime_t new_latency,
				uint32 flags)
{
	fprintf(stderr,"VideoMixerNode(BBufferProducer)::LatencyChanged\n");

	// The notice must match our connected output on both ends.
	if (fOutput.source != source) {
		fprintf(stderr,"<- B_MEDIA_BAD_SOURCE\n");
		return;
	}
	if (fOutput.destination != destination) {
		fprintf(stderr,"<- B_MEDIA_BAD_DESTINATION\n");
		return;
	}

	// Adopt the new downstream latency and republish our total latency.
	fDownstreamLatency = new_latency;
	SetEventLatency(fDownstreamLatency + fInternalLatency);

	// XXX: we may have to recompute the number of buffers that we are using
	// see SetBufferGroup
}

View File

@ -0,0 +1,461 @@
;
; Copyright (C) 2009-2010 David McPaul
;
; All rights reserved. Distributed under the terms of the MIT License.
;
; A rather unoptimised set of yuv to rgb converters
; does 8 pixels at a time
; inputer:
; reads 128bits of yuv 8 bit data and puts
; the y values converted to 16 bit in xmm0
; the u values converted to 16 bit and duplicated into xmm1
; the v values converted to 16 bit and duplicated into xmm2
; conversion:
; does the yuv to rgb conversion using 16 bit fixed point and the
; results are placed into the following registers as 8 bit clamped values
; r values in xmm3
; g values in xmm4
; b values in xmm5
; outputer:
; writes out the rgba pixels as 8 bit values with 0 for alpha
; xmm6 used for scratch
; xmm7 used for scratch
; Export a function symbol: publishes it with a leading underscore (the C
; name-mangling this target expects), aliases the plain name to it, and
; aligns the entry point to 16 bytes.
%macro cglobal 1
global _%1
%define %1 _%1
align 16
%1:
%endmacro
; conversion code (SSE2, 8 pixels per pass)
; in:  y in xmm0, u in xmm1, v in xmm2 — all as 16-bit lanes
; out: r/g/b in xmm3/xmm4/xmm5, ready for clamping/packing
; The shift-and-add chains below implement the fixed point coefficients of
; the formulas in the comment block; xmm6/xmm7 are scratch.
%macro yuv2rgbsse2 0
; u = u - 128
; v = v - 128
; r = y + v + v >> 2 + v >> 3 + v >> 5
; g = y - (u >> 2 + u >> 4 + u >> 5) - (v >> 1 + v >> 3 + v >> 4 + v >> 5)
; b = y + u + u >> 1 + u >> 2 + u >> 6
; subtract 16 from y
movdqa xmm7, [Const16] ; loads a constant using data cache (slower on first fetch but then cached)
psubsw xmm0,xmm7 ; y = y - 16
; subtract 128 from u and v
movdqa xmm7, [Const128] ; loads a constant using data cache (slower on first fetch but then cached)
psubsw xmm1,xmm7 ; u = u - 128
psubsw xmm2,xmm7 ; v = v - 128
; load r,b with y
movdqa xmm3,xmm0 ; r = y
pshufd xmm5,xmm0, 0xE4 ; b = y
; r = y + v + v >> 2 + v >> 3 + v >> 5
paddsw xmm3, xmm2 ; add v to r
movdqa xmm7, xmm1 ; move u to scratch
pshufd xmm6, xmm2, 0xE4 ; move v to scratch
psraw xmm6,2 ; divide v by 4
paddsw xmm3, xmm6 ; and add to r
psraw xmm6,1 ; divide v by 2
paddsw xmm3, xmm6 ; and add to r
psraw xmm6,2 ; divide v by 4
paddsw xmm3, xmm6 ; and add to r
; b = y + u + u >> 1 + u >> 2 + u >> 6
paddsw xmm5, xmm1 ; add u to b
psraw xmm7,1 ; divide u by 2
paddsw xmm5, xmm7 ; and add to b
psraw xmm7,1 ; divide u by 2
paddsw xmm5, xmm7 ; and add to b
psraw xmm7,4 ; divide u by 32
paddsw xmm5, xmm7 ; and add to b
; g = y - u >> 2 - u >> 4 - u >> 5 - v >> 1 - v >> 3 - v >> 4 - v >> 5
movdqa xmm7,xmm2 ; move v to scratch
pshufd xmm6,xmm1, 0xE4 ; move u to scratch
movdqa xmm4,xmm0 ; g = y
psraw xmm6,2 ; divide u by 4
psubsw xmm4,xmm6 ; subtract from g
psraw xmm6,2 ; divide u by 4
psubsw xmm4,xmm6 ; subtract from g
psraw xmm6,1 ; divide u by 2
psubsw xmm4,xmm6 ; subtract from g
psraw xmm7,1 ; divide v by 2
psubsw xmm4,xmm7 ; subtract from g
psraw xmm7,2 ; divide v by 4
psubsw xmm4,xmm7 ; subtract from g
psraw xmm7,1 ; divide v by 2
psubsw xmm4,xmm7 ; subtract from g
psraw xmm7,1 ; divide v by 2
psubsw xmm4,xmm7 ; subtract from g
%endmacro
; conversion code (MMX/SSE1 variant, 4 pixels per pass)
; Same fixed point math as yuv2rgbsse2 but on 64-bit mm registers:
; in: y in mm0, u in mm1, v in mm2; out: r/g/b in mm3/mm4/mm5.
; Uses pshufw, so it needs SSE1, not plain MMX; caller must execute emms.
%macro yuv2rgbsse 0
; u = u - 128
; v = v - 128
; r = y + v + v >> 2 + v >> 3 + v >> 5
; g = y - (u >> 2 + u >> 4 + u >> 5) - (v >> 1 + v >> 3 + v >> 4 + v >> 5)
; b = y + u + u >> 1 + u >> 2 + u >> 6
; subtract 16 from y
movq mm7, [Const16] ; loads a constant using data cache (slower on first fetch but then cached)
psubsw mm0,mm7 ; y = y - 16
; subtract 128 from u and v
movq mm7, [Const128] ; loads a constant using data cache (slower on first fetch but then cached)
psubsw mm1,mm7 ; u = u - 128
psubsw mm2,mm7 ; v = v - 128
; load r,g,b with y
movq mm3,mm0 ; r = y
pshufw mm5,mm0, 0xE4 ; b = y
; r = r + v + v >> 2 + v >> 3 + v >> 5
paddsw mm3, mm2 ; add v to r
movq mm7, mm1 ; move u to scratch
pshufw mm6, mm2, 0xE4 ; move v to scratch
psraw mm6,2 ; divide v by 4
paddsw mm3, mm6 ; and add to r
psraw mm6,1 ; divide v by 2
paddsw mm3, mm6 ; and add to r
psraw mm6,2 ; divide v by 4
paddsw mm3, mm6 ; and add to r
; b = y + u + u >> 1 + u >> 2 + u >> 6
paddsw mm5, mm1 ; add u to b
psraw mm7,1 ; divide u by 2
paddsw mm5, mm7 ; and add to b
psraw mm7,1 ; divide u by 2
paddsw mm5, mm7 ; and add to b
psraw mm7,4 ; divide u by 32
paddsw mm5, mm7 ; and add to b
; g = y - u >> 2 - u >> 4 - u >> 5 - v >> 1 - v >> 3 - v >> 4 - v >> 5
movq mm7,mm2 ; move v to scratch
pshufw mm6,mm1, 0xE4 ; move u to scratch
movq mm4,mm0 ; g = y
psraw mm6,2 ; divide u by 4
psubsw mm4,mm6 ; subtract from g
psraw mm6,2 ; divide u by 4
psubsw mm4,mm6 ; subtract from g
psraw mm6,1 ; divide u by 2
psubsw mm4,mm6 ; subtract from g
psraw mm7,1 ; divide v by 2
psubsw mm4,mm7 ; subtract from g
psraw mm7,2 ; divide v by 4
psubsw mm4,mm7 ; subtract from g
psraw mm7,1 ; divide v by 2
psubsw mm4,mm7 ; subtract from g
psraw mm7,1 ; divide v by 2
psubsw mm4,mm7 ; subtract from g
%endmacro
; outputer (SSE2, 8 pixels)
; Clamps r/g/b (xmm3/xmm4/xmm5) to 0..255 and stores 8 pixels at [edi] as
; packed B,G,R,0 bytes (alpha written as 0). movntdq writes bypass the
; cache and require a 16-byte aligned destination.
%macro rgba32sse2output 0
; clamp values
pxor xmm7,xmm7
packuswb xmm3,xmm7 ; clamp to 0,255 and pack R to 8 bit per pixel
packuswb xmm4,xmm7 ; clamp to 0,255 and pack G to 8 bit per pixel
packuswb xmm5,xmm7 ; clamp to 0,255 and pack B to 8 bit per pixel
; convert to bgra32 packed
punpcklbw xmm5,xmm4 ; bgbgbgbgbgbgbgbg
movdqa xmm0, xmm5 ; save bg values
punpcklbw xmm3,xmm7 ; r0r0r0r0r0r0r0r0
punpcklwd xmm5,xmm3 ; lower half bgr0bgr0bgr0bgr0
punpckhwd xmm0,xmm3 ; upper half bgr0bgr0bgr0bgr0
; write to output ptr
movntdq [edi], xmm5 ; output first 4 pixels bypassing cache
movntdq [edi+16], xmm0 ; output second 4 pixels bypassing cache
%endmacro
; outputer (MMX, 4 pixels)
; Same packing as rgba32sse2output but on mm registers: clamps mm3/mm4/mm5
; and stores 4 pixels at [edi] as packed B,G,R,0 bytes (alpha = 0).
%macro rgba32sseoutput 0
; clamp values
pxor mm7,mm7
packuswb mm3,mm7 ; clamp to 0,255 and pack R to 8 bit per pixel
packuswb mm4,mm7 ; clamp to 0,255 and pack G to 8 bit per pixel
packuswb mm5,mm7 ; clamp to 0,255 and pack B to 8 bit per pixel
; convert to bgra32 packed
punpcklbw mm5,mm4 ; bgbgbgbgbgbgbgbg
movq mm0, mm5 ; save bg values
punpcklbw mm3,mm7 ; r0r0r0r0
punpcklwd mm5,mm3 ; lower half bgr0bgr0
punpckhwd mm0,mm3 ; upper half bgr0bgr0
; write to output ptr
movq [edi], mm5 ; output first 2 pixels
movq [edi+8], mm0 ; output second 2 pixels
%endmacro
SECTION .data align=16
; Eight 16-bit lanes each (one full SSE2 register / two MMX loads):
; Const16 removes the luma offset, Const128 removes the chroma bias.
Const16 dw 16
dw 16
dw 16
dw 16
dw 16
dw 16
dw 16
dw 16
Const128 dw 128
dw 128
dw 128
dw 128
dw 128
dw 128
dw 128
dw 128
; cdecl argument offsets, relative to ebp after "push ebp / mov ebp, esp"
; (first argument at [ebp+8] on 32-bit x86).
; void Convert_YUV422_RGBA32_SSE2(void *fromPtr, void *toPtr, int width)
width equ ebp+16
toPtr equ ebp+12
fromPtr equ ebp+8
; void Convert_YUV420P_RGBA32_SSE2(void *fromYPtr, void *fromUPtr, void *fromVPtr, void *toPtr, int width)
width1 equ ebp+24
toPtr1 equ ebp+20
fromVPtr equ ebp+16
fromUPtr equ ebp+12
fromYPtr equ ebp+8
SECTION .text align=16
; void Convert_YUV422_RGBA32_SSE2(void *fromPtr, void *toPtr, int width)
; Packed YUYV -> 32-bit pixels, 8 pixels per iteration; any remainder of
; width modulo 8 is ignored. NOTE(review): movdqa/movntdq assume fromPtr
; and toPtr are 16-byte aligned — confirm callers guarantee this.
cglobal Convert_YUV422_RGBA32_SSE2
; reserve variables
push ebp
mov ebp, esp
push edi
push esi
push ecx
mov esi, [fromPtr]
mov edi, [toPtr]
mov ecx, [width]
; loop width / 8 times
shr ecx,3
test ecx,ecx
jng ENDLOOP
REPEATLOOP: ; loop over width / 8
; YUV422 packed inputer
movdqa xmm0, [esi] ; should have yuyv yuyv yuyv yuyv
pshufd xmm1, xmm0, 0xE4 ; copy to xmm1
movdqa xmm2, xmm0 ; copy to xmm2
; extract y
pxor xmm7,xmm7 ; 00000000000000000000000000000000
pcmpeqd xmm6,xmm6 ; ffffffffffffffffffffffffffffffff
punpcklbw xmm6,xmm7 ; interleave xmm7 into xmm6 ff00ff00ff00ff00ff00ff00ff00ff00
pand xmm0, xmm6 ; clear all but y values leaving y0y0 etc
; extract u and duplicate so each u in yuyv becomes 0u0u
psrld xmm6,8 ; 00ff0000 00ff0000 00ff0000 00ff0000
pand xmm1, xmm6 ; clear all yv values leaving 0u00 etc
psrld xmm1,8 ; rotate u to get u000
pshuflw xmm1,xmm1, 0xA0 ; copy u values
pshufhw xmm1,xmm1, 0xA0 ; to get u0u0
; extract v
pslld xmm6,16 ; 000000ff000000ff 000000ff000000ff
pand xmm2, xmm6 ; clear all yu values leaving 000v etc
psrld xmm2,8 ; rotate v to get 00v0
pshuflw xmm2,xmm2, 0xF5 ; copy v values
pshufhw xmm2,xmm2, 0xF5 ; to get v0v0
yuv2rgbsse2
rgba32sse2output
; endloop
add edi,32
add esi,16
sub ecx, 1 ; apparently sub is better than dec
jnz REPEATLOOP
ENDLOOP:
; Cleanup
pop ecx
pop esi
pop edi
mov esp, ebp
pop ebp
ret
; void Convert_YUV420P_RGBA32_SSE2(void *fromYPtr, void *fromUPtr, void *fromVPtr, void *toPtr, int width)
; Planar 4:2:0 -> 32-bit pixels, 8 pixels (one row segment) per iteration;
; u/v advance by 4, so each chroma sample covers two horizontal pixels.
; Vertical chroma reuse is presumably handled by the caller — confirm.
cglobal Convert_YUV420P_RGBA32_SSE2
; reserve variables
push ebp
mov ebp, esp
push edi
push esi
push ecx
push eax
push ebx
mov esi, [fromYPtr]
mov eax, [fromUPtr]
mov ebx, [fromVPtr]
mov edi, [toPtr1]
mov ecx, [width1]
; loop width / 8 times
shr ecx,3
test ecx,ecx
jng ENDLOOP1
REPEATLOOP1: ; loop over width / 8
; YUV420 Planar inputer
movq xmm0, [esi] ; fetch 8 y values (8 bit) yyyyyyyy00000000
movd xmm1, [eax] ; fetch 4 u values (8 bit) uuuu000000000000
movd xmm2, [ebx] ; fetch 4 v values (8 bit) vvvv000000000000
; extract y
pxor xmm7,xmm7 ; 00000000000000000000000000000000
punpcklbw xmm0,xmm7 ; interleave xmm7 into xmm0 y0y0y0y0y0y0y0y0
; extract u and duplicate so each becomes 0u0u
punpcklbw xmm1,xmm7 ; interleave xmm7 into xmm1 u0u0u0u000000000
punpcklwd xmm1,xmm7 ; interleave again u000u000u000u000
pshuflw xmm1,xmm1, 0xA0 ; copy u values
pshufhw xmm1,xmm1, 0xA0 ; to get u0u0
; extract v
punpcklbw xmm2,xmm7 ; interleave xmm7 into xmm1 v0v0v0v000000000
punpcklwd xmm2,xmm7 ; interleave again v000v000v000v000
pshuflw xmm2,xmm2, 0xA0 ; copy v values
pshufhw xmm2,xmm2, 0xA0 ; to get v0v0
yuv2rgbsse2
rgba32sse2output
; endloop
add edi,32
add esi,8
add eax,4
add ebx,4
sub ecx, 1 ; apparently sub is better than dec
jnz REPEATLOOP1
ENDLOOP1:
; Cleanup
pop ebx
pop eax
pop ecx
pop esi
pop edi
mov esp, ebp
pop ebp
ret
; void Convert_YUV422_RGBA32_SSE(void *fromPtr, void *toPtr, int width)
; MMX/SSE1 fallback for the packed YUYV converter: 4 pixels per iteration,
; remainder of width modulo 4 ignored; ends with emms to restore the FPU.
cglobal Convert_YUV422_RGBA32_SSE
; reserve variables
push ebp
mov ebp, esp
push edi
push esi
push ecx
mov esi, [fromPtr]
mov ecx, [width]
mov edi, [toPtr]
; loop width / 4 times
shr ecx,2
test ecx,ecx
jng ENDLOOP2
REPEATLOOP2: ; loop over width / 4
; YUV422 packed inputer
movq mm0, [esi] ; should have yuyv yuyv
pshufw mm1, mm0, 0xE4 ; copy to mm1
movq mm2, mm0 ; copy to mm2
; extract y
pxor mm7,mm7 ; 0000000000000000
pcmpeqb mm6,mm6 ; ffffffffffffffff
punpckhbw mm6,mm7 ; interleave mm7 into mm6 ff00ff00ff00ff00
pand mm0, mm6 ; clear all but y values leaving y0y0 etc
; extract u and duplicate so each u in yuyv becomes 0u0u
psrld mm6,8 ; 00ff0000 00ff0000
pand mm1, mm6 ; clear all yv values leaving 0u00 etc
psrld mm1,8 ; rotate u to get u000
pshufw mm1,mm1, 0xA0 ; copy u values to get u0u0 (SSE not MMX)
; extract v
pslld mm6,16 ; 000000ff000000ff
pand mm2, mm6 ; clear all yu values leaving 000v etc
psrld mm2,8 ; rotate v to get 00v0
pshufw mm2,mm2, 0xF5 ; copy v values to get v0v0 (SSE not MMX)
yuv2rgbsse
rgba32sseoutput
; endloop
add edi,16
add esi,8
sub ecx, 1 ; apparently sub is better than dec
jnz REPEATLOOP2
ENDLOOP2:
; Cleanup
emms ; reset mmx regs back to float
pop ecx
pop esi
pop edi
mov esp, ebp
pop ebp
ret
; void Convert_YUV420P_RGBA32_SSE(void *fromYPtr, void *fromUPtr, void *fromVPtr, void *toPtr, int width)
; MMX/SSE1 fallback for the planar 4:2:0 converter: 4 pixels per
; iteration, u/v advance by 2; ends with emms to restore the FPU state.
cglobal Convert_YUV420P_RGBA32_SSE
; reserve variables
push ebp
mov ebp, esp
push edi
push esi
push ecx
push eax
push ebx
mov esi, [fromYPtr]
mov eax, [fromUPtr]
mov ebx, [fromVPtr]
mov edi, [toPtr1]
mov ecx, [width1]
; loop width / 4 times
shr ecx,2
test ecx,ecx
jng ENDLOOP3
REPEATLOOP3: ; loop over width / 4
; YUV420 Planar inputer
movq mm0, [esi] ; fetch 4 y values (8 bit) yyyy0000
movd mm1, [eax] ; fetch 2 u values (8 bit) uu000000
movd mm2, [ebx] ; fetch 2 v values (8 bit) vv000000
; extract y
pxor mm7,mm7 ; 0000000000000000
punpcklbw mm0,mm7 ; interleave xmm7 into xmm0 y0y0y0y
; extract u and duplicate so each becomes 0u0u
punpcklbw mm1,mm7 ; interleave xmm7 into xmm1 u0u00000
punpcklwd mm1,mm7 ; interleave again u000u000
pshufw mm1,mm1, 0xA0 ; copy u values to get u0u0
; extract v
punpcklbw mm2,mm7 ; interleave xmm7 into xmm1 v0v00000
punpcklwd mm2,mm7 ; interleave again v000v000
pshufw mm2,mm2, 0xA0 ; copy v values to get v0v0
yuv2rgbsse
rgba32sseoutput
; endloop
add edi,16
add esi,4
add eax,2
add ebx,2
sub ecx, 1 ; apparently sub is better than dec
jnz REPEATLOOP3
ENDLOOP3:
; Cleanup
emms
pop ebx
pop eax
pop ecx
pop esi
pop edi
mov esp, ebp
pop ebp
ret
SECTION .note.GNU-stack noalloc noexec nowrite progbits