Internal API rewritten, BMediaNode and derived classes functionality implemented.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@3525 a95241bf-73f2-0310-859d-f6bbb57e9c96
beveloper 2003-06-15 18:17:56 +00:00
parent 8636b80151
commit 7ee2c8049c
13 changed files with 311 additions and 884 deletions

View File

@ -1,10 +1,8 @@
// AudioMixer.cpp
/*
By David Shipman, 2002
*/
/* AudioMixer
*
* First implementation by David Shipman, 2002
* Rewritten by Marcus Overhagen, 2003
*/
#include <RealtimeAlloc.h>
#include <Buffer.h>
#include <TimeSource.h>
@ -27,13 +25,11 @@ AudioMixer::AudioMixer(BMediaAddOn *addOn)
BControllable(),
BMediaEventLooper(),
fAddOn(addOn),
fWeb(NULL),
fLatency(1),
fCore(new MixerCore),
fWeb(0),
fBufferGroup(0),
fDownstreamLatency(1),
fInternalLatency(1),
fStartTime(0),
fFramesSent(0),
fOutputEnabled(true),
fBufferGroup(NULL),
fDisableStop(false)
{
BMediaNode::AddNodeKind(B_SYSTEM_MIXER);
@ -52,13 +48,13 @@ AudioMixer::AudioMixer(BMediaAddOn *addOn)
AudioMixer::~AudioMixer()
{
BMediaEventLooper::Quit();
SetParameterWeb(NULL);
fWeb = NULL;
// any other cleanup goes here
delete fCore;
delete fBufferGroup;
DEBUG_ONLY(fCore = 0; fBufferGroup = 0; fWeb = 0);
}
void
@ -84,13 +80,8 @@ void AudioMixer::Stop(bigtime_t performance_time, bool immediate)
BMediaAddOn *
AudioMixer::AddOn(int32 *internal_id) const
{
if(fAddOn)
{
*internal_id = 0;
return fAddOn;
}
else
return NULL;
*internal_id = 0;
return fAddOn;
}
//
@ -99,14 +90,14 @@ AudioMixer::AddOn(int32 *internal_id) const
status_t
AudioMixer::GetParameterValue(int32 id, bigtime_t *last_change,
void *value, size_t *ioSize)
void *value, size_t *ioSize)
{
return B_ERROR;
}
void
AudioMixer::SetParameterValue(int32 id, bigtime_t when,
const void *value, size_t ioSize)
const void *value, size_t size)
{
}
@ -115,11 +106,10 @@ AudioMixer::SetParameterValue(int32 id, bigtime_t when,
//
status_t
AudioMixer::HandleMessage( int32 message, const void *data, size_t size)
AudioMixer::HandleMessage(int32 message, const void *data, size_t size)
{
// since we're using a mediaeventlooper, there shouldn't be any messages
return B_ERROR;
}
status_t
@ -281,8 +271,6 @@ AudioMixer::Connected(const media_source &producer, const media_destination &whe
if (where.id != 0 || where.port != ControlPort())
return B_MEDIA_BAD_DESTINATION;
media_input input;
fCore->Lock();
// we assign a new id (!= 0) to the newly created input
@ -327,14 +315,23 @@ AudioMixer::Disconnected(const media_source &producer, const media_destination &
}
status_t
AudioMixer::FormatChanged( const media_source &producer, const media_destination &consumer,
int32 change_tag, const media_format &format)
AudioMixer::FormatChanged(const media_source &producer, const media_destination &consumer,
int32 change_tag, const media_format &format)
{
// at some point in the future (indicated by change_tag and RequestCompleted()),
// we will receive buffers in a different format
if (consumer.port != ControlPort() || consumer.id == 0)
return B_MEDIA_BAD_DESTINATION;
// XXX we should not apply the format change at this point
// XXX tell core about format change
void InputFormatChanged(int32 inputID, const media_format *format);
printf("Format changed\n");
return B_ERROR;
// tell core about format change
fCore->Lock();
fCore->InputFormatChanged(consumer.id, &format);
fCore->Unlock();
return B_OK;
}
//
@ -375,11 +372,6 @@ AudioMixer::FormatProposal(const media_source &output, media_format *ioFormat)
// we require a raw audio format
if (ioFormat->type != B_MEDIA_RAW_AUDIO)
return B_MEDIA_BAD_FORMAT;
// XXX tell core about format change
void OutputFormatChanged(const media_format *format);
}
status_t
@ -387,7 +379,7 @@ AudioMixer::FormatChangeRequested(const media_source &source, const media_destin
media_format *io_format, int32 *_deprecated_)
{
// the downstream consumer node (soundcard) requested that we produce
// another format, we need to check if the format is accecptable and
// another format, we need to check if the format is acceptable and
// remove any wildcards before returning OK.
fCore->Lock();
@ -411,6 +403,13 @@ AudioMixer::FormatChangeRequested(const media_source &source, const media_destin
/* remove wildcards */
io_format->SpecializeTo(&fDefaultFormat);
// apply format change
fCore->Lock();
fCore->OutputFormatChanged(io_format);
fCore->Unlock();
return B_OK;
err:
fCore->Unlock();
@ -423,11 +422,8 @@ AudioMixer::GetNextOutput(int32 *cookie, media_output *out_output)
if (*cookie != 0)
return B_BAD_INDEX;
MixerOutput *output;
status_t rv;
fCore->Lock();
output = fCore->Output();
MixerOutput *output = fCore->Output();
if (output) {
*out_output = output->MediaOutput();
} else {
@ -441,7 +437,7 @@ AudioMixer::GetNextOutput(int32 *cookie, media_output *out_output)
fCore->Unlock();
*cookie += 1;
return B_OK
return B_OK;
}
status_t
@ -454,32 +450,21 @@ AudioMixer::DisposeOutputCookie(int32 cookie)
status_t
AudioMixer::SetBufferGroup(const media_source &for_source, BBufferGroup *newGroup)
{
if (! IsValidSource(for_source))
// the downstream consumer (soundcard) node asks us to use another
// BBufferGroup (might be NULL). We only have one output (id 0)
if (for_source.port != ControlPort() || for_source.id != 0)
return B_MEDIA_BAD_SOURCE;
if (newGroup == fBufferGroup) // we're already using this buffergroup
return B_OK;
// Ahh, someone wants us to use a different buffer group. At this point we delete
// the one we are using and use the specified one instead. If the specified group is
// NULL, we need to recreate one ourselves, and use *that*. Note that if we're
// caching a BBuffer that we requested earlier, we have to Recycle() that buffer
// *before* deleting the buffer group, otherwise we'll deadlock waiting for that
// buffer to be recycled!
delete fBufferGroup; // waits for all buffers to recycle
if (newGroup != NULL)
{
// we were given a valid group; just use that one from now on
fBufferGroup = newGroup;
}
else
{
// we were passed a NULL group pointer; that means we construct
// our own buffer group to use from now on
AllocateBuffers();
}
fCore->Lock();
if (!newGroup)
newGroup = CreateBufferGroup();
fCore->SetOutputBufferGroup(newGroup);
delete fBufferGroup;
fBufferGroup = newGroup;
fCore->Unlock();
return B_OK;
}
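The ordering in this swap matters: deleting a BBufferGroup blocks until every buffer it owns has been recycled, so any BBuffer the node is still holding must be recycled first or the delete deadlocks, as the removed comment above notes. A minimal sketch of the safe pattern, assuming a hypothetical fCachedBuffer member (not part of this commit) that holds a buffer requested earlier:

	// Hypothetical deadlock-safe buffer group swap; fCachedBuffer is an
	// assumed member, not something this commit defines.
	if (fCachedBuffer != NULL) {
		fCachedBuffer->Recycle();	// return our buffer before the group goes away
		fCachedBuffer = NULL;
	}
	delete fBufferGroup;			// blocks until all outstanding buffers are recycled
	fBufferGroup = (newGroup != NULL) ? newGroup : CreateBufferGroup();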
@ -497,146 +482,109 @@ AudioMixer::GetLatency(bigtime_t *out_latency)
status_t
AudioMixer::PrepareToConnect(const media_source &what, const media_destination &where,
media_format *format, media_source *out_source, char *out_name)
media_format *format, media_source *out_source, char *out_name)
{
// PrepareToConnect() is the second stage of format negotiations that happens
// inside BMediaRoster::Connect(). At this point, the consumer's AcceptFormat()
// inside BMediaRoster::Connect(). At this point, the consumer's AcceptFormat()
// method has been called, and that node has potentially changed the proposed
// format. It may also have left wildcards in the format. PrepareToConnect()
// format. It may also have left wildcards in the format. PrepareToConnect()
// *must* fully specialize the format before returning!
// we also create the new output connection and return it in out_source.
/*
The PrepareToConnect() hook is called before a new connection between the source whichSource and the destination whichDestination is established, in
order to give your producer one last chance to specialize any wildcards that remain in the format (although by this point there shouldn't be any, you should
check anyway).
Your implementation should, additionally, return in outSource the source to be used for the connection, and should fill the outName buffer with the name the
connection will be given; the consumer will see this in the outInput->name argument specified to BBufferConsumer::Connected(). If your node doesn't
care what the name is, you can leave the outName untouched.
*/
// trying to connect something that isn't our source?
if (! IsValidSource(what))
// is the source valid?
if (what.port != ControlPort() || what.id != 0)
return B_MEDIA_BAD_SOURCE;
// is the format acceptable?
if (format->type != B_MEDIA_RAW_AUDIO && format->type != B_MEDIA_UNKNOWN_TYPE)
return B_MEDIA_BAD_FORMAT;
fCore->Lock();
// are we already connected?
if (fOutput.destination != media_destination::null)
if (fCore->Output() != 0) {
fCore->Unlock();
return B_MEDIA_ALREADY_CONNECTED;
}
// the format may not yet be fully specialized (the consumer might have
// passed back some wildcards). Finish specializing it now, and return an
// error if we don't support the requested format.
if (format->type != B_MEDIA_RAW_AUDIO)
return B_MEDIA_BAD_FORMAT;
// CHANGE_THIS - we're messing around with formats, need to clean up
// still need to check u.raw_audio.format
/* set source and suggest a name */
*out_source = what;
strcpy(out_name, "Mixer Output");
/* remove wildcards */
format->SpecializeTo(&fDefaultFormat);
/* add output to core */
media_output output;
output.node = Node();
output.source = *out_source;
output.destination = where;
output.format = *format;
strcpy(output.name, out_name);
if (format->u.raw_audio.format == media_raw_audio_format::wildcard.format)
format->u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
if ((format->u.raw_audio.format != media_raw_audio_format::B_AUDIO_FLOAT) && // currently support floating point
(format->u.raw_audio.format != media_raw_audio_format::B_AUDIO_SHORT)) // and 16bit int audio
return B_MEDIA_BAD_FORMAT;
fCore->EnableOutput(false);
fCore->AddOutput(output);
// all fields MUST be validated here
// or else the consumer is prone to divide-by-zero errors
if(format->u.raw_audio.frame_rate == media_raw_audio_format::wildcard.frame_rate)
format->u.raw_audio.frame_rate = 44100;
if(format->u.raw_audio.channel_count == media_raw_audio_format::wildcard.channel_count)
format->u.raw_audio.channel_count = 2;
if(format->u.raw_audio.byte_order == media_raw_audio_format::wildcard.byte_order)
format->u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
if (format->u.raw_audio.buffer_size == media_raw_audio_format::wildcard.buffer_size)
format->u.raw_audio.buffer_size = 1024; // pick something comfortable to suggest
// Now reserve the connection, and return information about it
fOutput.destination = where;
fOutput.format = *format;
*out_source = fOutput.source;
strncpy(out_name, fOutput.name, B_MEDIA_NAME_LENGTH); // strncpy?
fCore->Unlock();
return B_OK;
}
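For raw audio, "specializing" the format means replacing every remaining wildcard field with a concrete value, which is what the removed code above did by hand and what SpecializeTo(&fDefaultFormat) does here. A rough equivalent of that manual path, with illustrative defaults (44.1 kHz stereo float, 4096-byte buffers) that are assumptions rather than values defined by this commit:

	// Illustrative manual wildcard specialization for raw audio; the
	// defaults below are assumed, not taken from fDefaultFormat.
	media_raw_audio_format &raw = format->u.raw_audio;
	const media_raw_audio_format &w = media_raw_audio_format::wildcard;
	if (raw.format == w.format)
		raw.format = media_raw_audio_format::B_AUDIO_FLOAT;
	if (raw.frame_rate == w.frame_rate)
		raw.frame_rate = 44100.0;
	if (raw.channel_count == w.channel_count)
		raw.channel_count = 2;
	if (raw.byte_order == w.byte_order)
		raw.byte_order = B_MEDIA_HOST_ENDIAN;
	if (raw.buffer_size == w.buffer_size)
		raw.buffer_size = 4096;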
void
AudioMixer::Connect( status_t error, const media_source &source, const media_destination &dest,
const media_format &format, char *io_name)
AudioMixer::Connect(status_t error, const media_source &source, const media_destination &dest,
const media_format &format, char *io_name)
{
printf("AudioMixer::Connect\n");
// we need to check which output dest refers to - we only have one for now
if (error)
{
fOutput.destination = media_destination::null;
fOutput.format = fPrefOutputFormat;
if (error != B_OK) {
// if an error occured, remove output from core
printf("AudioMixer::Connect failed, removing connction\n");
fCore->Lock();
fCore->RemoveOutput();
fCore->Unlock();
return;
}
// connection is confirmed, record information and send output name
fOutput.destination = dest;
fOutput.format = format;
strncpy(io_name, fOutput.name, B_MEDIA_NAME_LENGTH);
// if the connection has no name, we set it now
if (strlen(io_name) == 0)
strcpy(io_name, "Mixer Output");
// Now that we're connected, we can determine our downstream latency.
// Do so, then make sure we get our events early enough.
media_node_id id;
FindLatencyFor(fOutput.destination, &fLatency, &id);
printf("Downstream Latency is %Ld usecs\n", fLatency);
FindLatencyFor(dest, &fDownstreamLatency, &id);
printf("AudioMixer: Downstream Latency is %Ld usecs\n", fDownstreamLatency);
// we need at least the length of a full output buffer's latency (I think?)
// SetDuration of one buffer
SetBufferDuration((1000000 * (format.u.raw_audio.buffer_size / ((format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK) * format.u.raw_audio.channel_count))) / format.u.raw_audio.frame_rate);
size_t sample_size = fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
printf("AudioMixer: buffer duration is %Ld usecs\n", BufferDuration());
size_t framesPerBuffer = (fOutput.format.u.raw_audio.buffer_size / sample_size) / fOutput.format.u.raw_audio.channel_count;
// Our internal latency is at least the length of a full output buffer
// XXX we use two for now
fInternalLatency = 2 * BufferDuration();
printf("AudioMixer: Internal latency is %Ld usecs\n", fInternalLatency);
//fInternalLatency = (framesPerBuffer / fOutput.format.u.raw_audio.frame_rate); // test * 1000000
void *mouse = malloc(fOutput.format.u.raw_audio.buffer_size);
bigtime_t latency_start = TimeSource()->RealTime();
FillMixBuffer(mouse, fOutput.format.u.raw_audio.buffer_size);
bigtime_t latency_end = TimeSource()->RealTime();
fInternalLatency = latency_end - latency_start;
printf("Internal latency is %Ld usecs\n", fInternalLatency);
// use a higher internal latency to be able to process buffers that arrive late
// XXX does this make sense?
if (fInternalLatency < 5000)
fInternalLatency = 5000;
delete mouse;
// might need to tweak the latency
SetEventLatency(fLatency + fInternalLatency);
SetEventLatency(fDownstreamLatency + fInternalLatency);
printf("AudioMixer: SendLatencyChange %Ld\n", EventLatency());
SendLatencyChange(source, dest, EventLatency());
// calculate buffer duration and set it
if (fOutput.format.u.raw_audio.frame_rate == 0) {
// XXX must be adjusted later when the format is known
SetBufferDuration((framesPerBuffer * 1000000LL) / 44100);
} else {
SetBufferDuration((framesPerBuffer * 1000000LL) / fOutput.format.u.raw_audio.frame_rate);
}
// Set up the buffer group for our connection, as long as nobody handed us a
// buffer group (via SetBufferGroup()) prior to this. That can happen, for example,
// if the consumer calls SetOutputBuffersFor() on us from within its Connected()
// method.
if (!fBufferGroup)
AllocateBuffers();
fBufferGroup = CreateBufferGroup();
ASSERT(fCore->Output() != 0);
ASSERT(fCore->Output()->MediaOutput().format == format);
fCore->Lock();
fCore->EnableOutput(true);
fCore->SetTimeSource(TimeSource()->ID());
fCore->SetOutputBufferGroup(fBufferGroup);
fCore->Unlock();
}
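The duration and latency figures above follow directly from the connection format. As a worked example with assumed values (not measured ones): 4096-byte buffers of 32-bit float stereo at 44.1 kHz give 512 frames per buffer, a buffer duration of roughly 11.6 ms, and an internal latency of two buffers, about 23 ms, on top of whatever FindLatencyFor() reported downstream:

	// Worked example of the latency math, with assumed format values.
	size_t buffer_size = 4096;
	size_t sample_size = 4;			// B_AUDIO_FLOAT & B_AUDIO_SIZE_MASK
	uint32 channel_count = 2;
	float frame_rate = 44100.0f;
	bigtime_t downstreamLatency = 30000;	// assumed result of FindLatencyFor()

	size_t framesPerBuffer = buffer_size / sample_size / channel_count;	// 512 frames
	bigtime_t bufferDuration = (framesPerBuffer * 1000000LL) / (bigtime_t)frame_rate;	// 11609 usec
	bigtime_t internalLatency = 2 * bufferDuration;				// 23218 usec
	bigtime_t eventLatency = downstreamLatency + internalLatency;		// 53218 usec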
@ -645,15 +593,28 @@ AudioMixer::Disconnect(const media_source &what, const media_destination &where)
{
printf("AudioMixer::Disconnect\n");
// Make sure that our connection is the one being disconnected
if ((where == fOutput.destination) && (what == fOutput.source)) // change later for multisource outputs
{
fOutput.destination = media_destination::null;
fOutput.format = fPrefOutputFormat;
delete fBufferGroup;
fBufferGroup = NULL;
}
fCore->Lock();
// Make sure that our connection is the one being disconnected
MixerOutput * output = fCore->Output();
if (!output || output->MediaOutput().node != Node() || output->MediaOutput().source != what || output->MediaOutput().destination != where) {
FATAL("AudioMixer::Disconnect can't disconnect (wrong connection)\n");
fCore->Unlock();
return;
}
// force a stop
fCore->Stop();
fCore->RemoveOutput();
// destroy buffer group
delete fBufferGroup;
fBufferGroup = 0;
fCore->SetOutputBufferGroup(0);
fCore->Unlock();
}
@ -664,7 +625,8 @@ AudioMixer::LateNoticeReceived(const media_source &what, bigtime_t how_much, big
// is the only runmode in which we can do anything about this
printf("AudioMixer::LateNoticeReceived, %Ld too late at %Ld\n", how_much, performance_time);
/*
if (what == fOutput.source) {
if (RunMode() == B_INCREASE_LATENCY) {
fInternalLatency += how_much;
@ -673,26 +635,26 @@ AudioMixer::LateNoticeReceived(const media_source &what, bigtime_t how_much, big
fInternalLatency = 50000;
printf("AudioMixer: increasing internal latency to %Ld usec\n", fInternalLatency);
SetEventLatency(fLatency + fInternalLatency);
SetEventLatency(fDownstreamLatency + fInternalLatency);
// printf("AudioMixer: SendLatencyChange %Ld (2)\n", EventLatency());
// SendLatencyChange(source, dest, EventLatency());
}
}
*/
}
void
AudioMixer::EnableOutput(const media_source &what, bool enabled, int32 *_deprecated_)
{
// right now we've only got one output... check this against the supplied
// media_source and set its state accordingly...
if (what == fOutput.source)
{
fOutputEnabled = enabled;
}
// we only have one output
if (what.id != 0 || what.port != ControlPort())
return;
fCore->Lock();
fCore->EnableOutput(enabled);
fCore->Unlock();
}
@ -703,33 +665,19 @@ AudioMixer::EnableOutput(const media_source &what, bool enabled, int32 *_depreca
void
AudioMixer::NodeRegistered()
{
Run();
fOutput.node = Node();
for (int i = 0; i < 5; i++)
{
media_input *newInput = new media_input;
newInput->format = fPrefInputFormat;
newInput->source = media_source::null;
newInput->destination.port = ControlPort();
newInput->destination.id = i;
newInput->node = Node();
strcpy(newInput->name, "Free"); //
mixer_input *mixerInput = new mixer_input(*newInput);
fMixerInputs.AddItem(mixerInput);
}
SetPriority(120);
SetPriority(8);
// SetPriority(120);
}
void
AudioMixer::SetTimeSource(BTimeSource * time_source)
{
printf("AudioMixer::SetTimeSource: timesource is now %ld\n", time_source->ID());
fCore->Lock();
fCore->SetTimeSource(time_source->ID());
fCore->Unlock();
}
void
AudioMixer::HandleEvent( const media_timed_event *event, bigtime_t lateness, bool realTimeEvent)
@ -744,142 +692,52 @@ AudioMixer::HandleEvent( const media_timed_event *event, bigtime_t lateness, boo
break;
}
case SEND_NEW_BUFFER_EVENT:
case BTimedEventQueue::B_START:
{
// if the output is connected and enabled, send a buffer
if (fOutputEnabled && fOutput.destination != media_destination::null)
SendNewBuffer(event->event_time);
// if this is the first buffer, mark with the start time
// we need this to calculate the other buffer times
if (fStartTime == 0) {
fStartTime = event->event_time;
if (RunState() != B_STARTED) {
fCore->Lock();
fCore->Start(event->event_time);
fCore->Unlock();
}
// count frames that have been played
size_t sample_size = fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
int framesperbuffer = (fOutput.format.u.raw_audio.buffer_size / (sample_size * fOutput.format.u.raw_audio.channel_count));
fFramesSent += framesperbuffer;
// calculate the start time for the next event and add the event
bigtime_t nextevent = bigtime_t(fStartTime + double(fFramesSent / fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
media_timed_event nextBufferEvent(nextevent, SEND_NEW_BUFFER_EVENT);
EventQueue()->AddEvent(nextBufferEvent);
break;
}
case BTimedEventQueue::B_START:
if (RunState() != B_STARTED)
{
// We want to start sending buffers now, so we set up the buffer-sending bookkeeping
// and fire off the first "produce a buffer" event.
fStartTime = 0;
fFramesSent = 0;
//fThread = spawn_thread(_mix_thread_, "audio mixer thread", B_REAL_TIME_PRIORITY, this);
media_timed_event firstBufferEvent(event->event_time, SEND_NEW_BUFFER_EVENT);
// Alternatively, we could call HandleEvent() directly with this event, to avoid a trip through
// the event queue, like this:
//
// this->HandleEvent(&firstBufferEvent, 0, false);
//
EventQueue()->AddEvent(firstBufferEvent);
// fStartTime = event->event_time;
}
break;
case BTimedEventQueue::B_STOP:
{
// stopped - don't process any more buffers, flush all buffers from eventqueue
EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, BTimedEventQueue::B_HANDLE_BUFFER);
EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, SEND_NEW_BUFFER_EVENT);
fCore->Lock();
fCore->Stop();
fCore->Unlock();
break;
}
case BTimedEventQueue::B_DATA_STATUS:
{
printf("DataStatus message\n");
mixer_input *mixerInput;
int inputcount = fMixerInputs.CountItems();
for (int i = 0; i < inputcount; i++)
{
mixerInput = (mixer_input *)fMixerInputs.ItemAt(i);
if (mixerInput->fInput.destination == (media_destination &)event->pointer)
{
printf("Valid DatasStatus destination\n");
mixerInput->fProducerDataStatus = event->data;
if (mixerInput->fProducerDataStatus == B_DATA_AVAILABLE)
printf("B_DATA_AVAILABLE\n");
else if (mixerInput->fProducerDataStatus == B_DATA_NOT_AVAILABLE)
printf("B_DATA_NOT_AVAILABLE\n");
else if (mixerInput->fProducerDataStatus == B_PRODUCER_STOPPED)
printf("B_PRODUCER_STOPPED\n");
i = inputcount;
}
}
break;
}
default:
break;
}
}
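The removed SEND_NEW_BUFFER_EVENT branch schedules each buffer from the accumulated frame count rather than by adding a rounded buffer duration each time, which keeps long-term drift down to a single rounding error. A sketch of that scheduling, with an assumed 512-frame buffer at 44100 Hz:

	// Sketch of the removed send-buffer scheduling; framesPerBuffer and
	// frame_rate values are assumptions for illustration only.
	fFramesSent += 512;			// frames in the buffer just sent
	bigtime_t nextEvent = fStartTime
		+ bigtime_t((double)fFramesSent / 44100.0 * 1000000.0);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);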
//
// AudioMixer methods
//
void
AudioMixer::AllocateBuffers()
BBufferGroup *
AudioMixer::CreateBufferGroup()
{
// allocate enough buffers to span our downstream latency, plus one
size_t size = fOutput.format.u.raw_audio.buffer_size;
int32 count = int32((fLatency / (BufferDuration() + 1)) + 1);
// allocate enough buffers to span our downstream latency (plus one for rounding up), plus one extra
int32 count = int32(fDownstreamLatency / BufferDuration()) + 2;
if (count < 3) {
printf("AudioMixer: calculated only %ld buffers, that's not enough\n", count);
count = 3;
}
printf("AudioMixer: allocating %ld buffers\n", count);
fBufferGroup = new BBufferGroup(size, count);
}
// use this later for separate threads
int32
AudioMixer::_mix_thread_(void *data)
{
return ((AudioMixer *)data)->MixThread();
}
int32
AudioMixer::MixThread()
{
while (1)
{
snooze(500000);
}
fCore->Lock();
uint32 size = fCore->OutputBufferSize();
fCore->Unlock();
printf("AudioMixer: allocating %ld buffers of %ld bytes each\n", count, size);
return new BBufferGroup(size, count);
}
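The count formula sizes the group to cover the downstream latency. Using the assumed numbers from the Connect() example above: a 30000 µs downstream latency divided by an 11609 µs buffer duration truncates to 2, plus 2 gives 4 buffers, and anything below the floor of 3 is bumped up:

	// Worked example of the buffer-count formula (assumed latencies).
	bigtime_t downstreamLatency = 30000;	// assumed, from FindLatencyFor()
	bigtime_t bufferDuration = 11609;	// assumed, from BufferDuration()
	int32 count = int32(downstreamLatency / bufferDuration) + 2;	// 2 + 2 = 4
	if (count < 3)
		count = 3;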

View File

@ -46,19 +46,19 @@ class AudioMixer :
BParameterWeb * BuildParameterWeb(); // used to create the initial 'master' web
void MakeWebForInput(char *name, media_format format);
void AllocateBuffers();
status_t FillMixBuffer(void *outbuffer, size_t size);
void SendNewBuffer(bigtime_t event_time);
void HandleInputBuffer(BBuffer *buffer, bigtime_t lateness);
BBufferGroup * CreateBufferGroup();
// BMediaNode methods
BMediaAddOn * AddOn(int32*) const;
// void SetRunMode(run_mode);
// void Preroll();
// void SetTimeSource(BTimeSource* time_source);
// status_t RequestCompleted(const media_request_info & info);
// status_t RequestCompleted(const media_request_info & info);
void NodeRegistered();
void Stop(bigtime_t performance_time, bool immediate);
void SetTimeSource(BTimeSource * time_source);
protected:
@ -176,43 +176,20 @@ class AudioMixer :
// BMediaEventLooper methods
virtual void NodeRegistered();
virtual void Stop(bigtime_t performance_time,
bool immediate);
void HandleEvent( const media_timed_event *event,
bigtime_t lateness,
bool realTimeEvent = false);
// handle mixing in separate thread
// not implemented (yet)
static int32 _mix_thread_(void *data);
int32 MixThread();
private:
BMediaAddOn * fAddOn;
BParameterWeb * fWeb; // local pointer to parameterweb
bigtime_t fLatency, fInternalLatency; // latency (downstream and internal)
bigtime_t fStartTime; // time node started
uint64 fFramesSent; // audio frames sent
bool fOutputEnabled;
BBufferGroup * fBufferGroup;
BList fMixerInputs;
bool fDisableStop;
MixerCore *fCore;
media_format fDefaultFormat;
BMediaAddOn *fAddOn;
MixerCore *fCore;
BParameterWeb *fWeb; // local pointer to parameterweb
BBufferGroup *fBufferGroup;
bigtime_t fDownstreamLatency;
bigtime_t fInternalLatency;
bool fDisableStop;
media_format fDefaultFormat;
};
#endif

View File

@ -1,403 +0,0 @@
#include "AudioMixer.h"
#include "IOStructures.h"
#include <media/RealtimeAlloc.h>
#include <media/Buffer.h>
#include <media/TimeSource.h>
#include <media/ParameterWeb.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
status_t
AudioMixer::FillMixBuffer(void *outbuffer, size_t ioSize)
{
int32 outChannels = fOutput.format.u.raw_audio.channel_count;
// we have an output buffer - now it needs to be filled!
switch (fOutput.format.u.raw_audio.format)
{
case media_raw_audio_format::B_AUDIO_FLOAT:
{
float *outdata = (float*)outbuffer;
memset(outdata, 0, ioSize); // CHANGE_THIS
int sampleCount = int(fOutput.format.u.raw_audio.buffer_size / sizeof(float));
for (int s = 0; s < sampleCount; s++)
{
outdata[s] = 0.0; // CHANGE_THIS
}
int mixerInputCount = fMixerInputs.CountItems();
for (int c = 0; c < mixerInputCount; c++)
{
mixer_input *channel = (mixer_input *)fMixerInputs.ItemAt(c);
if (channel->enabled == true) // still broken... FIX_THIS
{
int32 inChannels = channel->fInput.format.u.raw_audio.channel_count;
bool split = outChannels > inChannels;
bool mix = outChannels < inChannels;
if (fOutput.format.u.raw_audio.frame_rate == channel->fInput.format.u.raw_audio.frame_rate)
{
switch (channel->fInput.format.u.raw_audio.format)
{
case media_raw_audio_format::B_AUDIO_FLOAT:
{
float *indata = (float *)channel->fData;
if (split) {
printf("#### FillMixBuffer(): 1.Should split from B_AUDIO_FLOAT!\n");
} else if (mix) {
printf("#### FillMixBuffer(): 1.Should mix from B_AUDIO_FLOAT!\n");
} else {
int baseOffset = channel->fEventOffset / 4;
int maxOffset = int(channel->fDataSize / 4);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int s = 0; s < sampleCount; s++)
{
outdata[s] = indata[inputSample];
indata[inputSample] = 0;
if (s == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
channel->fEventOffset = (channel->fEventOffset + fOutput.format.u.raw_audio.buffer_size) %
channel->fDataSize;
}
break;
}
case media_raw_audio_format::B_AUDIO_SHORT:
{
int16 *indata = (int16 *)channel->fData;
if (split) {
printf("#### FillMixBuffer(): 2.Should split from B_AUDIO_SHORT!\n");
} else if (mix) {
printf("#### FillMixBuffer(): 2.Should mix from B_AUDIO_SHORT!\n");
} else {
int baseOffset = channel->fEventOffset / 2;
int maxOffset = int(channel->fDataSize / 2);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int s = 0; s < sampleCount; s++)
{
outdata[s] = outdata[s] + (indata[inputSample] / 32768.0);
// CHANGE_THIS indata[inputSample] = 0;
if (s == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
channel->fEventOffset = (channel->fEventOffset + fOutput.format.u.raw_audio.buffer_size / 2) %
channel->fDataSize;
}
break;
}
} // input format
} else
printf("#### sample rate does not match - don't do anything\n");
} // data available
} // channel loop
/*
// The buffer is done - we still need to scale for the MainVolume...
// then check to see if anything is clipping, adjust if needed
*/
for (int frameStart = 0; frameStart < sampleCount; frameStart += outChannels)
{
for (int channel = 0; channel < outChannels; channel ++)
{
int sample = frameStart + channel;
outdata[sample] = outdata[sample] * fMasterGainScale[channel];
//if (outdata[sample] > 1.0)
// outdata[sample] = 1.0;
// else if (outdata[sample] < -1.0)
// outdata[sample] = -1.0;
}
}
break;
} // cur-case
case media_raw_audio_format::B_AUDIO_SHORT:
{
int16 *outdata = (int16*)outbuffer;
int sampleCount = int(fOutput.format.u.raw_audio.buffer_size / sizeof(int16));
int *clipoffset = new int[sampleCount];
// keep a running tally of +/- clipping so we don't get rollaround distortion
// we only need this for int/char audio types - not float
memset(outdata, 0, ioSize);
memset(clipoffset, 0, sizeof(int) * sampleCount);
// for (int s = 0; s < sampleCount; s++)
// {
// clipoffset[s] = 0;
// }
int mixerInputs = fMixerInputs.CountItems();
if (mixerInputs == 0)
printf("#### mixer null\n");
for (int c = 0; c < mixerInputs; c++)
{
mixer_input *channel = (mixer_input *)fMixerInputs.ItemAt(c);
if (channel->enabled == true) // only use if there are buffers waiting (seems to be broken atm...)
{
// if (channel->fProducerDataStatus == B_DATA_AVAILABLE)
// printf("B_DATA_AVAILABLE\n");
// else if (channel->fProducerDataStatus == B_DATA_NOT_AVAILABLE)
// printf("B_DATA_NOT_AVAILABLE\n");
// else if (channel->fProducerDataStatus == B_PRODUCER_STOPPED)
// printf("B_PRODUCER_STOPPED\n");
int32 inChannels = channel->fInput.format.u.raw_audio.channel_count;
bool split = outChannels > inChannels;
bool mix = outChannels < inChannels;
if (fOutput.format.u.raw_audio.frame_rate == channel->fInput.format.u.raw_audio.frame_rate)
{
switch (channel->fInput.format.u.raw_audio.format)
{
case media_raw_audio_format::B_AUDIO_FLOAT:
{
float *indata = (float *)channel->fData;
if (split) {
int baseOffset = channel->fEventOffset / 4;
int maxOffset = int(channel->fDataSize / 4);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int s = 0; s < sampleCount; s += 2)
{
if ((outdata[s] + int16(32767 * indata[inputSample])) > 32767)
clipoffset[s] = clipoffset[s] + 1;
else if ((outdata[s] + int16(32767 * indata[inputSample])) < -32768)
clipoffset[s] = clipoffset[s] - 1;
if ((outdata[s + 1] + int16(32767 * indata[inputSample])) > 32767)
clipoffset[s + 1] = clipoffset[s + 1] + 1;
else if ((outdata[s] + int16(32767 * indata[inputSample])) < -32768)
clipoffset[s + 1] = clipoffset[s + 1] - 1;
outdata[s] = outdata[s] + int16(32767 * indata[inputSample]); // CHANGE_THIS mixing
outdata[s + 1] = outdata[s];
indata[inputSample] = 0;
if (s == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
} else if (mix) {
printf("#### FillMixBuffer(): 3.Should mix from B_AUDIO_FLOAT!\n");
} else {
int baseOffset = channel->fEventOffset / 4;
int maxOffset = int(channel->fDataSize / 4);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int frameStart = 0; frameStart < sampleCount; frameStart += outChannels)
{
for (int chan = 0; chan < outChannels; chan ++)
{
int sample = frameStart + chan;
int inputValue = int((32767 * indata[inputSample] * channel->fGainScale[chan]) + outdata[sample]);
if (inputValue > 32767)
clipoffset[sample] = clipoffset[sample] + 1;
else if (inputValue < -32768)
clipoffset[sample] = clipoffset[sample] - 1;
outdata[sample] = inputValue; // CHANGE_THIS
indata[inputSample] = 0;
if (sample == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
}
channel->fEventOffset += (fOutput.format.u.raw_audio.buffer_size * 2);
if (channel->fEventOffset >= channel->fDataSize)
channel->fEventOffset -= channel->fDataSize;
}
break;
}
case media_raw_audio_format::B_AUDIO_SHORT:
{
int16 *indata = (int16 *)channel->fData;
if (split) {
printf("#### FillMixBuffer(): 4.Should split from B_AUDIO_SHORT!\n");
} else if (mix) {
printf("#### FillMixBuffer(): 4.Should mix from B_AUDIO_SHORT!\n");
} else {
// printf("#### FillMixBuffer(): 4.Should copy from B_AUDIO_SHORT!\n");
int baseOffset = channel->fEventOffset / 2;
int maxOffset = int(channel->fDataSize / 2);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int frameStart = 0; frameStart < sampleCount; frameStart += outChannels)
{
for (int chan = 0; chan < outChannels; chan ++)
{
int sample = frameStart + chan;
int clipTest = int(outdata[sample] + (indata[inputSample] * channel->fGainScale[chan]));
if (clipTest > 32767)
clipoffset[sample] = clipoffset[sample] + 1;
else if (clipTest < -32768)
clipoffset[sample] = clipoffset[sample] - 1;
outdata[sample] = int16(outdata[sample] + (indata[inputSample] * channel->fGainScale[chan]));
indata[inputSample] = 0;
if (sample == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
}
channel->fEventOffset = (channel->fEventOffset + fOutput.format.u.raw_audio.buffer_size);
if (channel->fEventOffset >= channel->fDataSize)
channel->fEventOffset -= channel->fDataSize;
}
break;
}
case media_raw_audio_format::B_AUDIO_INT:
{
int32 *indata = (int32 *)channel->fData;
if (split) {
int baseOffset = channel->fEventOffset / 4;
int maxOffset = int(channel->fDataSize / 4);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int s = 0; s < sampleCount; s += 2) {
if ((outdata[s] + (indata[inputSample] / 65536)) > 32767)
clipoffset[s] = clipoffset[s] + 1;
else if ((outdata[s] + (indata[inputSample] / 65536)) < -32768)
clipoffset[s] = clipoffset[s] - 1;
if ((outdata[s + 1] + (indata[inputSample] / 65536)) > 32767)
clipoffset[s + 1] = clipoffset[s + 1] + 1;
else if ((outdata[s + 1] + (indata[inputSample] / 65536)) < -32768)
clipoffset[s + 1] = clipoffset[s + 1] - 1;
outdata[s] = int16(outdata[s] + (indata[inputSample] / 65535)); // CHANGE_THIS mixing
outdata[s + 1] = outdata[s];
indata[inputSample] = 0;
if (s == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
} else if (mix) {
printf("#### FillMixBuffer(): 5.Should mix from B_AUDIO_INT!\n");
} else {
int baseOffset = channel->fEventOffset / 4;
int maxOffset = int(channel->fDataSize / 4);
int offsetWrap = maxOffset - baseOffset;
int inputSample = baseOffset;
for (int s = 0; s < sampleCount; s++) {
if ((outdata[s] + (indata[inputSample] / 65536)) > 32767)
clipoffset[s] = clipoffset[s] + 1;
else if ((outdata[s] + (indata[inputSample] / 65536)) < -32768)
clipoffset[s] = clipoffset[s] - 1;
outdata[s] = int16(outdata[s] + (indata[inputSample] / 65536)); // CHANGE_THIS mixing
indata[inputSample] = 0;
if (s == offsetWrap)
inputSample = 0;
else
inputSample ++;
}
}
channel->fEventOffset = (channel->fEventOffset + (fOutput.format.u.raw_audio.buffer_size * 2))
% channel->fDataSize;
break;
}
}
} else
printf("#### sample rate does not match - don't do anything\n");
}
}
// use our clipoffset to determine correct limits
for (int frameStart = 0; frameStart < sampleCount; frameStart += outChannels)
{
for (int channel = 0; channel < outChannels; channel ++)
{
int sample = frameStart + channel;
int scaledSample = int((outdata[sample] + (65536 * clipoffset[sample])) * fMasterGainScale[channel]);
if (scaledSample < -32768)
outdata[sample] = -32768;
else if (scaledSample > 32767)
outdata[sample] = 32767;
else
outdata[sample] = scaledSample; //(int16)
}
}
delete clipoffset;
break;
}
default:
printf("##### oh no, unknown conversion %ld #####\n", fOutput.format.u.raw_audio.format);
}
return B_OK;
}

View File

@ -1,43 +0,0 @@
// IOStructures.h
/*
Structures to represent IO channels in the AudioMixer node
By David Shipman, 2002
*/
#ifndef _IO_STRUCT_H
#define _IO_STRUCT_H
class mixer_input
{
public:
mixer_input(media_input &input);
~mixer_input();
media_input fInput;
int32 fProducerDataStatus; // status of upstream data
bool enabled; // there is a buffer ready for mixing
char * fData;
float * fGainScale; // multiplier for gain - computed at paramchange
float * fGainDisplay; // the value that will be displayed for gain
bigtime_t fGainDisplayLastChange;
int fMuteValue; // the value of the 'mute' control
bigtime_t fMuteValueLastChange;
float fPanValue; // value of 'pan' control (only if mono)
bigtime_t fPanValueLastChange;
size_t fEventOffset; // offset (in bytes) of the start of the next
// _output_ buffer - use this for keeping in sync
size_t fDataSize; // size of ringbuffer
};
#endif

View File

@ -4,7 +4,6 @@ UsePrivateHeaders media ;
Addon mixer.media_addon : media :
AudioMixer.cpp
FillMixBuffer.cpp
MixerAddOn.cpp
MixerCore.cpp
MixerInput.cpp

View File

@ -33,32 +33,94 @@ MixerCore::AddOutput(const media_output &output)
}
bool
MixerCore::RemoveInput(const media_input &input)
MixerCore::RemoveInput(int32 inputID)
{
return true;
}
bool
MixerCore::RemoveOutput(const media_output &output)
MixerCore::RemoveOutput()
{
return true;
}
void
MixerCore::OutputBufferLengthChanged(bigtime_t length)
int32
MixerCore::CreateInputID()
{
Lock();
Unlock();
return 1;
}
MixerInput *
MixerCore::Input(int i)
{
return (MixerInput *)fInputs->ItemAt(i);
}
MixerOutput *
MixerCore::Output()
{
return fOutput;
}
void
MixerCore::BufferReceived(BBuffer *buffer, bigtime_t lateness)
{
}
void
MixerCore::InputFormatChanged(int32 inputID, const media_format *format)
{
}
void
MixerCore::OutputFormatChanged(const media_format *format)
{
}
void
MixerCore::SetOutputBufferGroup(BBufferGroup *group)
{
}
void
MixerCore::SetTimeSource(media_node_id id)
{
}
void
MixerCore::EnableOutput(bool enabled)
{
}
void
MixerCore::Start(bigtime_t time)
{
}
void
MixerCore::Stop()
{
}
uint32
MixerCore::OutputBufferSize()
{
return 1;
}
bool
MixerCore::IsStarted()
{
return false;
}
void
MixerCore::OutputBufferLengthChanged(bigtime_t length)
{
}
/*
void BufferReceived(BBuffer *buffer, bigtime_t lateness);
@ -79,4 +141,23 @@ MixerCore::Input(int i)
break;
}
// use this later for separate threads
int32
AudioMixer::_mix_thread_(void *data)
{
return ((AudioMixer *)data)->MixThread();
}
int32
AudioMixer::MixThread()
{
while (1)
{
snooze(500000);
}
}
*/

View File

@ -23,7 +23,7 @@ public:
MixerInput *Input(int index); // index = 0 to count-1, NOT inputID
MixerOutput *Output();
void Lock();
void Unlock();
@ -32,8 +32,22 @@ public:
void InputFormatChanged(int32 inputID, const media_format *format);
void OutputFormatChanged(const media_format *format);
void SetOutputBufferGroup(BBufferGroup *group);
void SetTimeSource(media_node_id id);
void EnableOutput(bool enabled);
void Start(bigtime_t time);
void Stop();
uint32 OutputBufferSize();
bool IsStarted();
private:
void OutputBufferLengthChanged(bigtime_t length);
// handle mixing in separate thread
// not implemented (yet)
static int32 _mix_thread_(void *data);
int32 MixThread();
private:

View File

@ -16,3 +16,9 @@ void
MixerInput::BufferReceived(BBuffer *buffer)
{
}
media_input &
MixerInput::MediaInput()
{
return fInput;
}

View File

@ -15,6 +15,7 @@ public:
private:
MixerCore *fCore;
media_input fInput;
};
#endif

View File

@ -12,4 +12,8 @@ MixerOutput::~MixerOutput()
{
}
media_output MediaOutput();
media_output &
MixerOutput::MediaOutput()
{
return fOutput;
}

View File

@ -9,10 +9,11 @@ public:
MixerOutput(MixerCore *core);
~MixerOutput();
media_output MediaOutput();
media_output & MediaOutput();
private:
MixerCore *fCore;
media_output fOutput;
};
#endif

View File

@ -1,31 +0,0 @@
----------------------
Be Sample Code License
----------------------
Copyright 1991-1999, Be Incorporated.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions, and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,37 +0,0 @@
OpenBeOS Audio Mixer
David Shipman, 14/08/2002
Overview
The node is based on a mediaeventlooper, using the HandleEvent loop to compile the mixed buffer output.
Each input creates a ringbuffer (fixed size "looped" buffer) for its input - this way buffer contents can be
placed in the ringbuffer and the buffer recycled immediately.
Inputs are maintained using a list of mixer_input objects - inputs are created/destroyed dynamically when
producers connect/disconnect.
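A minimal sketch of the per-input ring buffer described above; the name and the 64 KB size are illustrative, not taken from the AudioMixer sources. Incoming buffer contents are copied in at a write offset so the BBuffer can be recycled immediately, and the mix loop later reads from its own offset.
#include <string.h>
// Illustrative fixed-size ring buffer for one mixer input.
struct InputRing {
	enum { kSize = 64 * 1024 };
	char	data[kSize];
	size_t	writeOffset;
	InputRing() : writeOffset(0) {}
	// Copy a received buffer's payload, wrapping at the end of the ring,
	// so the caller can Recycle() the BBuffer right afterwards.
	void Write(const void *src, size_t length)
	{
		const char *in = (const char *)src;
		while (length > 0) {
			size_t chunk = kSize - writeOffset;
			if (chunk > length)
				chunk = length;
			memcpy(data + writeOffset, in, chunk);
			writeOffset = (writeOffset + chunk) % kSize;
			in += chunk;
			length -= chunk;
		}
	}
};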
Done
- node functions as a replacement for the BeOS R5 mixer.media-addon
- IO/conversion between the major audio types (FLOAT, SHORT)
- interface mostly done (all gain controls operational)
- mixing with different gain levels is functional
Tested
- Output to emu10k1 (SBLive)
- Input from CL-Amp, file-readers, SoundPlay, music software - almost all work well
To Do (vaguely in order of importance)
- fix bugs
- format negotiation fine-tuning - some nodes still connect with weird formats
- rewrite of buffer mixing routines - at the moment it's really inefficient, and a total mess
- complete interface (add mutes, panning)
- multithreading (separate mix thread)
- mixer save (to save parameter states when a node is disconnected)
Notes :
Parts of this program are based on code under the Be Sample Code License.
This is included in the archive, in accordance with the license.