many fixes to improve timing
git-svn-id: file:///srv/svn/repos/haiku/trunk/current@3386 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 67833f26bc, commit 9bedd42cd1
@@ -21,6 +21,8 @@
 
 //
 
+#define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)
+
 mixer_input::mixer_input(media_input &input)
 {
 
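The hunk above introduces a private event code for the mixer's own buffer-production events. As an illustrative aside (a sketch, not part of the diff): user-defined BTimedEventQueue codes are taken as offsets from B_USER_EVENT so they cannot collide with the predefined event types, and a further private code would simply use the next offset.

    // illustrative sketch, assuming the Media Kit's BTimedEventQueue declarations are in scope
    #define SEND_NEW_BUFFER_EVENT   (BTimedEventQueue::B_USER_EVENT + 1)
    #define SOME_FUTURE_MIXER_EVENT (BTimedEventQueue::B_USER_EVENT + 2) // hypothetical name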
@@ -225,7 +227,7 @@ AudioMixer::AudioMixer(BMediaAddOn *addOn)
 	fWeb(NULL),
 	fLatency(1),
 	fInternalLatency(1),
-	fStartTime(0), fNextEventTime(0),
+	fStartTime(0),
 	fFramesSent(0),
 	fOutputEnabled(true),
 	fBufferGroup(NULL),
@@ -236,27 +238,21 @@ AudioMixer::AudioMixer(BMediaAddOn *addOn)
 
 	BMediaNode::AddNodeKind(B_SYSTEM_MIXER);
 
-	// set up the preferred output format
+	// set up the preferred output format (although we will accept any format)
+	memset(&fPrefOutputFormat, 0, sizeof(fPrefOutputFormat)); // set everything to wildcard first
 	fPrefOutputFormat.type = B_MEDIA_RAW_AUDIO;
+	fPrefOutputFormat.u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
+	fPrefOutputFormat.u.raw_audio.channel_count = 2;
 
-	fPrefOutputFormat.u.raw_audio.format = media_raw_audio_format::wildcard.format; // B_AUDIO_FLOAT
-	fPrefOutputFormat.u.raw_audio.frame_rate = media_raw_audio_format::wildcard.frame_rate;
-	fPrefOutputFormat.u.raw_audio.byte_order = (B_HOST_IS_BENDIAN) ? B_MEDIA_BIG_ENDIAN : B_MEDIA_LITTLE_ENDIAN;
-	fPrefOutputFormat.u.raw_audio.buffer_size = media_raw_audio_format::wildcard.buffer_size; //2048
-	fPrefOutputFormat.u.raw_audio.channel_count = 2; //media_raw_audio_format::wildcard.channel_count;
-
-	// set up the preferred input format
+	// set up the preferred input format (although we will accept any format)
+	memset(&fPrefInputFormat, 0, sizeof(fPrefInputFormat)); // set everything to wildcard first
 
 	fPrefInputFormat.type = B_MEDIA_RAW_AUDIO;
 
 	fPrefInputFormat.u.raw_audio.format = media_raw_audio_format::B_AUDIO_FLOAT;
 	fPrefInputFormat.u.raw_audio.frame_rate = 96000; //wildcard?
-	fPrefInputFormat.u.raw_audio.byte_order = (B_HOST_IS_BENDIAN) ? B_MEDIA_BIG_ENDIAN : B_MEDIA_LITTLE_ENDIAN;
+	fPrefInputFormat.u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
 
 	fPrefInputFormat.u.raw_audio.buffer_size = 1024;
-	fPrefInputFormat.u.raw_audio.channel_count = 2; //media_raw_audio_format::wildcard.channel_count;
+	fPrefInputFormat.u.raw_audio.channel_count = 2;
 
 	//fInput.source = media_source::null;
 
@@ -489,19 +485,16 @@ AudioMixer::HandleMessage( int32 message, const void *data, size_t size)
 status_t
 AudioMixer::AcceptFormat(const media_destination &dest, media_format *format)
 {
+	// check that the specified format is reasonable for the specified destination, and
+	// fill in any wildcard fields for which our BBufferConsumer has specific requirements.
 
-	// we accept any raw audio
-	if (IsValidDest(dest))
-	{
-		if ((format->type != B_MEDIA_UNKNOWN_TYPE) && (format->type != B_MEDIA_RAW_AUDIO))
-			return B_MEDIA_BAD_FORMAT;
-		else
-			return B_OK;
-	}
-	else
+	if (!IsValidDest(dest))
 		return B_MEDIA_BAD_DESTINATION;
 
+	if ((format->type != B_MEDIA_UNKNOWN_TYPE) && (format->type != B_MEDIA_RAW_AUDIO))
+		return B_MEDIA_BAD_FORMAT;
+	else
+		return B_OK;
 }
 
 status_t
@@ -523,94 +516,116 @@ AudioMixer::GetNextInput(int32 *cookie, media_input *out_input)
 void
 AudioMixer::DisposeInputCookie(int32 cookie)
 {
 
 	// nothing yet
 
 }
 
 void
 AudioMixer::BufferReceived(BBuffer *buffer)
 {
-	if (buffer)
-	{
-		if (buffer->Header()->type == B_MEDIA_PARAMETERS)
-		{
-			printf("Control Buffer Received\n");
-			ApplyParameterData(buffer->Data(), buffer->SizeUsed());
-		}
-		else
-		{
-			media_header *hdr = buffer->Header();
-
-			// check input
-			mixer_input *channel;
-			int inputcount = fMixerInputs.CountItems();
-
-			for (int i = 0; i < inputcount; i++)
-			{
-				channel = (mixer_input *)fMixerInputs.ItemAt(i);
-				if (channel->fInput.destination.id == hdr->destination)
-				{
-					i = inputcount;
-
-					bigtime_t now = TimeSource()->Now();
-					bigtime_t perf_time = hdr->start_time;
-					bigtime_t how_early = perf_time - fLatency - now;
-
-					if ((RunMode() != B_OFFLINE) &&
-						(RunMode() != B_RECORDING) &&
-						(how_early < 0))
-					{
-						printf("Received buffer %d usecs late from %s\n", -how_early, channel->fInput.name);
-						NotifyLateProducer(channel->fInput.source, -how_early, perf_time);
-					}
-					else
-					{
-						size_t sample_size = channel->fInput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
-
-						// calculate total byte offset for writing to the ringbuffer
-						// this takes account of the Event offset, as well as the buffer's start time
-						size_t total_offset = int(channel->fEventOffset + ((((perf_time - fNextEventTime) / 1000000) *
-											channel->fInput.format.u.raw_audio.frame_rate) *
-											sample_size * channel->fInput.format.u.raw_audio.channel_count)) % int(channel->fDataSize);
-
-						char *indata = (char *)buffer->Data();
-
-						if (buffer->SizeUsed() > (channel->fDataSize - total_offset))
-						{
-							memcpy(channel->fData + total_offset, indata, channel->fDataSize - total_offset);
-							memcpy(channel->fData, indata + (channel->fDataSize - total_offset), buffer->SizeUsed() - (channel->fDataSize - total_offset));
-						}
-						else
-							memcpy(channel->fData + total_offset, indata, buffer->SizeUsed());
-					}
-				}
-
-				if ((B_OFFLINE == RunMode()) && (B_DATA_AVAILABLE == channel->fProducerDataStatus))
-				{
-					RequestAdditionalBuffer(channel->fInput.source, buffer);
-				}
-			}
-		}
-
-		buffer->Recycle();
+	if (buffer->Header()->type == B_MEDIA_PARAMETERS) {
+		printf("Control Buffer Received\n");
+		ApplyParameterData(buffer->Data(), buffer->SizeUsed());
+		buffer->Recycle();
+		return;
 	}
+
+	// to receive the buffer at the right time,
+	// push it through the event looper
+	media_timed_event event(buffer->Header()->start_time,
+							BTimedEventQueue::B_HANDLE_BUFFER,
+							buffer,
+							BTimedEventQueue::B_RECYCLE_BUFFER);
+	EventQueue()->AddEvent(event);
 }
 
+
+void
+AudioMixer::HandleInputBuffer(BBuffer *buffer)
+{
+	media_header *hdr = buffer->Header();
+
+	// check input
+	int inputcount = fMixerInputs.CountItems();
+
+	for (int i = 0; i < inputcount; i++) {
+
+		mixer_input *channel = (mixer_input *)fMixerInputs.ItemAt(i);
+
+		if (channel->fInput.destination.id != hdr->destination)
+			continue;
+
+		bigtime_t now = TimeSource()->Now();
+		bigtime_t perf_time = hdr->start_time;
+		bigtime_t how_late = now - perf_time;
+		bigtime_t event_latency = EventLatency();
+
+		if (how_late > (event_latency + 2000)) {
+			how_late -= event_latency;
+			printf("Received buffer %Ld usecs late from %s\n", how_late, channel->fInput.name);
+			if (RunMode() != B_OFFLINE && RunMode() != B_RECORDING) {
+				printf("sending notify\n");
+				NotifyLateProducer(channel->fInput.source, max_c(500, how_late), perf_time);
+			} else if (RunMode() == B_DROP_DATA) {
+				printf("dropping buffer\n");
+				return;
+			}
+		}
+
+		size_t sample_size = channel->fInput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
+
+		// calculate total byte offset for writing to the ringbuffer
+		// this takes account of the Event offset, as well as the buffer's start time
+		/*
+		size_t total_offset = int(channel->fEventOffset + ((((perf_time - fNextEventTime) / 1000000) *
+							channel->fInput.format.u.raw_audio.frame_rate) *
+							sample_size * channel->fInput.format.u.raw_audio.channel_count)) % int(channel->fDataSize);
+		*/
+		size_t total_offset = int(channel->fEventOffset + channel->fInput.format.u.raw_audio.buffer_size) % int(channel->fDataSize);
+
+		char *indata = (char *)buffer->Data();
+
+		if (buffer->SizeUsed() > (channel->fDataSize - total_offset))
+		{
+			memcpy(channel->fData + total_offset, indata, channel->fDataSize - total_offset);
+			memcpy(channel->fData, indata + (channel->fDataSize - total_offset), buffer->SizeUsed() - (channel->fDataSize - total_offset));
+		}
+		else
+			memcpy(channel->fData + total_offset, indata, buffer->SizeUsed());
+
+		if ((B_OFFLINE == RunMode()) && (B_DATA_AVAILABLE == channel->fProducerDataStatus))
+		{
+			RequestAdditionalBuffer(channel->fInput.source, buffer);
+		}
+
+		break;
+	}
+}
+
+void
+AudioMixer::SendNewBuffer(bigtime_t event_time)
+{
+	BBuffer *outbuffer = fBufferGroup->RequestBuffer(fOutput.format.u.raw_audio.buffer_size, BufferDuration());
+	if (!outbuffer) {
+		printf("Could not allocate buffer\n");
+		return;
+	}
+
+	FillMixBuffer(outbuffer->Data(), outbuffer->SizeAvailable());
+
+	media_header *outheader = outbuffer->Header();
+	outheader->type = B_MEDIA_RAW_AUDIO;
+	outheader->size_used = fOutput.format.u.raw_audio.buffer_size;
+	outheader->time_source = TimeSource()->ID();
+	outheader->start_time = event_time;
+
+	if (B_OK != SendBuffer(outbuffer, fOutput.destination)) {
+		printf("Could not send buffer to output : %s\n", fOutput.name);
+		outbuffer->Recycle();
+	}
+}
+
 void
 AudioMixer::ProducerDataStatus( const media_destination &for_whom,
 								int32 status, bigtime_t at_performance_time)
@@ -651,8 +666,14 @@ status_t
 AudioMixer::Connected( const media_source &producer, const media_destination &where,
 				const media_format &with_format, media_input *out_input)
 {
-	if (! IsValidDest(where))
+	if (!IsValidDest(where))
 		return B_MEDIA_BAD_DESTINATION;
 
+	// We need to make sure that the outInput's name field contains a valid name,
+	// the name given the connection by the producer may still be an empty string.
+
+	// If we want the producer to use a specific BBufferGroup, we now need to call
+	// BMediaRoster::SetOutputBuffersFor() here to set the producer's buffer group
+
 	char *name = out_input->name;
 	mixer_input *mixerInput;
@@ -997,7 +1018,7 @@ AudioMixer::PrepareToConnect(const media_source &what, const media_destination &
 	format->u.raw_audio.channel_count = 2;
 
 	if(format->u.raw_audio.byte_order == media_raw_audio_format::wildcard.byte_order)
-		format->u.raw_audio.byte_order = fPrefOutputFormat.u.raw_audio.byte_order;
+		format->u.raw_audio.byte_order = B_MEDIA_HOST_ENDIAN;
 
 	if (format->u.raw_audio.buffer_size == media_raw_audio_format::wildcard.buffer_size)
 		format->u.raw_audio.buffer_size = 1024; // pick something comfortable to suggest
@@ -1038,6 +1059,7 @@ AudioMixer::Connect( status_t error, const media_source &source, const media_des
 	// Do so, then make sure we get our events early enough.
 	media_node_id id;
 	FindLatencyFor(fOutput.destination, &fLatency, &id);
+	printf("Downstream Latency is %Ld usecs\n", fLatency);
 
 	// we need at least the length of a full output buffer's latency (I think?)
 
@@ -1054,30 +1076,27 @@ AudioMixer::Connect( status_t error, const media_source &source, const media_des
 	bigtime_t latency_end = TimeSource()->RealTime();
 
 	fInternalLatency = latency_end - latency_start;
-	printf("Latency set at %d usecs\n", fInternalLatency);
+	printf("Internal latency is %Ld usecs\n", fInternalLatency);
 
 	delete mouse;
 
 	// might need to tweak the latency
 
 	SetEventLatency(fLatency + fInternalLatency);
 
-	// reset our buffer duration, etc. to avoid later calculations
-	// crashes w/ divide-by-zero when connecting to a variety of nodes...
-	// if (fOutput.format.u.raw_audio.frame_rate == media_raw_audio_format::wildcard.frame_rate)
-	// {
-	//	fOutput.format.u.raw_audio.frame_rate = 44100;
-	// }
-	bigtime_t duration = bigtime_t(1000000) * framesPerBuffer / bigtime_t(fOutput.format.u.raw_audio.frame_rate);
-	SetBufferDuration(duration);
+	// calculate buffer duration and set it
+	if (fOutput.format.u.raw_audio.frame_rate == 0) {
+		// XXX must be adjusted later when the format is known
+		SetBufferDuration((framesPerBuffer * 1000000LL) / 44100);
+	} else {
+		SetBufferDuration((framesPerBuffer * 1000000LL) / fOutput.format.u.raw_audio.frame_rate);
+	}
 
 	// Set up the buffer group for our connection, as long as nobody handed us a
 	// buffer group (via SetBufferGroup()) prior to this. That can happen, for example,
 	// if the consumer calls SetOutputBuffersFor() on us from within its Connected()
 	// method.
-	if (!fBufferGroup) AllocateBuffers();
+	if (!fBufferGroup)
+		AllocateBuffers();
 }
 
 
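The divide-by-zero guard above can be summarized in a small standalone sketch (the 44100 Hz fallback mirrors the hunk; the helper name and sample values are placeholders):

    #include <cstdint>
    #include <cstdio>

    typedef int64_t bigtime_t;

    // Fall back to 44100 Hz while the frame rate is still a wildcard (0),
    // so the buffer-duration division can never fault.
    static bigtime_t buffer_duration(int64_t framesPerBuffer, float frameRate)
    {
        if (frameRate == 0)
            frameRate = 44100; // placeholder until the real format is known
        return (framesPerBuffer * 1000000LL) / (int64_t)frameRate;
    }

    int main()
    {
        printf("%lld usec\n", (long long)buffer_duration(1024, 0));     // wildcard format
        printf("%lld usec\n", (long long)buffer_duration(1024, 48000)); // fixed format
        return 0;
    }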
@@ -1101,19 +1120,19 @@ AudioMixer::Disconnect(const media_source &what, const media_destination &where)
 void
 AudioMixer::LateNoticeReceived(const media_source &what, bigtime_t how_much, bigtime_t performance_time)
 {
 
 	// We've produced some late buffers... Increase Latency
 	// is the only runmode in which we can do anything about this
 
+	printf("AudioMixer::LateNoticeReceived, %Ld too late at %Ld\n", how_much, performance_time);
 
-	if (what == fOutput.source)
-		if (RunMode() == B_INCREASE_LATENCY)
-		{
+	if (what == fOutput.source) {
+		if (RunMode() == B_INCREASE_LATENCY) {
 			fInternalLatency += how_much;
+			printf("AudioMixer: increasing internal latency to %Ld usec\n", fInternalLatency);
 			SetEventLatency(fLatency + fInternalLatency);
 		}
+	}
-	printf("Late notice received. Buffer was %d usecs late\n", how_much);
 
 }
 
 
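As a standalone illustration of the B_INCREASE_LATENCY handling above (all numbers invented): each late notice grows the internal latency by the reported amount, and the event latency is republished as downstream plus internal latency.

    #include <cstdint>
    #include <cstdio>

    typedef int64_t bigtime_t;

    int main()
    {
        bigtime_t downstream = 20000; // fLatency
        bigtime_t internal = 5000;    // fInternalLatency
        bigtime_t late_by[] = { 1200, 800, 2500 };

        for (bigtime_t how_much : late_by) {
            internal += how_much;
            printf("event latency now %lld usec\n", (long long)(downstream + internal));
        }
        return 0;
    }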
@@ -1172,91 +1191,42 @@ AudioMixer::NodeRegistered()
 
 
 void
-AudioMixer::HandleEvent( const media_timed_event *event, bigtime_t lateness,
-						bool realTimeEvent = false)
+AudioMixer::HandleEvent( const media_timed_event *event, bigtime_t lateness, bool realTimeEvent)
 {
 
 	switch (event->type)
 	{
 
 		case BTimedEventQueue::B_HANDLE_BUFFER:
 		{
+			HandleInputBuffer((BBuffer *)event->pointer);
+			((BBuffer *)event->pointer)->Recycle();
+			break;
+		}
 
-			// check output
-			if (fOutput.destination != media_destination::null && fOutputEnabled) // this is in the wrong order too
-			{
-				BBuffer *outbuffer = fBufferGroup->RequestBuffer(fOutput.format.u.raw_audio.buffer_size, BufferDuration());
-
-				if (outbuffer)
-				{
-					FillMixBuffer(outbuffer->Data(), outbuffer->SizeAvailable());
-
-					media_header *outheader = outbuffer->Header();
-					outheader->type = B_MEDIA_RAW_AUDIO;
-					outheader->size_used = fOutput.format.u.raw_audio.buffer_size;
-					outheader->time_source = TimeSource()->ID();
-
-					// if this is the first buffer, mark with the start time
-					// we need this to calculate the other buffer times
-					if (fStartTime == 0) {
-						fStartTime = event->event_time;
-						fNextEventTime = fStartTime;
-					}
-
-					bigtime_t stamp;
-					if (RunMode() == B_RECORDING)
-						stamp = event->event_time; // this is actually the same as the other modes, since we're using
-					else // a timedevent queue and adding events at fNextEventTime
-					{
-						// we're in a live performance mode
-						// use the start time we calculate at the end of the the mix loop
-						// this time is based on the offset of all media produced so far,
-						// plus fStartTime, which is the recorded time of our first event
-						stamp = fNextEventTime;
-					}
-
-					outheader->start_time = stamp;
-
-					status_t err = SendBuffer(outbuffer, fOutput.destination);
-
-					if(err != B_OK)
-					{
-						outbuffer->Recycle();
-						printf("Could not send buffer to output : %s\n", fOutput.name);
-					}
-				}
-				else
-					printf("Failed to allocate a buffer\n");
-
-				// even if we didn't get a buffer allocated, we still need to send the next event
-				size_t sample_size = fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
-				int framesperbuffer = (fOutput.format.u.raw_audio.buffer_size / (sample_size * fOutput.format.u.raw_audio.channel_count));
-				fFramesSent += (framesperbuffer);
-
-				// calculate the start time for the next event
-				fNextEventTime = bigtime_t(fStartTime + double(fFramesSent / fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
-				media_timed_event nextBufferEvent(fNextEventTime, BTimedEventQueue::B_HANDLE_BUFFER);
-				EventQueue()->AddEvent(nextBufferEvent);
-			}
-
-			break;
-		}
+		case SEND_NEW_BUFFER_EVENT:
+		{
+			if (fOutputEnabled && fOutput.destination != media_destination::null)
+				SendNewBuffer(event->event_time);
+
+			// if this is the first buffer, mark with the start time
+			// we need this to calculate the other buffer times
+			if (fStartTime == 0) {
+				fStartTime = event->event_time;
+			}
+
+			// count frames that have been played
+			size_t sample_size = fOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
+			int framesperbuffer = (fOutput.format.u.raw_audio.buffer_size / (sample_size * fOutput.format.u.raw_audio.channel_count));
+
+			fFramesSent += framesperbuffer;
+
+			// calculate the start time for the next event and add the event
+			bigtime_t nextevent = bigtime_t(fStartTime + double(fFramesSent / fOutput.format.u.raw_audio.frame_rate) * 1000000.0);
+			media_timed_event nextBufferEvent(nextevent, SEND_NEW_BUFFER_EVENT);
+			EventQueue()->AddEvent(nextBufferEvent);
+			break;
+		}
 
 		case BTimedEventQueue::B_START:
 
|
|||||||
|
|
||||||
//fThread = spawn_thread(_mix_thread_, "audio mixer thread", B_REAL_TIME_PRIORITY, this);
|
//fThread = spawn_thread(_mix_thread_, "audio mixer thread", B_REAL_TIME_PRIORITY, this);
|
||||||
|
|
||||||
media_timed_event firstBufferEvent(event->event_time, BTimedEventQueue::B_HANDLE_BUFFER);
|
media_timed_event firstBufferEvent(event->event_time, SEND_NEW_BUFFER_EVENT);
|
||||||
|
|
||||||
// Alternatively, we could call HandleEvent() directly with this event, to avoid a trip through
|
// Alternatively, we could call HandleEvent() directly with this event, to avoid a trip through
|
||||||
// the event queue, like this:
|
// the event queue, like this:
|
||||||
@@ -1289,6 +1259,7 @@ AudioMixer::HandleEvent( const media_timed_event *event, bigtime_t lateness,
 			// stopped - don't process any more buffers, flush all buffers from eventqueue
 
 			EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, BTimedEventQueue::B_HANDLE_BUFFER);
+			EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, SEND_NEW_BUFFER_EVENT);
 			break;
 		}
 
@@ -1344,9 +1315,14 @@ AudioMixer::AllocateBuffers()
 	// allocate enough buffers to span our downstream latency, plus one
 	size_t size = fOutput.format.u.raw_audio.buffer_size;
 	int32 count = int32((fLatency / (BufferDuration() + 1)) + 1);
 
-	fBufferGroup = new BBufferGroup(size, count);
+	if (count < 2) {
+		printf("AudioMixer: calculated only %ld buffers, that's not enough\n", count);
+		count = 2;
+	}
 
+	printf("AudioMixer: allocating %ld buffers\n", count);
+	fBufferGroup = new BBufferGroup(size, count);
 }
 
 // use this later for separate threads
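A compact sketch of the buffer-count heuristic above, runnable on its own (the clamp to two buffers mirrors the new code; the input values are invented):

    #include <cstdint>
    #include <cstdio>

    typedef int64_t bigtime_t;

    // enough buffers to cover the downstream latency, plus one, but never fewer than two
    static int32_t buffer_count(bigtime_t latency, bigtime_t bufferDuration)
    {
        int32_t count = int32_t(latency / (bufferDuration + 1)) + 1;
        if (count < 2)
            count = 2;
        return count;
    }

    int main()
    {
        printf("%d\n", (int)buffer_count(50000, 23219)); // ~50 ms latency, ~23 ms buffers -> 3
        printf("%d\n", (int)buffer_count(5000, 23219));  // short latency still gets 2
        return 0;
    }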
@@ -47,6 +47,9 @@ class AudioMixer :
 
 		void AllocateBuffers();
 		status_t FillMixBuffer(void *outbuffer, size_t size);
 
+		void SendNewBuffer(bigtime_t event_time);
+		void HandleInputBuffer(BBuffer *buffer);
+
 		// BMediaNode methods
 
@@ -200,8 +203,8 @@ class AudioMixer :
 		BMediaAddOn * fAddOn;
 		BParameterWeb * fWeb; // local pointer to parameterweb
 
-		bigtime_t fLatency, fInternalLatency; // latency (total and internal)
-		bigtime_t fStartTime, fNextEventTime; // time node started, time of next (output) event
+		bigtime_t fLatency, fInternalLatency; // latency (downstream and internal)
+		bigtime_t fStartTime; // time node started
 		uint64 fFramesSent; // audio frames sent
 		bool fOutputEnabled;
 
@@ -130,6 +130,7 @@ MultiAudioNode::MultiAudioNode(BMediaAddOn *addon, char* name, MultiAudioDevice
 	AddNodeKind( B_PHYSICAL_INPUT );
 
 	// initialize our preferred format object
+	memset(&fPreferredFormat, 0, sizeof(fPreferredFormat)); // set everything to wildcard first
 	fPreferredFormat.type = B_MEDIA_RAW_AUDIO;
 	fPreferredFormat.u.raw_audio.format = MultiAudioDevice::convert_multiaudio_format_to_media_format(fDevice->MFI.output.format);
 	fPreferredFormat.u.raw_audio.channel_count = 2;
@@ -176,9 +176,9 @@ media_multistream_format media_multistream_format::wildcard = {0};
 *************************************************************/
 
 bool
-media_format::Matches(const media_format *otherFormat) const
+media_format::Matches(const media_format *other) const
 {
-	return format_is_compatible(*this, *otherFormat);
+	return type == other->type; // XXX fixthis
 }
 
 
@@ -412,10 +412,7 @@ bool operator==(const media_format & a, const media_format & b)
 /* return true if a and b are compatible (accounting for wildcards) */
 bool format_is_compatible(const media_format & a, const media_format & b) /* a is the format you want to feed to something accepting b */
 {
-	UNIMPLEMENTED();
-	if (a.type == b.type)
-		return true;
-	return false;
+	return a.Matches(&b);
 }
 
 bool string_for_format(const media_format & f, char * buf, size_t size)
@@ -33,6 +33,7 @@ static char __copyright[] = "Copyright (c) 2002, 2003 Marcus Overhagen <Marcus@O
 #include <MediaEventLooper.h>
 #include <TimeSource.h>
 #include <scheduler.h>
+#include <Buffer.h>
 #include "debug.h"
 
 // XXX The bebook says that the latency is always calculated in realtime
@@ -177,6 +178,7 @@ BMediaEventLooper::SetRunMode(run_mode mode)
 	if(fControlThread > 0) {
 		set_thread_priority(fControlThread, fCurrentPriority);
 		fSchedulingLatency = estimate_max_scheduling_latency(fControlThread);
+		printf("BMediaEventLooper: SchedulingLatency is %Ld\n", fSchedulingLatency);
 	}
 }
 
@@ -221,7 +223,6 @@ BMediaEventLooper::ControlLoop()
 		// BMediaEventLooper compensates your performance time by adding the event latency
 		// (see SetEventLatency()) and the scheduling latency (or, for real-time events,
 		// only the scheduling latency).
-		// latency = fOut.downstream_latency + fOut.processing_latency + fSchedulingLatency;
 		// XXX well, fix this later
 		latency = fEventLatency + fSchedulingLatency;
 
@@ -261,10 +262,10 @@ BMediaEventLooper::ControlLoop()
 			if (err == B_OK) {
 				bigtime_t lateness;
 				if (is_realtime)
-					lateness = TimeSource()->RealTime() - event.event_time;
+					lateness = TimeSource()->RealTime() - event.event_time - fEventLatency;
 				else
-					lateness = TimeSource()->Now() - event.event_time;
-				DispatchEvent(&event,lateness,is_realtime);
+					lateness = TimeSource()->Now() - event.event_time - fEventLatency;
+				DispatchEvent(&event, lateness, is_realtime);
 			}
 		}
 	}
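Standalone illustration of the corrected lateness formula: the control loop deliberately wakes up fEventLatency early, so that head start has to be subtracted before an event is reported as late (numbers invented):

    #include <cstdint>
    #include <cstdio>

    typedef int64_t bigtime_t;

    int main()
    {
        bigtime_t event_time = 2000000;
        bigtime_t now = 1998000;        // woken up 2 ms before the event time
        bigtime_t event_latency = 3000; // fEventLatency

        bigtime_t lateness = now - event_time - event_latency;
        printf("lateness = %lld usec (%s)\n", (long long)lateness,
            lateness > 0 ? "late" : "on time or early");
        return 0;
    }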
@@ -356,6 +357,7 @@ BMediaEventLooper::SetPriority(int32 priority)
 	if(fControlThread > 0) {
 		set_thread_priority(fControlThread, fCurrentPriority);
 		fSchedulingLatency = estimate_max_scheduling_latency(fControlThread);
+		printf("BMediaEventLooper: SchedulingLatency is %Ld\n", fSchedulingLatency);
 	}
 
 	return B_OK;
@@ -419,6 +421,7 @@ BMediaEventLooper::Run()
 
 	// get latency information
 	fSchedulingLatency = estimate_max_scheduling_latency(fControlThread);
+	printf("BMediaEventLooper: SchedulingLatency is %Ld\n", fSchedulingLatency);
 }
 
 
@@ -471,7 +474,8 @@ BMediaEventLooper::DispatchEvent(const media_timed_event *event,
 		default:
 			break;
 	}
 
+	_DispatchCleanUp(event);
 }
 
 /*************************************************************
@@ -2109,6 +2109,8 @@ BMediaRosterEx::GetDormantFlavorInfo(media_addon_id addonid,
 	dormant_flavor_info * out_flavor)
 {
 	CALLED();
+	if (out_flavor == NULL)
+		return B_BAD_VALUE;
 
 	xfer_server_get_dormant_flavor_info msg;
 	xfer_server_get_dormant_flavor_info_reply *reply;
@@ -2300,8 +2302,27 @@ BMediaRoster::GetFormatFor(const media_output & output,
 	media_format * io_format,
 	uint32 flags)
 {
-	UNIMPLEMENTED();
-	return B_ERROR;
+	CALLED();
+	if (io_format == NULL)
+		return B_BAD_VALUE;
+	if ((output.node.kind & B_BUFFER_PRODUCER) == 0)
+		return B_MEDIA_BAD_NODE;
+	if (IS_INVALID_SOURCE(output.source))
+		return B_MEDIA_BAD_SOURCE;
+
+	producer_format_suggestion_requested_request request;
+	producer_format_suggestion_requested_reply reply;
+	status_t rv;
+
+	request.type = B_MEDIA_UNKNOWN_TYPE;
+	request.quality = 0; // XXX what should this be?
+
+	rv = QueryPort(output.source.port, PRODUCER_FORMAT_SUGGESTION_REQUESTED, &request, sizeof(request), &reply, sizeof(reply));
+	if (rv != B_OK)
+		return rv;
+
+	*io_format = reply.format;
+	return B_OK;
 }
 
 
@@ -2310,8 +2331,27 @@ BMediaRoster::GetFormatFor(const media_input & input,
 	media_format * io_format,
 	uint32 flags)
 {
-	UNIMPLEMENTED();
-	return B_ERROR;
+	CALLED();
+	if (io_format == NULL)
+		return B_BAD_VALUE;
+	if ((input.node.kind & B_BUFFER_CONSUMER) == 0)
+		return B_MEDIA_BAD_NODE;
+	if (IS_INVALID_DESTINATION(input.destination))
+		return B_MEDIA_BAD_DESTINATION;
+
+	consumer_accept_format_request request;
+	consumer_accept_format_reply reply;
+	status_t rv;
+
+	request.dest = input.destination;
+	memset(&request.format, 0, sizeof(request.format)); // wildcard
+
+	rv = QueryPort(input.destination.port, CONSUMER_ACCEPT_FORMAT, &request, sizeof(request), &reply, sizeof(reply));
+	if (rv != B_OK)
+		return rv;
+
+	*io_format = reply.format;
+	return B_OK;
 }
 
 
@@ -2321,6 +2361,14 @@ BMediaRoster::GetFormatFor(const media_node & node,
 	float quality)
 {
 	UNIMPLEMENTED();
+	if (io_format == NULL)
+		return B_BAD_VALUE;
+	if (IS_INVALID_NODE(node))
+		return B_MEDIA_BAD_NODE;
+	if ((node.kind & (B_BUFFER_CONSUMER | B_BUFFER_PRODUCER)) == 0)
+		return B_MEDIA_BAD_NODE;
+
+
 	return B_ERROR;
 }
 
@@ -269,6 +269,7 @@ _shared_buffer_list::RecycleBuffer(BBuffer *buffer)
 			reclaimed_count++;
 			if (info[i].reclaimed) {
 				FATAL("_shared_buffer_list: Error, BBuffer %p, id = %ld already reclaimed\n", buffer, id);
+				DEBUG_ONLY(debugger("buffer already reclaimed"));
 				continue;
 			}
 			info[i].reclaimed = true;
@@ -31,7 +31,8 @@ _SoundPlayNode::_SoundPlayNode(const char *name, const media_multi_audio_format
 	mInitCheckStatus(B_OK),
 	mOutputEnabled(true),
 	mBufferGroup(NULL),
-	mFramesSent(0)
+	mFramesSent(0),
+	mTooEarlyCount(0)
 {
 	CALLED();
 	mPreferredFormat.type = B_MEDIA_RAW_AUDIO;
@@ -369,7 +370,7 @@ _SoundPlayNode::Connect(status_t error, const media_source& source, const media_
 	FindLatencyFor(mOutput.destination, &mLatency, &id);
 	fprintf(stderr, "\tdownstream latency = %Ld\n", mLatency);
 
-	mInternalLatency = 10LL;
+	mInternalLatency = 5000LL;
 	fprintf(stderr, "\tbuffer-filling took %Ld usec on this machine\n", mInternalLatency);
 	SetEventLatency(mLatency + mInternalLatency);
 
@@ -421,6 +422,8 @@ _SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t how_much,
 {
 	CALLED();
 
+	printf("_SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", how_much, performance_time);
+
 	// is this our output?
 	if (what != mOutput.source)
 	{
@@ -430,6 +433,7 @@ _SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t how_much,
 
 	// If we're late, we need to catch up. Respond in a manner appropriate to our
 	// current run mode.
+	/*
 	if (RunMode() == B_RECORDING)
 	{
 		// A hardware capture node can't adjust; it simply emits buffers at
@@ -438,13 +442,16 @@ _SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t how_much,
 		// can't choose to capture "sooner"....
 	}
 	else if (RunMode() == B_INCREASE_LATENCY)
+	*/
+	if (RunMode() != B_DROP_DATA)
 	{
 		// We're late, and our run mode dictates that we try to produce buffers
 		// earlier in order to catch up. This argues that the downstream nodes are
 		// not properly reporting their latency, but there's not much we can do about
 		// that at the moment, so we try to start producing buffers earlier to
 		// compensate.
-		mInternalLatency += how_much;
+		// mInternalLatency += how_much;
+		mLatency += 1000;
 		SetEventLatency(mLatency + mInternalLatency);
 
 		fprintf(stderr, "\tincreasing latency to %Ld\n", mLatency + mInternalLatency);
@@ -552,39 +559,70 @@ status_t
 _SoundPlayNode::HandleBuffer(
 	const media_timed_event *event,
 	bigtime_t lateness,
-	bool realTimeEvent = false)
+	bool realTimeEvent)
 {
 	CALLED();
 
 	// make sure we're both started *and* connected before delivering a buffer
-	if ((RunState() == BMediaEventLooper::B_STARTED) && (mOutput.destination != media_destination::null))
-	{
+	if ((RunState() != BMediaEventLooper::B_STARTED) || (mOutput.destination == media_destination::null))
+		return B_OK;
+
+	// The event->event_time is the time at which the buffer we are preparing here should
+	// arrive at it's destination. The MediaEventLooper should have scheduled us early enough
+	// (based on EventLatency() and the SchedulingLatency()) to make this possible.
+
+	bigtime_t scheduling_latency = SchedulingLatency();
+
+	if (lateness > 0) {
+		printf("_SoundPlayNode::HandleBuffer, event sheduled too late, lateness is %Ld\n", lateness);
+		mInternalLatency += 1000;
+		SetEventLatency(mLatency + mInternalLatency);
+	}
+
+	// skip buffer creation if output not enabled
+	if (mOutputEnabled) {
+
 		// Get the next buffer of data
 		BBuffer* buffer = FillNextBuffer(event->event_time);
-		if (buffer)
-		{
+		if (buffer) {
+
+			// If we are ready way too early, decrase internal latency
+			bigtime_t how_early = event->event_time - TimeSource()->Now() - mLatency;
+			if (how_early > (3 * scheduling_latency)) {
+
+				printf("_SoundPlayNode::HandleBuffer, event scheduled too early, how_early is %Ld\n", how_early);
+
+				if (mTooEarlyCount++ == 5) {
+					mInternalLatency -= how_early;
+					if (mInternalLatency < 500)
+						mInternalLatency = 500;
+					printf("_SoundPlayNode::HandleBuffer setting internal latency to %Ld\n", mInternalLatency);
+					SetEventLatency(mLatency + mInternalLatency);
+					mTooEarlyCount = 0;
+				}
+			}
+
 			// send the buffer downstream if and only if output is enabled
-			status_t err = B_ERROR;
-			if (mOutputEnabled) err = SendBuffer(buffer, mOutput.destination);
-			if (err)
-			{
-				// we need to recycle the buffer ourselves if output is disabled or
+			if (B_OK != SendBuffer(buffer, mOutput.destination)) {
+				// we need to recycle the buffer
 				// if the call to SendBuffer() fails
 				buffer->Recycle();
 			}
 		}
-
-		// track how much media we've delivered so far
-		size_t nFrames = mOutput.format.u.raw_audio.buffer_size
-			/ (mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
-			/ mOutput.format.u.raw_audio.channel_count;
-		mFramesSent += nFrames;
-
-		// The buffer is on its way; now schedule the next one to go
-		bigtime_t nextEvent = mStartTime + bigtime_t(double(mFramesSent) / double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
-		media_timed_event nextBufferEvent(nextEvent, BTimedEventQueue::B_HANDLE_BUFFER);
-		EventQueue()->AddEvent(nextBufferEvent);
 	}
 
+	// track how much media we've delivered so far
+	size_t nFrames = mOutput.format.u.raw_audio.buffer_size
+		/ (mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
+		/ mOutput.format.u.raw_audio.channel_count;
+	mFramesSent += nFrames;
+
+	// The buffer is on its way; now schedule the next one to go
+	// nextEvent is the time at which the buffer should arrive at it's destination
+	bigtime_t nextEvent = mStartTime + bigtime_t(double(mFramesSent) / double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
+	media_timed_event nextBufferEvent(nextEvent, BTimedEventQueue::B_HANDLE_BUFFER);
+	EventQueue()->AddEvent(nextBufferEvent);
+
 	return B_OK;
 }
@@ -690,6 +728,10 @@ _SoundPlayNode::AllocateBuffers()
 
 	DPRINTF("\tlatency = %Ld, buffer duration = %Ld\n", mLatency, BufferDuration());
 	DPRINTF("\tcreating group of %ld buffers, size = %lu\n", count, size);
 
+	if (count < 2)
+		count == 2;
+
 	mBufferGroup = new BBufferGroup(size, count);
 }
 
@@ -719,26 +761,8 @@ _SoundPlayNode::FillNextBuffer(bigtime_t event_time)
 	hdr->type = B_MEDIA_RAW_AUDIO;
 	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
 	hdr->time_source = TimeSource()->ID();
+	hdr->start_time = event_time;
 
-	bigtime_t stamp;
-	if (RunMode() == B_RECORDING)
-	{
-		// In B_RECORDING mode, we stamp with the capture time. We're not
-		// really a hardware capture node, but we simulate it by using the (precalculated)
-		// time at which this buffer "should" have been created.
-		stamp = event_time;
-	}
-	else
-	{
-		// okay, we're in one of the "live" performance run modes. in these modes, we
-		// stamp the buffer with the time at which the buffer should be rendered to the
-		// output, not with the capture time. mStartTime is the cached value of the
-		// first buffer's performance time; we calculate this buffer's performance time as
-		// an offset from that time, based on the amount of media we've created so far.
-		// Recalculating every buffer like this avoids accumulation of error.
-		stamp = mStartTime + bigtime_t(double(mFramesSent) / double(mOutput.format.u.raw_audio.frame_rate) * 1000000.0);
-	}
-	hdr->start_time = stamp;
 	DPRINTF("TimeSource()->Now() : %li\n", TimeSource()->Now());
 	DPRINTF("hdr->start_time : %li\n", hdr->start_time);
 	DPRINTF("mFramesSent : %li\n", mFramesSent);
@@ -169,6 +169,7 @@ private:
 		bigtime_t mInternalLatency;
 		bigtime_t mStartTime;
 		uint64 mFramesSent;
+		int32 mTooEarlyCount;
 };
 
 #endif
@@ -155,11 +155,11 @@ BSoundPlayer::Start()
 	BTimeSource *timeSource = roster->MakeTimeSourceFor(_m_node->Node());
 
 	// make sure we give the producer enough time to run buffers through
 	// the node chain, otherwise it'll start up already late
 	bigtime_t latency = 0;
 	status_t err = roster->GetLatencyFor(_m_node->Node(), &latency);
 
-	err = roster->StartNode(_m_node->Node(), timeSource->Now() + latency);
+	err = roster->StartNode(_m_node->Node(), timeSource->Now() + latency + 5000);
 
 	timeSource->Release();
 
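A tiny sketch of the start-time headroom idea above: starting the node at now plus its reported latency plus an extra 5000 usec gives the first buffers time to travel the node chain instead of arriving already late (values invented):

    #include <cstdint>
    #include <cstdio>

    typedef int64_t bigtime_t;

    int main()
    {
        bigtime_t now = 1000000;
        bigtime_t latency = 30000; // as reported by GetLatencyFor()
        bigtime_t start_at = now + latency + 5000;
        printf("starting node at %lld (%lld usec from now)\n",
            (long long)start_at, (long long)(start_at - now));
        return 0;
    }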
@@ -155,11 +155,13 @@ _event_queue_imp::RemoveFirstEvent(media_timed_event * outEvent)
 	if (fFirstEntry == 0)
 		return B_ERROR;
 
-	if (outEvent != 0)
+	if (outEvent != 0) {
+		// No cleanup here
 		*outEvent = fFirstEntry->event;
-	else
+	} else {
 		CleanupEvent(&fFirstEntry->event);
+	}
 
 	RemoveEntry(fFirstEntry);
 
 	return B_OK;
@@ -560,6 +562,7 @@ _event_queue_imp::CleanupEvent(media_timed_event *event)
 		// do nothing
 	} else if (event->type == BTimedEventQueue::B_HANDLE_BUFFER && event->cleanup == BTimedEventQueue::B_RECYCLE_BUFFER) {
 		((BBuffer *)event->pointer)->Recycle();
+		DEBUG_ONLY(*const_cast<void **>(&event->pointer) = NULL);
 	} else if (event->cleanup == BTimedEventQueue::B_EXPIRE_TIMER) {
 		// call TimerExpired() on the event->data
 		debugger("BTimedEventQueue cleanup: calling TimerExpired() should be implemented here\n");
@@ -13,6 +13,9 @@
 /* no locking used in this file, we assume that the caller (NodeManager) does it.
  */
 
+
+#define MAX_NODE_INFOS 10
+
 DefaultManager::DefaultManager()
  :	fMixerConnected(false),
 	fPhysicalVideoOut(-1),
@@ -230,14 +233,14 @@ DefaultManager::FindPhysicalVideoIn()
 void
 DefaultManager::FindPhysicalAudioOut()
 {
-	live_node_info info[2];
+	live_node_info info[MAX_NODE_INFOS];
 	media_format input; /* a physical audio output has a logical data input */
 	int32 count;
 	status_t rv;
 
 	memset(&input, 0, sizeof(input));
 	input.type = B_MEDIA_RAW_AUDIO;
-	count = 2;
+	count = MAX_NODE_INFOS;
 	rv = BMediaRoster::Roster()->GetLiveNodes(&info[0], &count, &input, NULL, NULL, B_BUFFER_CONSUMER | B_PHYSICAL_OUTPUT);
 	if (rv != B_OK || count < 1) {
 		printf("Couldn't find physical audio output node\n");
@@ -249,7 +252,7 @@ DefaultManager::FindPhysicalAudioOut()
 	for (int i = 0; i < count; i++) {
 		if (0 == strcmp(info[i].name, "None Out")) // skip the Null audio driver
 			continue;
-		printf("Default physical audio output created!\n");
+		printf("Default physical audio output \"%s\" created!\n", info[i].name);
 		fPhysicalAudioOut = info[i].node.node;
 		return;
 	}
@@ -258,14 +261,14 @@ DefaultManager::FindPhysicalAudioOut()
 void
 DefaultManager::FindPhysicalAudioIn()
 {
-	live_node_info info[2];
+	live_node_info info[MAX_NODE_INFOS];
 	media_format output; /* a physical audio input has a logical data output */
 	int32 count;
 	status_t rv;
 
 	memset(&output, 0, sizeof(output));
 	output.type = B_MEDIA_RAW_AUDIO;
-	count = 2;
+	count = MAX_NODE_INFOS;
 	rv = BMediaRoster::Roster()->GetLiveNodes(&info[0], &count, NULL, &output, NULL, B_BUFFER_PRODUCER | B_PHYSICAL_INPUT);
 	if (rv != B_OK || count < 1) {
 		printf("Couldn't find physical audio input node\n");
@@ -277,7 +280,7 @@ DefaultManager::FindPhysicalAudioIn()
 	for (int i = 0; i < count; i++) {
 		if (0 == strcmp(info[i].name, "None In")) // skip the Null audio driver
 			continue;
-		printf("Default physical audio input created!\n");
+		printf("Default physical audio input \"%s\" created!\n", info[i].name);
 		fPhysicalAudioIn = info[i].node.node;
 		return;
 	}
@@ -286,26 +289,54 @@ DefaultManager::FindPhysicalAudioIn()
 void
 DefaultManager::FindTimeSource()
 {
-	live_node_info info;
+	live_node_info info[MAX_NODE_INFOS];
 	media_format input; /* a physical audio output has a logical data input (DAC)*/
 	int32 count;
 	status_t rv;
 
-	/* We try to use the default physical audio out node,
-	 * as it most likely is a timesource.
-	 * XXX if that fails, we might use other audio or video clock timesources
-	 */
+	/* First try to use the current default physical audio out
+	 */
+	if (fPhysicalAudioOut != -1) {
+		media_node clone;
+		if (B_OK == BMediaRoster::Roster()->GetNodeFor(fPhysicalAudioOut, &clone)) {
+			if (clone.kind & B_TIME_SOURCE) {
+				fTimeSource = clone.node;
+				BMediaRoster::Roster()->ReleaseNode(clone);
+				printf("Default DAC timesource created!\n");
+				return;
+			}
+			BMediaRoster::Roster()->ReleaseNode(clone);
+		} else {
+			printf("Default DAC is not a timesource!\n");
+		}
+	} else {
+		printf("Default DAC node does not exist!\n");
+	}
+
+	/* Now try to find another physical audio out node
+	 */
 
 	memset(&input, 0, sizeof(input));
 	input.type = B_MEDIA_RAW_AUDIO;
-	count = 1;
-	rv = BMediaRoster::Roster()->GetLiveNodes(&info, &count, &input, NULL, NULL, B_TIME_SOURCE | B_PHYSICAL_OUTPUT);
-	if (rv != B_OK || count != 1) {
+	count = MAX_NODE_INFOS;
+	rv = BMediaRoster::Roster()->GetLiveNodes(&info[0], &count, &input, NULL, NULL, B_TIME_SOURCE | B_PHYSICAL_OUTPUT);
+	if (rv == B_OK && count >= 1) {
+		for (int i = 0; i < count; i++)
+			printf("info[%d].name %s\n", i, info[i].name);
+
+		for (int i = 0; i < count; i++) {
+			// The BeOS R5 None Out node pretend to be a physical time source, that is pretty dumb
+			if (0 == strcmp(info[i].name, "None Out")) // skip the Null audio driver
+				continue;
+			printf("Default DAC timesource \"%s\" created!\n", info[i].name);
+			fTimeSource = info[i].node.node;
+			return;
+		}
+	} else {
 		printf("Couldn't find DAC timesource node\n");
-		return;
 	}
-	fTimeSource = info.node.node;
-	printf("Default DAC timesource created!\n");
+
+	/* XXX we might use other audio or video clock timesources
+	 */
 }
 
 void
 
@@ -334,6 +365,8 @@ DefaultManager::ConnectMixerToOutput()
 	media_node soundcard;
 	media_input input;
 	media_output output;
+	media_input newinput;
+	media_output newoutput;
 	media_format format;
 	BTimeSource * ts;
 	bigtime_t start_at;
@@ -372,20 +405,58 @@ DefaultManager::ConnectMixerToOutput()
 		rv = B_ERROR;
 		goto finish;
 	}
 
-	memset(&format, 0, sizeof(format));
-	format.type = B_MEDIA_RAW_AUDIO;
-	format.u.raw_audio.frame_rate = 44100;
-	format.u.raw_audio.channel_count = 2;
-	format.u.raw_audio.format = 0x2;
-
-	//roster->GetFormatFor(input, &format);
-
-	rv = roster->Connect(output.source, input.destination, &format, &output, &input);
-	if (rv != B_OK) {
-		printf("DefaultManager: connect failed\n");
-	}
+	for (int i = 0; i < 5; i++) {
+		switch (i) {
+			case 0:
+				printf("DefaultManager: Trying connect in native format\n");
+				if (B_OK != roster->GetFormatFor(input, &format)) {
+					FATAL("DefaultManager: GetFormatFor failed\n");
+					continue;
+				}
+				break;
+
+			case 1:
+				printf("DefaultManager: Trying connect in format 1\n");
+				memset(&format, 0, sizeof(format));
+				format.type = B_MEDIA_RAW_AUDIO;
+				format.u.raw_audio.frame_rate = 44100;
+				format.u.raw_audio.channel_count = 2;
+				format.u.raw_audio.format = 0x2;
+				break;
+
+			case 2:
+				printf("DefaultManager: Trying connect in format 2\n");
+				memset(&format, 0, sizeof(format));
+				format.type = B_MEDIA_RAW_AUDIO;
+				format.u.raw_audio.frame_rate = 48000;
+				format.u.raw_audio.channel_count = 2;
+				format.u.raw_audio.format = 0x2;
+				break;
+
+			case 3:
+				printf("DefaultManager: Trying connect in format 3\n");
+				memset(&format, 0, sizeof(format));
+				format.type = B_MEDIA_RAW_AUDIO;
+				break;
+
+			case 4:
+				printf("DefaultManager: Trying connect in format 4\n");
+				memset(&format, 0, sizeof(format));
+				break;
+		}
+		rv = roster->Connect(output.source, input.destination, &format, &newoutput, &newinput);
+		if (rv == B_OK)
+			break;
+	}
+	if (rv != B_OK) {
+		FATAL("DefaultManager: connect failed\n");
+		goto finish;
+	}
+
+	roster->SetRunModeNode(mixer, BMediaNode::B_INCREASE_LATENCY);
+	roster->SetRunModeNode(soundcard, BMediaNode::B_RECORDING);
 
 	roster->GetTimeSource(&timesource);
 	roster->SetTimeSourceFor(mixer.node, timesource.node);
 	roster->SetTimeSourceFor(soundcard.node, timesource.node);
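The retry strategy above, reduced to a self-contained sketch: try the consumer's own format first, then fall back to progressively less specific raw-audio formats until a connection succeeds. The types and the try_connect() helper are stand-ins for illustration, not the real BMediaRoster API.

    #include <cstdio>

    struct fake_format { float frame_rate; int channels; };

    // pretend the consumer only accepts 48000 Hz stereo
    static bool try_connect(const fake_format &f)
    {
        return f.frame_rate == 48000 && f.channels == 2;
    }

    int main()
    {
        const fake_format attempts[] = {
            { 44100, 2 },  // first guess
            { 48000, 2 },  // second fallback
            { 0, 0 },      // fully wildcarded
        };
        for (const fake_format &f : attempts) {
            if (try_connect(f)) {
                printf("connected at %.0f Hz\n", f.frame_rate);
                return 0;
            }
        }
        printf("connect failed\n");
        return 1;
    }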