/***********************************************************************
 * AUTHOR: Marcus Overhagen, Jérôme Duval
 *   FILE: SoundPlayNode.cpp
 *  DESCR: This is the BBufferProducer, used internally by BSoundPlayer.
 *         This belongs into a private namespace, but isn't for
 *         compatibility reasons.
 ***********************************************************************/
2003-05-11 21:41:38 +04:00
|
|
|
#include <TimeSource.h>
|
|
|
|
#include <string.h>
|
2002-07-09 16:24:59 +04:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <unistd.h>
|
2003-05-26 16:37:52 +04:00
|
|
|
#include "SoundPlayNode.h"
|
|
|
|
#include "debug.h"
|
|
|
|
|
2003-05-31 00:34:48 +04:00
|
|
|
#define DPRINTF 1
|
2003-05-26 16:37:52 +04:00
|
|
|
|
|
|
|
#if DPRINTF
|
|
|
|
#undef DPRINTF
|
|
|
|
#define DPRINTF printf
|
|
|
|
#else
|
|
|
|
#undef DPRINTF
|
|
|
|
#define DPRINTF if (1) {} else printf
|
|
|
|
#endif
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-05-31 00:34:48 +04:00
|
|
|
#define SEND_NEW_BUFFER_EVENT (BTimedEventQueue::B_USER_EVENT + 1)
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
// Construct the producer node used internally by BSoundPlayer.
// The caller-supplied raw audio format is stored verbatim and dumped
// to the debug output for diagnosis.
_SoundPlayNode::_SoundPlayNode(const char *name, const media_multi_audio_format *format, BSoundPlayer *player) :
	BMediaNode(name),
	BBufferProducer(B_MEDIA_RAW_AUDIO),
	BMediaEventLooper(),
	mPlayer(player),
	mInitCheckStatus(B_OK),
	mOutputEnabled(true),
	mBufferGroup(NULL),
	mFramesSent(0),
	mTooEarlyCount(0)
{
	CALLED();
	// keep the format wrapped in a media_format; that wrapper is what
	// the format negotiation code hands out later
	mFormat.type = B_MEDIA_RAW_AUDIO;
	mFormat.u.raw_audio = *format;

	// dump the format we were given, for debugging
	DPRINTF("Format Info:\n");
	DPRINTF("	frame_rate: %.1f (%ld)\n", mFormat.u.raw_audio.frame_rate, (int32)mFormat.u.raw_audio.frame_rate);
	DPRINTF("	channel_count: %ld\n",mFormat.u.raw_audio.channel_count);
	DPRINTF("	byte_order: %ld (",mFormat.u.raw_audio.byte_order);
	switch (mFormat.u.raw_audio.byte_order) {
		case B_MEDIA_BIG_ENDIAN:
			DPRINTF("B_MEDIA_BIG_ENDIAN)\n");
			break;
		case B_MEDIA_LITTLE_ENDIAN:
			DPRINTF("B_MEDIA_LITTLE_ENDIAN)\n");
			break;
		default:
			DPRINTF("unknown)\n");
			break;
	}
	DPRINTF("	buffer_size: %ld\n",mFormat.u.raw_audio.buffer_size);
	DPRINTF("	format: %ld (",mFormat.u.raw_audio.format);
	switch (mFormat.u.raw_audio.format) {
		case media_raw_audio_format::B_AUDIO_FLOAT:
			DPRINTF("B_AUDIO_FLOAT)\n");
			break;
		case media_raw_audio_format::B_AUDIO_SHORT:
			DPRINTF("B_AUDIO_SHORT)\n");
			break;
		case media_raw_audio_format::B_AUDIO_INT:
			DPRINTF("B_AUDIO_INT)\n");
			break;
		case media_raw_audio_format::B_AUDIO_CHAR:
			DPRINTF("B_AUDIO_CHAR)\n");
			break;
		case media_raw_audio_format::B_AUDIO_UCHAR:
			DPRINTF("B_AUDIO_UCHAR)\n");
			break;
		default:
			DPRINTF("unknown)\n");
			break;
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
_SoundPlayNode::~_SoundPlayNode()
{
	CALLED();
	// shut down the event looper thread before this object goes away
	Quit();
}
|
|
|
|
|
2003-06-04 01:02:38 +04:00
|
|
|
bool
|
|
|
|
_SoundPlayNode::IsPlaying()
|
|
|
|
{
|
|
|
|
return RunState() == B_STARTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expose the event latency maintained by the BMediaEventLooper.
bigtime_t
_SoundPlayNode::Latency()
{
	return EventLatency();
}
|
|
|
|
|
2002-07-09 16:24:59 +04:00
|
|
|
|
|
|
|
// Hand out the raw audio portion of the node's media_format.
media_multi_audio_format
_SoundPlayNode::Format() const
{
	return mFormat.u.raw_audio;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
// -------------------------------------------------------- //
|
|
|
|
// implementation of BMediaNode
|
|
|
|
// -------------------------------------------------------- //
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-06-02 02:09:26 +04:00
|
|
|
BMediaAddOn * _SoundPlayNode::AddOn(int32 * internal_id) const
{
	CALLED();
	// The BeBook says this only gets called for nodes that live in an
	// add-on; we don't, so there is nothing to report.
	return NULL;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
void _SoundPlayNode::Preroll(void)
|
|
|
|
{
|
|
|
|
CALLED();
|
|
|
|
// XXX:Performance opportunity
|
|
|
|
BMediaNode::Preroll();
|
|
|
|
}
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-06-02 02:09:26 +04:00
|
|
|
status_t _SoundPlayNode::HandleMessage(int32 message, const void * data, size_t size)
{
	CALLED();
	// no private messages are understood; let the caller fall through to
	// the base class handlers
	return B_ERROR;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
void _SoundPlayNode::NodeRegistered(void)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2003-05-11 21:41:38 +04:00
|
|
|
CALLED();
|
|
|
|
|
|
|
|
if (mInitCheckStatus != B_OK) {
|
|
|
|
ReportError(B_NODE_IN_DISTRESS);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
SetPriority(B_URGENT_PRIORITY);
|
|
|
|
|
2003-05-31 00:34:48 +04:00
|
|
|
mOutput.format = mFormat;
|
2003-05-11 21:41:38 +04:00
|
|
|
mOutput.destination = media_destination::null;
|
|
|
|
mOutput.source.port = ControlPort();
|
2003-10-01 21:00:38 +04:00
|
|
|
mOutput.source.id = 0;
|
2003-05-11 21:41:38 +04:00
|
|
|
mOutput.node = Node();
|
2003-09-09 13:07:06 +04:00
|
|
|
strcpy(mOutput.name, Name());
|
2003-05-11 21:41:38 +04:00
|
|
|
|
|
|
|
Run();
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t _SoundPlayNode::RequestCompleted(const media_request_info &info)
{
	CALLED();
	// nothing to track; acknowledge and move on
	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
void _SoundPlayNode::SetTimeSource(BTimeSource *timeSource)
{
	CALLED();
	// no local bookkeeping needed; defer entirely to the base class
	BMediaNode::SetTimeSource(timeSource);
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
void
|
|
|
|
_SoundPlayNode::SetRunMode(run_mode mode)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2003-06-19 18:53:41 +04:00
|
|
|
TRACE("_SoundPlayNode::SetRunMode mode:%i\n", mode);
|
2003-05-11 21:41:38 +04:00
|
|
|
BMediaNode::SetRunMode(mode);
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
// -------------------------------------------------------- //
|
|
|
|
// implementation for BBufferProducer
|
|
|
|
// -------------------------------------------------------- //
|
|
|
|
|
|
|
|
status_t
_SoundPlayNode::FormatSuggestionRequested(media_type type, int32 /*quality*/, media_format* format)
{
	// Not necessarily part of format negotiation: the caller merely asks
	// what our preferred data format is, given a media type suggestion.
	CALLED();

	// we only produce raw audio, though a wildcard type is acceptable too
	if (type != B_MEDIA_RAW_AUDIO && type != B_MEDIA_UNKNOWN_TYPE)
		return B_MEDIA_BAD_FORMAT;

	// hand back our preferred format
	*format = mFormat;

	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::FormatProposal(const media_source& output, media_format* format)
{
	// First stage of BMediaRoster::Connect(): hand out a suggested
	// format, with wildcards for any variation we support.
	CALLED();

	// only proposals for our single output are valid
	if (output != mOutput.source) {
		TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// We always reply with our own raw-audio format, but the return code
	// tells the caller whether their proposal was acceptable.
	media_type askedType = format->type;
	*format = mFormat;

	if (askedType != B_MEDIA_UNKNOWN_TYPE && askedType != B_MEDIA_RAW_AUDIO) {
		TRACE("_SoundPlayNode::FormatProposal returning B_MEDIA_BAD_FORMAT\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// raw audio or wildcard type, either is okay by us
	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::FormatChangeRequested(const media_source& source, const media_destination& destination, media_format* io_format, int32* _deprecated_)
{
	CALLED();
	// the format is fixed at construction time, so every change request
	// is rejected
	return B_ERROR;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::GetNextOutput(int32* cookie, media_output* out_output)
{
	// iterate our outputs — there is exactly one, selected by cookie 0
	CALLED();

	if (*cookie != 0)
		return B_BAD_INDEX;

	*out_output = mOutput;
	*cookie += 1;
	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::DisposeOutputCookie(int32 cookie)
{
	CALLED();
	// the cookie is a plain counter, nothing to free
	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::SetBufferGroup(const media_source& for_source, BBufferGroup* newGroup)
{
	CALLED();

	// verify the request concerns our single output
	if (for_source != mOutput.source) {
		TRACE("_SoundPlayNode::SetBufferGroup returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// handed the group we already use? then there is nothing to do
	if (newGroup == mBufferGroup)
		return B_OK;

	// Someone wants us to use a different buffer group. Delete the one
	// we have (this waits for all of its buffers to be recycled — so any
	// BBuffer we might be caching must be Recycle()d *before* this point
	// or we would deadlock) and switch to the given one. A NULL pointer
	// means we must build a replacement group ourselves.
	delete mBufferGroup;

	if (newGroup != NULL) {
		// adopt the caller-supplied group from now on
		mBufferGroup = newGroup;
	} else {
		// build our own group: enough buffers to span the downstream
		// latency plus one, but never fewer than three
		size_t bufferSize = mOutput.format.u.raw_audio.buffer_size;
		int32 bufferCount = int32(mLatency / BufferDuration() + 1 + 1);
		if (bufferCount < 3)
			bufferCount = 3;
		mBufferGroup = new BBufferGroup(bufferSize, bufferCount);
	}

	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::GetLatency(bigtime_t* out_latency)
{
	CALLED();
	// report our *total* latency: internal plus downstream plus scheduling
	*out_latency = EventLatency() + SchedulingLatency();
	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::PrepareToConnect(const media_source& what, const media_destination& where, media_format* format, media_source* out_source, char* out_name)
{
	// Second stage of format negotiation inside BMediaRoster::Connect().
	// The consumer's AcceptFormat() has run by now; it may have changed
	// the proposed format and may have left wildcards in it. We *must*
	// fully specialize the format before returning.
	CALLED();

	// reject requests that are not about our single output
	if (what != mOutput.source) {
		TRACE("_SoundPlayNode::PrepareToConnect returning B_MEDIA_BAD_SOURCE\n");
		return B_MEDIA_BAD_SOURCE;
	}

	// refuse if the output is already hooked up
	if (mOutput.destination != media_destination::null)
		return B_MEDIA_ALREADY_CONNECTED;

	// anything other than raw audio is out of the question
	if (format->type != B_MEDIA_RAW_AUDIO) {
		TRACE("\tnon-raw-audio format?!\n");
		return B_MEDIA_BAD_FORMAT;
	}

	// !!! validate all other fields except for buffer_size here, because
	// the consumer might have supplied different values from AcceptFormat()?

	// resolve the buffer size, which may still be wildcarded
	if (format->u.raw_audio.buffer_size == media_raw_audio_format::wildcard.buffer_size) {
		format->u.raw_audio.buffer_size = 2048;		// pick something comfortable to suggest
		TRACE("\tno buffer size provided, suggesting %lu\n", format->u.raw_audio.buffer_size);
	} else {
		TRACE("\tconsumer suggested buffer_size %lu\n", format->u.raw_audio.buffer_size);
	}

	// reserve the connection and report it back to the caller
	mOutput.destination = where;
	mOutput.format = *format;
	*out_source = mOutput.source;
	strcpy(out_name, Name());
	return B_OK;
}
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
void
|
|
|
|
_SoundPlayNode::Connect(status_t error, const media_source& source, const media_destination& destination, const media_format& format, char* io_name)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2003-05-11 21:41:38 +04:00
|
|
|
CALLED();
|
|
|
|
|
|
|
|
// is this our output?
|
|
|
|
if (source != mOutput.source)
|
|
|
|
{
|
2003-09-08 02:39:35 +04:00
|
|
|
TRACE("_SoundPlayNode::Connect returning\n");
|
2003-05-11 21:41:38 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If something earlier failed, Connect() might still be called, but with a non-zero
|
|
|
|
// error code. When that happens we simply unreserve the connection and do
|
|
|
|
// nothing else.
|
|
|
|
if (error)
|
|
|
|
{
|
|
|
|
mOutput.destination = media_destination::null;
|
2003-05-31 00:34:48 +04:00
|
|
|
mOutput.format = mFormat;
|
2003-05-11 21:41:38 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Okay, the connection has been confirmed. Record the destination and format
|
|
|
|
// that we agreed on, and report our connection name again.
|
|
|
|
mOutput.destination = destination;
|
|
|
|
mOutput.format = format;
|
2003-09-09 13:07:06 +04:00
|
|
|
strcpy(io_name, Name());
|
2003-05-11 21:41:38 +04:00
|
|
|
|
|
|
|
// Now that we're connected, we can determine our downstream latency.
|
|
|
|
// Do so, then make sure we get our events early enough.
|
|
|
|
media_node_id id;
|
|
|
|
FindLatencyFor(mOutput.destination, &mLatency, &id);
|
2003-09-08 02:39:35 +04:00
|
|
|
TRACE("_SoundPlayNode::Connect: downstream latency = %Ld\n", mLatency);
|
2003-05-11 21:41:38 +04:00
|
|
|
|
|
|
|
// reset our buffer duration, etc. to avoid later calculations
|
2003-12-14 22:23:16 +03:00
|
|
|
bigtime_t duration = ((mOutput.format.u.raw_audio.buffer_size * 1000000LL)
|
|
|
|
/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK) * mOutput.format.u.raw_audio.channel_count))
|
|
|
|
/ (int32)mOutput.format.u.raw_audio.frame_rate;
|
2003-05-11 21:41:38 +04:00
|
|
|
SetBufferDuration(duration);
|
2003-12-14 22:23:16 +03:00
|
|
|
TRACE("_SoundPlayNode::Connect: buffer duration is %Ld\n", duration);
|
2003-09-08 02:39:35 +04:00
|
|
|
|
|
|
|
mInternalLatency = (3 * BufferDuration()) / 4;
|
|
|
|
TRACE("_SoundPlayNode::Connect: using %Ld as internal latency\n", mInternalLatency);
|
|
|
|
SetEventLatency(mLatency + mInternalLatency);
|
2003-05-11 21:41:38 +04:00
|
|
|
|
|
|
|
// Set up the buffer group for our connection, as long as nobody handed us a
|
|
|
|
// buffer group (via SetBufferGroup()) prior to this. That can happen, for example,
|
|
|
|
// if the consumer calls SetOutputBuffersFor() on us from within its Connected()
|
|
|
|
// method.
|
|
|
|
if (!mBufferGroup)
|
|
|
|
AllocateBuffers();
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2003-05-11 21:41:38 +04:00
|
|
|
_SoundPlayNode::Disconnect(const media_source& what, const media_destination& where)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2003-05-11 21:41:38 +04:00
|
|
|
CALLED();
|
|
|
|
|
|
|
|
// is this our output?
|
|
|
|
if (what != mOutput.source)
|
|
|
|
{
|
2003-09-08 02:39:35 +04:00
|
|
|
TRACE("_SoundPlayNode::Disconnect returning\n");
|
2002-07-09 16:24:59 +04:00
|
|
|
return;
|
2003-05-11 21:41:38 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure that our connection is the one being disconnected
|
|
|
|
if ((where == mOutput.destination) && (what == mOutput.source))
|
|
|
|
{
|
|
|
|
mOutput.destination = media_destination::null;
|
2003-05-31 00:34:48 +04:00
|
|
|
mOutput.format = mFormat;
|
2003-05-11 21:41:38 +04:00
|
|
|
delete mBufferGroup;
|
|
|
|
mBufferGroup = NULL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
fprintf(stderr, "\tDisconnect() called with wrong source/destination (%ld/%ld), ours is (%ld/%ld)\n",
|
|
|
|
what.id, where.id, mOutput.source.id, mOutput.destination.id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
_SoundPlayNode::LateNoticeReceived(const media_source& what, bigtime_t how_much, bigtime_t performance_time)
|
|
|
|
{
|
|
|
|
CALLED();
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-05-29 21:28:11 +04:00
|
|
|
printf("_SoundPlayNode::LateNoticeReceived, %Ld too late at %Ld\n", how_much, performance_time);
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
// is this our output?
|
|
|
|
if (what != mOutput.source)
|
|
|
|
{
|
2003-09-08 02:39:35 +04:00
|
|
|
TRACE("_SoundPlayNode::LateNoticeReceived returning\n");
|
2003-05-11 21:41:38 +04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2003-05-29 21:28:11 +04:00
|
|
|
if (RunMode() != B_DROP_DATA)
|
2003-05-11 21:41:38 +04:00
|
|
|
{
|
|
|
|
// We're late, and our run mode dictates that we try to produce buffers
|
|
|
|
// earlier in order to catch up. This argues that the downstream nodes are
|
|
|
|
// not properly reporting their latency, but there's not much we can do about
|
|
|
|
// that at the moment, so we try to start producing buffers earlier to
|
|
|
|
// compensate.
|
2003-06-01 21:18:24 +04:00
|
|
|
|
2003-06-02 02:09:26 +04:00
|
|
|
mInternalLatency += how_much;
|
2003-05-31 00:34:48 +04:00
|
|
|
|
2003-06-02 02:09:26 +04:00
|
|
|
if (mInternalLatency > 30000) // avoid getting a too high latency
|
|
|
|
mInternalLatency = 30000;
|
2003-05-11 21:41:38 +04:00
|
|
|
|
2003-06-02 02:09:26 +04:00
|
|
|
SetEventLatency(mLatency + mInternalLatency);
|
2003-09-08 02:39:35 +04:00
|
|
|
TRACE("_SoundPlayNode::LateNoticeReceived: increasing latency to %Ld\n", mLatency + mInternalLatency);
|
2003-05-11 21:41:38 +04:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// The other run modes dictate various strategies for sacrificing data quality
|
|
|
|
// in the interests of timely data delivery. The way *we* do this is to skip
|
|
|
|
// a buffer, which catches us up in time by one buffer duration.
|
2003-06-02 02:09:26 +04:00
|
|
|
|
|
|
|
size_t nFrames = mOutput.format.u.raw_audio.buffer_size
|
|
|
|
/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
|
|
|
|
* mOutput.format.u.raw_audio.channel_count);
|
|
|
|
|
|
|
|
mFramesSent += nFrames;
|
2003-05-11 21:41:38 +04:00
|
|
|
|
2003-09-08 02:39:35 +04:00
|
|
|
TRACE("_SoundPlayNode::LateNoticeReceived: skipping a buffer to try to catch up\n");
|
2003-05-11 21:41:38 +04:00
|
|
|
}
|
2002-07-09 16:24:59 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2003-05-11 21:41:38 +04:00
|
|
|
_SoundPlayNode::EnableOutput(const media_source& what, bool enabled, int32* _deprecated_)
|
2002-07-09 16:24:59 +04:00
|
|
|
{
|
2003-05-11 21:41:38 +04:00
|
|
|
CALLED();
|
|
|
|
|
|
|
|
// If I had more than one output, I'd have to walk my list of output records to see
|
|
|
|
// which one matched the given source, and then enable/disable that one. But this
|
|
|
|
// node only has one output, so I just make sure the given source matches, then set
|
|
|
|
// the enable state accordingly.
|
|
|
|
// is this our output?
|
|
|
|
if (what != mOutput.source)
|
|
|
|
{
|
|
|
|
fprintf(stderr, "_SoundPlayNode::EnableOutput returning\n");
|
2002-07-09 16:24:59 +04:00
|
|
|
return;
|
2003-05-11 21:41:38 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
mOutputEnabled = enabled;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
_SoundPlayNode::AdditionalBufferRequested(const media_source& source, media_buffer_id prev_buffer, bigtime_t prev_time, const media_seek_tag* prev_tag)
|
|
|
|
{
|
|
|
|
CALLED();
|
|
|
|
// we don't support offline mode
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
_SoundPlayNode::LatencyChanged(const media_source& source, const media_destination& destination, bigtime_t new_latency, uint32 flags)
|
|
|
|
{
|
|
|
|
CALLED();
|
|
|
|
|
2003-05-31 00:34:48 +04:00
|
|
|
printf("_SoundPlayNode::LatencyChanged: new_latency %Ld\n", new_latency);
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
// something downstream changed latency, so we need to start producing
|
|
|
|
// buffers earlier (or later) than we were previously. Make sure that the
|
|
|
|
// connection that changed is ours, and adjust to the new downstream
|
|
|
|
// latency if so.
|
|
|
|
if ((source == mOutput.source) && (destination == mOutput.destination))
|
|
|
|
{
|
|
|
|
mLatency = new_latency;
|
|
|
|
SetEventLatency(mLatency + mInternalLatency);
|
2003-05-31 00:34:48 +04:00
|
|
|
} else {
|
|
|
|
printf("_SoundPlayNode::LatencyChanged: ignored\n");
|
2003-05-11 21:41:38 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// -------------------------------------------------------- //
|
|
|
|
// implementation for BMediaEventLooper
|
|
|
|
// -------------------------------------------------------- //
|
|
|
|
|
|
|
|
void _SoundPlayNode::HandleEvent(
|
|
|
|
const media_timed_event *event,
|
|
|
|
bigtime_t lateness,
|
|
|
|
bool realTimeEvent = false)
|
|
|
|
{
|
|
|
|
CALLED();
|
|
|
|
switch (event->type) {
|
|
|
|
case BTimedEventQueue::B_START:
|
|
|
|
HandleStart(event,lateness,realTimeEvent);
|
|
|
|
break;
|
|
|
|
case BTimedEventQueue::B_SEEK:
|
|
|
|
HandleSeek(event,lateness,realTimeEvent);
|
|
|
|
break;
|
|
|
|
case BTimedEventQueue::B_WARP:
|
|
|
|
HandleWarp(event,lateness,realTimeEvent);
|
|
|
|
break;
|
|
|
|
case BTimedEventQueue::B_STOP:
|
|
|
|
HandleStop(event,lateness,realTimeEvent);
|
|
|
|
break;
|
|
|
|
case BTimedEventQueue::B_HANDLE_BUFFER:
|
2003-05-31 00:34:48 +04:00
|
|
|
// we don't get any buffers
|
|
|
|
break;
|
|
|
|
case SEND_NEW_BUFFER_EVENT:
|
2003-05-11 21:41:38 +04:00
|
|
|
if (RunState() == BMediaEventLooper::B_STARTED) {
|
2003-05-31 00:34:48 +04:00
|
|
|
SendNewBuffer(event, lateness, realTimeEvent);
|
2003-05-11 21:41:38 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case BTimedEventQueue::B_DATA_STATUS:
|
|
|
|
HandleDataStatus(event,lateness,realTimeEvent);
|
|
|
|
break;
|
|
|
|
case BTimedEventQueue::B_PARAMETER:
|
|
|
|
HandleParameter(event,lateness,realTimeEvent);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
fprintf(stderr," unknown event type: %li\n",event->type);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// protected:
|
|
|
|
|
|
|
|
// how should we handle late buffers? drop them?
|
|
|
|
// notify the producer?
|
|
|
|
// Produce one audio buffer, send it downstream, and queue the event for
// the next one. Queued by HandleStart() and then self-perpetuating.
status_t
_SoundPlayNode::SendNewBuffer(const media_timed_event *event, bigtime_t lateness, bool realTimeEvent)
{
	CALLED();
	// printf("latency = %12Ld, event = %12Ld, sched = %5Ld, arrive at %12Ld, now %12Ld, current lateness %12Ld\n", EventLatency() + SchedulingLatency(), EventLatency(), SchedulingLatency(), event->event_time, TimeSource()->Now(), lateness);

	// deliver buffers only while we are both started *and* connected
	if (RunState() != BMediaEventLooper::B_STARTED || mOutput.destination == media_destination::null)
		return B_OK;

	// event->event_time is when the buffer prepared here should arrive at
	// its destination; the MediaEventLooper should have scheduled us
	// early enough (based on EventLatency() and SchedulingLatency()) to
	// make that possible. Note: lateness is independent of EventLatency()!
	if (lateness > BufferDuration() / 3) {
		printf("_SoundPlayNode::SendNewBuffer, event scheduled much too late, lateness is %Ld\n", lateness);
	}

	// skip buffer creation entirely while the output is disabled
	if (mOutputEnabled) {
		// fetch the next chunk of audio data
		BBuffer* buffer = FillNextBuffer(event->event_time);

		if (buffer) {
			// If we are ready way too early, decrease internal latency
/*
			bigtime_t how_early = event->event_time - TimeSource()->Now() - mLatency - mInternalLatency;
			if (how_early > 5000) {
				printf("_SoundPlayNode::SendNewBuffer, event scheduled too early, how_early is %Ld\n", how_early);
				if (mTooEarlyCount++ == 5) {
					mInternalLatency -= how_early;
					if (mInternalLatency < 500)
						mInternalLatency = 500;
					printf("_SoundPlayNode::SendNewBuffer setting internal latency to %Ld\n", mInternalLatency);
					SetEventLatency(mLatency + mInternalLatency);
					mTooEarlyCount = 0;
				}
			}
*/
			// ship the buffer downstream; if sending fails we must
			// recycle it ourselves or it would be lost
			if (SendBuffer(buffer, mOutput.destination) != B_OK) {
				printf("_SoundPlayNode::SendNewBuffer: Buffer sending failed\n");
				buffer->Recycle();
			}
		}
	}

	// track how much media we've delivered so far (counted even when the
	// output is disabled or the buffer was skipped, to keep the timeline
	// advancing)
	size_t framesPerBuffer = mOutput.format.u.raw_audio.buffer_size
		/ ((mOutput.format.u.raw_audio.format & media_raw_audio_format::B_AUDIO_SIZE_MASK)
		* mOutput.format.u.raw_audio.channel_count);
	mFramesSent += framesPerBuffer;

	// The buffer is on its way; schedule the next one. nextEvent is the
	// performance time at which that buffer should reach its destination.
	bigtime_t nextEvent = mStartTime + bigtime_t((1000000LL * mFramesSent) / (int32)mOutput.format.u.raw_audio.frame_rate);
	media_timed_event nextBufferEvent(nextEvent, SEND_NEW_BUFFER_EVENT);
	EventQueue()->AddEvent(nextBufferEvent);

	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::HandleDataStatus(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent = false)
{
	TRACE("_SoundPlayNode::HandleDataStatus status: %li, lateness: %Li\n", event->data, lateness);
	// no reaction to any data status change is required; they are only
	// logged above
	switch (event->data) {
		case B_DATA_NOT_AVAILABLE:
		case B_DATA_AVAILABLE:
		case B_PRODUCER_STOPPED:
		default:
			break;
	}
	return B_OK;
}
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::HandleStart(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent = false)
{
	CALLED();
	// ignore the start if we are already running
	if (RunState() != B_STARTED) {
		// Set up the buffer-sending bookkeeping and fire off the first
		// "produce a buffer" event.
		mFramesSent = 0;
		mStartTime = event->event_time;
		media_timed_event firstBufferEvent(event->event_time, SEND_NEW_BUFFER_EVENT);

		// Alternatively we could call HandleEvent() directly with this
		// event, to avoid a trip through the event queue:
		//
		//	this->HandleEvent(&firstBufferEvent, 0, false);
		//
		EventQueue()->AddEvent(firstBufferEvent);
	}
	return B_OK;
}
|
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::HandleSeek(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent = false)
{
	CALLED();
	// seeking has no effect on this live producer; just log the request
	DPRINTF("_SoundPlayNode::HandleSeek(t=%lld,d=%li,bd=%lld)\n",event->event_time,event->data,event->bigdata);
	return B_OK;
}
|
|
|
|
|
|
|
|
status_t
_SoundPlayNode::HandleWarp(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent = false)
{
	CALLED();
	// time warps require no action from us
	return B_OK;
}
|
2002-07-09 16:24:59 +04:00
|
|
|
|
2003-05-11 21:41:38 +04:00
|
|
|
status_t
_SoundPlayNode::HandleStop(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent = false)
{
	CALLED();
	// flush all pending buffer-production events from the queue so
	// downstream nodes don't receive any more data
	EventQueue()->FlushEvents(0, BTimedEventQueue::B_ALWAYS, true, SEND_NEW_BUFFER_EVENT);

	return B_OK;
}
|
|
|
|
|
|
|
|
status_t
_SoundPlayNode::HandleParameter(
	const media_timed_event *event,
	bigtime_t lateness,
	bool realTimeEvent = false)
{
	CALLED();
	// this node exposes no parameters; nothing to do
	return B_OK;
}
|
|
|
|
|
|
|
|
void
|
|
|
|
_SoundPlayNode::AllocateBuffers()
|
|
|
|
{
|
|
|
|
CALLED();
|
|
|
|
|
|
|
|
// allocate enough buffers to span our downstream latency, plus one
|
|
|
|
size_t size = mOutput.format.u.raw_audio.buffer_size;
|
|
|
|
int32 count = int32(mLatency / BufferDuration() + 1 + 1);
|
|
|
|
|
2003-10-01 21:00:38 +04:00
|
|
|
DPRINTF("\tlatency = %Ld, buffer duration = %Ld, count %ld\n", mLatency, BufferDuration(), count);
|
2003-05-29 21:28:11 +04:00
|
|
|
|
2003-05-31 00:34:48 +04:00
|
|
|
if (count < 3)
|
2003-06-01 21:18:24 +04:00
|
|
|
count = 3;
|
2003-05-29 21:28:11 +04:00
|
|
|
|
2003-10-01 21:00:38 +04:00
|
|
|
DPRINTF("\tcreating group of %ld buffers, size = %lu\n", count, size);
|
2003-05-11 21:41:38 +04:00
|
|
|
mBufferGroup = new BBufferGroup(size, count);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Obtain a buffer from our group, fill it (silence, or player data when
// available), stamp its header for the given performance time, and
// return it — or NULL if no buffer could be had in time.
BBuffer*
_SoundPlayNode::FillNextBuffer(bigtime_t event_time)
{
	CALLED();

	// don't wait longer than half a buffer duration for a free buffer
	BBuffer* buf = mBufferGroup->RequestBuffer(mOutput.format.u.raw_audio.buffer_size, BufferDuration() / 2);

	// if we fail to get one (e.g. the request timed out), skip this
	// buffer and go on to the next, to avoid locking up the control thread
	if (!buf) {
		ERROR("_SoundPlayNode::FillNextBuffer: RequestBuffer failed\n");
		return NULL;
	}

	// start from silence, then let the player fill in real data if it has any
	memset(buf->Data(), 0, mOutput.format.u.raw_audio.buffer_size);
	if (mPlayer->HasData()) {
		mPlayer->PlayBuffer(buf->Data(),
			mOutput.format.u.raw_audio.buffer_size, mOutput.format.u.raw_audio);
	}

	// fill in the buffer header
	media_header* hdr = buf->Header();
	hdr->type = B_MEDIA_RAW_AUDIO;
	hdr->size_used = mOutput.format.u.raw_audio.buffer_size;
	hdr->time_source = TimeSource()->ID();
	hdr->start_time = event_time;
	return buf;
}
|