tsmf gstreamer fixes

- Update patch based on feedback
- Fix gstreamer 1.0 compatibility/build issue from first patch
This commit is contained in:
bjcollins 2015-07-08 13:10:21 -05:00
parent e8704544f4
commit 8d692995d6
5 changed files with 159 additions and 92 deletions

View File

@ -62,7 +62,11 @@ struct X11Handle
Display *disp;
Window subwin;
BOOL subwinMapped;
#if GST_VERSION_MAJOR > 0
GstVideoOverlay *overlay;
#else
GstXOverlay *overlay;
#endif
int subwinWidth;
int subwinHeight;
int subwinX;
@ -85,31 +89,32 @@ static GstBusSyncReply tsmf_platform_bus_sync_handler(GstBus *bus, GstMessage *m
if (GST_MESSAGE_TYPE (message) != GST_MESSAGE_ELEMENT)
return GST_BUS_PASS;
#if GST_VERSION_MAJOR > 0
if (!gst_is_video_overlay_prepare_window_handle_message (message))
return GST_BUS_PASS;
#else
if (!gst_structure_has_name (message->structure, "prepare-xwindow-id"))
return GST_BUS_PASS;
#endif
hdl = (struct X11Handle*) decoder->platform;
if (hdl->subwin)
{
hdl->overlay = GST_X_OVERLAY (GST_MESSAGE_SRC (message));
#if GST_VERSION_MAJOR > 0
hdl->overlay = GST_VIDEO_OVERLAY (GST_MESSAGE_SRC (message));
gst_video_overlay_set_window_handle(hdl->overlay, hdl->subwin);
#else
gst_x_overlay_set_window_handle(hdl->overlay, hdl->subwin);
#endif
#if GST_VERSION_MAJOR > 0
gst_video_overlay_handle_events(hdl->overlay, TRUE);
#else
hdl->overlay = GST_X_OVERLAY (GST_MESSAGE_SRC (message));
gst_x_overlay_set_window_handle(hdl->overlay, hdl->subwin);
gst_x_overlay_handle_events(hdl->overlay, TRUE);
#endif
if (hdl->subwinWidth != -1 && hdl->subwinHeight != -1 && hdl->subwinX != -1 && hdl->subwinY != -1)
{
#if GST_VERSION_MAJOR > 0
if (!gst_video_overlay_set_render_rectangle(hdl->overlay, 0, 0, hdl->swubwinWidth, hdl->subwinHeight))
if (!gst_video_overlay_set_render_rectangle(hdl->overlay, 0, 0, hdl->subwinWidth, hdl->subwinHeight))
{
WLog_ERR(TAG, "Could not resize overlay!");
}
@ -142,7 +147,6 @@ const char* tsmf_platform_get_video_sink(void)
const char* tsmf_platform_get_audio_sink(void)
{
//return "alsasink";
return "autoaudiosink";
}
@ -219,7 +223,11 @@ int tsmf_platform_register_handler(TSMFGstreamerDecoder* decoder)
bus = gst_pipeline_get_bus(GST_PIPELINE(decoder->pipe));
#if GST_VERSION_MAJOR > 0
gst_bus_set_sync_handler (bus, (GstBusSyncHandler) tsmf_platform_bus_sync_handler, decoder, NULL);
#else
gst_bus_set_sync_handler (bus, (GstBusSyncHandler) tsmf_platform_bus_sync_handler, decoder);
#endif
if (!bus)
{
@ -397,7 +405,7 @@ int tsmf_window_map(TSMFGstreamerDecoder* decoder)
hdl = (struct X11Handle*) decoder->platform;
// Only need to map the window if it is not currently mapped
/* Only need to map the window if it is not currently mapped */
if ((hdl->subwin) && (!hdl->subwinMapped))
{
XLockDisplay(hdl->disp);
@ -418,7 +426,7 @@ int tsmf_window_unmap(TSMFGstreamerDecoder* decoder)
hdl = (struct X11Handle*) decoder->platform;
// only need to unmap window if it is currently mapped
/* only need to unmap window if it is currently mapped */
if ((hdl->subwin) && (hdl->subwinMapped))
{
XLockDisplay(hdl->disp);

View File

@ -45,8 +45,8 @@
#include <inttypes.h>
#endif
// 1 second
#define SEEK_TOLERANCE 10000000
/* 1 second = 10,000,000 100ns units */
#define SEEK_TOLERANCE 10*1000*1000
static BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder);
static void tsmf_gstreamer_clean_up(TSMFGstreamerDecoder* mdecoder);
@ -59,12 +59,15 @@ const char* get_type(TSMFGstreamerDecoder* mdecoder)
if (!mdecoder)
return NULL;
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
return "VIDEO";
else if (mdecoder->media_type == TSMF_MAJOR_TYPE_AUDIO)
return "AUDIO";
else
return "UNKNOWN";
switch (mdecoder->media_type)
{
case TSMF_MAJOR_TYPE_VIDEO:
return "VIDEO";
case TSMF_MAJOR_TYPE_AUDIO:
return "AUDIO";
default:
return "UNKNOWN";
}
}
static void cb_child_added(GstChildProxy *child_proxy, GObject *object, TSMFGstreamerDecoder* mdecoder)
@ -73,20 +76,20 @@ static void cb_child_added(GstChildProxy *child_proxy, GObject *object, TSMFGstr
if (!g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstXvImageSink") || !g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstXImageSink") || !g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstFluVAAutoSink"))
{
gst_base_sink_set_max_lateness((GstBaseSink *) object, 10000000); //nanoseconds
g_object_set(G_OBJECT(object), "sync", TRUE, NULL); //synchronize on the clock
g_object_set(G_OBJECT(object), "async", TRUE, NULL); //no async state changes - doc says not to do for streams synced to clock
gst_base_sink_set_max_lateness((GstBaseSink *) object, 10000000); /* nanoseconds */
g_object_set(G_OBJECT(object), "sync", TRUE, NULL); /* synchronize on the clock */
g_object_set(G_OBJECT(object), "async", TRUE, NULL); /* no async state changes */
}
else if (!g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstAlsaSink") || !g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstPulseSink"))
{
gst_base_sink_set_max_lateness((GstBaseSink *) object, 10000000); //nanoseconds
g_object_set(G_OBJECT(object), "slave-method", 1, NULL); //DEFAULT
g_object_set(G_OBJECT(object), "buffer-time", (gint64) 20000, NULL); //microseconds
g_object_set(G_OBJECT(object), "drift-tolerance", (gint64) 20000, NULL); //microseconds
g_object_set(G_OBJECT(object), "latency-time", (gint64) 10000, NULL); //microseconds
g_object_set(G_OBJECT(object), "sync", TRUE, NULL); //synchronize on the clock
g_object_set(G_OBJECT(object), "async", TRUE, NULL); //no async state changes - doc says not to do for streams synced to clock
gst_base_sink_set_max_lateness((GstBaseSink *) object, 10000000); /* nanoseconds */
g_object_set(G_OBJECT(object), "slave-method", 1, NULL);
g_object_set(G_OBJECT(object), "buffer-time", (gint64) 20000, NULL); /* microseconds */
g_object_set(G_OBJECT(object), "drift-tolerance", (gint64) 20000, NULL); /* microseconds */
g_object_set(G_OBJECT(object), "latency-time", (gint64) 10000, NULL); /* microseconds */
g_object_set(G_OBJECT(object), "sync", TRUE, NULL); /* synchronize on the clock */
g_object_set(G_OBJECT(object), "async", TRUE, NULL); /* no async state changes */
}
}
@ -256,7 +259,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
"wmvversion", G_TYPE_INT, 3,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "WVC1",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'V', 'C', '1'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
NULL);
@ -267,7 +274,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"bitrate", G_TYPE_UINT, media_type->BitRate,
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "MP42",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '2'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
NULL);
break;
@ -277,7 +288,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"bitrate", G_TYPE_UINT, media_type->BitRate,
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '2'),
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "MP42",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '2'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
NULL);
break;
@ -287,7 +302,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"bitrate", G_TYPE_UINT, media_type->BitRate,
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "MP43",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '3'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
NULL);
break;
@ -296,7 +315,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"mpegversion", G_TYPE_INT, 4,
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "M4S2",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', '4', 'S', '2'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
NULL);
break;
@ -347,7 +370,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
"wmvversion", G_TYPE_INT, 1,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "WMV1",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'M', 'V', '1'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
NULL);
break;
@ -356,7 +383,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
"wmvversion", G_TYPE_INT, 2,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "WMV2",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'M', 'V', '2'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
NULL);
@ -367,7 +398,11 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
"width", G_TYPE_INT, media_type->Width,
"height", G_TYPE_INT, media_type->Height,
"wmvversion", G_TYPE_INT, 3,
#if GST_VERSION_MAJOR > 0
"format", G_TYPE_STRING, "WMV3",
#else
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'M', 'V', '3'),
#endif
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
NULL);
@ -506,8 +541,13 @@ void tsmf_gstreamer_clean_up(TSMFGstreamerDecoder* mdecoder)
BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
{
#if GST_VERSION_MAJOR > 0
const char* video = "appsrc name=videosource ! queue2 name=videoqueue ! decodebin name=videodecoder !";
const char* audio = "appsrc name=audiosource ! queue2 name=audioqueue ! decodebin name=audiodecoder ! audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
#else
const char* video = "appsrc name=videosource ! queue2 name=videoqueue ! decodebin2 name=videodecoder !";
const char* audio = "appsrc name=audiosource ! queue2 name=audioqueue ! decodebin2 name=audiodecoder ! audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
#endif
char pipeline[1024];
if (!mdecoder)
@ -601,23 +641,23 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
g_object_set(G_OBJECT(mdecoder->queue), "max-size-bytes", 0, NULL);
g_object_set(G_OBJECT(mdecoder->queue), "max-size-time", (guint64) 0, NULL);
// Only set these properties if not an autosink, otherwise we will set properties when real sinks are added
/* Only set these properties if not an autosink, otherwise we will set properties when real sinks are added */
if (!g_strcmp0(G_OBJECT_TYPE_NAME(mdecoder->outsink), "GstAutoVideoSink") && !g_strcmp0(G_OBJECT_TYPE_NAME(mdecoder->outsink), "GstAutoAudioSink"))
{
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
{
gst_base_sink_set_max_lateness((GstBaseSink *) mdecoder->outsink, 10000000); //nanoseconds
gst_base_sink_set_max_lateness((GstBaseSink *) mdecoder->outsink, 10000000); /* nanoseconds */
}
else
{
gst_base_sink_set_max_lateness((GstBaseSink *) mdecoder->outsink, 10000000); //nanoseconds
g_object_set(G_OBJECT(mdecoder->outsink), "buffer-time", (gint64) 20000, NULL); //microseconds
g_object_set(G_OBJECT(mdecoder->outsink), "drift-tolerance", (gint64) 20000, NULL); //microseconds
g_object_set(G_OBJECT(mdecoder->outsink), "latency-time", (gint64) 10000, NULL); //microseconds
gst_base_sink_set_max_lateness((GstBaseSink *) mdecoder->outsink, 10000000); /* nanoseconds */
g_object_set(G_OBJECT(mdecoder->outsink), "buffer-time", (gint64) 20000, NULL); /* microseconds */
g_object_set(G_OBJECT(mdecoder->outsink), "drift-tolerance", (gint64) 20000, NULL); /* microseconds */
g_object_set(G_OBJECT(mdecoder->outsink), "latency-time", (gint64) 10000, NULL); /* microseconds */
g_object_set(G_OBJECT(mdecoder->outsink), "slave-method", 1, NULL);
}
g_object_set(G_OBJECT(mdecoder->outsink), "sync", TRUE, NULL); //synchronize on the clock
g_object_set(G_OBJECT(mdecoder->outsink), "async", TRUE, NULL); //no async state changes
g_object_set(G_OBJECT(mdecoder->outsink), "sync", TRUE, NULL); /* synchronize on the clock */
g_object_set(G_OBJECT(mdecoder->outsink), "async", TRUE, NULL); /* no async state changes */
}
tsmf_window_create(mdecoder);
@ -685,22 +725,23 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
return FALSE;
}
// Relative timestamping will sometimes be set to 0
// so we ignore these timestamps just to be safe(bit 8)
/* Relative timestamping will sometimes be set to 0,
 * so we ignore these timestamps just to be safe (bit 8)
 */
if (extensions & 0x00000080)
{
DEBUG_TSMF("Ignoring the timestamps - relative - bit 8");
useTimestamps = FALSE;
}
//If no timestamps exist then we dont want to look at the timestamp values (bit 7)
/* If no timestamps exist then we don't want to look at the timestamp values (bit 7) */
if (extensions & 0x00000040)
{
DEBUG_TSMF("Ignoring the timestamps - none - bit 7");
useTimestamps = FALSE;
}
// If performing a seek
/* If performing a seek */
if (mdecoder->seeking)
{
mdecoder->seeking = FALSE;
@ -710,17 +751,18 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
if (mdecoder->pipeline_start_time_valid)
{
// Adjusted the condition for a seek to be based on start time only
// WMV1 and WMV2 files in particular have bad end time and duration values
// there seems to be no real side effects of just using the start time instead
/* Adjusted the condition for a seek to be based on start time only
* WMV1 and WMV2 files in particular have bad end time and duration values
* there seems to be no real side effects of just using the start time instead
*/
UINT64 minTime = mdecoder->last_sample_start_time - (UINT64) SEEK_TOLERANCE;
UINT64 maxTime = mdecoder->last_sample_start_time + (UINT64) SEEK_TOLERANCE;
// Make sure the minTime stops at 0 , should we be at the beginning of the stream
/* Make sure the minTime stops at 0, should we be at the beginning of the stream */
if (mdecoder->last_sample_start_time < (UINT64) SEEK_TOLERANCE)
minTime = 0;
// If the start_time is valid and different from the previous start time by more than the seek tolerance, then we have a seek condition
/* If the start_time is valid and different from the previous start time by more than the seek tolerance, then we have a seek condition */
if (((start_time > maxTime) || (start_time < minTime)) && useTimestamps)
{
DEBUG_TSMF("tsmf_gstreamer_decodeEx: start_time=[%d] > last_sample_start_time=[%d] OR ", (int)start_time, (int)mdecoder->last_sample_start_time);
@ -729,22 +771,24 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
mdecoder->seeking = TRUE;
// since we cant make the gstreamer pipeline jump to the new start time after a seek - we just maintain
// a offset between realtime and gstreamer time
/* since we can't make the gstreamer pipeline jump to the new start time after a seek - we just maintain
 * an offset between realtime and gstreamer time
 */
mdecoder->seek_offset = start_time;
}
}
else
{
DEBUG_TSMF("%s start time %d", get_type(mdecoder), start_time);
// Always set base/start time to 0. Will use seek offset to translate real buffer times
// back to 0. This allows the video to be started from anywhere and the ability to handle seeks
// without rebuilding the pipeline, etc. since that is costly
/* Always set base/start time to 0. Will use seek offset to translate real buffer times
* back to 0. This allows the video to be started from anywhere and the ability to handle seeks
* without rebuilding the pipeline, etc. since that is costly
*/
gst_element_set_base_time(mdecoder->pipe, tsmf_gstreamer_timestamp_ms_to_gst(0));
gst_element_set_start_time(mdecoder->pipe, tsmf_gstreamer_timestamp_ms_to_gst(0));
mdecoder->pipeline_start_time_valid = 1;
// Set the seek offset if buffer has valid timestamps.
/* Set the seek offset if buffer has valid timestamps. */
if (useTimestamps)
mdecoder->seek_offset = start_time;
@ -769,13 +813,13 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
#endif
GST_BUFFER_DURATION(gst_buf) = GST_CLOCK_TIME_NONE;
GST_BUFFER_OFFSET(gst_buf) = GST_BUFFER_OFFSET_NONE;
#if GST_VERSION_MAJOR > 0
#else
gst_buffer_set_caps(gst_buf, mdecoder->gst_caps);
#endif
gst_app_src_push_buffer(GST_APP_SRC(mdecoder->src), gst_buf);
if (mdecoder->ack_cb)
mdecoder->ack_cb(mdecoder->stream, FALSE);
// Should only update the last timestamps if the current ones are valid
/* Should only update the last timestamps if the current ones are valid */
if (useTimestamps)
{
mdecoder->last_sample_start_time = start_time;
@ -948,7 +992,7 @@ BOOL tsmf_gstreamer_ack(ITSMFDecoder* decoder, BOOL (*cb)(void *, BOOL), void *s
{
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
DEBUG_TSMF("");
mdecoder->ack_cb = NULL;//cb;
mdecoder->ack_cb = NULL;
mdecoder->stream = stream;
return TRUE;
}

View File

@ -655,8 +655,9 @@ UINT tsmf_ifman_on_flush(TSMF_IFMAN* ifman)
return ERROR_NOT_FOUND;
}
// Flush message is for a stream, not the entire presentation
// therefore we only flush the stream as intended per the MS-RDPEV spec
/* Flush message is for a stream, not the entire presentation
* therefore we only flush the stream as intended per the MS-RDPEV spec
*/
stream = tsmf_stream_find_by_id(presentation, StreamId);
if (stream)
if (!tsmf_stream_flush(stream))

View File

@ -38,7 +38,7 @@
void tsmf_send_eos_response(IWTSVirtualChannelCallback* pChannelCallback, UINT32 message_id)
{
wStream* s;
wStream* s = NULL;
int status;
TSMF_CHANNEL_CALLBACK* callback = (TSMF_CHANNEL_CALLBACK*) pChannelCallback;
@ -79,7 +79,7 @@ void tsmf_send_eos_response(IWTSVirtualChannelCallback* pChannelCallback, UINT32
void tsmf_playback_ack(IWTSVirtualChannelCallback *pChannelCallback,
UINT32 message_id, UINT64 duration, UINT32 data_size)
{
wStream *s;
wStream *s = NULL;
int status = -1;
TSMF_CHANNEL_CALLBACK *callback = (TSMF_CHANNEL_CALLBACK *) pChannelCallback;
@ -87,6 +87,12 @@ void tsmf_playback_ack(IWTSVirtualChannelCallback *pChannelCallback,
if (!s)
return FALSE;
if (s == NULL)
{
WLog_ERR(TAG, "Stream creation error!");
return;
}
Stream_Write_UINT32(s, TSMF_INTERFACE_CLIENT_NOTIFICATIONS | STREAM_ID_PROXY);
Stream_Write_UINT32(s, message_id);
Stream_Write_UINT32(s, PLAYBACK_ACK); /* FunctionId */

View File

@ -56,8 +56,8 @@
#define AUDIO_TOLERANCE 10000000LL
// 1 second
#define VIDEO_ADJUST_MAX 10000000
/* 1 second = 10,000,000 100ns units */
#define VIDEO_ADJUST_MAX 10*1000*1000
#define MAX_ACK_TIME 666667
@ -193,8 +193,9 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
if (stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
{
/* Check if some other stream has earlier sample that needs to be played first */
// Start time is more reliable than end time as some stream types seem to have incorrect
// end times from the server
/* Start time is more reliable than end time as some stream types seem to have incorrect
* end times from the server
*/
if (stream->last_start_time > AUDIO_TOLERANCE)
{
ArrayList_Lock(presentation->stream_list);
@ -204,8 +205,9 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
{
s = (TSMF_STREAM *) ArrayList_GetItem(presentation->stream_list, index);
// Start time is more reliable than end time as some stream types seem to have incorrect
// end times from the server
/* Start time is more reliable than end time as some stream types seem to have incorrect
* end times from the server
*/
if (s != stream && !s->eos && s->last_start_time &&
s->last_start_time < stream->last_start_time - AUDIO_TOLERANCE)
{
@ -220,8 +222,9 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
}
else
{
// Start time is more reliable than end time as some stream types seem to have incorrect
// end times from the server
/* Start time is more reliable than end time as some stream types seem to have incorrect
* end times from the server
*/
if (stream->last_start_time > presentation->audio_start_time)
{
DEBUG_TSMF("Pending due to stream start time > audio start time");
@ -237,11 +240,11 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
sample = (TSMF_SAMPLE *) Queue_Dequeue(stream->sample_list);
// Only update stream last end time if the sample end time is valid and greater than the current stream end time
/* Only update stream last end time if the sample end time is valid and greater than the current stream end time */
if (sample && (sample->end_time > stream->last_end_time) && (!sample->invalidTimestamps))
stream->last_end_time = sample->end_time;
// Only update stream last start time if the sample start time is valid and greater than the current stream start time
/* Only update stream last start time if the sample start time is valid and greater than the current stream start time */
if (sample && (sample->start_time > stream->last_start_time) && (!sample->invalidTimestamps))
stream->last_start_time = sample->start_time;
@ -278,8 +281,9 @@ static BOOL tsmf_sample_queue_ack(TSMF_SAMPLE* sample)
return Queue_Enqueue(sample->stream->sample_ack_list, sample);
}
// Returns TRUE if no more samples are currently available
// Returns FALSE otherwise
/* Returns TRUE if no more samples are currently available
* Returns FALSE otherwise
*/
static BOOL tsmf_stream_process_ack(void* arg, BOOL force)
{
TSMF_STREAM* stream = arg;
@ -301,18 +305,18 @@ static BOOL tsmf_stream_process_ack(void* arg, BOOL force)
if (!force)
{
// Do some min/max ack limiting if we have access to Buffer level information
/* Do some min/max ack limiting if we have access to Buffer level information */
if (stream->decoder->BufferLevel)
{
// Try to keep buffer level below max by withholding acks
/* Try to keep buffer level below max by withholding acks */
if (stream->currentBufferLevel > stream->maxBufferLevel)
goto finally;
// Try to keep buffer level above min by pushing acks through quickly
/* Try to keep buffer level above min by pushing acks through quickly */
else if (stream->currentBufferLevel < stream->minBufferLevel)
goto dequeue;
}
// Time based acks only
/* Time based acks only */
ack_time = get_current_time();
if (sample->ack_time > ack_time)
@ -427,8 +431,9 @@ static BOOL tsmf_sample_playback_video(TSMF_SAMPLE* sample)
{
t = get_current_time();
// Start time is more reliable than end time as some stream types seem to have incorrect
// end times from the server
/* Start time is more reliable than end time as some stream types seem to have incorrect
* end times from the server
*/
if (stream->next_start_time > t &&
((sample->start_time >= presentation->audio_start_time) ||
((sample->start_time < stream->last_start_time) && (!sample->invalidTimestamps))))
@ -507,7 +512,7 @@ static BOOL tsmf_sample_playback_audio(TSMF_SAMPLE* sample)
sample->ack_time = latency + get_current_time();
//Only update stream times if the sample timestamps are valid
/* Only update stream times if the sample timestamps are valid */
if (!sample->invalidTimestamps)
{
stream->last_start_time = sample->start_time + latency;
@ -530,9 +535,10 @@ static BOOL tsmf_sample_playback(TSMF_SAMPLE* sample)
{
if (stream->decoder->DecodeEx)
{
// Try to "sync" video buffers to audio buffers by looking at the running time for each stream
// The difference between the two running times causes an offset between audio and video actual
// render times. So, we try to adjust timestamps on the video buffer to match those on the audio buffer.
/* Try to "sync" video buffers to audio buffers by looking at the running time for each stream
* The difference between the two running times causes an offset between audio and video actual
* render times. So, we try to adjust timestamps on the video buffer to match those on the audio buffer.
*/
if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
{
TSMF_STREAM* temp_stream = NULL;
@ -633,7 +639,7 @@ static BOOL tsmf_sample_playback(TSMF_SAMPLE* sample)
UINT64 ack_anticipation_time = get_current_time();
BOOL buffer_filled = TRUE;
// Classify the buffer as filled once it reaches minimum level
/* Classify the buffer as filled once it reaches minimum level */
if (stream->decoder->BufferLevel)
{
if (stream->currentBufferLevel < stream->minBufferLevel)
@ -720,7 +726,7 @@ static void* tsmf_stream_ack_func(void *arg)
}
}
// Stream stopped force all of the acks to happen
/* Stream stopped force all of the acks to happen */
if (ev == WAIT_OBJECT_0)
{
DEBUG_TSMF("ack: Stream stopped!");
@ -852,16 +858,17 @@ static BOOL tsmf_stream_stop(TSMF_STREAM* stream)
if (!stream || !stream->decoder || !stream->decoder->Control)
return TRUE;
// If stopping after eos - we delay until the eos has been processed
// this allows us to process any buffers that have been acked even though
// they have not actually been completely processes by the decoder
/* If stopping after eos - we delay until the eos has been processed;
 * this allows us to process any buffers that have been acked even though
 * they have not actually been completely processed by the decoder
 */
if (stream->eos)
{
DEBUG_TSMF("Setting up a delayed stop for once the eos has been processed.");
stream->delayed_stop = 1;
return TRUE;
}
// Otherwise force stop immediately
/* Otherwise force stop immediately */
else
{
DEBUG_TSMF("Stop with no pending eos response, so do it immediately.");
@ -1059,8 +1066,9 @@ BOOL tsmf_presentation_set_geometry_info(TSMF_PRESENTATION* presentation,
if (!width || !height)
return TRUE;
// Streams can be added/removed from the presentation and the server will resend geometry info when a new stream is
// added to the presentation.
/* Streams can be added/removed from the presentation and the server will resend geometry info when a new stream is
* added to the presentation.
*/
/*
if ((width == presentation->width) && (height == presentation->height) &&
(x == presentation->x) && (y == presentation->y) &&