- Use decodebin2 instead of old decodebin - decodebin has issues
- Use autovideosink - xvimagesink does not work with cards that have no XV ports available and cannot be used with the Fluendo hardware-accelerated playback codec - Use autoaudiosink - let GStreamer choose the best audio playback plugin - Catch when autosinks add known elements so that we can manipulate properties on them - Adjust caps of various media types to work better with GStreamer; some codecs are picky about having certain fields available - Remove unneeded plugins such as "ffmpegcolorspace" and "videoscale" - these do not work correctly with the Fluendo hardware-accelerated playback codec - Name audio/video GStreamer elements better for easier debugging - Update GStreamer pipeline and element properties to handle playback better - Detect when valid timestamps are available for a buffer from the server and try to account for when they are not valid - Start time is much more reliable than end time from the server for various media formats, so use it when possible to make decisions instead of end time - Do not rebuild the GStreamer pipeline for a seek (very expensive); instead reset GStreamer time to 0 and maintain an offset between real time and GStreamer time - Change the buffer-filled function back to a buffer-level function, so that we can use the buffer level to make better choices above the GStreamer decoder in tsmf - Remove the ack function from GStreamer; instead rely on the ack thread to handle acks - Rework the X11 GStreamer code to handle the various video sinks that implement the XOverlay interface and to keep more detailed information on the sub-window used for display - Add a check for whether a decoder is available when telling the server which media types the client supports - Add support for the M4S2 and WMA1 media types - Fix flush message handling; flushes apply to individual streams, not the entire presentation - Delay the EOS response to allow more time for buffers to be loaded into the decoder, since we anticipate acks to the server and the server will issue a stop as soon as we ack EOS.
- Fix an issue with geometry info being ignored when resent for new streams within an existing presentation - Fix a volume-level initialization issue when a stream is stopped and restarted - Attempt to sync the video/audio streams: because we run two different GStreamer pipelines, they can enter the paused/playing states at different times and are thus not synchronized. Adjust video buffer timestamps based on the difference between the audio and video running times to compensate. This logic accounts for a huge improvement in audio/video sync (i.e. lip sync to words).
This commit is contained in:
parent
f8ceb3f606
commit
e8704544f4
@ -61,6 +61,12 @@ struct X11Handle
|
||||
#endif
|
||||
Display *disp;
|
||||
Window subwin;
|
||||
BOOL subwinMapped;
|
||||
GstXOverlay *overlay;
|
||||
int subwinWidth;
|
||||
int subwinHeight;
|
||||
int subwinX;
|
||||
int subwinY;
|
||||
};
|
||||
|
||||
static const char* get_shm_id()
|
||||
@ -70,13 +76,73 @@ static const char* get_shm_id()
|
||||
return shm_id;
|
||||
}
|
||||
|
||||
static GstBusSyncReply tsmf_platform_bus_sync_handler(GstBus *bus, GstMessage *message, gpointer user_data)
|
||||
{
|
||||
struct X11Handle* hdl;
|
||||
|
||||
TSMFGstreamerDecoder* decoder = user_data;
|
||||
|
||||
if (GST_MESSAGE_TYPE (message) != GST_MESSAGE_ELEMENT)
|
||||
return GST_BUS_PASS;
|
||||
|
||||
if (!gst_structure_has_name (message->structure, "prepare-xwindow-id"))
|
||||
return GST_BUS_PASS;
|
||||
|
||||
hdl = (struct X11Handle*) decoder->platform;
|
||||
|
||||
if (hdl->subwin)
|
||||
{
|
||||
hdl->overlay = GST_X_OVERLAY (GST_MESSAGE_SRC (message));
|
||||
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
gst_video_overlay_set_window_handle(hdl->overlay, hdl->subwin);
|
||||
#else
|
||||
gst_x_overlay_set_window_handle(hdl->overlay, hdl->subwin);
|
||||
#endif
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
gst_video_overlay_handle_events(hdl->overlay, TRUE);
|
||||
#else
|
||||
gst_x_overlay_handle_events(hdl->overlay, TRUE);
|
||||
#endif
|
||||
|
||||
if (hdl->subwinWidth != -1 && hdl->subwinHeight != -1 && hdl->subwinX != -1 && hdl->subwinY != -1)
|
||||
{
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
|
||||
if (!gst_video_overlay_set_render_rectangle(hdl->overlay, 0, 0, hdl->swubwinWidth, hdl->subwinHeight))
|
||||
{
|
||||
WLog_ERR(TAG, "Could not resize overlay!");
|
||||
}
|
||||
|
||||
gst_video_overlay_expose(hdl->overlay);
|
||||
#else
|
||||
if (!gst_x_overlay_set_render_rectangle(hdl->overlay, 0, 0, hdl->subwinWidth, hdl->subwinHeight))
|
||||
{
|
||||
WLog_ERR(TAG, "Could not resize overlay!");
|
||||
}
|
||||
|
||||
gst_x_overlay_expose(hdl->overlay);
|
||||
#endif
|
||||
XMoveResizeWindow(hdl->disp, hdl->subwin, hdl->subwinX, hdl->subwinY, hdl->subwinWidth, hdl->subwinHeight);
|
||||
XSync(hdl->disp, FALSE);
|
||||
}
|
||||
} else {
|
||||
g_warning ("Window was not available before retrieving the overlay!");
|
||||
}
|
||||
|
||||
gst_message_unref (message);
|
||||
|
||||
return GST_BUS_DROP;
|
||||
}
|
||||
|
||||
const char* tsmf_platform_get_video_sink(void)
|
||||
{
|
||||
return "xvimagesink";
|
||||
return "autovideosink";
|
||||
}
|
||||
|
||||
const char* tsmf_platform_get_audio_sink(void)
|
||||
{
|
||||
//return "alsasink";
|
||||
return "autoaudiosink";
|
||||
}
|
||||
|
||||
@ -119,6 +185,12 @@ int tsmf_platform_create(TSMFGstreamerDecoder* decoder)
|
||||
return -4;
|
||||
}
|
||||
|
||||
hdl->subwinMapped = FALSE;
|
||||
hdl->subwinX = -1;
|
||||
hdl->subwinY = -1;
|
||||
hdl->subwinWidth = -1;
|
||||
hdl->subwinHeight = -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -147,12 +219,16 @@ int tsmf_platform_register_handler(TSMFGstreamerDecoder* decoder)
|
||||
|
||||
bus = gst_pipeline_get_bus(GST_PIPELINE(decoder->pipe));
|
||||
|
||||
gst_bus_set_sync_handler (bus, (GstBusSyncHandler) tsmf_platform_bus_sync_handler, decoder);
|
||||
|
||||
if (!bus)
|
||||
{
|
||||
WLog_ERR(TAG, "gst_pipeline_get_bus failed!");
|
||||
return 1;
|
||||
}
|
||||
|
||||
gst_object_unref (bus);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -189,12 +265,6 @@ int tsmf_window_create(TSMFGstreamerDecoder* decoder)
|
||||
}
|
||||
else
|
||||
{
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
GstVideoOverlay *overlay = GST_VIDEO_OVERLAY(decoder->outsink);
|
||||
#else
|
||||
GstXOverlay *overlay = GST_X_OVERLAY(decoder->outsink);
|
||||
#endif
|
||||
|
||||
if (!decoder)
|
||||
return -1;
|
||||
|
||||
@ -205,38 +275,28 @@ int tsmf_window_create(TSMFGstreamerDecoder* decoder)
|
||||
|
||||
if (!hdl->subwin)
|
||||
{
|
||||
int event, error;
|
||||
hdl->subwin = XCreateSimpleWindow(hdl->disp, *(int *)hdl->xfwin, 0, 0, 1, 1, 0, 0, 0);
|
||||
|
||||
if (!hdl->subwin)
|
||||
{
|
||||
WLog_ERR(TAG, "Could not create subwindow!");
|
||||
}
|
||||
|
||||
XMapWindow(hdl->disp, hdl->subwin);
|
||||
XSync(hdl->disp, FALSE);
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
gst_video_overlay_set_window_handle(overlay, hdl->subwin);
|
||||
#else
|
||||
gst_x_overlay_set_window_handle(overlay, hdl->subwin);
|
||||
#endif
|
||||
decoder->ready = TRUE;
|
||||
#if defined(WITH_XEXT)
|
||||
hdl->has_shape = XShapeQueryExtension(hdl->disp, &event, &error);
|
||||
#endif
|
||||
}
|
||||
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
gst_video_overlay_handle_events(overlay, TRUE);
|
||||
#else
|
||||
gst_x_overlay_handle_events(overlay, TRUE);
|
||||
tsmf_window_map(decoder);
|
||||
|
||||
decoder->ready = TRUE;
|
||||
#if defined(WITH_XEXT)
|
||||
int event, error;
|
||||
hdl->has_shape = XShapeQueryExtension(hdl->disp, &event, &error);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tsmf_window_resize(TSMFGstreamerDecoder* decoder, int x, int y, int width,
|
||||
int height, int nr_rects, RDP_RECT *rects)
|
||||
int height, int nr_rects, RDP_RECT *rects)
|
||||
{
|
||||
struct X11Handle* hdl;
|
||||
|
||||
@ -259,26 +319,33 @@ int tsmf_window_resize(TSMFGstreamerDecoder* decoder, int x, int y, int width,
|
||||
hdl = (struct X11Handle*) decoder->platform;
|
||||
DEBUG_TSMF("resize: x=%d, y=%d, w=%d, h=%d", x, y, width, height);
|
||||
|
||||
if (hdl->overlay)
|
||||
{
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
|
||||
if (!gst_video_overlay_set_render_rectangle(overlay, 0, 0, width, height))
|
||||
{
|
||||
WLog_ERR(TAG, "Could not resize overlay!");
|
||||
}
|
||||
if (!gst_video_overlay_set_render_rectangle(overlay, 0, 0, width, height))
|
||||
{
|
||||
WLog_ERR(TAG, "Could not resize overlay!");
|
||||
}
|
||||
|
||||
gst_video_overlay_expose(overlay);
|
||||
gst_video_overlay_expose(overlay);
|
||||
#else
|
||||
if (!gst_x_overlay_set_render_rectangle(overlay, 0, 0, width, height))
|
||||
{
|
||||
WLog_ERR(TAG, "Could not resize overlay!");
|
||||
}
|
||||
if (!gst_x_overlay_set_render_rectangle(overlay, 0, 0, width, height))
|
||||
{
|
||||
WLog_ERR(TAG, "Could not resize overlay!");
|
||||
}
|
||||
|
||||
gst_x_overlay_expose(overlay);
|
||||
gst_x_overlay_expose(overlay);
|
||||
#endif
|
||||
}
|
||||
|
||||
if (hdl->subwin)
|
||||
{
|
||||
XMoveResizeWindow(hdl->disp, hdl->subwin, x, y, width, height);
|
||||
hdl->subwinX = x;
|
||||
hdl->subwinY = y;
|
||||
hdl->subwinWidth = width;
|
||||
hdl->subwinHeight = height;
|
||||
XMoveResizeWindow(hdl->disp, hdl->subwin, hdl->subwinX, hdl->subwinY, hdl->subwinWidth, hdl->subwinHeight);
|
||||
#if defined(WITH_XEXT)
|
||||
|
||||
if (hdl->has_shape)
|
||||
@ -299,7 +366,6 @@ int tsmf_window_resize(TSMFGstreamerDecoder* decoder, int x, int y, int width,
|
||||
XShapeCombineRectangles(hdl->disp, hdl->subwin, ShapeBounding, x, y, xrects, nr_rects, ShapeSet, 0);
|
||||
free(xrects);
|
||||
}
|
||||
|
||||
#endif
|
||||
XSync(hdl->disp, FALSE);
|
||||
}
|
||||
@ -323,6 +389,49 @@ int tsmf_window_resume(TSMFGstreamerDecoder* decoder)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tsmf_window_map(TSMFGstreamerDecoder* decoder)
|
||||
{
|
||||
struct X11Handle* hdl;
|
||||
if (!decoder)
|
||||
return -1;
|
||||
|
||||
hdl = (struct X11Handle*) decoder->platform;
|
||||
|
||||
// Only need to map the window if it is not currently mapped
|
||||
if ((hdl->subwin) && (!hdl->subwinMapped))
|
||||
{
|
||||
XLockDisplay(hdl->disp);
|
||||
XMapWindow(hdl->disp, hdl->subwin);
|
||||
hdl->subwinMapped = TRUE;
|
||||
XSync(hdl->disp, FALSE);
|
||||
XUnlockDisplay(hdl->disp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tsmf_window_unmap(TSMFGstreamerDecoder* decoder)
|
||||
{
|
||||
struct X11Handle* hdl;
|
||||
if (!decoder)
|
||||
return -1;
|
||||
|
||||
hdl = (struct X11Handle*) decoder->platform;
|
||||
|
||||
// only need to unmap window if it is currently mapped
|
||||
if ((hdl->subwin) && (hdl->subwinMapped))
|
||||
{
|
||||
XLockDisplay(hdl->disp);
|
||||
XUnmapWindow(hdl->disp, hdl->subwin);
|
||||
hdl->subwinMapped = FALSE;
|
||||
XSync(hdl->disp, FALSE);
|
||||
XUnlockDisplay(hdl->disp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int tsmf_window_destroy(TSMFGstreamerDecoder* decoder)
|
||||
{
|
||||
struct X11Handle* hdl;
|
||||
@ -345,7 +454,13 @@ int tsmf_window_destroy(TSMFGstreamerDecoder* decoder)
|
||||
XSync(hdl->disp, FALSE);
|
||||
}
|
||||
|
||||
hdl->overlay = NULL;
|
||||
hdl->subwin = 0;
|
||||
hdl->subwinMapped = FALSE;
|
||||
hdl->subwinX = -1;
|
||||
hdl->subwinY = -1;
|
||||
hdl->subwinWidth = -1;
|
||||
hdl->subwinHeight = -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -45,10 +45,14 @@
|
||||
#include <inttypes.h>
|
||||
#endif
|
||||
|
||||
// 1 second
|
||||
#define SEEK_TOLERANCE 10000000
|
||||
|
||||
static BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder);
|
||||
static void tsmf_gstreamer_clean_up(TSMFGstreamerDecoder* mdecoder);
|
||||
static int tsmf_gstreamer_pipeline_set_state(TSMFGstreamerDecoder* mdecoder,
|
||||
GstState desired_state);
|
||||
static BOOL tsmf_gstreamer_buffer_level(ITSMFDecoder* decoder);
|
||||
|
||||
const char* get_type(TSMFGstreamerDecoder* mdecoder)
|
||||
{
|
||||
@ -57,8 +61,33 @@ const char* get_type(TSMFGstreamerDecoder* mdecoder)
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
return "VIDEO";
|
||||
else
|
||||
else if (mdecoder->media_type == TSMF_MAJOR_TYPE_AUDIO)
|
||||
return "AUDIO";
|
||||
else
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
static void cb_child_added(GstChildProxy *child_proxy, GObject *object, TSMFGstreamerDecoder* mdecoder)
|
||||
{
|
||||
DEBUG_TSMF("NAME: %s", G_OBJECT_TYPE_NAME(object));
|
||||
|
||||
if (!g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstXvImageSink") || !g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstXImageSink") || !g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstFluVAAutoSink"))
|
||||
{
|
||||
gst_base_sink_set_max_lateness((GstBaseSink *) object, 10000000); //nanoseconds
|
||||
g_object_set(G_OBJECT(object), "sync", TRUE, NULL); //synchronize on the clock
|
||||
g_object_set(G_OBJECT(object), "async", TRUE, NULL); //no async state changes - doc says not to do for streams synced to clock
|
||||
}
|
||||
|
||||
else if (!g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstAlsaSink") || !g_strcmp0(G_OBJECT_TYPE_NAME(object), "GstPulseSink"))
|
||||
{
|
||||
gst_base_sink_set_max_lateness((GstBaseSink *) object, 10000000); //nanoseconds
|
||||
g_object_set(G_OBJECT(object), "slave-method", 1, NULL); //DEFAULT
|
||||
g_object_set(G_OBJECT(object), "buffer-time", (gint64) 20000, NULL); //microseconds
|
||||
g_object_set(G_OBJECT(object), "drift-tolerance", (gint64) 20000, NULL); //microseconds
|
||||
g_object_set(G_OBJECT(object), "latency-time", (gint64) 10000, NULL); //microseconds
|
||||
g_object_set(G_OBJECT(object), "sync", TRUE, NULL); //synchronize on the clock
|
||||
g_object_set(G_OBJECT(object), "async", TRUE, NULL); //no async state changes - doc says not to do for streams synced to clock
|
||||
}
|
||||
}
|
||||
|
||||
static void tsmf_gstreamer_enough_data(GstAppSrc *src, gpointer user_data)
|
||||
@ -81,20 +110,34 @@ static gboolean tsmf_gstreamer_seek_data(GstAppSrc *src, guint64 offset, gpointe
|
||||
(void) mdecoder;
|
||||
DEBUG_TSMF("%s offset=%llu", get_type(mdecoder), offset);
|
||||
|
||||
if (!mdecoder->paused)
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);
|
||||
|
||||
gst_app_src_end_of_stream((GstAppSrc*) mdecoder->src);
|
||||
|
||||
if (!mdecoder->paused)
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
|
||||
|
||||
if (mdecoder->sync_cb)
|
||||
mdecoder->sync_cb(mdecoder->stream);
|
||||
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static void tsmf_gstreamer_change_volume(ITSMFDecoder* decoder, UINT32 newVolume, UINT32 muted)
|
||||
{
|
||||
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
|
||||
|
||||
if (!mdecoder || !mdecoder->pipe)
|
||||
return;
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
return;
|
||||
|
||||
mdecoder->gstMuted = (BOOL) muted;
|
||||
DEBUG_TSMF("mute=[%d]", mdecoder->gstMuted);
|
||||
mdecoder->gstVolume = (double) newVolume / (double) 10000;
|
||||
DEBUG_TSMF("gst_new_vol=[%f]", mdecoder->gstVolume);
|
||||
|
||||
if (!mdecoder->volume)
|
||||
return;
|
||||
|
||||
if (!G_IS_OBJECT(mdecoder->volume))
|
||||
return;
|
||||
|
||||
g_object_set(mdecoder->volume, "mute", mdecoder->gstMuted, NULL);
|
||||
g_object_set(mdecoder->volume, "volume", mdecoder->gstVolume, NULL);
|
||||
}
|
||||
|
||||
#ifdef __OpenBSD__
|
||||
static inline GstClockTime tsmf_gstreamer_timestamp_ms_to_gst(UINT64 ms_timestamp)
|
||||
#else
|
||||
@ -209,10 +252,13 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
{
|
||||
case TSMF_SUB_TYPE_WVC1:
|
||||
mdecoder->gst_caps = gst_caps_new_simple("video/x-wmv",
|
||||
"bitrate", G_TYPE_UINT, media_type->BitRate,
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"wmvversion", G_TYPE_INT, 3,
|
||||
"format", G_TYPE_STRING, "WVC1",
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'V', 'C', '1'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_MP4S:
|
||||
@ -221,6 +267,8 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"bitrate", G_TYPE_UINT, media_type->BitRate,
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '2'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_MP42:
|
||||
@ -229,15 +277,29 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"bitrate", G_TYPE_UINT, media_type->BitRate,
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '2'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_MP43:
|
||||
mdecoder->gst_caps = gst_caps_new_simple("video/x-msmpeg",
|
||||
"msmpegversion", G_TYPE_INT, 43,
|
||||
"bitrate", G_TYPE_UINT, media_type->BitRate,
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', 'P', '4', '3'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_M4S2:
|
||||
mdecoder->gst_caps = gst_caps_new_simple ("video/mpeg",
|
||||
"mpegversion", G_TYPE_INT, 4,
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('M', '4', 'S', '2'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_WMA9:
|
||||
mdecoder->gst_caps = gst_caps_new_simple("audio/x-wma",
|
||||
"wmaversion", G_TYPE_INT, 3,
|
||||
@ -249,6 +311,17 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"block_align", G_TYPE_INT, media_type->BlockAlign,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_WMA1:
|
||||
mdecoder->gst_caps = gst_caps_new_simple ("audio/x-wma",
|
||||
"wmaversion", G_TYPE_INT, 1,
|
||||
"rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
|
||||
"channels", G_TYPE_INT, media_type->Channels,
|
||||
"bitrate", G_TYPE_INT, media_type->BitRate,
|
||||
"depth", G_TYPE_INT, media_type->BitsPerSample,
|
||||
"width", G_TYPE_INT, media_type->BitsPerSample,
|
||||
"block_align", G_TYPE_INT, media_type->BlockAlign,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_WMA2:
|
||||
mdecoder->gst_caps = gst_caps_new_simple("audio/x-wma",
|
||||
"wmaversion", G_TYPE_INT, 2,
|
||||
@ -274,6 +347,8 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"wmvversion", G_TYPE_INT, 1,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'M', 'V', '1'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_WMV2:
|
||||
@ -281,6 +356,9 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"wmvversion", G_TYPE_INT, 2,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'M', 'V', '2'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_WMV3:
|
||||
@ -289,6 +367,9 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"wmvversion", G_TYPE_INT, 3,
|
||||
"format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'M', 'V', '3'),
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_AVC1:
|
||||
@ -296,6 +377,10 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
mdecoder->gst_caps = gst_caps_new_simple("video/x-h264",
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
"pixel-aspect-ratio", GST_TYPE_FRACTION, 1 , 1,
|
||||
"stream-format", G_TYPE_STRING, "byte-stream",
|
||||
"alignment", G_TYPE_STRING, "nal",
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_AC3:
|
||||
@ -319,6 +404,8 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
|
||||
"channels", G_TYPE_INT, media_type->Channels,
|
||||
"mpegversion", G_TYPE_INT, 4,
|
||||
"framed", G_TYPE_BOOLEAN, TRUE,
|
||||
"stream-format", G_TYPE_STRING, "raw",
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_MP1A:
|
||||
@ -347,6 +434,7 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
"format", G_TYPE_STRING, "YUY2",
|
||||
"width", G_TYPE_INT, media_type->Width,
|
||||
"height", G_TYPE_INT, media_type->Height,
|
||||
"framerate", GST_TYPE_FRACTION, media_type->SamplesPerSecond.Numerator, media_type->SamplesPerSecond.Denominator,
|
||||
NULL);
|
||||
#endif
|
||||
break;
|
||||
@ -358,11 +446,15 @@ static BOOL tsmf_gstreamer_set_format(ITSMFDecoder* decoder, TS_AM_MEDIA_TYPE* m
|
||||
break;
|
||||
case TSMF_SUB_TYPE_MP2A:
|
||||
mdecoder->gst_caps = gst_caps_new_simple("audio/mpeg",
|
||||
"mpegversion", G_TYPE_INT, 2,
|
||||
"mpegversion", G_TYPE_INT, 1,
|
||||
"rate", G_TYPE_INT, media_type->SamplesPerSecond.Numerator,
|
||||
"channels", G_TYPE_INT, media_type->Channels,
|
||||
NULL);
|
||||
break;
|
||||
case TSMF_SUB_TYPE_FLAC:
|
||||
mdecoder->gst_caps = gst_caps_new_simple("audio/x-flac",
|
||||
NULL);
|
||||
break;
|
||||
default:
|
||||
WLog_ERR(TAG, "unknown format:(%d).", media_type->SubType);
|
||||
return FALSE;
|
||||
@ -404,17 +496,18 @@ void tsmf_gstreamer_clean_up(TSMFGstreamerDecoder* mdecoder)
|
||||
gst_object_unref(mdecoder->pipe);
|
||||
}
|
||||
|
||||
tsmf_window_destroy(mdecoder);
|
||||
mdecoder->ready = FALSE;
|
||||
mdecoder->paused = FALSE;
|
||||
|
||||
mdecoder->pipe = NULL;
|
||||
mdecoder->src = NULL;
|
||||
mdecoder->queue = NULL;
|
||||
}
|
||||
|
||||
BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
{
|
||||
const char* appsrc = "appsrc name=source ! decodebin name=decoder !";
|
||||
const char* video = "autovideoconvert ! videoscale !";
|
||||
const char* audio = "audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
|
||||
const char* video = "appsrc name=videosource ! queue2 name=videoqueue ! decodebin2 name=videodecoder !";
|
||||
const char* audio = "appsrc name=audiosource ! queue2 name=audioqueue ! decodebin2 name=audiodecoder ! audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
|
||||
char pipeline[1024];
|
||||
|
||||
if (!mdecoder)
|
||||
@ -424,9 +517,9 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
* The only fixed elements necessary are appsrc and the volume element for audio streams.
|
||||
* The rest could easily be provided in gstreamer pipeline notation from command line. */
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
sprintf_s(pipeline, sizeof(pipeline), "%s %s %s name=outsink", appsrc, video, tsmf_platform_get_video_sink());
|
||||
sprintf_s(pipeline, sizeof(pipeline), "%s %s name=videosink", video, tsmf_platform_get_video_sink());
|
||||
else
|
||||
sprintf_s(pipeline, sizeof(pipeline), "%s %s %s name=outsink", appsrc, audio, tsmf_platform_get_audio_sink());
|
||||
sprintf_s(pipeline, sizeof(pipeline), "%s %s name=audiosink", audio, tsmf_platform_get_audio_sink());
|
||||
|
||||
DEBUG_TSMF("pipeline=%s", pipeline);
|
||||
mdecoder->pipe = gst_parse_launch(pipeline, NULL);
|
||||
@ -437,7 +530,10 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "source");
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "videosource");
|
||||
else
|
||||
mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiosource");
|
||||
|
||||
if (!mdecoder->src)
|
||||
{
|
||||
@ -445,7 +541,21 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "outsink");
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
mdecoder->queue = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "videoqueue");
|
||||
else
|
||||
mdecoder->queue = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audioqueue");
|
||||
|
||||
if (!mdecoder->queue)
|
||||
{
|
||||
WLog_ERR(TAG, "Failed to get queue");
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "videosink");
|
||||
else
|
||||
mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiosink");
|
||||
|
||||
if (!mdecoder->outsink)
|
||||
{
|
||||
@ -453,7 +563,9 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if (mdecoder->media_type != TSMF_MAJOR_TYPE_VIDEO)
|
||||
g_signal_connect(mdecoder->outsink, "child-added", G_CALLBACK(cb_child_added), mdecoder);
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_AUDIO)
|
||||
{
|
||||
mdecoder->volume = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiovolume");
|
||||
|
||||
@ -462,6 +574,8 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
WLog_ERR(TAG, "Failed to get volume");
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
tsmf_gstreamer_change_volume((ITSMFDecoder*)mdecoder, mdecoder->gstVolume*((double) 10000), mdecoder->gstMuted);
|
||||
}
|
||||
|
||||
tsmf_platform_register_handler(mdecoder);
|
||||
@ -473,16 +587,45 @@ BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
|
||||
tsmf_gstreamer_seek_data
|
||||
};
|
||||
g_object_set(mdecoder->src, "format", GST_FORMAT_TIME, NULL);
|
||||
g_object_set(mdecoder->src, "is-live", TRUE, NULL);
|
||||
g_object_set(mdecoder->src, "block", TRUE, NULL);
|
||||
g_object_set(mdecoder->src, "is-live", FALSE, NULL);
|
||||
g_object_set(mdecoder->src, "block", FALSE, NULL);
|
||||
g_object_set(mdecoder->src, "blocksize", 1024, NULL);
|
||||
gst_app_src_set_caps((GstAppSrc *) mdecoder->src, mdecoder->gst_caps);
|
||||
gst_app_src_set_callbacks((GstAppSrc *)mdecoder->src, &callbacks, mdecoder, NULL);
|
||||
gst_app_src_set_stream_type((GstAppSrc *) mdecoder->src, GST_APP_STREAM_TYPE_SEEKABLE);
|
||||
gst_app_src_set_latency((GstAppSrc *) mdecoder->src, 0, -1);
|
||||
gst_app_src_set_max_bytes((GstAppSrc *) mdecoder->src, (guint64) 0);//unlimited
|
||||
g_object_set(G_OBJECT(mdecoder->queue), "use-buffering", FALSE, NULL);
|
||||
g_object_set(G_OBJECT(mdecoder->queue), "use-rate-estimate", FALSE, NULL);
|
||||
g_object_set(G_OBJECT(mdecoder->queue), "max-size-buffers", 0, NULL);
|
||||
g_object_set(G_OBJECT(mdecoder->queue), "max-size-bytes", 0, NULL);
|
||||
g_object_set(G_OBJECT(mdecoder->queue), "max-size-time", (guint64) 0, NULL);
|
||||
|
||||
// Only set these properties if not an autosink, otherwise we will set properties when real sinks are added
|
||||
if (!g_strcmp0(G_OBJECT_TYPE_NAME(mdecoder->outsink), "GstAutoVideoSink") && !g_strcmp0(G_OBJECT_TYPE_NAME(mdecoder->outsink), "GstAutoAudioSink"))
|
||||
{
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
{
|
||||
gst_base_sink_set_max_lateness((GstBaseSink *) mdecoder->outsink, 10000000); //nanoseconds
|
||||
}
|
||||
else
|
||||
{
|
||||
gst_base_sink_set_max_lateness((GstBaseSink *) mdecoder->outsink, 10000000); //nanoseconds
|
||||
g_object_set(G_OBJECT(mdecoder->outsink), "buffer-time", (gint64) 20000, NULL); //microseconds
|
||||
g_object_set(G_OBJECT(mdecoder->outsink), "drift-tolerance", (gint64) 20000, NULL); //microseconds
|
||||
g_object_set(G_OBJECT(mdecoder->outsink), "latency-time", (gint64) 10000, NULL); //microseconds
|
||||
g_object_set(G_OBJECT(mdecoder->outsink), "slave-method", 1, NULL);
|
||||
}
|
||||
g_object_set(G_OBJECT(mdecoder->outsink), "sync", TRUE, NULL); //synchronize on the clock
|
||||
g_object_set(G_OBJECT(mdecoder->outsink), "async", TRUE, NULL); //no async state changes
|
||||
}
|
||||
|
||||
tsmf_window_create(mdecoder);
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_READY);
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
|
||||
mdecoder->pipeline_start_time_valid = 0;
|
||||
mdecoder->shutdown = 0;
|
||||
mdecoder->paused = FALSE;
|
||||
|
||||
GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(mdecoder->pipe), GST_DEBUG_GRAPH_SHOW_ALL, get_type(mdecoder));
|
||||
|
||||
@ -495,7 +638,7 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
|
||||
GstBuffer *gst_buf;
|
||||
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
|
||||
UINT64 sample_time = tsmf_gstreamer_timestamp_ms_to_gst(start_time);
|
||||
UINT64 sample_duration = tsmf_gstreamer_timestamp_ms_to_gst(duration);
|
||||
BOOL useTimestamps = TRUE;
|
||||
|
||||
if (!mdecoder)
|
||||
{
|
||||
@ -509,9 +652,15 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
|
||||
* We don't expect to block here often, since the pipeline should
|
||||
* have more than enough buffering.
|
||||
*/
|
||||
DEBUG_TSMF("%s. Start:(%llu) End:(%llu) Duration:(%llu) Last End:(%llu)",
|
||||
get_type(mdecoder), start_time, end_time, duration,
|
||||
mdecoder->last_sample_end_time);
|
||||
DEBUG_TSMF("%s. Start:(%d) End:(%d) Duration:(%d) Last Start:(%d)",
|
||||
get_type(mdecoder), (int)start_time, (int)end_time, (int)duration,
|
||||
(int)mdecoder->last_sample_start_time);
|
||||
|
||||
if (mdecoder->shutdown)
|
||||
{
|
||||
WLog_ERR(TAG, "decodeEx called on shutdown decoder");
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
if (mdecoder->gst_caps == NULL)
|
||||
{
|
||||
@ -519,6 +668,9 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
if (!mdecoder->pipe)
|
||||
tsmf_gstreamer_pipeline_build(mdecoder);
|
||||
|
||||
if (!mdecoder->src)
|
||||
{
|
||||
WLog_ERR(TAG, "failed to construct pipeline correctly. Unable to push buffer to source element.");
|
||||
@ -533,53 +685,108 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
// Relative timestamping will sometimes be set to 0
|
||||
// so we ignore these timestamps just to be safe(bit 8)
|
||||
if (extensions & 0x00000080)
|
||||
{
|
||||
DEBUG_TSMF("Ignoring the timestamps - relative - bit 8");
|
||||
useTimestamps = FALSE;
|
||||
}
|
||||
|
||||
//If no timestamps exist then we dont want to look at the timestamp values (bit 7)
|
||||
if (extensions & 0x00000040)
|
||||
{
|
||||
DEBUG_TSMF("Ignoring the timestamps - none - bit 7");
|
||||
useTimestamps = FALSE;
|
||||
}
|
||||
|
||||
// If performing a seek
|
||||
if (mdecoder->seeking)
|
||||
{
|
||||
mdecoder->seeking = FALSE;
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);
|
||||
mdecoder->pipeline_start_time_valid = 0;
|
||||
}
|
||||
|
||||
if (mdecoder->pipeline_start_time_valid)
|
||||
{
|
||||
long long diff = start_time;
|
||||
diff -= mdecoder->last_sample_end_time;
|
||||
// Adjusted the condition for a seek to be based on start time only
|
||||
// WMV1 and WMV2 files in particular have bad end time and duration values
|
||||
// there seems to be no real side effects of just using the start time instead
|
||||
UINT64 minTime = mdecoder->last_sample_start_time - (UINT64) SEEK_TOLERANCE;
|
||||
UINT64 maxTime = mdecoder->last_sample_start_time + (UINT64) SEEK_TOLERANCE;
|
||||
|
||||
if (diff < 0)
|
||||
diff *= -1;
|
||||
// Make sure the minTime stops at 0 , should we be at the beginning of the stream
|
||||
if (mdecoder->last_sample_start_time < (UINT64) SEEK_TOLERANCE)
|
||||
minTime = 0;
|
||||
|
||||
/* The pipe is initialized, but there is a discontinuity.
|
||||
* Seek to the start position... */
|
||||
if (diff > 50)
|
||||
// If the start_time is valid and different from the previous start time by more than the seek tolerance, then we have a seek condition
|
||||
if (((start_time > maxTime) || (start_time < minTime)) && useTimestamps)
|
||||
{
|
||||
DEBUG_TSMF("%s seeking to %lld", get_type(mdecoder), start_time);
|
||||
DEBUG_TSMF("tsmf_gstreamer_decodeEx: start_time=[%d] > last_sample_start_time=[%d] OR ", (int)start_time, (int)mdecoder->last_sample_start_time);
|
||||
DEBUG_TSMF("tsmf_gstreamer_decodeEx: start_time=[%d] < last_sample_start_time=[%d] with", (int)start_time, (int)mdecoder->last_sample_start_time);
|
||||
DEBUG_TSMF("tsmf_gstreamer_decodeEX: a tolerance of more than [%d] from the last sample", (int) SEEK_TOLERANCE);
|
||||
|
||||
if (!gst_element_seek(mdecoder->pipe, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE,
|
||||
GST_SEEK_TYPE_SET, sample_time,
|
||||
GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE))
|
||||
{
|
||||
WLog_ERR(TAG, "seek failed");
|
||||
}
|
||||
mdecoder->seeking = TRUE;
|
||||
|
||||
mdecoder->pipeline_start_time_valid = 0;
|
||||
// since we cant make the gstreamer pipeline jump to the new start time after a seek - we just maintain
|
||||
// a offset between realtime and gstreamer time
|
||||
mdecoder->seek_offset = start_time;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
DEBUG_TSMF("%s start time %llu", get_type(mdecoder), sample_time);
|
||||
DEBUG_TSMF("%s start time %d", get_type(mdecoder), start_time);
|
||||
// Always set base/start time to 0. Will use seek offset to translate real buffer times
|
||||
// back to 0. This allows the video to be started from anywhere and the ability to handle seeks
|
||||
// without rebuilding the pipeline, etc. since that is costly
|
||||
gst_element_set_base_time(mdecoder->pipe, tsmf_gstreamer_timestamp_ms_to_gst(0));
|
||||
gst_element_set_start_time(mdecoder->pipe, tsmf_gstreamer_timestamp_ms_to_gst(0));
|
||||
mdecoder->pipeline_start_time_valid = 1;
|
||||
|
||||
// Set the seek offset if buffer has valid timestamps.
|
||||
if (useTimestamps)
|
||||
mdecoder->seek_offset = start_time;
|
||||
|
||||
if (!gst_element_seek(mdecoder->pipe, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
|
||||
GST_SEEK_TYPE_SET, 0,
|
||||
GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE))
|
||||
{
|
||||
WLog_ERR(TAG, "seek failed");
|
||||
}
|
||||
}
|
||||
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
GST_BUFFER_PTS(gst_buf) = sample_time;
|
||||
if (useTimestamps)
|
||||
GST_BUFFER_PTS(gst_buf) = sample_time - tsmf_gstreamer_timestamp_ms_to_gst(mdecoder->seek_offset);
|
||||
else
|
||||
GST_BUFFER_PTS(gst_buf) = GST_CLOCK_TIME_NONE;
|
||||
#else
|
||||
GST_BUFFER_TIMESTAMP(gst_buf) = sample_time;
|
||||
if (useTimestamps)
|
||||
GST_BUFFER_TIMESTAMP(gst_buf) = sample_time - tsmf_gstreamer_timestamp_ms_to_gst(mdecoder->seek_offset);
|
||||
else
|
||||
GST_BUFFER_TIMESTAMP(gst_buf) = GST_CLOCK_TIME_NONE;
|
||||
#endif
|
||||
GST_BUFFER_DURATION(gst_buf) = sample_duration;
|
||||
GST_BUFFER_DURATION(gst_buf) = GST_CLOCK_TIME_NONE;
|
||||
GST_BUFFER_OFFSET(gst_buf) = GST_BUFFER_OFFSET_NONE;
|
||||
gst_buffer_set_caps(gst_buf, mdecoder->gst_caps);
|
||||
gst_app_src_push_buffer(GST_APP_SRC(mdecoder->src), gst_buf);
|
||||
|
||||
if (mdecoder->ack_cb)
|
||||
mdecoder->ack_cb(mdecoder->stream, TRUE);
|
||||
mdecoder->ack_cb(mdecoder->stream, FALSE);
|
||||
|
||||
mdecoder->last_sample_end_time = end_time;
|
||||
|
||||
if (GST_STATE(mdecoder->pipe) != GST_STATE_PLAYING)
|
||||
// Should only update the last timestamps if the current ones are valid
|
||||
if (useTimestamps)
|
||||
{
|
||||
DEBUG_TSMF("%s: state=%s", get_type(mdecoder), gst_element_state_get_name(GST_STATE(mdecoder->pipe)));
|
||||
mdecoder->last_sample_start_time = start_time;
|
||||
mdecoder->last_sample_end_time = end_time;
|
||||
}
|
||||
|
||||
if (mdecoder->pipe && (GST_STATE(mdecoder->pipe) != GST_STATE_PLAYING))
|
||||
{
|
||||
DEBUG_TSMF("%s: state=%s", get_type(mdecoder), gst_element_state_get_name(GST_STATE(mdecoder->pipe)));
|
||||
|
||||
DEBUG_TSMF("Paused: %i Shutdown: %i Ready: %i", mdecoder->paused, mdecoder->shutdown, mdecoder->ready);
|
||||
if (!mdecoder->paused && !mdecoder->shutdown && mdecoder->ready)
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
|
||||
}
|
||||
@ -587,32 +794,6 @@ static BOOL tsmf_gstreamer_decodeEx(ITSMFDecoder* decoder, const BYTE *data, UIN
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static BOOL tsmf_gstreamer_change_volume(ITSMFDecoder* decoder, UINT32 newVolume, UINT32 muted)
|
||||
{
|
||||
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
|
||||
|
||||
if (!mdecoder || !mdecoder->pipe)
|
||||
return FALSE;
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
return TRUE;
|
||||
|
||||
mdecoder->gstMuted = (BOOL) muted;
|
||||
DEBUG_TSMF("mute=[%d]", mdecoder->gstMuted);
|
||||
mdecoder->gstVolume = (double) newVolume / (double) 10000;
|
||||
DEBUG_TSMF("gst_new_vol=[%f]", mdecoder->gstVolume);
|
||||
|
||||
if (!mdecoder->volume)
|
||||
return FALSE;
|
||||
|
||||
if (!G_IS_OBJECT(mdecoder->volume))
|
||||
return FALSE;
|
||||
|
||||
g_object_set(mdecoder->volume, "mute", mdecoder->gstMuted, NULL);
|
||||
g_object_set(mdecoder->volume, "volume", mdecoder->gstVolume, NULL);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static BOOL tsmf_gstreamer_control(ITSMFDecoder* decoder, ITSMFControlMsg control_msg, UINT32 *arg)
|
||||
{
|
||||
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
|
||||
@ -626,15 +807,13 @@ static BOOL tsmf_gstreamer_control(ITSMFDecoder* decoder, ITSMFControlMsg contro
|
||||
|
||||
if (mdecoder->paused)
|
||||
{
|
||||
WLog_ERR(TAG, "%s: Ignoring control PAUSE, already received!", get_type(mdecoder));
|
||||
WLog_ERR(TAG, "%s: Ignoring Control_Pause, already received!", get_type(mdecoder));
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);
|
||||
mdecoder->shutdown = 0;
|
||||
mdecoder->paused = TRUE;
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
tsmf_window_pause(mdecoder);
|
||||
}
|
||||
else if (control_msg == Control_Resume)
|
||||
{
|
||||
@ -642,17 +821,12 @@ static BOOL tsmf_gstreamer_control(ITSMFDecoder* decoder, ITSMFControlMsg contro
|
||||
|
||||
if (!mdecoder->paused && !mdecoder->shutdown)
|
||||
{
|
||||
WLog_ERR(TAG, "%s: Ignoring control RESUME, already received!", get_type(mdecoder));
|
||||
WLog_ERR(TAG, "%s: Ignoring Control_Resume, already received!", get_type(mdecoder));
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
mdecoder->shutdown = 0;
|
||||
mdecoder->paused = FALSE;
|
||||
mdecoder->shutdown = FALSE;
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
tsmf_window_resume(mdecoder);
|
||||
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
|
||||
}
|
||||
else if (control_msg == Control_Stop)
|
||||
{
|
||||
@ -660,18 +834,29 @@ static BOOL tsmf_gstreamer_control(ITSMFDecoder* decoder, ITSMFControlMsg contro
|
||||
|
||||
if (mdecoder->shutdown)
|
||||
{
|
||||
WLog_ERR(TAG, "%s: Ignoring control STOP, already received!", get_type(mdecoder));
|
||||
WLog_ERR(TAG, "%s: Ignoring Control_Stop, already received!", get_type(mdecoder));
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
mdecoder->shutdown = TRUE;
|
||||
/* Reset stamps, flush buffers, etc */
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PAUSED);
|
||||
if (mdecoder->pipe)
|
||||
{
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_NULL);
|
||||
tsmf_window_destroy(mdecoder);
|
||||
tsmf_gstreamer_clean_up(mdecoder);
|
||||
}
|
||||
mdecoder->seek_offset = 0;
|
||||
mdecoder->pipeline_start_time_valid = 0;
|
||||
mdecoder->shutdown = 1;
|
||||
}
|
||||
else if (control_msg == Control_Restart)
|
||||
{
|
||||
DEBUG_TSMF("Control_Restart %s", get_type(mdecoder));
|
||||
mdecoder->shutdown = 0;
|
||||
mdecoder->paused = FALSE;
|
||||
|
||||
if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
tsmf_window_pause(mdecoder);
|
||||
|
||||
gst_app_src_end_of_stream((GstAppSrc *)mdecoder->src);
|
||||
if (mdecoder->pipeline_start_time_valid)
|
||||
tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
|
||||
}
|
||||
else
|
||||
WLog_ERR(TAG, "Unknown control message %08x", control_msg);
|
||||
@ -679,7 +864,7 @@ static BOOL tsmf_gstreamer_control(ITSMFDecoder* decoder, ITSMFControlMsg contro
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
static BOOL tsmf_gstreamer_buffer_filled(ITSMFDecoder* decoder)
|
||||
static BOOL tsmf_gstreamer_buffer_level(ITSMFDecoder* decoder)
|
||||
{
|
||||
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
|
||||
DEBUG_TSMF("");
|
||||
@ -687,10 +872,13 @@ static BOOL tsmf_gstreamer_buffer_filled(ITSMFDecoder* decoder)
|
||||
if (!mdecoder)
|
||||
return FALSE;
|
||||
|
||||
guint buff_max = 0;
|
||||
guint clbuff = 0;
|
||||
DEBUG_TSMF("%s buffer fill %u/%u", get_type(mdecoder), clbuff, buff_max);
|
||||
return clbuff >= buff_max ? TRUE : FALSE;
|
||||
|
||||
if (G_IS_OBJECT(mdecoder->queue))
|
||||
g_object_get(mdecoder->queue, "current-level-buffers", &clbuff, NULL);
|
||||
|
||||
DEBUG_TSMF("%s buffer level %u", get_type(mdecoder), clbuff);
|
||||
return clbuff;
|
||||
}
|
||||
|
||||
static void tsmf_gstreamer_free(ITSMFDecoder* decoder)
|
||||
@ -700,7 +888,7 @@ static void tsmf_gstreamer_free(ITSMFDecoder* decoder)
|
||||
|
||||
if (mdecoder)
|
||||
{
|
||||
mdecoder->shutdown = 1;
|
||||
tsmf_window_destroy(mdecoder);
|
||||
tsmf_gstreamer_clean_up(mdecoder);
|
||||
|
||||
if (mdecoder->gst_caps)
|
||||
@ -721,7 +909,10 @@ static UINT64 tsmf_gstreamer_get_running_time(ITSMFDecoder* decoder)
|
||||
return 0;
|
||||
|
||||
if (!mdecoder->outsink)
|
||||
return mdecoder->last_sample_end_time;
|
||||
return mdecoder->last_sample_start_time;
|
||||
|
||||
if (!mdecoder->pipe)
|
||||
return 0;
|
||||
|
||||
if (GST_STATE(mdecoder->pipe) != GST_STATE_PLAYING)
|
||||
return 0;
|
||||
@ -729,11 +920,11 @@ static UINT64 tsmf_gstreamer_get_running_time(ITSMFDecoder* decoder)
|
||||
GstFormat fmt = GST_FORMAT_TIME;
|
||||
gint64 pos = 0;
|
||||
#if GST_VERSION_MAJOR > 0
|
||||
gst_element_query_position(mdecoder->outsink, fmt, &pos);
|
||||
gst_element_query_position(mdecoder->pipe, fmt, &pos);
|
||||
#else
|
||||
gst_element_query_position(mdecoder->outsink, &fmt, &pos);
|
||||
gst_element_query_position(mdecoder->pipe, &fmt, &pos);
|
||||
#endif
|
||||
return pos/100;
|
||||
return (UINT64) (pos/100 + mdecoder->seek_offset);
|
||||
}
|
||||
|
||||
static BOOL tsmf_gstreamer_update_rendering_area(ITSMFDecoder* decoder,
|
||||
@ -757,7 +948,7 @@ BOOL tsmf_gstreamer_ack(ITSMFDecoder* decoder, BOOL (*cb)(void *, BOOL), void *s
|
||||
{
|
||||
TSMFGstreamerDecoder* mdecoder = (TSMFGstreamerDecoder *) decoder;
|
||||
DEBUG_TSMF("");
|
||||
mdecoder->ack_cb = cb;
|
||||
mdecoder->ack_cb = NULL;//cb;
|
||||
mdecoder->stream = stream;
|
||||
return TRUE;
|
||||
}
|
||||
@ -800,13 +991,17 @@ ITSMFDecoder* freerdp_tsmf_client_subsystem_entry(void)
|
||||
decoder->iface.Control = tsmf_gstreamer_control;
|
||||
decoder->iface.DecodeEx = tsmf_gstreamer_decodeEx;
|
||||
decoder->iface.ChangeVolume = tsmf_gstreamer_change_volume;
|
||||
decoder->iface.BufferFilled = tsmf_gstreamer_buffer_filled;
|
||||
decoder->iface.BufferLevel = tsmf_gstreamer_buffer_level;
|
||||
decoder->iface.SetAckFunc = tsmf_gstreamer_ack;
|
||||
decoder->iface.SetSyncFunc = tsmf_gstreamer_sync;
|
||||
decoder->paused = FALSE;
|
||||
decoder->gstVolume = 0.5;
|
||||
decoder->gstMuted = FALSE;
|
||||
decoder->state = GST_STATE_VOID_PENDING; /* No real state yet */
|
||||
decoder->last_sample_start_time = 0;
|
||||
decoder->last_sample_end_time = 0;
|
||||
decoder->seek_offset = 0;
|
||||
decoder->seeking = FALSE;
|
||||
|
||||
if (tsmf_platform_create(decoder) < 0)
|
||||
{
|
||||
|
@ -38,12 +38,16 @@ typedef struct _TSMFGstreamerDecoder
|
||||
|
||||
GstElement *pipe;
|
||||
GstElement *src;
|
||||
GstElement *queue;
|
||||
GstElement *outsink;
|
||||
GstElement *volume;
|
||||
|
||||
BOOL ready;
|
||||
BOOL paused;
|
||||
UINT64 last_sample_start_time;
|
||||
UINT64 last_sample_end_time;
|
||||
BOOL seeking;
|
||||
UINT64 seek_offset;
|
||||
|
||||
double gstVolume;
|
||||
BOOL gstMuted;
|
||||
@ -74,8 +78,8 @@ int tsmf_window_resize(TSMFGstreamerDecoder* decoder, int x, int y,
|
||||
int width, int height, int nr_rect, RDP_RECT *visible);
|
||||
int tsmf_window_destroy(TSMFGstreamerDecoder* decoder);
|
||||
|
||||
int tsmf_window_pause(TSMFGstreamerDecoder* decoder);
|
||||
int tsmf_window_resume(TSMFGstreamerDecoder* decoder);
|
||||
int tsmf_window_map(TSMFGstreamerDecoder* decoder);
|
||||
int tsmf_window_unmap(TSMFGstreamerDecoder* decoder);
|
||||
|
||||
BOOL tsmf_gstreamer_add_pad(TSMFGstreamerDecoder* mdecoder);
|
||||
void tsmf_gstreamer_remove_pad(TSMFGstreamerDecoder* mdecoder);
|
||||
|
@ -75,6 +75,13 @@ static const TSMFMediaTypeMap tsmf_sub_type_map[] =
|
||||
TSMF_SUB_TYPE_WVC1
|
||||
},
|
||||
|
||||
/* 00000160-0000-0010-8000-00AA00389B71 */
|
||||
{
|
||||
{ 0x60, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71 },
|
||||
"MEDIASUBTYPE_WMAudioV1", /* V7, V8 has the same GUID */
|
||||
TSMF_SUB_TYPE_WMA1
|
||||
},
|
||||
|
||||
/* 00000161-0000-0010-8000-00AA00389B71 */
|
||||
{
|
||||
{ 0x61, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71 },
|
||||
@ -173,6 +180,13 @@ static const TSMFMediaTypeMap tsmf_sub_type_map[] =
|
||||
TSMF_SUB_TYPE_MP42
|
||||
},
|
||||
|
||||
/* 3253344D-0000-0010-8000-00AA00389B71 */
|
||||
{
|
||||
{ 0x4D, 0x34, 0x53, 0x32, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71 },
|
||||
"MEDIASUBTYPE_MP42",
|
||||
TSMF_SUB_TYPE_M4S2
|
||||
},
|
||||
|
||||
/* E436EB81-524F-11CE-9F53-0020AF0BA770 */
|
||||
{
|
||||
{ 0x81, 0xEB, 0x36, 0xE4, 0x4F, 0x52, 0xCE, 0x11, 0x9F, 0x53, 0x00, 0x20, 0xAF, 0x0B, 0xA7, 0x70 },
|
||||
@ -605,11 +619,22 @@ BOOL tsmf_codec_parse_media_type(TS_AM_MEDIA_TYPE* mediatype, wStream* s)
|
||||
BOOL tsmf_codec_check_media_type(const char* decoder_name, wStream* s)
|
||||
{
|
||||
BYTE* m;
|
||||
BOOL ret;
|
||||
BOOL ret = FALSE;
|
||||
TS_AM_MEDIA_TYPE mediatype;
|
||||
|
||||
static BOOL decoderAvailable = FALSE;
|
||||
static BOOL firstRun = TRUE;
|
||||
|
||||
if (firstRun)
|
||||
{
|
||||
firstRun =FALSE;
|
||||
if (tsmf_check_decoder_available(decoder_name))
|
||||
decoderAvailable = TRUE;
|
||||
}
|
||||
|
||||
Stream_GetPointer(s, m);
|
||||
ret = tsmf_codec_parse_media_type(&mediatype, s);
|
||||
if (decoderAvailable)
|
||||
ret = tsmf_codec_parse_media_type(&mediatype, s);
|
||||
Stream_SetPointer(s, m);
|
||||
|
||||
if (ret)
|
||||
|
@ -125,6 +125,8 @@
|
||||
#define TSMF_SUB_TYPE_VP8 24
|
||||
#define TSMF_SUB_TYPE_VP9 25
|
||||
#define TSMF_SUB_TYPE_H263 26
|
||||
#define TSMF_SUB_TYPE_M4S2 27
|
||||
#define TSMF_SUB_TYPE_WMA1 28
|
||||
|
||||
/* FormatType */
|
||||
#define TSMF_FORMAT_TYPE_UNKNOWN 0
|
||||
|
@ -32,7 +32,7 @@
|
||||
#include "tsmf_constants.h"
|
||||
#include "tsmf_decoder.h"
|
||||
|
||||
static ITSMFDecoder* tsmf_load_decoder_by_name(const char *name, TS_AM_MEDIA_TYPE *media_type)
|
||||
static ITSMFDecoder* tsmf_load_decoder_by_name(const char *name)
|
||||
{
|
||||
ITSMFDecoder* decoder;
|
||||
TSMF_DECODER_ENTRY entry;
|
||||
@ -50,33 +50,74 @@ static ITSMFDecoder* tsmf_load_decoder_by_name(const char *name, TS_AM_MEDIA_TYP
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!decoder->SetFormat(decoder, media_type))
|
||||
{
|
||||
decoder->Free(decoder);
|
||||
decoder = NULL;
|
||||
}
|
||||
|
||||
return decoder;
|
||||
}
|
||||
|
||||
static BOOL tsmf_decoder_set_format(ITSMFDecoder *decoder, TS_AM_MEDIA_TYPE* media_type)
|
||||
{
|
||||
if (decoder->SetFormat(decoder, media_type))
|
||||
return TRUE;
|
||||
else
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
ITSMFDecoder* tsmf_load_decoder(const char* name, TS_AM_MEDIA_TYPE* media_type)
|
||||
{
|
||||
ITSMFDecoder* decoder = NULL;
|
||||
|
||||
if (name)
|
||||
{
|
||||
decoder = tsmf_load_decoder_by_name(name, media_type);
|
||||
decoder = tsmf_load_decoder_by_name(name);
|
||||
}
|
||||
|
||||
#if defined(WITH_GSTREAMER_1_0) || defined(WITH_GSTREAMER_0_10)
|
||||
if (!decoder)
|
||||
decoder = tsmf_load_decoder_by_name("gstreamer", media_type);
|
||||
decoder = tsmf_load_decoder_by_name("gstreamer");
|
||||
#endif
|
||||
|
||||
#if defined(WITH_FFMPEG)
|
||||
if (!decoder)
|
||||
decoder = tsmf_load_decoder_by_name("ffmpeg", media_type);
|
||||
decoder = tsmf_load_decoder_by_name("ffmpeg");
|
||||
#endif
|
||||
|
||||
if (decoder)
|
||||
{
|
||||
if (!tsmf_decoder_set_format(decoder, media_type))
|
||||
{
|
||||
decoder->Free(decoder);
|
||||
decoder = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return decoder;
|
||||
}
|
||||
|
||||
BOOL tsmf_check_decoder_available(const char* name)
|
||||
{
|
||||
ITSMFDecoder* decoder = NULL;
|
||||
BOOL retValue = FALSE;
|
||||
|
||||
if (name)
|
||||
{
|
||||
decoder = tsmf_load_decoder_by_name(name);
|
||||
}
|
||||
#if defined(WITH_GSTREAMER_1_0) || defined(WITH_GSTREAMER_0_10)
|
||||
if (!decoder)
|
||||
decoder = tsmf_load_decoder_by_name("gstreamer");
|
||||
#endif
|
||||
|
||||
#if defined(WITH_FFMPEG)
|
||||
if (!decoder)
|
||||
decoder = tsmf_load_decoder_by_name("ffmpeg");
|
||||
#endif
|
||||
|
||||
if (decoder)
|
||||
{
|
||||
decoder->Free(decoder);
|
||||
decoder = NULL;
|
||||
retValue = TRUE;
|
||||
}
|
||||
|
||||
return retValue;
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,7 @@ typedef enum _ITSMFControlMsg
|
||||
{
|
||||
Control_Pause,
|
||||
Control_Resume,
|
||||
Control_Restart,
|
||||
Control_Stop
|
||||
} ITSMFControlMsg;
|
||||
|
||||
@ -58,7 +59,7 @@ struct _ITSMFDecoder
|
||||
/* Change Gstreamer Audio Volume */
|
||||
BOOL (*ChangeVolume)(ITSMFDecoder *decoder, UINT32 newVolume, UINT32 muted);
|
||||
/* Check buffer level */
|
||||
BOOL (*BufferFilled)(ITSMFDecoder *decoder);
|
||||
BOOL (*BufferLevel)(ITSMFDecoder *decoder);
|
||||
/* Register a callback for frame ack. */
|
||||
BOOL (*SetAckFunc)(ITSMFDecoder *decoder, BOOL (*cb)(void *,BOOL), void *stream);
|
||||
/* Register a callback for stream seek detection. */
|
||||
@ -69,6 +70,7 @@ struct _ITSMFDecoder
|
||||
typedef ITSMFDecoder *(*TSMF_DECODER_ENTRY)(void);
|
||||
|
||||
ITSMFDecoder *tsmf_load_decoder(const char *name, TS_AM_MEDIA_TYPE *media_type);
|
||||
BOOL tsmf_check_decoder_available(const char* name);
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -553,6 +553,7 @@ UINT tsmf_ifman_set_allocator(TSMF_IFMAN* ifman)
|
||||
UINT tsmf_ifman_notify_preroll(TSMF_IFMAN* ifman)
|
||||
{
|
||||
DEBUG_TSMF("");
|
||||
tsmf_ifman_on_playback_paused(ifman);
|
||||
ifman->output_pending = TRUE;
|
||||
return CHANNEL_RC_OK;
|
||||
}
|
||||
@ -637,6 +638,7 @@ UINT tsmf_ifman_on_flush(TSMF_IFMAN* ifman)
|
||||
{
|
||||
UINT32 StreamId;
|
||||
TSMF_PRESENTATION* presentation;
|
||||
TSMF_STREAM* stream;
|
||||
|
||||
if (Stream_GetRemainingLength(ifman->input) < 20)
|
||||
return ERROR_INVALID_DATA;
|
||||
@ -653,8 +655,15 @@ UINT tsmf_ifman_on_flush(TSMF_IFMAN* ifman)
|
||||
return ERROR_NOT_FOUND;
|
||||
}
|
||||
|
||||
if (!tsmf_presentation_flush(presentation))
|
||||
return ERROR_INVALID_OPERATION;
|
||||
// Flush message is for a stream, not the entire presentation
|
||||
// therefore we only flush the stream as intended per the MS-RDPEV spec
|
||||
stream = tsmf_stream_find_by_id(presentation, StreamId);
|
||||
if (stream)
|
||||
if (!tsmf_stream_flush(stream))
|
||||
return ERROR_INVALID_OPERATION;
|
||||
else
|
||||
WLog_ERR(TAG, "unknown stream id");
|
||||
|
||||
ifman->output_pending = TRUE;
|
||||
|
||||
return CHANNEL_RC_OK;
|
||||
@ -668,7 +677,7 @@ UINT tsmf_ifman_on_flush(TSMF_IFMAN* ifman)
|
||||
UINT tsmf_ifman_on_end_of_stream(TSMF_IFMAN* ifman)
|
||||
{
|
||||
UINT32 StreamId;
|
||||
TSMF_STREAM* stream;
|
||||
TSMF_STREAM* stream = NULL;
|
||||
TSMF_PRESENTATION* presentation;
|
||||
|
||||
if (Stream_GetRemainingLength(ifman->input) < 20)
|
||||
@ -684,17 +693,12 @@ UINT tsmf_ifman_on_end_of_stream(TSMF_IFMAN* ifman)
|
||||
stream = tsmf_stream_find_by_id(presentation, StreamId);
|
||||
|
||||
if (stream)
|
||||
tsmf_stream_end(stream);
|
||||
tsmf_stream_end(stream, ifman->message_id, ifman->channel_callback);
|
||||
}
|
||||
|
||||
DEBUG_TSMF("StreamId %d", StreamId);
|
||||
if (!Stream_EnsureRemainingCapacity(ifman->output, 16))
|
||||
return ERROR_OUTOFMEMORY;
|
||||
|
||||
Stream_Write_UINT32(ifman->output, CLIENT_EVENT_NOTIFICATION); /* FunctionId */
|
||||
Stream_Write_UINT32(ifman->output, StreamId); /* StreamId */
|
||||
Stream_Write_UINT32(ifman->output, TSMM_CLIENT_EVENT_ENDOFSTREAM); /* EventId */
|
||||
Stream_Write_UINT32(ifman->output, 0); /* cbData */
|
||||
ifman->output_pending = TRUE;
|
||||
|
||||
ifman->output_interface_id = TSMF_INTERFACE_CLIENT_NOTIFICATIONS | STREAM_ID_PROXY;
|
||||
return CHANNEL_RC_OK;
|
||||
|
@ -36,7 +36,47 @@
|
||||
|
||||
#include "tsmf_main.h"
|
||||
|
||||
BOOL tsmf_playback_ack(IWTSVirtualChannelCallback *pChannelCallback,
|
||||
void tsmf_send_eos_response(IWTSVirtualChannelCallback* pChannelCallback, UINT32 message_id)
|
||||
{
|
||||
wStream* s;
|
||||
int status;
|
||||
TSMF_CHANNEL_CALLBACK* callback = (TSMF_CHANNEL_CALLBACK*) pChannelCallback;
|
||||
|
||||
s = Stream_New(NULL, 24);
|
||||
if (!s)
|
||||
return FALSE;
|
||||
|
||||
if (!callback)
|
||||
{
|
||||
DEBUG_TSMF("No callback reference - unable to send eos response!");
|
||||
return;
|
||||
}
|
||||
|
||||
if (callback && callback->stream_id && callback->channel && callback->channel->Write)
|
||||
{
|
||||
s = Stream_New(NULL, 24);
|
||||
if (!s)
|
||||
return FALSE;
|
||||
Stream_Write_UINT32(s, TSMF_INTERFACE_CLIENT_NOTIFICATIONS | STREAM_ID_PROXY);
|
||||
Stream_Write_UINT32(s, message_id);
|
||||
Stream_Write_UINT32(s, CLIENT_EVENT_NOTIFICATION); /* FunctionId */
|
||||
Stream_Write_UINT32(s, callback->stream_id); /* StreamId */
|
||||
Stream_Write_UINT32(s, TSMM_CLIENT_EVENT_ENDOFSTREAM); /* EventId */
|
||||
Stream_Write_UINT32(s, 0); /* cbData */
|
||||
DEBUG_TSMF("response size %i", Stream_GetPosition(s));
|
||||
|
||||
status = callback->channel->Write(callback->channel, Stream_GetPosition(s), Stream_Buffer(s), NULL);
|
||||
if (status)
|
||||
{
|
||||
WLog_ERR(TAG, "response error %d", status);
|
||||
}
|
||||
Stream_Free(s, TRUE);
|
||||
}
|
||||
|
||||
return (status == 0);
|
||||
}
|
||||
|
||||
void tsmf_playback_ack(IWTSVirtualChannelCallback *pChannelCallback,
|
||||
UINT32 message_id, UINT64 duration, UINT32 data_size)
|
||||
{
|
||||
wStream *s;
|
||||
|
@ -64,7 +64,8 @@ struct _TSMF_PLUGIN
|
||||
rdpContext* rdpcontext;
|
||||
};
|
||||
|
||||
BOOL tsmf_playback_ack(IWTSVirtualChannelCallback* pChannelCallback,
|
||||
void tsmf_send_eos_response(IWTSVirtualChannelCallback* pChannelCallback, UINT32 message_id);
|
||||
void tsmf_playback_ack(IWTSVirtualChannelCallback* pChannelCallback,
|
||||
UINT32 message_id, UINT64 duration, UINT32 data_size);
|
||||
|
||||
#endif
|
||||
|
@ -56,13 +56,23 @@
|
||||
|
||||
#define AUDIO_TOLERANCE 10000000LL
|
||||
|
||||
// 1 second
|
||||
#define VIDEO_ADJUST_MAX 10000000
|
||||
|
||||
#define MAX_ACK_TIME 666667
|
||||
|
||||
#define AUDIO_MIN_BUFFER_LEVEL 3
|
||||
#define AUDIO_MAX_BUFFER_LEVEL 6
|
||||
|
||||
#define VIDEO_MIN_BUFFER_LEVEL 10
|
||||
#define VIDEO_MAX_BUFFER_LEVEL 30
|
||||
|
||||
struct _TSMF_PRESENTATION
|
||||
{
|
||||
BYTE presentation_id[GUID_SIZE];
|
||||
|
||||
const char *audio_name;
|
||||
const char *audio_device;
|
||||
int eos;
|
||||
|
||||
IWTSVirtualChannelCallback *channel_callback;
|
||||
|
||||
@ -93,6 +103,9 @@ struct _TSMF_STREAM
|
||||
|
||||
int major_type;
|
||||
int eos;
|
||||
UINT32 eos_message_id;
|
||||
IWTSVirtualChannelCallback* eos_channel_callback;
|
||||
int delayed_stop;
|
||||
UINT32 width;
|
||||
UINT32 height;
|
||||
|
||||
@ -101,11 +114,17 @@ struct _TSMF_STREAM
|
||||
UINT32 channels;
|
||||
UINT32 bits_per_sample;
|
||||
|
||||
/* The start time of last played sample */
|
||||
UINT64 last_start_time;
|
||||
/* The end_time of last played sample */
|
||||
UINT64 last_end_time;
|
||||
/* Next sample should not start before this system time. */
|
||||
UINT64 next_start_time;
|
||||
|
||||
UINT32 minBufferLevel;
|
||||
UINT32 maxBufferLevel;
|
||||
UINT32 currentBufferLevel;
|
||||
|
||||
HANDLE play_thread;
|
||||
HANDLE ack_thread;
|
||||
HANDLE stopEvent;
|
||||
@ -114,6 +133,8 @@ struct _TSMF_STREAM
|
||||
wQueue *sample_list;
|
||||
wQueue *sample_ack_list;
|
||||
rdpContext* rdpcontext;
|
||||
|
||||
BOOL seeking;
|
||||
};
|
||||
|
||||
struct _TSMF_SAMPLE
|
||||
@ -128,6 +149,8 @@ struct _TSMF_SAMPLE
|
||||
UINT32 decoded_size;
|
||||
UINT32 pixfmt;
|
||||
|
||||
BOOL invalidTimestamps;
|
||||
|
||||
TSMF_STREAM* stream;
|
||||
IWTSVirtualChannelCallback *channel_callback;
|
||||
UINT64 ack_time;
|
||||
@ -170,7 +193,9 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
|
||||
if (stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
|
||||
{
|
||||
/* Check if some other stream has earlier sample that needs to be played first */
|
||||
if (stream->last_end_time > AUDIO_TOLERANCE)
|
||||
// Start time is more reliable than end time as some stream types seem to have incorrect
|
||||
// end times from the server
|
||||
if (stream->last_start_time > AUDIO_TOLERANCE)
|
||||
{
|
||||
ArrayList_Lock(presentation->stream_list);
|
||||
count = ArrayList_Count(presentation->stream_list);
|
||||
@ -179,9 +204,12 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
|
||||
{
|
||||
s = (TSMF_STREAM *) ArrayList_GetItem(presentation->stream_list, index);
|
||||
|
||||
if (s != stream && !s->eos && s->last_end_time &&
|
||||
s->last_end_time < stream->last_end_time - AUDIO_TOLERANCE)
|
||||
// Start time is more reliable than end time as some stream types seem to have incorrect
|
||||
// end times from the server
|
||||
if (s != stream && !s->eos && s->last_start_time &&
|
||||
s->last_start_time < stream->last_start_time - AUDIO_TOLERANCE)
|
||||
{
|
||||
DEBUG_TSMF("Pending due to audio tolerance");
|
||||
pending = TRUE;
|
||||
break;
|
||||
}
|
||||
@ -192,8 +220,11 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
|
||||
}
|
||||
else
|
||||
{
|
||||
if (stream->last_end_time > presentation->audio_end_time)
|
||||
// Start time is more reliable than end time as some stream types seem to have incorrect
|
||||
// end times from the server
|
||||
if (stream->last_start_time > presentation->audio_start_time)
|
||||
{
|
||||
DEBUG_TSMF("Pending due to stream start time > audio start time");
|
||||
pending = TRUE;
|
||||
}
|
||||
}
|
||||
@ -206,9 +237,14 @@ static TSMF_SAMPLE* tsmf_stream_pop_sample(TSMF_STREAM* stream, int sync)
|
||||
|
||||
sample = (TSMF_SAMPLE *) Queue_Dequeue(stream->sample_list);
|
||||
|
||||
if (sample && (sample->end_time > stream->last_end_time))
|
||||
// Only update stream last end time if the sample end time is valid and greater than the current stream end time
|
||||
if (sample && (sample->end_time > stream->last_end_time) && (!sample->invalidTimestamps))
|
||||
stream->last_end_time = sample->end_time;
|
||||
|
||||
// Only update stream last start time if the sample start time is valid and greater than the current stream start time
|
||||
if (sample && (sample->start_time > stream->last_start_time) && (!sample->invalidTimestamps))
|
||||
stream->last_start_time = sample->start_time;
|
||||
|
||||
return sample;
|
||||
}
|
||||
|
||||
@ -242,6 +278,8 @@ static BOOL tsmf_sample_queue_ack(TSMF_SAMPLE* sample)
|
||||
return Queue_Enqueue(sample->stream->sample_ack_list, sample);
|
||||
}
|
||||
|
||||
// Returns TRUE if no more samples are currently available
|
||||
// Returns FALSE otherwise
|
||||
static BOOL tsmf_stream_process_ack(void* arg, BOOL force)
|
||||
{
|
||||
TSMF_STREAM* stream = arg;
|
||||
@ -250,22 +288,38 @@ static BOOL tsmf_stream_process_ack(void* arg, BOOL force)
|
||||
BOOL rc = FALSE;
|
||||
|
||||
if (!stream)
|
||||
return FALSE;
|
||||
return TRUE;
|
||||
|
||||
Queue_Lock(stream->sample_ack_list);
|
||||
sample = (TSMF_SAMPLE*) Queue_Peek(stream->sample_ack_list);
|
||||
|
||||
if (!sample)
|
||||
{
|
||||
rc = TRUE;
|
||||
goto finally;
|
||||
}
|
||||
|
||||
if (!force)
|
||||
{
|
||||
// Do some min/max ack limiting if we have access to Buffer level information
|
||||
if (stream->decoder->BufferLevel)
|
||||
{
|
||||
// Try to keep buffer level below max by withholding acks
|
||||
if (stream->currentBufferLevel > stream->maxBufferLevel)
|
||||
goto finally;
|
||||
// Try to keep buffer level above min by pushing acks through quickly
|
||||
else if (stream->currentBufferLevel < stream->minBufferLevel)
|
||||
goto dequeue;
|
||||
}
|
||||
|
||||
// Time based acks only
|
||||
ack_time = get_current_time();
|
||||
|
||||
if (sample->ack_time > ack_time)
|
||||
goto finally;
|
||||
}
|
||||
|
||||
dequeue:
|
||||
sample = Queue_Dequeue(stream->sample_ack_list);
|
||||
if (sample)
|
||||
{
|
||||
@ -295,6 +349,7 @@ TSMF_PRESENTATION* tsmf_presentation_new(const BYTE* guid, IWTSVirtualChannelCal
|
||||
CopyMemory(presentation->presentation_id, guid, GUID_SIZE);
|
||||
presentation->channel_callback = pChannelCallback;
|
||||
presentation->volume = 5000; /* 50% */
|
||||
presentation->muted = 0;
|
||||
if (!(presentation->stream_list = ArrayList_New(TRUE)))
|
||||
goto error_stream_list;
|
||||
|
||||
@ -372,9 +427,11 @@ static BOOL tsmf_sample_playback_video(TSMF_SAMPLE* sample)
|
||||
{
|
||||
t = get_current_time();
|
||||
|
||||
// Start time is more reliable than end time as some stream types seem to have incorrect
|
||||
// end times from the server
|
||||
if (stream->next_start_time > t &&
|
||||
(sample->end_time >= presentation->audio_start_time ||
|
||||
sample->end_time < stream->last_end_time))
|
||||
((sample->start_time >= presentation->audio_start_time) ||
|
||||
((sample->start_time < stream->last_start_time) && (!sample->invalidTimestamps))))
|
||||
{
|
||||
USleep((stream->next_start_time - t) / 10);
|
||||
}
|
||||
@ -449,9 +506,15 @@ static BOOL tsmf_sample_playback_audio(TSMF_SAMPLE* sample)
|
||||
}
|
||||
|
||||
sample->ack_time = latency + get_current_time();
|
||||
stream->last_end_time = sample->end_time + latency;
|
||||
stream->presentation->audio_start_time = sample->start_time + latency;
|
||||
stream->presentation->audio_end_time = sample->end_time + latency;
|
||||
|
||||
//Only update stream times if the sample timestamps are valid
|
||||
if (!sample->invalidTimestamps)
|
||||
{
|
||||
stream->last_start_time = sample->start_time + latency;
|
||||
stream->last_end_time = sample->end_time + latency;
|
||||
stream->presentation->audio_start_time = sample->start_time + latency;
|
||||
stream->presentation->audio_end_time = sample->end_time + latency;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -467,6 +530,31 @@ static BOOL tsmf_sample_playback(TSMF_SAMPLE* sample)
|
||||
{
|
||||
if (stream->decoder->DecodeEx)
|
||||
{
|
||||
// Try to "sync" video buffers to audio buffers by looking at the running time for each stream
|
||||
// The difference between the two running times causes an offset between audio and video actual
|
||||
// render times. So, we try to adjust timestamps on the video buffer to match those on the audio buffer.
|
||||
if (stream->major_type == TSMF_MAJOR_TYPE_VIDEO)
|
||||
{
|
||||
TSMF_STREAM* temp_stream = NULL;
|
||||
TSMF_PRESENTATION* presentation = stream->presentation;
|
||||
ArrayList_Lock(presentation->stream_list);
|
||||
int count = ArrayList_Count(presentation->stream_list);
|
||||
int index = 0;
|
||||
for (index = 0; index < count; index++)
|
||||
{
|
||||
temp_stream = (TSMF_STREAM*) ArrayList_GetItem(presentation->stream_list, index);
|
||||
if (temp_stream->major_type == TSMF_MAJOR_TYPE_AUDIO)
|
||||
{
|
||||
UINT64 video_time = (UINT64) stream->decoder->GetRunningTime(stream->decoder);
|
||||
UINT64 audio_time = (UINT64) temp_stream->decoder->GetRunningTime(temp_stream->decoder);
|
||||
sample->start_time += abs(video_time - audio_time) > VIDEO_ADJUST_MAX ? (video_time - audio_time) : VIDEO_ADJUST_MAX;
|
||||
sample->end_time += abs(video_time - audio_time) > VIDEO_ADJUST_MAX ? (video_time - audio_time) : VIDEO_ADJUST_MAX;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ArrayList_Unlock(presentation->stream_list);
|
||||
}
|
||||
|
||||
ret = stream->decoder->DecodeEx(stream->decoder, sample->data, sample->data_size, sample->extensions,
|
||||
sample->start_time, sample->end_time, sample->duration);
|
||||
}
|
||||
@ -543,37 +631,22 @@ static BOOL tsmf_sample_playback(TSMF_SAMPLE* sample)
|
||||
{
|
||||
TSMF_STREAM* stream = sample->stream;
|
||||
UINT64 ack_anticipation_time = get_current_time();
|
||||
UINT64 currentRunningTime = sample->start_time;
|
||||
BOOL buffer_filled = TRUE;
|
||||
|
||||
if (stream->decoder->GetRunningTime)
|
||||
// Classify the buffer as filled once it reaches minimum level
|
||||
if (stream->decoder->BufferLevel)
|
||||
{
|
||||
currentRunningTime = stream->decoder->GetRunningTime(stream->decoder);
|
||||
}
|
||||
|
||||
if (stream->decoder->BufferFilled)
|
||||
{
|
||||
buffer_filled = stream->decoder->BufferFilled(stream->decoder);
|
||||
if (stream->currentBufferLevel < stream->minBufferLevel)
|
||||
buffer_filled = FALSE;
|
||||
}
|
||||
|
||||
if (buffer_filled)
|
||||
{
|
||||
if (currentRunningTime > sample->start_time)
|
||||
{
|
||||
ack_anticipation_time += sample->duration;
|
||||
}
|
||||
else if (currentRunningTime == 0)
|
||||
{
|
||||
ack_anticipation_time += sample->duration;
|
||||
}
|
||||
else
|
||||
{
|
||||
ack_anticipation_time += (sample->start_time - currentRunningTime);
|
||||
}
|
||||
ack_anticipation_time += (sample->duration/2 < MAX_ACK_TIME) ? sample->duration/2 : MAX_ACK_TIME;
|
||||
}
|
||||
else
|
||||
{
|
||||
ack_anticipation_time += sample->duration / 2;
|
||||
ack_anticipation_time += (sample->duration/2 < MAX_ACK_TIME) ? sample->duration/2 : MAX_ACK_TIME;
|
||||
}
|
||||
|
||||
switch (sample->stream->major_type)
|
||||
@ -608,31 +681,63 @@ static void* tsmf_stream_ack_func(void *arg)
|
||||
|
||||
while (1)
|
||||
{
|
||||
DWORD ev = WaitForMultipleObjects(2, hdl, FALSE, INFINITE);
|
||||
DWORD ev = WaitForMultipleObjects(2, hdl, FALSE, 1000);
|
||||
|
||||
if (ev == WAIT_FAILED)
|
||||
{
|
||||
error = GetLastError();
|
||||
WLog_ERR(TAG, "WaitForMultipleObjects failed with error %lu!", error);
|
||||
break;
|
||||
}
|
||||
|
||||
if (ev == WAIT_OBJECT_0)
|
||||
break;
|
||||
|
||||
if (!stream->decoder)
|
||||
continue;
|
||||
|
||||
if (stream->decoder->SetAckFunc)
|
||||
continue;
|
||||
|
||||
if (tsmf_stream_process_ack(stream, FALSE))
|
||||
if (ev == WAIT_FAILED)
|
||||
{
|
||||
error = ERROR_INTERNAL_ERROR;
|
||||
WLog_ERR(TAG, "tsmf_stream_process_ack failed!");
|
||||
error = GetLastError();
|
||||
WLog_ERR(TAG, "WaitForMultipleObjects failed with error %lu!", error);
|
||||
break;
|
||||
}
|
||||
|
||||
if (stream->decoder)
|
||||
if (stream->decoder->BufferLevel)
|
||||
stream->currentBufferLevel = stream->decoder->BufferLevel(stream->decoder);
|
||||
|
||||
if (stream->eos)
|
||||
{
|
||||
while ((stream->currentBufferLevel > 0) || !(tsmf_stream_process_ack(stream, TRUE)))
|
||||
{
|
||||
DEBUG_TSMF("END OF STREAM PROCESSING!");
|
||||
if (stream->decoder->BufferLevel)
|
||||
stream->currentBufferLevel = stream->decoder->BufferLevel(stream->decoder);
|
||||
else
|
||||
stream->currentBufferLevel = 1;
|
||||
|
||||
USleep(1000);
|
||||
}
|
||||
|
||||
tsmf_send_eos_response(stream->eos_channel_callback, stream->eos_message_id);
|
||||
stream->eos = 0;
|
||||
|
||||
if (stream->delayed_stop)
|
||||
{
|
||||
DEBUG_TSMF("Finishing delayed stream stop, now that eos has processed.");
|
||||
tsmf_stream_flush(stream);
|
||||
|
||||
if (stream->decoder->Control)
|
||||
stream->decoder->Control(stream->decoder, Control_Stop, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
// Stream stopped force all of the acks to happen
|
||||
if (ev == WAIT_OBJECT_0)
|
||||
{
|
||||
DEBUG_TSMF("ack: Stream stopped!");
|
||||
while(1)
|
||||
{
|
||||
if (tsmf_stream_process_ack(stream, TRUE))
|
||||
break;
|
||||
USleep(1000);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (tsmf_stream_process_ack(stream, FALSE))
|
||||
continue;
|
||||
|
||||
if (stream->currentBufferLevel > stream->minBufferLevel)
|
||||
USleep(1000);
|
||||
}
|
||||
|
||||
if (error && stream->rdpcontext)
|
||||
@ -646,7 +751,7 @@ static void* tsmf_stream_ack_func(void *arg)
|
||||
static void* tsmf_stream_playback_func(void *arg)
|
||||
{
|
||||
HANDLE hdl[2];
|
||||
TSMF_SAMPLE* sample;
|
||||
TSMF_SAMPLE* sample = NULL;
|
||||
TSMF_STREAM* stream = (TSMF_STREAM *) arg;
|
||||
TSMF_PRESENTATION* presentation = stream->presentation;
|
||||
UINT error = CHANNEL_RC_OK;
|
||||
@ -678,28 +783,33 @@ static void* tsmf_stream_playback_func(void *arg)
|
||||
|
||||
while (1)
|
||||
{
|
||||
status = WaitForMultipleObjects(2, hdl, FALSE, INFINITE);
|
||||
status = WaitForMultipleObjects(2, hdl, FALSE, 1000);
|
||||
|
||||
if (status == WAIT_FAILED)
|
||||
{
|
||||
error = GetLastError();
|
||||
WLog_ERR(TAG, "WaitForMultipleObjects failed with error %lu!", error);
|
||||
break;
|
||||
}
|
||||
if (status == WAIT_FAILED)
|
||||
{
|
||||
error = GetLastError();
|
||||
WLog_ERR(TAG, "WaitForMultipleObjects failed with error %lu!", error);
|
||||
break;
|
||||
}
|
||||
|
||||
status = WaitForSingleObject(stream->stopEvent, 0);
|
||||
|
||||
if (status == WAIT_FAILED)
|
||||
{
|
||||
error = GetLastError();
|
||||
WLog_ERR(TAG, "WaitForSingleObject failed with error %lu!", error);
|
||||
break;
|
||||
}
|
||||
status = WaitForSingleObject(stream->stopEvent, 0);
|
||||
|
||||
if (status == WAIT_OBJECT_0)
|
||||
break;
|
||||
if (status == WAIT_FAILED)
|
||||
{
|
||||
error = GetLastError();
|
||||
WLog_ERR(TAG, "WaitForSingleObject failed with error %lu!", error);
|
||||
break;
|
||||
}
|
||||
|
||||
sample = tsmf_stream_pop_sample(stream, 0);
|
||||
if (status == WAIT_OBJECT_0)
|
||||
break;
|
||||
|
||||
if (stream->decoder)
|
||||
if (stream->decoder->BufferLevel)
|
||||
stream->currentBufferLevel = stream->decoder->BufferLevel(stream->decoder);
|
||||
|
||||
sample = tsmf_stream_pop_sample(stream, 0);
|
||||
|
||||
if (sample && !tsmf_sample_playback(sample))
|
||||
{
|
||||
@ -707,8 +817,12 @@ static void* tsmf_stream_playback_func(void *arg)
|
||||
error = ERROR_INTERNAL_ERROR;
|
||||
break;
|
||||
}
|
||||
|
||||
if (stream->currentBufferLevel > stream->minBufferLevel)
|
||||
USleep(1000);
|
||||
}
|
||||
|
||||
|
||||
if (stream->audio)
|
||||
{
|
||||
stream->audio->Free(stream->audio);
|
||||
@ -728,7 +842,9 @@ static BOOL tsmf_stream_start(TSMF_STREAM* stream)
|
||||
if (!stream || !stream->presentation || !stream->decoder || !stream->decoder->Control)
|
||||
return TRUE;
|
||||
|
||||
return stream->decoder->Control(stream->decoder, Control_Resume, NULL);
|
||||
stream->eos = 0;
|
||||
|
||||
return stream->decoder->Control(stream->decoder, Control_Restart, NULL);
|
||||
}
|
||||
|
||||
static BOOL tsmf_stream_stop(TSMF_STREAM* stream)
|
||||
@ -736,7 +852,23 @@ static BOOL tsmf_stream_stop(TSMF_STREAM* stream)
|
||||
if (!stream || !stream->decoder || !stream->decoder->Control)
|
||||
return TRUE;
|
||||
|
||||
return stream->decoder->Control(stream->decoder, Control_Stop, NULL);
|
||||
// If stopping after eos - we delay until the eos has been processed
|
||||
// this allows us to process any buffers that have been acked even though
|
||||
// they have not actually been completely processes by the decoder
|
||||
if (stream->eos)
|
||||
{
|
||||
DEBUG_TSMF("Setting up a delayed stop for once the eos has been processed.");
|
||||
stream->delayed_stop = 1;
|
||||
return TRUE;
|
||||
}
|
||||
// Otherwise force stop immediately
|
||||
else
|
||||
{
|
||||
DEBUG_TSMF("Stop with no pending eos response, so do it immediately.");
|
||||
tsmf_stream_flush(stream);
|
||||
|
||||
return stream->decoder->Control(stream->decoder, Control_Stop, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static BOOL tsmf_stream_pause(TSMF_STREAM* stream)
|
||||
@ -752,7 +884,9 @@ static BOOL tsmf_stream_restart(TSMF_STREAM* stream)
|
||||
if (!stream || !stream->decoder || !stream->decoder->Control)
|
||||
return TRUE;
|
||||
|
||||
return stream->decoder->Control(stream->decoder, Control_Resume, NULL);
|
||||
stream->eos = 0;
|
||||
|
||||
return stream->decoder->Control(stream->decoder, Control_Restart, NULL);
|
||||
}
|
||||
|
||||
static BOOL tsmf_stream_change_volume(TSMF_STREAM* stream, UINT32 newVolume, UINT32 muted)
|
||||
@ -902,6 +1036,9 @@ BOOL tsmf_presentation_stop(TSMF_PRESENTATION* presentation)
|
||||
}
|
||||
|
||||
ArrayList_Unlock(presentation->stream_list);
|
||||
presentation->audio_start_time = 0;
|
||||
presentation->audio_end_time = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -922,6 +1059,9 @@ BOOL tsmf_presentation_set_geometry_info(TSMF_PRESENTATION* presentation,
|
||||
if (!width || !height)
|
||||
return TRUE;
|
||||
|
||||
// Streams can be added/removed from the presentation and the server will resend geometry info when a new stream is
|
||||
// added to the presentation.
|
||||
/*
|
||||
if ((width == presentation->width) && (height == presentation->height) &&
|
||||
(x == presentation->x) && (y == presentation->y) &&
|
||||
(num_rects == presentation->nr_rects) &&
|
||||
@ -929,6 +1069,7 @@ BOOL tsmf_presentation_set_geometry_info(TSMF_PRESENTATION* presentation,
|
||||
{
|
||||
return TRUE;
|
||||
}
|
||||
*/
|
||||
|
||||
presentation->x = x;
|
||||
presentation->y = y;
|
||||
@ -969,7 +1110,7 @@ void tsmf_presentation_set_audio_device(TSMF_PRESENTATION* presentation, const c
|
||||
presentation->audio_device = device;
|
||||
}
|
||||
|
||||
static BOOL tsmf_stream_flush(TSMF_STREAM* stream)
|
||||
BOOL tsmf_stream_flush(TSMF_STREAM* stream)
|
||||
{
|
||||
BOOL ret = TRUE;
|
||||
|
||||
@ -979,6 +1120,9 @@ static BOOL tsmf_stream_flush(TSMF_STREAM* stream)
|
||||
ret = stream->audio->Flush(stream->audio);
|
||||
|
||||
stream->eos = 0;
|
||||
stream->eos_message_id = 0;
|
||||
stream->eos_channel_callback = NULL;
|
||||
stream->delayed_stop = 0;
|
||||
stream->last_end_time = 0;
|
||||
stream->next_start_time = 0;
|
||||
|
||||
@ -1049,6 +1193,14 @@ TSMF_STREAM* tsmf_stream_new(TSMF_PRESENTATION* presentation, UINT32 stream_id,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
stream->minBufferLevel = VIDEO_MIN_BUFFER_LEVEL;
|
||||
stream->maxBufferLevel = VIDEO_MAX_BUFFER_LEVEL;
|
||||
stream->currentBufferLevel = 1;
|
||||
|
||||
stream->seeking = FALSE;
|
||||
stream->eos = 0;
|
||||
stream->eos_message_id = 0;
|
||||
stream->eos_channel_callback = NULL;
|
||||
stream->stream_id = stream_id;
|
||||
stream->presentation = presentation;
|
||||
stream->stopEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
|
||||
@ -1152,6 +1304,9 @@ BOOL tsmf_stream_set_format(TSMF_STREAM* stream, const char *name, wStream *s)
|
||||
mediatype.Width, mediatype.Height, mediatype.BitRate,
|
||||
(double) mediatype.SamplesPerSecond.Numerator / (double) mediatype.SamplesPerSecond.Denominator,
|
||||
mediatype.ExtraDataSize);
|
||||
|
||||
stream->minBufferLevel = VIDEO_MIN_BUFFER_LEVEL;
|
||||
stream->maxBufferLevel = VIDEO_MAX_BUFFER_LEVEL;
|
||||
}
|
||||
else if (mediatype.MajorType == TSMF_MAJOR_TYPE_AUDIO)
|
||||
{
|
||||
@ -1164,6 +1319,9 @@ BOOL tsmf_stream_set_format(TSMF_STREAM* stream, const char *name, wStream *s)
|
||||
|
||||
if (stream->bits_per_sample == 0)
|
||||
stream->bits_per_sample = 16;
|
||||
|
||||
stream->minBufferLevel = AUDIO_MIN_BUFFER_LEVEL;
|
||||
stream->maxBufferLevel = AUDIO_MAX_BUFFER_LEVEL;
|
||||
}
|
||||
|
||||
stream->major_type = mediatype.MajorType;
|
||||
@ -1183,13 +1341,14 @@ BOOL tsmf_stream_set_format(TSMF_STREAM* stream, const char *name, wStream *s)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void tsmf_stream_end(TSMF_STREAM* stream)
|
||||
void tsmf_stream_end(TSMF_STREAM* stream, UINT32 message_id, IWTSVirtualChannelCallback* pChannelCallback)
|
||||
{
|
||||
if (!stream)
|
||||
return;
|
||||
|
||||
stream->eos = 1;
|
||||
stream->presentation->eos = 1;
|
||||
stream->eos_message_id = message_id;
|
||||
stream->eos_channel_callback = pChannelCallback;
|
||||
}
|
||||
|
||||
void _tsmf_stream_free(TSMF_STREAM* stream)
|
||||
@ -1197,8 +1356,7 @@ void _tsmf_stream_free(TSMF_STREAM* stream)
|
||||
if (!stream)
|
||||
return;
|
||||
|
||||
if (tsmf_stream_stop(stream))
|
||||
tsmf_stream_flush(stream);
|
||||
tsmf_stream_stop(stream);
|
||||
SetEvent(stream->stopEvent);
|
||||
|
||||
if (stream->play_thread)
|
||||
@ -1267,6 +1425,10 @@ BOOL tsmf_stream_push_sample(TSMF_STREAM* stream, IWTSVirtualChannelCallback *pC
|
||||
sample->end_time = end_time;
|
||||
sample->duration = duration;
|
||||
sample->extensions = extensions;
|
||||
if ((sample->extensions & 0x00000080) || (sample->extensions & 0x00000040))
|
||||
sample->invalidTimestamps = TRUE;
|
||||
else
|
||||
sample->invalidTimestamps = FALSE;
|
||||
sample->stream = stream;
|
||||
sample->channel_callback = pChannelCallback;
|
||||
sample->data_size = data_size;
|
||||
|
@ -55,8 +55,9 @@ void tsmf_presentation_free(TSMF_PRESENTATION *presentation);
|
||||
TSMF_STREAM *tsmf_stream_new(TSMF_PRESENTATION *presentation, UINT32 stream_id, rdpContext* rdpcontext);
|
||||
TSMF_STREAM *tsmf_stream_find_by_id(TSMF_PRESENTATION *presentation, UINT32 stream_id);
|
||||
BOOL tsmf_stream_set_format(TSMF_STREAM *stream, const char *name, wStream *s);
|
||||
void tsmf_stream_end(TSMF_STREAM *stream);
|
||||
void tsmf_stream_end(TSMF_STREAM *stream, UINT32 message_id, IWTSVirtualChannelCallback* pChannelCallback);
|
||||
void tsmf_stream_free(TSMF_STREAM *stream);
|
||||
void tsmf_stream_flush(TSMF_STREAM* stream);
|
||||
|
||||
BOOL tsmf_stream_push_sample(TSMF_STREAM *stream, IWTSVirtualChannelCallback *pChannelCallback,
|
||||
UINT32 sample_id, UINT64 start_time, UINT64 end_time, UINT64 duration, UINT32 extensions,
|
||||
|
Loading…
Reference in New Issue
Block a user