/**
 * FreeRDP: A Remote Desktop Protocol Implementation
 * Video Redirection Virtual Channel
 *
 * Copyright 2010-2011 Vic Lee
 * Copyright 2015 Thincast Technologies GmbH
 * Copyright 2015 DI (FH) Martin Haimberger <martin.haimberger@thincast.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <winpr/crt.h>
#include <winpr/stream.h>
#include <winpr/cmdline.h>

#include <freerdp/client/tsmf.h>

#include "tsmf_types.h"
#include "tsmf_constants.h"
#include "tsmf_ifman.h"
#include "tsmf_media.h"
#include "tsmf_main.h"
/**
 * Send an end-of-stream (EOS) client event notification to the server.
 *
 * Builds a CLIENT_EVENT_NOTIFICATION message (TSMM_CLIENT_EVENT_ENDOFSTREAM)
 * for the stream associated with this channel callback and writes it on the
 * virtual channel.
 *
 * @param pChannelCallback channel callback (actually a TSMF_CHANNEL_CALLBACK*)
 * @param message_id       MessageId echoed back to the server
 *
 * @return TRUE if the notification was written successfully, FALSE otherwise
 *         (no callback, no stream/channel, allocation failure, or write error)
 */
BOOL tsmf_send_eos_response(IWTSVirtualChannelCallback* pChannelCallback, UINT32 message_id)
{
	wStream* s = NULL;
	int status = -1;
	TSMF_CHANNEL_CALLBACK* callback = (TSMF_CHANNEL_CALLBACK*)pChannelCallback;

	if (!callback)
	{
		DEBUG_TSMF("No callback reference - unable to send eos response!");
		return FALSE;
	}

	/* callback is known non-NULL here; only verify the channel is usable */
	if (callback->stream_id && callback->channel && callback->channel->Write)
	{
		s = Stream_New(NULL, 24);

		if (!s)
			return FALSE;

		Stream_Write_UINT32(s, TSMF_INTERFACE_CLIENT_NOTIFICATIONS | STREAM_ID_PROXY);
		Stream_Write_UINT32(s, message_id);
		Stream_Write_UINT32(s, CLIENT_EVENT_NOTIFICATION);     /* FunctionId */
		Stream_Write_UINT32(s, callback->stream_id);           /* StreamId */
		Stream_Write_UINT32(s, TSMM_CLIENT_EVENT_ENDOFSTREAM); /* EventId */
		Stream_Write_UINT32(s, 0);                             /* cbData */
		DEBUG_TSMF("EOS response size %" PRIuz "", Stream_GetPosition(s));
		status = callback->channel->Write(callback->channel, Stream_GetPosition(s),
		                                  Stream_Buffer(s), NULL);

		if (status)
		{
			WLog_ERR(TAG, "response error %d", status);
		}

		Stream_Free(s, TRUE);
	}

	/* status stays -1 (FALSE) when the channel was not writable */
	return (status == 0);
}
2019-11-06 17:24:51 +03:00
/**
 * Send a PLAYBACK_ACK notification acknowledging consumed sample data.
 *
 * @param pChannelCallback channel callback (actually a TSMF_CHANNEL_CALLBACK*)
 * @param message_id       MessageId echoed back to the server
 * @param duration         duration of the acknowledged data (DataDuration field)
 * @param data_size        number of acknowledged bytes (cbData field, 8 bytes on wire)
 *
 * @return TRUE if the ack was written successfully, FALSE otherwise
 */
BOOL tsmf_playback_ack(IWTSVirtualChannelCallback* pChannelCallback, UINT32 message_id,
                       UINT64 duration, UINT32 data_size)
{
	wStream* s = NULL;
	int status = -1;
	TSMF_CHANNEL_CALLBACK* callback = (TSMF_CHANNEL_CALLBACK*)pChannelCallback;

	if (!callback)
		return FALSE;

	s = Stream_New(NULL, 32);

	if (!s)
		return FALSE;

	Stream_Write_UINT32(s, TSMF_INTERFACE_CLIENT_NOTIFICATIONS | STREAM_ID_PROXY);
	Stream_Write_UINT32(s, message_id);
	Stream_Write_UINT32(s, PLAYBACK_ACK);        /* FunctionId */
	Stream_Write_UINT32(s, callback->stream_id); /* StreamId */
	Stream_Write_UINT64(s, duration);            /* DataDuration */
	Stream_Write_UINT64(s, data_size);           /* cbData */
	DEBUG_TSMF("ACK response size %" PRIuz "", Stream_GetPosition(s));

	if (!callback->channel || !callback->channel->Write)
	{
		/* callback is non-NULL here; log which part of the channel is missing */
		WLog_ERR(TAG, "callback=%p, channel=%p, write=%p", callback, callback->channel,
		         (callback->channel ? callback->channel->Write : NULL));
	}
	else
	{
		status = callback->channel->Write(callback->channel, Stream_GetPosition(s),
		                                  Stream_Buffer(s), NULL);
	}

	if (status)
	{
		WLog_ERR(TAG, "response error %d", status);
	}

	Stream_Free(s, TRUE);
	return (status == 0);
}
/**
 * Function description
 *
 * Parse one TSMF PDU ([MS-RDPEV] 2.2.1 Shared Message Header), dispatch it to
 * the matching tsmf_ifman_* handler based on InterfaceId/FunctionId, and write
 * the handler's response back on the channel unless the handler marked the
 * output as pending (response deferred or not required).
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT tsmf_on_data_received(IWTSVirtualChannelCallback* pChannelCallback, wStream* data)
{
	size_t length;
	wStream* input;
	wStream* output;
	UINT error = CHANNEL_RC_OK;
	BOOL processed = FALSE;
	TSMF_IFMAN ifman;
	UINT32 MessageId;
	UINT32 FunctionId;
	UINT32 InterfaceId;
	TSMF_CHANNEL_CALLBACK* callback = (TSMF_CHANNEL_CALLBACK*)pChannelCallback;
	UINT32 cbSize = Stream_GetRemainingLength(data);

	/* 2.2.1 Shared Message Header (SHARED_MSG_HEADER): 3 x UINT32 minimum */
	if (cbSize < 12)
	{
		WLog_ERR(TAG, "invalid size. cbSize=%" PRIu32 "", cbSize);
		return ERROR_INVALID_DATA;
	}

	input = data;
	output = Stream_New(NULL, 256);

	if (!output)
		return ERROR_OUTOFMEMORY;

	Stream_Seek(output, 8); /* reserve space for the response header (InterfaceId + MessageId) */
	Stream_Read_UINT32(input, InterfaceId); /* InterfaceId (4 bytes) */
	Stream_Read_UINT32(input, MessageId);   /* MessageId (4 bytes) */
	Stream_Read_UINT32(input, FunctionId);  /* FunctionId (4 bytes) */
	DEBUG_TSMF("cbSize=%" PRIu32 " InterfaceId=0x%" PRIX32 " MessageId=0x%" PRIX32
	           " FunctionId=0x%" PRIX32 "",
	           cbSize, InterfaceId, MessageId, FunctionId);
	ZeroMemory(&ifman, sizeof(TSMF_IFMAN));
	ifman.channel_callback = pChannelCallback;
	ifman.decoder_name = ((TSMF_PLUGIN*)callback->plugin)->decoder_name;
	ifman.audio_name = ((TSMF_PLUGIN*)callback->plugin)->audio_name;
	ifman.audio_device = ((TSMF_PLUGIN*)callback->plugin)->audio_device;
	CopyMemory(ifman.presentation_id, callback->presentation_id, GUID_SIZE);
	ifman.stream_id = callback->stream_id;
	ifman.message_id = MessageId;
	ifman.input = input;
	ifman.input_size = cbSize - 12;
	ifman.output = output;
	ifman.output_pending = FALSE;
	ifman.output_interface_id = InterfaceId;

	switch (InterfaceId)
	{
		case TSMF_INTERFACE_CAPABILITIES | STREAM_ID_NONE:
			switch (FunctionId)
			{
				case RIM_EXCHANGE_CAPABILITY_REQUEST:
					error = tsmf_ifman_rim_exchange_capability_request(&ifman);
					processed = TRUE;
					break;

				case RIMCALL_RELEASE:
				case RIMCALL_QUERYINTERFACE:
					break;

				default:
					break;
			}

			break;

		case TSMF_INTERFACE_DEFAULT | STREAM_ID_PROXY:
			switch (FunctionId)
			{
				case SET_CHANNEL_PARAMS:
					if (Stream_GetRemainingLength(input) < GUID_SIZE + 4)
					{
						error = ERROR_INVALID_DATA;
						goto out;
					}

					CopyMemory(callback->presentation_id, Stream_Pointer(input), GUID_SIZE);
					Stream_Seek(input, GUID_SIZE);
					Stream_Read_UINT32(input, callback->stream_id);
					DEBUG_TSMF("SET_CHANNEL_PARAMS StreamId=%" PRIu32 "", callback->stream_id);
					/* no response body is sent for SET_CHANNEL_PARAMS */
					ifman.output_pending = TRUE;
					processed = TRUE;
					break;

				case EXCHANGE_CAPABILITIES_REQ:
					error = tsmf_ifman_exchange_capability_request(&ifman);
					processed = TRUE;
					break;

				case CHECK_FORMAT_SUPPORT_REQ:
					error = tsmf_ifman_check_format_support_request(&ifman);
					processed = TRUE;
					break;

				case ON_NEW_PRESENTATION:
					error = tsmf_ifman_on_new_presentation(&ifman);
					processed = TRUE;
					break;

				case ADD_STREAM:
					error =
					    tsmf_ifman_add_stream(&ifman, ((TSMF_PLUGIN*)callback->plugin)->rdpcontext);
					processed = TRUE;
					break;

				case SET_TOPOLOGY_REQ:
					error = tsmf_ifman_set_topology_request(&ifman);
					processed = TRUE;
					break;

				case REMOVE_STREAM:
					error = tsmf_ifman_remove_stream(&ifman);
					processed = TRUE;
					break;

				case SET_SOURCE_VIDEO_RECT:
					error = tsmf_ifman_set_source_video_rect(&ifman);
					processed = TRUE;
					break;

				case SHUTDOWN_PRESENTATION_REQ:
					error = tsmf_ifman_shutdown_presentation(&ifman);
					processed = TRUE;
					break;

				case ON_STREAM_VOLUME:
					error = tsmf_ifman_on_stream_volume(&ifman);
					processed = TRUE;
					break;

				case ON_CHANNEL_VOLUME:
					error = tsmf_ifman_on_channel_volume(&ifman);
					processed = TRUE;
					break;

				case SET_VIDEO_WINDOW:
					error = tsmf_ifman_set_video_window(&ifman);
					processed = TRUE;
					break;

				case UPDATE_GEOMETRY_INFO:
					error = tsmf_ifman_update_geometry_info(&ifman);
					processed = TRUE;
					break;

				case SET_ALLOCATOR:
					error = tsmf_ifman_set_allocator(&ifman);
					processed = TRUE;
					break;

				case NOTIFY_PREROLL:
					error = tsmf_ifman_notify_preroll(&ifman);
					processed = TRUE;
					break;

				case ON_SAMPLE:
					error = tsmf_ifman_on_sample(&ifman);
					processed = TRUE;
					break;

				case ON_FLUSH:
					error = tsmf_ifman_on_flush(&ifman);
					processed = TRUE;
					break;

				case ON_END_OF_STREAM:
					error = tsmf_ifman_on_end_of_stream(&ifman);
					processed = TRUE;
					break;

				case ON_PLAYBACK_STARTED:
					error = tsmf_ifman_on_playback_started(&ifman);
					processed = TRUE;
					break;

				case ON_PLAYBACK_PAUSED:
					error = tsmf_ifman_on_playback_paused(&ifman);
					processed = TRUE;
					break;

				case ON_PLAYBACK_RESTARTED:
					error = tsmf_ifman_on_playback_restarted(&ifman);
					processed = TRUE;
					break;

				case ON_PLAYBACK_STOPPED:
					error = tsmf_ifman_on_playback_stopped(&ifman);
					processed = TRUE;
					break;

				case ON_PLAYBACK_RATE_CHANGED:
					error = tsmf_ifman_on_playback_rate_changed(&ifman);
					processed = TRUE;
					break;

				case RIMCALL_RELEASE:
				case RIMCALL_QUERYINTERFACE:
					break;

				default:
					break;
			}

			break;

		default:
			break;
	}

	/* the input stream is owned by the caller; drop our aliases */
	input = NULL;
	ifman.input = NULL;

	if (error)
	{
		WLog_ERR(TAG, "ifman data received processing error %" PRIu32 "", error);
	}

	if (!processed)
	{
		switch (FunctionId)
		{
			case RIMCALL_RELEASE:
				/* [MS-RDPEXPS] 2.2.2.2 Interface Release (IFACE_RELEASE)
				   This message does not require a reply. */
				processed = TRUE;
				ifman.output_pending = TRUE;
				break;

			case RIMCALL_QUERYINTERFACE:
				/* [MS-RDPEXPS] 2.2.2.1.2 Query Interface Response (QI_RSP)
				   This message is not supported in this channel. */
				processed = TRUE;
				break;
		}

		if (!processed)
		{
			WLog_ERR(TAG,
			         "Unknown InterfaceId: 0x%08" PRIX32 " MessageId: 0x%08" PRIX32
			         " FunctionId: 0x%08" PRIX32 "\n",
			         InterfaceId, MessageId, FunctionId);
			/* When a request is not implemented we return empty response indicating error */
		}

		processed = TRUE;
	}

	if (processed && !ifman.output_pending)
	{
		/* Response packet does not have FunctionId */
		length = Stream_GetPosition(output);
		Stream_SetPosition(output, 0);
		Stream_Write_UINT32(output, ifman.output_interface_id);
		Stream_Write_UINT32(output, MessageId);
		DEBUG_TSMF("response size %" PRIuz "", length);
		error = callback->channel->Write(callback->channel, length, Stream_Buffer(output), NULL);

		if (error)
		{
			WLog_ERR(TAG, "response error %" PRIu32 "", error);
		}
	}

out:
	Stream_Free(output, TRUE);
	return error;
}
/**
 * Function description
 *
 * Channel close handler: if this callback was bound to a stream, look up its
 * presentation and free the stream, then release the callback itself.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
static UINT tsmf_on_close(IWTSVirtualChannelCallback* pChannelCallback)
{
	TSMF_STREAM* stream;
	TSMF_PRESENTATION* presentation;
	TSMF_CHANNEL_CALLBACK* callback = (TSMF_CHANNEL_CALLBACK*)pChannelCallback;
	DEBUG_TSMF("");

	if (callback->stream_id)
	{
		presentation = tsmf_presentation_find_by_id(callback->presentation_id);

		if (presentation)
		{
			stream = tsmf_stream_find_by_id(presentation, callback->stream_id);

			if (stream)
				tsmf_stream_free(stream);
		}
	}

	free(pChannelCallback);
	return CHANNEL_RC_OK;
}
/**
* Function description
*
* @return 0 on success, otherwise a Win32 error code
*/
static UINT tsmf_on_new_channel_connection(IWTSListenerCallback* pListenerCallback,
2019-11-06 17:24:51 +03:00
IWTSVirtualChannel* pChannel, BYTE* Data, BOOL* pbAccept,
IWTSVirtualChannelCallback** ppCallback)
2011-09-19 18:54:09 +04:00
{
TSMF_CHANNEL_CALLBACK* callback;
2019-11-06 17:24:51 +03:00
TSMF_LISTENER_CALLBACK* listener_callback = (TSMF_LISTENER_CALLBACK*)pListenerCallback;
DEBUG_TSMF("");
2019-11-06 17:24:51 +03:00
callback = (TSMF_CHANNEL_CALLBACK*)calloc(1, sizeof(TSMF_CHANNEL_CALLBACK));
if (!callback)
2015-06-12 13:26:15 +03:00
return CHANNEL_RC_NO_MEMORY;
2011-09-19 18:54:09 +04:00
callback->iface.OnDataReceived = tsmf_on_data_received;
callback->iface.OnClose = tsmf_on_close;
callback->iface.OnOpen = NULL;
2011-09-19 18:54:09 +04:00
callback->plugin = listener_callback->plugin;
callback->channel_mgr = listener_callback->channel_mgr;
callback->channel = pChannel;
2019-11-06 17:24:51 +03:00
*ppCallback = (IWTSVirtualChannelCallback*)callback;
2015-06-12 13:26:15 +03:00
return CHANNEL_RC_OK;
2011-09-19 18:54:09 +04:00
}
/**
 * Plugin initialization: registers the "TSMF" listener with the channel
 * manager.
 *
 * BUG FIX: the original code dereferenced tsmf->listener unconditionally
 * after CreateListener; on failure the listener pointer is not valid and
 * that was a NULL/uninitialized-pointer dereference. The listener is now
 * only touched when CreateListener succeeded.
 *
 * @return CHANNEL_RC_OK on success, otherwise a Win32 error code
 */
static UINT tsmf_plugin_initialize(IWTSPlugin* pPlugin, IWTSVirtualChannelManager* pChannelMgr)
{
	UINT status;
	TSMF_PLUGIN* tsmf = (TSMF_PLUGIN*)pPlugin;
	DEBUG_TSMF("");
	tsmf->listener_callback = (TSMF_LISTENER_CALLBACK*)calloc(1, sizeof(TSMF_LISTENER_CALLBACK));

	if (!tsmf->listener_callback)
		return CHANNEL_RC_NO_MEMORY;

	tsmf->listener_callback->iface.OnNewChannelConnection = tsmf_on_new_channel_connection;
	tsmf->listener_callback->plugin = pPlugin;
	tsmf->listener_callback->channel_mgr = pChannelMgr;
	status = pChannelMgr->CreateListener(
	    pChannelMgr, "TSMF", 0, (IWTSListenerCallback*)tsmf->listener_callback, &(tsmf->listener));

	/* Only expose the client context through the listener when it exists. */
	if (status == CHANNEL_RC_OK)
		tsmf->listener->pInterface = tsmf->iface.pInterface;

	return status;
}
/**
 * Plugin teardown: releases all plugin-owned allocations.
 *
 * BUG FIX: the original leaked the subsystem name strings duplicated in
 * tsmf_process_addin_args (audio_name, audio_device, decoder_name) and the
 * TsmfClientContext allocated in DVCPluginEntry (iface.pInterface). A NULL
 * guard is also added so a failed entry path cannot crash teardown.
 *
 * @return CHANNEL_RC_OK on success, otherwise a Win32 error code
 */
static UINT tsmf_plugin_terminated(IWTSPlugin* pPlugin)
{
	TSMF_PLUGIN* tsmf = (TSMF_PLUGIN*)pPlugin;
	DEBUG_TSMF("");

	if (!tsmf)
		return CHANNEL_RC_OK;

	free(tsmf->listener_callback);
	/* Strings _strdup'd while parsing addin arguments. */
	free(tsmf->audio_name);
	free(tsmf->audio_device);
	free(tsmf->decoder_name);
	/* TsmfClientContext allocated in DVCPluginEntry; plugin owns it. */
	free(tsmf->iface.pInterface);
	free(tsmf);
	return CHANNEL_RC_OK;
}
/**
 * Parses the addin arguments ("sys", "dev", "decoder") and stores copies of
 * the selected subsystem/device names on the plugin instance.
 *
 * BUG FIX: the original overwrote a possibly already-set field with a fresh
 * _strdup, leaking the previous string when an option is given twice or when
 * the function is invoked more than once (DVCPluginEntry calls it on every
 * entry, including for an already-registered plugin). Each field is now
 * freed before being replaced; free(NULL) is a no-op so the first pass is
 * unaffected.
 *
 * @return CHANNEL_RC_OK on success, otherwise a Win32 error code
 */
static UINT tsmf_process_addin_args(IWTSPlugin* pPlugin, ADDIN_ARGV* args)
{
	int status;
	DWORD flags;
	COMMAND_LINE_ARGUMENT_A* arg;
	TSMF_PLUGIN* tsmf = (TSMF_PLUGIN*)pPlugin;
	COMMAND_LINE_ARGUMENT_A tsmf_args[] = { { "sys", COMMAND_LINE_VALUE_REQUIRED, "<subsystem>",
		                                      NULL, NULL, -1, NULL, "audio subsystem" },
		                                    { "dev", COMMAND_LINE_VALUE_REQUIRED, "<device>", NULL,
		                                      NULL, -1, NULL, "audio device name" },
		                                    { "decoder", COMMAND_LINE_VALUE_REQUIRED, "<subsystem>",
		                                      NULL, NULL, -1, NULL, "decoder subsystem" },
		                                    { NULL, 0, NULL, NULL, NULL, -1, NULL, NULL } };
	flags = COMMAND_LINE_SIGIL_NONE | COMMAND_LINE_SEPARATOR_COLON;
	status = CommandLineParseArgumentsA(args->argc, args->argv, tsmf_args, flags, tsmf, NULL, NULL);

	if (status != 0)
		return ERROR_INVALID_DATA;

	arg = tsmf_args;

	do
	{
		if (!(arg->Flags & COMMAND_LINE_VALUE_PRESENT))
			continue;

		CommandLineSwitchStart(arg) CommandLineSwitchCase(arg, "sys")
		{
			free(tsmf->audio_name); /* drop any previous value before replacing */
			tsmf->audio_name = _strdup(arg->Value);

			if (!tsmf->audio_name)
				return ERROR_OUTOFMEMORY;
		}
		CommandLineSwitchCase(arg, "dev")
		{
			free(tsmf->audio_device);
			tsmf->audio_device = _strdup(arg->Value);

			if (!tsmf->audio_device)
				return ERROR_OUTOFMEMORY;
		}
		CommandLineSwitchCase(arg, "decoder")
		{
			free(tsmf->decoder_name);
			tsmf->decoder_name = _strdup(arg->Value);

			if (!tsmf->decoder_name)
				return ERROR_OUTOFMEMORY;
		}
		CommandLineSwitchDefault(arg)
		{
		}
		CommandLineSwitchEnd(arg)
	} while ((arg = CommandLineFindNextArgumentA(arg)) != NULL);

	return CHANNEL_RC_OK;
}
#ifdef BUILTIN_CHANNELS
#define DVCPluginEntry tsmf_DVCPluginEntry
#else
#define DVCPluginEntry FREERDP_API DVCPluginEntry
#endif
/**
 * Dynamic virtual channel plugin entry point for "tsmf".
 *
 * Creates and registers the TSMF plugin on first entry; on every entry
 * (including re-entry for an already-registered plugin) processes the addin
 * arguments.
 *
 * BUG FIX: when RegisterPlugin failed, the original returned the failing
 * status without releasing the freshly allocated plugin and client context;
 * that path now goes through the common cleanup labels.
 *
 * @return 0 on success, otherwise a Win32 error code
 */
UINT DVCPluginEntry(IDRDYNVC_ENTRY_POINTS* pEntryPoints)
{
	UINT status = 0;
	TSMF_PLUGIN* tsmf;
	TsmfClientContext* context;
	UINT error = CHANNEL_RC_NO_MEMORY;
	tsmf = (TSMF_PLUGIN*)pEntryPoints->GetPlugin(pEntryPoints, "tsmf");

	if (!tsmf)
	{
		tsmf = (TSMF_PLUGIN*)calloc(1, sizeof(TSMF_PLUGIN));

		if (!tsmf)
		{
			WLog_ERR(TAG, "calloc failed!");
			return CHANNEL_RC_NO_MEMORY;
		}

		tsmf->iface.Initialize = tsmf_plugin_initialize;
		tsmf->iface.Connected = NULL;
		tsmf->iface.Disconnected = NULL;
		tsmf->iface.Terminated = tsmf_plugin_terminated;
		/* NOTE(review): assumes GetRdpSettings and settings->instance are
		 * non-NULL at entry time — TODO confirm against drdynvc caller. */
		tsmf->rdpcontext =
		    ((freerdp*)((rdpSettings*)pEntryPoints->GetRdpSettings(pEntryPoints))->instance)
		        ->context;
		context = (TsmfClientContext*)calloc(1, sizeof(TsmfClientContext));

		if (!context)
		{
			WLog_ERR(TAG, "calloc failed!");
			goto error_context;
		}

		context->handle = (void*)tsmf;
		tsmf->iface.pInterface = (void*)context;

		if (!tsmf_media_init())
		{
			error = ERROR_INVALID_OPERATION;
			goto error_init;
		}

		status = pEntryPoints->RegisterPlugin(pEntryPoints, "tsmf", (IWTSPlugin*)tsmf);

		if (status != CHANNEL_RC_OK)
		{
			/* Registration failed: release everything allocated above. */
			error = status;
			goto error_init;
		}
	}

	if (status == CHANNEL_RC_OK)
	{
		status =
		    tsmf_process_addin_args((IWTSPlugin*)tsmf, pEntryPoints->GetPluginData(pEntryPoints));
	}

	return status;
error_init:
	free(context);
error_context:
	free(tsmf);
	return error;
}