/**
* FreeRDP: A Remote Desktop Protocol Implementation
* FreeRDP Mac OS X Server (Audio Output)
*
* Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
* Copyright 2015 Thincast Technologies GmbH
* Copyright 2015 DI (FH) Martin Haimberger <martin.haimberger@thincast.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <freerdp/server/rdpsnd.h>
#include "mf_info.h"
#include "mf_rdpsnd.h"
#include <winpr/sysinfo.h>
#include <freerdp/server/server-common.h>
#include <freerdp/log.h>
#define TAG SERVER_TAG("mac")
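
/* Global capture state shared by channel activation, the AudioQueue input
 * callback, and mf_peer_rdpsnd_stop(). */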
AQRecorderState recorderState;
static void mf_peer_rdpsnd_activated(RdpsndServerContext* context)
{
    OSStatus status;
    int i, j;
    BOOL formatAgreed = FALSE;
    AUDIO_FORMAT* agreedFormat = NULL;
    // we should actually loop through the list of client formats here
    // and see if we can send the client something that it supports...
    WLog_DBG(TAG, "Client supports the following %d formats: ", context->num_client_formats);

    for (i = 0; i < context->num_client_formats; i++)
    {
        /* TODO: improve the way we agree on a format */
        for (j = 0; j < context->num_server_formats; j++)
        {
            if ((context->client_formats[i].wFormatTag == context->server_formats[j].wFormatTag) &&
                (context->client_formats[i].nChannels == context->server_formats[j].nChannels) &&
                (context->client_formats[i].nSamplesPerSec == context->server_formats[j].nSamplesPerSec))
            {
                WLog_DBG(TAG, "agreed on format!");
                formatAgreed = TRUE;
                agreedFormat = (AUDIO_FORMAT*)&context->server_formats[j];
                break;
            }
        }

        if (formatAgreed == TRUE)
            break;
    }

    if (formatAgreed == FALSE)
    {
        WLog_DBG(TAG, "Could not agree on an audio format with the client");
        return;
    }
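
    /* Tell the client which of its announced formats to use, and set both
     * channels to full volume. */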
    context->SelectFormat(context, i);
    context->SetVolume(context, 0x7FFF, 0x7FFF);
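
    /* Describe the capture stream for CoreAudio, mapping the negotiated RDPSND
     * format tag onto an AudioQueue format ID. */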
    switch (agreedFormat->wFormatTag)
    {
        case WAVE_FORMAT_ALAW:
            recorderState.dataFormat.mFormatID = kAudioFormatDVIIntelIMA;
            break;

        case WAVE_FORMAT_PCM:
            recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
            break;

        default:
            recorderState.dataFormat.mFormatID = kAudioFormatLinearPCM;
            break;
    }

    recorderState.dataFormat.mSampleRate = agreedFormat->nSamplesPerSec;
    recorderState.dataFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger |
                                            kAudioFormatFlagsNativeEndian |
                                            kAudioFormatFlagIsPacked;
    recorderState.dataFormat.mBytesPerPacket = 4;
    recorderState.dataFormat.mFramesPerPacket = 1;
    recorderState.dataFormat.mBytesPerFrame = 4;
    recorderState.dataFormat.mChannelsPerFrame = agreedFormat->nChannels;
    recorderState.dataFormat.mBitsPerChannel = agreedFormat->wBitsPerSample;
    recorderState.snd_context = context;
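
    /* Create the CoreAudio input (recording) queue; filled buffers are handed
     * to mf_peer_rdpsnd_input_callback. */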
    status = AudioQueueNewInput(&recorderState.dataFormat,
                                mf_peer_rdpsnd_input_callback,
                                &recorderState,
                                NULL,
                                kCFRunLoopCommonModes,
                                0,
                                &recorderState.queue);

    if (status != noErr)
    {
        WLog_DBG(TAG, "Failed to create a new Audio Queue. Status code: %"PRId32"", status);
    }
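
    /* Read back the stream description the queue actually uses, then size the
     * capture buffers to hold roughly 50 ms of audio each. */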
    UInt32 dataFormatSize = sizeof(recorderState.dataFormat);
    AudioQueueGetProperty(recorderState.queue,
                          kAudioConverterCurrentInputStreamDescription,
                          &recorderState.dataFormat,
                          &dataFormatSize);
    mf_rdpsnd_derive_buffer_size(recorderState.queue, &recorderState.dataFormat, 0.05,
                                 &recorderState.bufferByteSize);
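
    /* Allocate the capture buffers and prime them onto the queue. */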
    for (i = 0; i < SND_NUMBUFFERS; ++i)
    {
        AudioQueueAllocateBuffer(recorderState.queue,
                                 recorderState.bufferByteSize,
                                 &recorderState.buffers[i]);
        AudioQueueEnqueueBuffer(recorderState.queue,
                                recorderState.buffers[i],
                                0,
                                NULL);
    }

    recorderState.currentPacket = 0;
    recorderState.isRunning = true;
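
    /* Start capturing; audio flows until mf_peer_rdpsnd_stop() is called. */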
    AudioQueueStart(recorderState.queue, NULL);
}
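
/*
 * Set up the RDPSND server channel for this peer: advertise the server's audio
 * formats, register the Activated handler (which starts CoreAudio capture once
 * the client has negotiated a format), and initialize the channel.
 */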
BOOL mf_peer_rdpsnd_init(mfPeerContext* context)
{
    context->rdpsnd = rdpsnd_server_context_new(context->vcm);
    context->rdpsnd->rdpcontext = &context->_p;
    context->rdpsnd->data = context;
    context->rdpsnd->num_server_formats =
        server_rdpsnd_get_formats(&context->rdpsnd->server_formats);

    if (context->rdpsnd->num_server_formats > 0)
        context->rdpsnd->src_format = &context->rdpsnd->server_formats[0];

    context->rdpsnd->Activated = mf_peer_rdpsnd_activated;
    context->rdpsnd->Initialize(context->rdpsnd, TRUE);
    return TRUE;
}
BOOL mf_peer_rdpsnd_stop()
{
    recorderState.isRunning = false;
    AudioQueueStop(recorderState.queue, true);
    return TRUE;
}
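
/*
 * AudioQueue input callback: called by CoreAudio whenever a capture buffer has
 * been filled. The samples are forwarded to the RDP client and the buffer is
 * re-enqueued for reuse.
 */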
void mf_peer_rdpsnd_input_callback(void* inUserData,
                                   AudioQueueRef inAQ,
                                   AudioQueueBufferRef inBuffer,
                                   const AudioTimeStamp* inStartTime,
                                   UInt32 inNumberPacketDescriptions,
                                   const AudioStreamPacketDescription* inPacketDescs)
{
    OSStatus status;
    AQRecorderState* rState;
    rState = inUserData;

    if (inNumberPacketDescriptions == 0 && rState->dataFormat.mBytesPerPacket != 0)
    {
        inNumberPacketDescriptions = inBuffer->mAudioDataByteSize / rState->dataFormat.mBytesPerPacket;
    }

    if (rState->isRunning == 0)
    {
        return;
    }
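
    /* Forward the captured samples: the byte count divided by mBytesPerFrame (4)
     * gives the number of frames, and the low 16 bits of the tick count serve
     * as the RDPSND timestamp. */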
    rState->snd_context->SendSamples(rState->snd_context, inBuffer->mAudioData,
                                     inBuffer->mAudioDataByteSize / 4,
                                     (UINT16)(GetTickCount() & 0xffff));

    status = AudioQueueEnqueueBuffer(rState->queue, inBuffer, 0, NULL);

    if (status != noErr)
    {
        WLog_DBG(TAG, "AudioQueueEnqueueBuffer() returned status = %"PRId32"", status);
    }
}
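
/*
 * Derive a capture buffer size that holds roughly `seconds` worth of audio:
 * bytes = sample rate * bytes per packet * seconds, clamped to a 0x50000-byte
 * ceiling. For variable-rate formats the maximum packet size is queried from
 * the queue first.
 */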
void mf_rdpsnd_derive_buffer_size(AudioQueueRef audioQueue,
                                  AudioStreamBasicDescription* ASBDescription,
                                  Float64 seconds,
                                  UInt32* outBufferSize)
{
    static const int maxBufferSize = 0x50000;
    int maxPacketSize = ASBDescription->mBytesPerPacket;

    if (maxPacketSize == 0)
    {
        UInt32 maxVBRPacketSize = sizeof(maxPacketSize);
        AudioQueueGetProperty(audioQueue,
                              kAudioQueueProperty_MaximumOutputPacketSize,
                              // in Mac OS X v10.5, instead use
                              //   kAudioConverterPropertyMaximumOutputPacketSize
                              &maxPacketSize,
                              &maxVBRPacketSize);
    }

    Float64 numBytesForTime = ASBDescription->mSampleRate * maxPacketSize * seconds;
    *outBufferSize = (UInt32)(numBytesForTime < maxBufferSize ? numBytesForTime : maxBufferSize);
}