Basic OpenSL ES microphone redirection support; the data conversion is still buggy.

Fixed pulse audio default device name, now working without arguments again.
This commit is contained in:
Armin Novak 2013-09-26 16:05:16 +02:00
parent e03305f18c
commit bd7845e656
6 changed files with 307 additions and 540 deletions

View File

@ -21,6 +21,7 @@
#include "config.h"
#endif
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -554,8 +555,10 @@ int DVCPluginEntry(IDRDYNVC_ENTRY_POINTS* pEntryPoints)
ADDIN_ARGV* args;
AUDIN_PLUGIN* audin;
audin = (AUDIN_PLUGIN*) pEntryPoints->GetPlugin(pEntryPoints, "audin");
assert(pEntryPoints);
assert(pEntryPoints->GetPlugin);
audin = (AUDIN_PLUGIN*) pEntryPoints->GetPlugin(pEntryPoints, "audin");
if (audin == NULL)
{
audin = (AUDIN_PLUGIN*) malloc(sizeof(AUDIN_PLUGIN));
@ -577,26 +580,32 @@ int DVCPluginEntry(IDRDYNVC_ENTRY_POINTS* pEntryPoints)
if (audin->subsystem)
audin_load_device_plugin((IWTSPlugin*) audin, audin->subsystem, args);
#if defined(WITH_PULSE)
if (!audin->device)
{
audin_set_subsystem(audin, "pulse");
audin_set_device_name(audin, "");
audin_load_device_plugin((IWTSPlugin*) audin, audin->subsystem, args);
}
#endif
#if defined(WITH_ALSA)
if (!audin->device)
{
audin_set_subsystem(audin, "alsa");
audin_set_device_name(audin, "default");
audin_load_device_plugin((IWTSPlugin*) audin, audin->subsystem, args);
}
#endif
#if defined(WITH_OPENSLES)
if (!audin->device)
{
audin_set_subsystem(audin, "opensles");
audin_set_device_name(audin, "default");
audin_load_device_plugin((IWTSPlugin*) audin, audin->subsystem, args);
}
#endif
if (audin->device == NULL)
{

View File

@ -15,9 +15,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
define_channel_client_subsystem("audin" "opensl_es" "")
define_channel_client_subsystem("audin" "opensles" "")
set(${MODULE_PREFIX}_SRCS
opensl_io.c
audin_opensl_es.c)
include_directories(..)
@ -30,7 +31,9 @@ set_target_properties(${MODULE_NAME} PROPERTIES PREFIX "")
set_complex_link_libraries(VARIABLE ${MODULE_PREFIX}_LIBS
MONOLITHIC ${MONOLITHIC_BUILD}
MODULE freerdp
MODULES freerdp-codec freerdp-utils)
MODULES freerdp-codec freerdp-utils
${OPENSLES_LIBRARIES}
)
set(${MODULE_PREFIX}_LIBS ${${MODULE_PREFIX}_LIBS} ${OPENSLES_LIBRARIES})

View File

@ -21,6 +21,7 @@
#include "config.h"
#endif
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -36,8 +37,8 @@
#include <SLES/OpenSLES.h>
#include "opensl_io.h"
#include "audin_main.h"
#include "opensl_io.h"
typedef struct _AudinOpenSLESDevice
{
@ -47,16 +48,17 @@ typedef struct _AudinOpenSLESDevice
OPENSL_STREAM *stream;
UINT32 frames_per_packet;
UINT32 target_rate;
UINT32 actual_rate;
UINT32 target_channels;
UINT32 actual_channels;
int bytes_per_channel;
int wformat;
int format;
int block_size;
UINT32 rate;
UINT32 channels;
UINT32 bytes_per_channel;
UINT32 wformat;
UINT32 format;
UINT32 block_size;
FREERDP_DSP_CONTEXT* dsp_context;
AudinReceive receive;
HANDLE thread;
HANDLE stopEvent;
@ -64,124 +66,72 @@ typedef struct _AudinOpenSLESDevice
void* user_data;
} AudinOpenSLESDevice;
/* Resample a block of captured audio to the client's target format, slice it
 * into frames_per_packet-sized chunks, optionally IMA-ADPCM encode each chunk,
 * and hand it to the registered receive callback.
 *
 * src/size: raw captured bytes (actual_rate/actual_channels layout).
 * Returns FALSE when the receive callback requested a stop (or returned
 * failure), TRUE otherwise.
 *
 * NOTE(review): the parameter was declared `count` while the body used `size`;
 * renamed to `size` so the function compiles. Also switched to a BYTE* cursor,
 * since arithmetic on void* is a compiler extension, not standard C. */
static BOOL audin_opensles_thread_receive(AudinOpenSLESDevice* opensles,
	void* src, int size)
{
	int frames;
	int cframes;
	int ret = 0;
	int encoded_size;
	BYTE* encoded_data;
	int rbytes_per_frame;
	int tbytes_per_frame;
	BYTE* data = (BYTE*) src;

	/* bytes per frame as recorded vs. as sent to the server */
	rbytes_per_frame = opensles->actual_channels * opensles->bytes_per_channel;
	tbytes_per_frame = opensles->target_channels * opensles->bytes_per_channel;

	if ((opensles->target_rate == opensles->actual_rate) &&
		(opensles->target_channels == opensles->actual_channels))
	{
		/* formats match: no resampling required */
		frames = size / rbytes_per_frame;
	}
	else
	{
		opensles->dsp_context->resample(opensles->dsp_context, data,
			opensles->bytes_per_channel, opensles->actual_channels,
			opensles->actual_rate, size / rbytes_per_frame,
			opensles->target_channels, opensles->target_rate);
		frames = opensles->dsp_context->resampled_frames;
		DEBUG_DVC("resampled %d frames at %d to %d frames at %d",
			size / rbytes_per_frame, opensles->actual_rate,
			frames, opensles->target_rate);
		size = frames * tbytes_per_frame;
		data = opensles->dsp_context->resampled_buffer;
	}

	while (frames > 0)
	{
		if (WaitForSingleObject(opensles->stopEvent, 0) == WAIT_OBJECT_0)
			break;

		/* copy at most the remainder of the packet currently being filled */
		cframes = opensles->frames_per_packet - opensles->buffer_frames;
		if (cframes > frames)
			cframes = frames;

		CopyMemory(opensles->buffer + opensles->buffer_frames * tbytes_per_frame,
			data, cframes * tbytes_per_frame);
		opensles->buffer_frames += cframes;

		if (opensles->buffer_frames >= opensles->frames_per_packet)
		{
			if (opensles->wformat == WAVE_FORMAT_DVI_ADPCM)
			{
				opensles->dsp_context->encode_ima_adpcm(opensles->dsp_context,
					opensles->buffer, opensles->buffer_frames * tbytes_per_frame,
					opensles->target_channels, opensles->block_size);
				encoded_data = opensles->dsp_context->adpcm_buffer;
				encoded_size = opensles->dsp_context->adpcm_size;
				DEBUG_DVC("encoded %d to %d",
					opensles->buffer_frames * tbytes_per_frame, encoded_size);
			}
			else
			{
				/* pass-through: send the PCM packet as-is */
				encoded_data = opensles->buffer;
				encoded_size = opensles->buffer_frames * tbytes_per_frame;
			}

			/* re-check the stop event right before delivering the packet */
			if (WaitForSingleObject(opensles->stopEvent, 0) == WAIT_OBJECT_0)
				break;
			else
				ret = opensles->receive(encoded_data, encoded_size,
					opensles->user_data);

			opensles->buffer_frames = 0;
			if (!ret)
				break;
		}

		data += cframes * tbytes_per_frame;
		frames -= cframes;
	}

	return (ret) ? TRUE : FALSE;
}
static void* audin_opensles_thread_func(void* arg)
{
float* buffer;
int rbytes_per_frame;
int tbytes_per_frame;
snd_pcm_t* capture_handle = NULL;
union
{
void *v;
short* s;
BYTE *b;
} buffer;
AudinOpenSLESDevice* opensles = (AudinOpenSLESDevice*) arg;
DEBUG_SND("opensles=%p", opensles);
DEBUG_DVC("opensles=%p", opensles);
assert(opensles);
assert(opensles->frames_per_packet > 0);
assert(opensles->dsp_context);
assert(opensles->stopEvent);
assert(opensles->stream);
buffer = (BYTE*) calloc(sizeof(float), opensles->frames_per_packet);
ZeroMemory(buffer, opensles->frames_per_packet);
buffer.v = calloc(sizeof(short), opensles->frames_per_packet);
ZeroMemory(buffer.v, opensles->frames_per_packet);
freerdp_dsp_context_reset_adpcm(opensles->dsp_context);
while (!(WaitForSingleObject(opensles->stopEvent, 0) == WAIT_OBJECT_0))
{
int rc = android_AudioIn(opensles->stream, buffer,
size_t encoded_size;
void *encoded_data;
int rc = android_RecIn(opensles->stream, buffer.s,
opensles->frames_per_packet);
if (rc < 0)
{
DEBUG_WARN("snd_pcm_readi (%s)", snd_strerror(error));
break;
DEBUG_WARN("android_RecIn %d", rc);
}
if (!audin_opensles_thread_receive(opensles, buffer, rc * sizeof(float)))
DEBUG_DVC("Got %d frames from microphone", opensles->frames_per_packet);
if (opensles->format == WAVE_FORMAT_ADPCM)
{
opensles->dsp_context->encode_ms_adpcm(opensles->dsp_context,
buffer.b, opensles->frames_per_packet * sizeof(short),
opensles->channels, opensles->block_size);
encoded_data = opensles->dsp_context->adpcm_buffer;
encoded_size = opensles->dsp_context->adpcm_size;
}
else if (opensles->format == WAVE_FORMAT_DVI_ADPCM)
{
opensles->dsp_context->encode_ima_adpcm(opensles->dsp_context,
buffer.b, opensles->frames_per_packet * sizeof(short),
opensles->channels, opensles->block_size);
encoded_data = opensles->dsp_context->adpcm_buffer;
encoded_size = opensles->dsp_context->adpcm_size;
}
else
{
encoded_data = buffer.v;
encoded_size = opensles->frames_per_packet;
}
rc = opensles->receive(encoded_data, encoded_size, opensles->user_data);
if (!rc)
break;
}
free(buffer);
free(buffer.v);
DEBUG_DVC("thread shutdown.");
@ -195,21 +145,34 @@ static void audin_opensles_free(IAudinDevice* device)
DEBUG_DVC("device=%p", device);
/* The function may have been called out of order,
* ignore duplicate requests. */
if (!opensles)
return;
assert(opensles);
assert(opensles->dsp_context);
assert(!opensles->stream);
freerdp_dsp_context_free(opensles->dsp_context);
free(opensles->device_name);
if (opensles->device_name)
free(opensles->device_name);
free(opensles);
}
static BOOL audin_opensles_format_supported(IAudinDevice* device, audinFormat* format)
{
AudinOpenSLESDevice* opensles = (AudinOpenSLESDevice*) device;
SLResult rc;
DEBUG_DVC("device=%p, format=%p", device, format);
DEBUG_DVC("device=%p, format=%p", opensles, format);
assert(format);
switch (format->wFormatTag)
{
/*
case WAVE_FORMAT_PCM:
if (format->cbSize == 0 &&
(format->nSamplesPerSec <= 48000) &&
@ -219,8 +182,9 @@ static BOOL audin_opensles_format_supported(IAudinDevice* device, audinFormat* f
return TRUE;
}
break;
case WAVE_FORMAT_DVI_ADPCM:
*/
case WAVE_FORMAT_ADPCM:
// case WAVE_FORMAT_DVI_ADPCM:
if ((format->nSamplesPerSec <= 48000) &&
(format->wBitsPerSample == 4) &&
(format->nChannels == 1 || format->nChannels == 2))
@ -242,67 +206,97 @@ static void audin_opensles_set_format(IAudinDevice* device,
DEBUG_DVC("device=%p, format=%p, FramesPerPacket=%d",
device, format, FramesPerPacket);
opensles->target_rate = format->nSamplesPerSec;
opensles->actual_rate = format->nSamplesPerSec;
opensles->target_channels = format->nChannels;
opensles->actual_channels = format->nChannels;
assert(format);
/* The function may have been called out of order, ignore
* requests before the device is available. */
if (!opensles)
return;
switch (format->wFormatTag)
{
case WAVE_FORMAT_PCM:
opensles->frames_per_packet = FramesPerPacket;
switch (format->wBitsPerSample)
{
case 4:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 1;
break;
case 8:
opensles->format = SND_PCM_FORMAT_S8;
opensles->format = WAVE_FORMAT_PCM;
opensles->bytes_per_channel = 1;
break;
case 16:
opensles->format = SND_PCM_FORMAT_S16_LE;
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 2;
break;
}
break;
case WAVE_FORMAT_DVI_ADPCM:
opensles->format = SND_PCM_FORMAT_S16_LE;
case WAVE_FORMAT_ADPCM:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 2;
bs = (format->nBlockAlign - 4 * format->nChannels) * 4;
opensles->frames_per_packet =
(opensles->frames_per_packet * format->nChannels * 2 /
(FramesPerPacket * format->nChannels * 2 /
bs + 1) * bs / (format->nChannels * 2);
DEBUG_DVC("aligned FramesPerPacket=%d",
opensles->frames_per_packet);
break;
case WAVE_FORMAT_DVI_ADPCM:
opensles->format = WAVE_FORMAT_DVI_ADPCM;
opensles->bytes_per_channel = 2;
bs = (format->nBlockAlign - 4 * format->nChannels) * 4;
opensles->frames_per_packet =
(FramesPerPacket * format->nChannels * 2 /
bs + 1) * bs / (format->nChannels * 2);
break;
default:
DEBUG_WARN("Unsupported fromat %08X requested, ignoring.",
format->wFormatTag);
return;
}
opensles->rate = format->nSamplesPerSec;
opensles->channels = format->nChannels;
opensles->wformat = format->wFormatTag;
opensles->block_size = format->nBlockAlign;
if (opensles->stream)
{
android_CloseRecDevice(opensles->stream);
opensles->stream = android_OpenRecDevice(opensles->device_name,
opensles->rate, opensles->channels,
opensles->frames_per_packet);
}
}
static int audin_opensles_open(IAudinDevice* device, AudinReceive receive,
static void audin_opensles_open(IAudinDevice* device, AudinReceive receive,
void* user_data)
{
int status = 0;
int rbytes_per_frame;
int tbytes_per_frame;
AudinOpenSLESDevice* opensles = (AudinOpenSLESDevice*) device;
DEBUG_DVC("device=%p, receive=%d, user_data=%p", device, receive, user_data);
opensles->stream = android_OpenAudioDevice(opensles->target_rate,
opensles->target_channels, 0, opensles->frames_per_packet);
assert(opensles);
/* The function may have been called out of order,
* ignore duplicate open requests. */
if(opensles->stream)
return;
opensles->stream = android_OpenRecDevice(
opensles->device_name,
opensles->rate,
opensles->channels, opensles->frames_per_packet);
assert(opensles->stream);
opensles->receive = receive;
opensles->user_data = user_data;
rbytes_per_frame = opensles->actual_channels * opensles->bytes_per_channel;
tbytes_per_frame = opensles->target_channels * opensles->bytes_per_channel;
opensles->buffer =
(BYTE*) malloc(tbytes_per_frame * opensles->frames_per_packet);
ZeroMemory(opensles->buffer,
tbytes_per_frame * opensles->frames_per_packet);
opensles->buffer_frames = 0;
opensles->stopEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
opensles->thread = CreateThread(NULL, 0,
(LPTHREAD_START_ROUTINE) audin_opensles_thread_func,
@ -314,22 +308,33 @@ static void audin_opensles_close(IAudinDevice* device)
AudinOpenSLESDevice* opensles = (AudinOpenSLESDevice*) device;
DEBUG_DVC("device=%p", device);
assert(opensles);
/* The function may have been called out of order,
* ignore duplicate requests. */
if (!opensles->stopEvent)
return;
assert(opensles->stopEvent);
assert(opensles->thread);
assert(opensles->stream);
SetEvent(opensles->stopEvent);
WaitForSingleObject(opensles->thread, INFINITE);
CloseHandle(opensles->stopEvent);
CloseHandle(opensles->thread);
android_CloseAudioDevice(opensles->stream);
android_CloseRecDevice(opensles->stream);
opensles->stopEvent = NULL;
opensles->thread = NULL;
opensles->receive = NULL;
opensles->user_data = NULL;
opsnsles->stream = NULL;
opensles->stream = NULL;
}
static const COMMAND_LINE_ARGUMENT_A audin_opensles_args[] =
static COMMAND_LINE_ARGUMENT_A audin_opensles_args[] =
{
{ "audio-dev", COMMAND_LINE_VALUE_REQUIRED, "<device>",
NULL, NULL, -1, NULL, "audio device name" },
@ -400,17 +405,6 @@ int freerdp_audin_client_subsystem_entry(
audin_opensles_parse_addin_args(opensles, args);
if (!opensles->device_name)
opensles->device_name = _strdup("default");
opensles->frames_per_packet = 128;
opensles->target_rate = 22050;
opensles->actual_rate = 22050;
opensles->format = SND_PCM_FORMAT_S16_LE;
opensles->target_channels = 2;
opensles->actual_channels = 2;
opensles->bytes_per_channel = 2;
opensles->dsp_context = freerdp_dsp_context_new();
pEntryPoints->pRegisterAudinDevice(pEntryPoints->plugin,

View File

@ -27,15 +27,13 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include "audin_main.h"
#include "opensl_io.h"
#define CONV16BIT 32768
#define CONVMYFLT (1./32768.)
static void* createThreadLock(void);
static int waitThreadLock(void *lock);
static void notifyThreadLock(void *lock);
static void destroyThreadLock(void *lock);
static void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context);
static void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context);
// creates the OpenSL ES audio engine
@ -44,128 +42,33 @@ static SLresult openSLCreateEngine(OPENSL_STREAM *p)
SLresult result;
// create engine
result = slCreateEngine(&(p->engineObject), 0, NULL, 0, NULL, NULL);
DEBUG_DVC("engineObject=%p", p->engineObject);
if(result != SL_RESULT_SUCCESS) goto engine_end;
// realize the engine
result = (*p->engineObject)->Realize(p->engineObject, SL_BOOLEAN_FALSE);
DEBUG_DVC("Realize=%d", result);
if(result != SL_RESULT_SUCCESS) goto engine_end;
// get the engine interface, which is needed in order to create other objects
result = (*p->engineObject)->GetInterface(p->engineObject, SL_IID_ENGINE, &(p->engineEngine));
DEBUG_DVC("engineEngine=%p", p->engineEngine);
if(result != SL_RESULT_SUCCESS) goto engine_end;
// get the volume interface - important, this is optional!
result = (*p->engineObject)->GetInterface(p->engineObject, SL_IID_DEVICEVOLUME, &(p->deviceVolume));
DEBUG_DVC("deviceVolume=%p", p->deviceVolume);
if(result != SL_RESULT_SUCCESS)
{
p->deviceVolume = NULL;
result = SL_RESULT_SUCCESS;
}
engine_end:
assert(SL_RESULT_SUCCESS == result);
return result;
}
// opens the OpenSL ES device for output
static SLresult openSLPlayOpen(OPENSL_STREAM *p)
{
SLresult result;
SLuint32 sr = p->sr;
SLuint32 channels = p->outchannels;
if(channels){
// configure audio source
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
switch(sr){
case 8000:
sr = SL_SAMPLINGRATE_8;
break;
case 11025:
sr = SL_SAMPLINGRATE_11_025;
break;
case 16000:
sr = SL_SAMPLINGRATE_16;
break;
case 22050:
sr = SL_SAMPLINGRATE_22_05;
break;
case 24000:
sr = SL_SAMPLINGRATE_24;
break;
case 32000:
sr = SL_SAMPLINGRATE_32;
break;
case 44100:
sr = SL_SAMPLINGRATE_44_1;
break;
case 48000:
sr = SL_SAMPLINGRATE_48;
break;
case 64000:
sr = SL_SAMPLINGRATE_64;
break;
case 88200:
sr = SL_SAMPLINGRATE_88_2;
break;
case 96000:
sr = SL_SAMPLINGRATE_96;
break;
case 192000:
sr = SL_SAMPLINGRATE_192;
break;
default:
return -1;
}
const SLInterfaceID ids[] = {SL_IID_VOLUME};
const SLboolean req[] = {SL_BOOLEAN_FALSE};
result = (*p->engineEngine)->CreateOutputMix(p->engineEngine, &(p->outputMixObject), 1, ids, req);
if(result != SL_RESULT_SUCCESS) goto end_openaudio;
// realize the output mix
result = (*p->outputMixObject)->Realize(p->outputMixObject, SL_BOOLEAN_FALSE);
int speakers;
if(channels > 1)
speakers = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
else speakers = SL_SPEAKER_FRONT_CENTER;
SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM,channels, sr,
SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
speakers, SL_BYTEORDER_LITTLEENDIAN};
SLDataSource audioSrc = {&loc_bufq, &format_pcm};
// configure audio sink
SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, p->outputMixObject};
SLDataSink audioSnk = {&loc_outmix, NULL};
// create audio player
const SLInterfaceID ids1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
const SLboolean req1[] = {SL_BOOLEAN_TRUE};
result = (*p->engineEngine)->CreateAudioPlayer(p->engineEngine, &(p->bqPlayerObject), &audioSrc, &audioSnk,
1, ids1, req1);
if(result != SL_RESULT_SUCCESS) goto end_openaudio;
// realize the player
result = (*p->bqPlayerObject)->Realize(p->bqPlayerObject, SL_BOOLEAN_FALSE);
if(result != SL_RESULT_SUCCESS) goto end_openaudio;
// get the play interface
result = (*p->bqPlayerObject)->GetInterface(p->bqPlayerObject, SL_IID_PLAY, &(p->bqPlayerPlay));
if(result != SL_RESULT_SUCCESS) goto end_openaudio;
// get the buffer queue interface
result = (*p->bqPlayerObject)->GetInterface(p->bqPlayerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
&(p->bqPlayerBufferQueue));
if(result != SL_RESULT_SUCCESS) goto end_openaudio;
// register callback on the buffer queue
result = (*p->bqPlayerBufferQueue)->RegisterCallback(p->bqPlayerBufferQueue, bqPlayerCallback, p);
if(result != SL_RESULT_SUCCESS) goto end_openaudio;
// set the player's state to playing
result = (*p->bqPlayerPlay)->SetPlayState(p->bqPlayerPlay, SL_PLAYSTATE_PLAYING);
end_openaudio:
return result;
}
return SL_RESULT_SUCCESS;
}
// Open the OpenSL ES device for input
static SLresult openSLRecOpen(OPENSL_STREAM *p){
@ -173,6 +76,8 @@ static SLresult openSLRecOpen(OPENSL_STREAM *p){
SLuint32 sr = p->sr;
SLuint32 channels = p->inchannels;
assert(!p->recorderObject);
if(channels){
switch(sr){
@ -226,7 +131,8 @@ static SLresult openSLRecOpen(OPENSL_STREAM *p){
int speakers;
if(channels > 1)
speakers = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
else speakers = SL_SPEAKER_FRONT_CENTER;
else
speakers = SL_SPEAKER_FRONT_CENTER;
SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, channels, sr,
SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
@ -235,32 +141,44 @@ static SLresult openSLRecOpen(OPENSL_STREAM *p){
// create audio recorder
// (requires the RECORD_AUDIO permission)
const SLInterfaceID id[1] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
const SLboolean req[1] = {SL_BOOLEAN_TRUE};
result = (*p->engineEngine)->CreateAudioRecorder(p->engineEngine, &(p->recorderObject), &audioSrc,
&audioSnk, 1, id, req);
const SLInterfaceID id[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
const SLboolean req[] = {SL_BOOLEAN_TRUE};
result = (*p->engineEngine)->CreateAudioRecorder(p->engineEngine,
&(p->recorderObject), &audioSrc, &audioSnk, 1, id, req);
DEBUG_DVC("p->recorderObject=%p", p->recorderObject);
assert(!result);
if (SL_RESULT_SUCCESS != result) goto end_recopen;
// realize the audio recorder
result = (*p->recorderObject)->Realize(p->recorderObject, SL_BOOLEAN_FALSE);
DEBUG_DVC("Realize=%d", result);
assert(!result);
if (SL_RESULT_SUCCESS != result) goto end_recopen;
// get the record interface
result = (*p->recorderObject)->GetInterface(p->recorderObject, SL_IID_RECORD, &(p->recorderRecord));
result = (*p->recorderObject)->GetInterface(p->recorderObject,
SL_IID_RECORD, &(p->recorderRecord));
DEBUG_DVC("p->recorderRecord=%p", p->recorderRecord);
assert(!result);
if (SL_RESULT_SUCCESS != result) goto end_recopen;
// get the buffer queue interface
result = (*p->recorderObject)->GetInterface(p->recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
result = (*p->recorderObject)->GetInterface(p->recorderObject,
SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
&(p->recorderBufferQueue));
DEBUG_DVC("p->recorderBufferQueue=%p", p->recorderBufferQueue);
assert(!result);
if (SL_RESULT_SUCCESS != result) goto end_recopen;
// register callback on the buffer queue
result = (*p->recorderBufferQueue)->RegisterCallback(p->recorderBufferQueue, bqRecorderCallback,
p);
if (SL_RESULT_SUCCESS != result) goto end_recopen;
result = (*p->recorderRecord)->SetRecordState(p->recorderRecord, SL_RECORDSTATE_RECORDING);
result = (*p->recorderBufferQueue)->RegisterCallback(p->recorderBufferQueue,
bqRecorderCallback, p);
DEBUG_DVC("p->recorderBufferQueue=%p", p->recorderBufferQueue);
assert(!result);
if (SL_RESULT_SUCCESS != result)
goto end_recopen;
end_recopen:
end_recopen:
return result;
}
else return SL_RESULT_SUCCESS;
@ -269,16 +187,9 @@ static SLresult openSLRecOpen(OPENSL_STREAM *p){
}
// close the OpenSL IO and destroy the audio engine
static void openSLDestroyEngine(OPENSL_STREAM *p){
// destroy buffer queue audio player object, and invalidate all associated interfaces
if (p->bqPlayerObject != NULL) {
(*p->bqPlayerObject)->Destroy(p->bqPlayerObject);
p->bqPlayerObject = NULL;
p->bqPlayerPlay = NULL;
p->bqPlayerBufferQueue = NULL;
p->bqPlayerEffectSend = NULL;
}
static void openSLDestroyEngine(OPENSL_STREAM *p)
{
DEBUG_DVC("p=%p", p);
// destroy audio recorder object, and invalidate all associated interfaces
if (p->recorderObject != NULL) {
@ -288,12 +199,6 @@ static void openSLDestroyEngine(OPENSL_STREAM *p){
p->recorderBufferQueue = NULL;
}
// destroy output mix object, and invalidate all associated interfaces
if (p->outputMixObject != NULL) {
(*p->outputMixObject)->Destroy(p->outputMixObject);
p->outputMixObject = NULL;
}
// destroy engine object, and invalidate all associated interfaces
if (p->engineObject != NULL) {
(*p->engineObject)->Destroy(p->engineObject);
@ -304,226 +209,116 @@ static void openSLDestroyEngine(OPENSL_STREAM *p){
}
// open the android audio device for input and/or output
OPENSL_STREAM *android_OpenAudioDevice(int sr, int inchannels, int outchannels, int bufferframes){
// open the android audio device for input
OPENSL_STREAM *android_OpenRecDevice(char *name, int sr, int inchannels,
int bufferframes)
{
OPENSL_STREAM *p;
p = (OPENSL_STREAM *) calloc(sizeof(OPENSL_STREAM),1);
memset(p, 0, sizeof(OPENSL_STREAM));
p->inchannels = inchannels;
p->outchannels = outchannels;
p->sr = sr;
p->inlock = createThreadLock();
p->outlock = createThreadLock();
if((p->outBufSamples = bufferframes*outchannels) != 0) {
if((p->outputBuffer[0] = (short *) calloc(p->outBufSamples, sizeof(short))) == NULL ||
(p->outputBuffer[1] = (short *) calloc(p->outBufSamples, sizeof(short))) == NULL) {
android_CloseAudioDevice(p);
return NULL;
}
}
if((p->inBufSamples = bufferframes*inchannels) != 0){
if((p->inputBuffer[0] = (short *) calloc(p->inBufSamples, sizeof(short))) == NULL ||
(p->inputBuffer[1] = (short *) calloc(p->inBufSamples, sizeof(short))) == NULL){
android_CloseAudioDevice(p);
return NULL;
}
}
p->currentInputIndex = 0;
p->currentOutputBuffer = 0;
p->currentInputIndex = p->inBufSamples;
p->currentInputBuffer = 0;
p->queue = Queue_New(TRUE, -1, -1);
if(openSLCreateEngine(p) != SL_RESULT_SUCCESS) {
android_CloseAudioDevice(p);
android_CloseRecDevice(p);
return NULL;
}
if(openSLRecOpen(p) != SL_RESULT_SUCCESS) {
android_CloseAudioDevice(p);
android_CloseRecDevice(p);
return NULL;
}
if(openSLPlayOpen(p) != SL_RESULT_SUCCESS) {
android_CloseAudioDevice(p);
return NULL;
}
p->buffersize = bufferframes;
notifyThreadLock(p->outlock);
notifyThreadLock(p->inlock);
p->time = 0.;
return p;
}
// close the android audio device
void android_CloseAudioDevice(OPENSL_STREAM *p){
void android_CloseRecDevice(OPENSL_STREAM *p)
{
DEBUG_DVC("p=%p", p);
if (p == NULL)
return;
while (Queue_Count(p->queue) > 0)
{
queue_element *e = Queue_Dequeue(p->queue);
free(e->data);
free(e);
}
if (p->next)
{
free(p->next->data);
free(p->next);
}
Queue_Free(p->queue);
openSLDestroyEngine(p);
if (p->inlock != NULL) {
notifyThreadLock(p->inlock);
destroyThreadLock(p->inlock);
p->inlock = NULL;
}
if (p->outlock != NULL) {
notifyThreadLock(p->outlock);
destroyThreadLock(p->outlock);
p->inlock = NULL;
}
if (p->outputBuffer[0] != NULL) {
free(p->outputBuffer[0]);
p->outputBuffer[0] = NULL;
}
if (p->outputBuffer[1] != NULL) {
free(p->outputBuffer[1]);
p->outputBuffer[1] = NULL;
}
if (p->inputBuffer[0] != NULL) {
free(p->inputBuffer[0]);
p->inputBuffer[0] = NULL;
}
if (p->inputBuffer[1] != NULL) {
free(p->inputBuffer[1]);
p->inputBuffer[1] = NULL;
}
free(p);
}
/* Returns the elapsed stream time in seconds; the counter is advanced by
 * the audio read/write paths while the stream is processing data. */
double android_GetTimestamp(OPENSL_STREAM *p){
	return p->time;
}
// this callback handler is called every time a buffer finishes recording
void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
queue_element *e;
OPENSL_STREAM *p = (OPENSL_STREAM *) context;
notifyThreadLock(p->inlock);
DEBUG_DVC("p=%p", p);
assert(p);
assert(p->queue);
DEBUG_DVC("Signalled");
e = calloc(1, sizeof(queue_element));
e->data = calloc(p->buffersize, sizeof(short));
e->size = p->buffersize;
if (p->next)
Queue_Enqueue(p->queue, p->next);
(*p->recorderBufferQueue)->Enqueue(p->recorderBufferQueue,
e->data, e->size);
p->next = e;
}
/* Pull `size` samples from the recorder's double buffer into `buffer`,
 * converting 16-bit PCM to floats in [-1, 1) via CONVMYFLT. Whenever one
 * buffer half is drained, wait on inlock for the recorder callback, re-queue
 * that half, and switch to the other. Returns the number of samples
 * delivered, or 0 when the stream is NULL or has no input buffers.
 *
 * Fix: the original read p->inBufSamples and p->currentInputIndex in the
 * declarations, i.e. BEFORE the `p == NULL` check — a NULL dereference. */
int android_AudioIn(OPENSL_STREAM *p, float *buffer, int size)
{
	short *inBuffer;
	int i, bufsamps, index;

	if (p == NULL || p->inBufSamples == 0)
		return 0;

	bufsamps = p->inBufSamples;
	index = p->currentInputIndex;
	inBuffer = p->inputBuffer[p->currentInputBuffer];

	for (i = 0; i < size; i++)
	{
		if (index >= bufsamps)
		{
			/* current half exhausted: wait for the callback, hand the half
			 * back to the recorder queue, then flip to the other half */
			waitThreadLock(p->inlock);
			(*p->recorderBufferQueue)->Enqueue(p->recorderBufferQueue,
				inBuffer, bufsamps * sizeof(short));
			p->currentInputBuffer = (p->currentInputBuffer ? 0 : 1);
			index = 0;
			inBuffer = p->inputBuffer[p->currentInputBuffer];
		}
		buffer[i] = (float) inBuffer[index++] * CONVMYFLT;
	}

	p->currentInputIndex = index;
	/* advance the stream clock from the input side only when no output runs */
	if (p->outchannels == 0)
		p->time += (double) size / (p->sr * p->inchannels);
	return i;
}
// this callback handler is called every time a buffer finishes playing
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
int android_RecIn(OPENSL_STREAM *p,short *buffer,int size)
{
OPENSL_STREAM *p = (OPENSL_STREAM *) context;
notifyThreadLock(p->outlock);
queue_element *e;
int rc;
assert(p);
assert(buffer);
assert(size > 0);
/* Initial trigger for the queue. */
if (!p->next)
{
(*p->recorderRecord)->SetRecordState(p->recorderRecord, SL_RECORDSTATE_RECORDING);
bqRecorderCallback(p->recorderBufferQueue, p);
}
e = Queue_Dequeue(p->queue);
if (!e)
return -1;
rc = e->size;
assert(p->buffersize == e->size);
memcpy(buffer, e->data, size > e->size ? e->size : size);
free(e->data);
free(e);
DEBUG_DVC("yay!");
return size;
}
/* Push `size` float samples into the player's double buffer, converting to
 * 16-bit PCM via CONV16BIT. Whenever one buffer half fills, wait on outlock
 * for the player callback, enqueue that half, and switch to the other.
 * Returns the number of samples consumed, or 0 when the stream is NULL or
 * has no output buffers.
 *
 * Fix: the original read p->outBufSamples and p->currentOutputIndex in the
 * declarations, i.e. BEFORE the `p == NULL` check — a NULL dereference. */
int android_AudioOut(OPENSL_STREAM *p, float *buffer, int size)
{
	short *outBuffer;
	int i, bufsamps, index;

	if (p == NULL || p->outBufSamples == 0)
		return 0;

	bufsamps = p->outBufSamples;
	index = p->currentOutputIndex;
	outBuffer = p->outputBuffer[p->currentOutputBuffer];

	for (i = 0; i < size; i++)
	{
		outBuffer[index++] = (short) (buffer[i] * CONV16BIT);
		if (index >= p->outBufSamples)
		{
			/* half full: wait until the player releases a buffer, queue this
			 * half for playback, then flip to the other half */
			waitThreadLock(p->outlock);
			(*p->bqPlayerBufferQueue)->Enqueue(p->bqPlayerBufferQueue,
				outBuffer, bufsamps * sizeof(short));
			p->currentOutputBuffer = (p->currentOutputBuffer ? 0 : 1);
			index = 0;
			outBuffer = p->outputBuffer[p->currentOutputBuffer];
		}
	}

	p->currentOutputIndex = index;
	p->time += (double) size / (p->sr * p->outchannels);
	return i;
}
//----------------------------------------------------------------------
// thread Locks
// to ensure synchronisation between callbacks and processing code
/* Allocate and initialize a threadLock (mutex + condition variable + signal
 * flag). The lock starts in the signalled state so the first wait does not
 * block. Returns NULL if allocation or either init step fails; partially
 * initialized resources are released on the failure paths. */
void* createThreadLock(void)
{
	threadLock* lock = calloc(1, sizeof(threadLock));

	if (!lock)
		return NULL;

	if (pthread_mutex_init(&lock->m, NULL) != 0)
	{
		free(lock);
		return NULL;
	}

	if (pthread_cond_init(&lock->c, NULL) != 0)
	{
		pthread_mutex_destroy(&lock->m);
		free(lock);
		return NULL;
	}

	lock->s = 1;
	return lock;
}
/* Block until the lock's signal flag is set, then consume (clear) it.
 * Classic condition-variable wait loop: the predicate is re-checked after
 * every wakeup to tolerate spurious wakeups. Returns 0 on success.
 *
 * Fix: the function is declared `int` but had no return statement — falling
 * off the end of a non-void function is undefined behavior if the caller
 * uses the value (and `retval` was computed but never used). */
int waitThreadLock(void *lock)
{
	threadLock *p;
	int retval = 0;
	p = (threadLock*) lock;
	pthread_mutex_lock(&(p->m));
	while (!p->s) {
		pthread_cond_wait(&(p->c), &(p->m));
	}
	p->s = (unsigned char) 0;
	pthread_mutex_unlock(&(p->m));
	return retval;
}
/* Set the lock's signal flag and wake one thread blocked in
 * waitThreadLock(). The flag is written under the mutex so the waiter's
 * predicate check is race-free. */
void notifyThreadLock(void *lock)
{
	threadLock *p;
	p = (threadLock*) lock;
	pthread_mutex_lock(&(p->m));
	p->s = (unsigned char) 1;
	pthread_cond_signal(&(p->c));
	pthread_mutex_unlock(&(p->m));
}
/* Tear down a threadLock created by createThreadLock(): signal once so a
 * potentially blocked waiter is released, then destroy the condition
 * variable and mutex and free the storage. Safe to call with NULL.
 *
 * NOTE(review): a waiter woken by the notify may still be inside
 * pthread_cond_wait when the destroy runs — callers must ensure no thread
 * is still using the lock at this point. */
void destroyThreadLock(void *lock)
{
	threadLock *p;
	p = (threadLock*) lock;
	if (p == NULL)
		return;
	notifyThreadLock(p);
	pthread_cond_destroy(&(p->c));
	pthread_mutex_destroy(&(p->m));
	free(p);
}

View File

@ -32,88 +32,57 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <pthread.h>
#include <stdlib.h>
typedef struct threadLock_{
pthread_mutex_t m;
pthread_cond_t c;
unsigned char s;
} threadLock;
#include <winpr/synch.h>
#include <winpr/collections.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct
{
size_t size;
void *data;
} queue_element;
typedef struct opensl_stream {
// engine interfaces
SLObjectItf engineObject;
SLEngineItf engineEngine;
// output mix interfaces
SLObjectItf outputMixObject;
// buffer queue player interfaces
SLObjectItf bqPlayerObject;
SLPlayItf bqPlayerPlay;
SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
SLEffectSendItf bqPlayerEffectSend;
// device interfaces
SLDeviceVolumeItf deviceVolume;
// recorder interfaces
SLObjectItf recorderObject;
SLRecordItf recorderRecord;
SLAndroidSimpleBufferQueueItf recorderBufferQueue;
// buffer indexes
int currentInputIndex;
int currentOutputIndex;
// current buffer half (0, 1)
int currentOutputBuffer;
int currentInputBuffer;
// buffers
short *outputBuffer[2];
short *inputBuffer[2];
// size of buffers
int outBufSamples;
int inBufSamples;
// locks
void* inlock;
void* outlock;
double time;
int inchannels;
int outchannels;
int sr;
unsigned int inchannels;
unsigned int sr;
unsigned int buffersize;
wQueue *queue;
queue_element *next;
} OPENSL_STREAM;
/*
Open the audio device with a given sampling rate (sr), input and output channels and IO buffer size
in frames. Returns a handle to the OpenSL stream
*/
OPENSL_STREAM* android_OpenAudioDevice(int sr, int inchannels, int outchannels, int bufferframes);
OPENSL_STREAM* android_OpenRecDevice(char *name, int sr, int inchannels,
int bufferframes);
/*
Close the audio device
*/
void android_CloseAudioDevice(OPENSL_STREAM *p);
void android_CloseRecDevice(OPENSL_STREAM *p);
/*
Read a buffer from the OpenSL stream *p, of size samples. Returns the number of samples read.
*/
int android_AudioIn(OPENSL_STREAM *p, float *buffer,int size);
/*
Write a buffer to the OpenSL stream *p, of size samples. Returns the number of samples written.
*/
int android_AudioOut(OPENSL_STREAM *p, float *buffer,int size);
/*
Get the current IO block time in seconds
*/
double android_GetTimestamp(OPENSL_STREAM *p);
int android_RecIn(OPENSL_STREAM *p, short *buffer,int size);
#ifdef __cplusplus
};
#endif

View File

@ -410,7 +410,7 @@ static void audin_pulse_open(IAudinDevice* device, AudinReceive receive, void* u
/* 500ms latency */
buffer_attr.fragsize = pa_usec_to_bytes(500000, &pulse->sample_spec);
if (pa_stream_connect_record(pulse->stream,
pulse->device_name[0] ? pulse->device_name : NULL,
pulse->device_name,
&buffer_attr, PA_STREAM_ADJUST_LATENCY) < 0)
{
pa_threaded_mainloop_unlock(pulse->mainloop);
@ -447,7 +447,7 @@ static void audin_pulse_open(IAudinDevice* device, AudinReceive receive, void* u
}
}
COMMAND_LINE_ARGUMENT_A audin_pulse_args[] =
static COMMAND_LINE_ARGUMENT_A audin_pulse_args[] =
{
{ "audio-dev", COMMAND_LINE_VALUE_REQUIRED, "<device>", NULL, NULL, -1, NULL, "audio device name" },
{ NULL, 0, NULL, NULL, NULL, -1, NULL, NULL }
@ -505,9 +505,6 @@ int freerdp_audin_client_subsystem_entry(PFREERDP_AUDIN_DEVICE_ENTRY_POINTS pEnt
audin_pulse_parse_addin_args(pulse, args);
if (!pulse->device_name)
pulse->device_name = _strdup("default");
pulse->dsp_context = freerdp_dsp_context_new();
pulse->mainloop = pa_threaded_mainloop_new();