Initial working microphone redirection for Android.

Added debug messages to the ALSA and PulseAudio microphone backends.
This commit is contained in:
Armin Novak 2013-09-27 11:39:04 +02:00
parent bd7845e656
commit c2d3f60ad0
5 changed files with 96 additions and 63 deletions

View File

@ -165,7 +165,12 @@ static BOOL audin_alsa_thread_receive(AudinALSADevice* alsa, BYTE* src, int size
if (WaitForSingleObject(alsa->stopEvent, 0) == WAIT_OBJECT_0)
break;
else
{
DEBUG_DVC("encoded %d [%d] to %d [%X]", alsa->buffer_frames,
tbytes_per_frame, encoded_size,
alsa->wformat);
ret = alsa->receive(encoded_data, encoded_size, alsa->user_data);
}
alsa->buffer_frames = 0;

View File

@ -53,7 +53,6 @@ typedef struct _AudinOpenSLESDevice
UINT32 bytes_per_channel;
UINT32 wformat;
UINT32 format;
UINT32 block_size;
@ -84,7 +83,7 @@ static void* audin_opensles_thread_func(void* arg)
assert(opensles->stopEvent);
assert(opensles->stream);
buffer.v = calloc(sizeof(short), opensles->frames_per_packet);
buffer.v = calloc(opensles->bytes_per_channel, opensles->frames_per_packet);
ZeroMemory(buffer.v, opensles->frames_per_packet);
freerdp_dsp_context_reset_adpcm(opensles->dsp_context);
@ -98,23 +97,22 @@ static void* audin_opensles_thread_func(void* arg)
if (rc < 0)
{
DEBUG_WARN("android_RecIn %d", rc);
continue;
}
DEBUG_DVC("Got %d frames from microphone", opensles->frames_per_packet);
if (opensles->format == WAVE_FORMAT_ADPCM)
{
opensles->dsp_context->encode_ms_adpcm(opensles->dsp_context,
buffer.b, opensles->frames_per_packet * sizeof(short),
buffer.b, rc * opensles->bytes_per_channel,
opensles->channels, opensles->block_size);
encoded_data = opensles->dsp_context->adpcm_buffer;
encoded_size = opensles->dsp_context->adpcm_size;
}
else if (opensles->format == WAVE_FORMAT_DVI_ADPCM)
{
opensles->dsp_context->encode_ima_adpcm(opensles->dsp_context,
buffer.b, opensles->frames_per_packet * sizeof(short),
buffer.b, rc * opensles->bytes_per_channel,
opensles->channels, opensles->block_size);
encoded_data = opensles->dsp_context->adpcm_buffer;
@ -123,9 +121,11 @@ static void* audin_opensles_thread_func(void* arg)
else
{
encoded_data = buffer.v;
encoded_size = opensles->frames_per_packet;
encoded_size = rc * opensles->bytes_per_channel;
}
DEBUG_DVC("encoded %d [%d] to %d [%X]", rc,
opensles->bytes_per_channel, encoded_size, opensles->format);
rc = opensles->receive(encoded_data, encoded_size, opensles->user_data);
if (!rc)
break;
@ -172,19 +172,17 @@ static BOOL audin_opensles_format_supported(IAudinDevice* device, audinFormat* f
switch (format->wFormatTag)
{
/*
case WAVE_FORMAT_PCM:
case WAVE_FORMAT_PCM: /* PCM */
if (format->cbSize == 0 &&
(format->nSamplesPerSec <= 48000) &&
(format->wBitsPerSample == 8 || format->wBitsPerSample == 16) &&
(format->nChannels == 1 || format->nChannels == 2))
(format->nChannels >= 1 && format->nChannels <= 2))
{
return TRUE;
}
break;
*/
case WAVE_FORMAT_ADPCM:
// case WAVE_FORMAT_DVI_ADPCM:
// case WAVE_FORMAT_ADPCM: /* IMA ADPCM */
case WAVE_FORMAT_DVI_ADPCM:
if ((format->nSamplesPerSec <= 48000) &&
(format->wBitsPerSample == 4) &&
(format->nChannels == 1 || format->nChannels == 2))
@ -192,6 +190,11 @@ static BOOL audin_opensles_format_supported(IAudinDevice* device, audinFormat* f
return TRUE;
}
break;
default:
DEBUG_DVC("Encoding '%s' [%08X] not supported",
rdpsnd_get_audio_tag_string(format->wFormatTag),
format->wFormatTag);
break;
}
return FALSE;
@ -220,58 +223,45 @@ static void audin_opensles_set_format(IAudinDevice* device,
switch (format->wBitsPerSample)
{
case 4:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 1;
break;
case 8:
opensles->format = WAVE_FORMAT_PCM;
opensles->bytes_per_channel = 1;
break;
case 16:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 2;
break;
}
break;
case WAVE_FORMAT_ADPCM:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 2;
bs = (format->nBlockAlign - 4 * format->nChannels) * 4;
opensles->frames_per_packet =
(FramesPerPacket * format->nChannels * 2 /
bs + 1) * bs / (format->nChannels * 2);
break;
case WAVE_FORMAT_DVI_ADPCM:
opensles->format = WAVE_FORMAT_DVI_ADPCM;
opensles->bytes_per_channel = 2;
bs = (format->nBlockAlign - 4 * format->nChannels) * 4;
opensles->frames_per_packet =
(FramesPerPacket * format->nChannels * 2 /
bs + 1) * bs / (format->nChannels * 2);
break;
case WAVE_FORMAT_ALAW:
case WAVE_FORMAT_MULAW:
opensles->frames_per_packet = FramesPerPacket;
break;
default:
DEBUG_WARN("Unsupported fromat %08X requested, ignoring.",
format->wFormatTag);
DEBUG_WARN("Encoding '%s' [%08X] not supported",
rdpsnd_get_audio_tag_string(format->wFormatTag),
format->wFormatTag);
return;
}
opensles->rate = format->nSamplesPerSec;
opensles->channels = format->nChannels;
opensles->wformat = format->wFormatTag;
opensles->format = format->wFormatTag;
opensles->block_size = format->nBlockAlign;
if (opensles->stream)
{
android_CloseRecDevice(opensles->stream);
opensles->stream = android_OpenRecDevice(opensles->device_name,
opensles->rate, opensles->channels,
opensles->frames_per_packet);
}
DEBUG_DVC("aligned frames_per_packet=%d, block_size=%d",
opensles->frames_per_packet, opensles->block_size);
}
static void audin_opensles_open(IAudinDevice* device, AudinReceive receive,
@ -291,7 +281,9 @@ static void audin_opensles_open(IAudinDevice* device, AudinReceive receive,
opensles->stream = android_OpenRecDevice(
opensles->device_name,
opensles->rate,
opensles->channels, opensles->frames_per_packet);
opensles->channels,
opensles->frames_per_packet,
opensles->bytes_per_channel * 8);
assert(opensles->stream);
opensles->receive = receive;
@ -314,7 +306,10 @@ static void audin_opensles_close(IAudinDevice* device)
/* The function may have been called out of order,
* ignore duplicate requests. */
if (!opensles->stopEvent)
{
DEBUG_WARN("[ERROR] function called without matching open.");
return;
}
assert(opensles->stopEvent);
assert(opensles->thread);

View File

@ -134,9 +134,27 @@ static SLresult openSLRecOpen(OPENSL_STREAM *p){
else
speakers = SL_SPEAKER_FRONT_CENTER;
SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, channels, sr,
SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
speakers, SL_BYTEORDER_LITTLEENDIAN};
SLDataFormat_PCM format_pcm;
format_pcm.formatType = SL_DATAFORMAT_PCM;
format_pcm.numChannels = channels;
format_pcm.samplesPerSec = sr;
format_pcm.channelMask = speakers;
format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
if (16 == p->bits_per_sample)
{
format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
format_pcm.containerSize = 16;
}
else if (8 == p->bits_per_sample)
{
format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_8;
format_pcm.containerSize = 8;
}
else
assert(0);
SLDataSink audioSnk = {&loc_bq, &format_pcm};
// create audio recorder
@ -211,7 +229,7 @@ static void openSLDestroyEngine(OPENSL_STREAM *p)
// open the android audio device for input
OPENSL_STREAM *android_OpenRecDevice(char *name, int sr, int inchannels,
int bufferframes)
int bufferframes, int bits_per_sample)
{
OPENSL_STREAM *p;
@ -221,19 +239,27 @@ OPENSL_STREAM *android_OpenRecDevice(char *name, int sr, int inchannels,
p->inchannels = inchannels;
p->sr = sr;
p->queue = Queue_New(TRUE, -1, -1);
p->buffersize = bufferframes;
p->bits_per_sample = bits_per_sample;
if(openSLCreateEngine(p) != SL_RESULT_SUCCESS) {
if ((p->bits_per_sample != 8) && (p->bits_per_sample != 16))
{
android_CloseRecDevice(p);
return NULL;
}
if(openSLCreateEngine(p) != SL_RESULT_SUCCESS)
{
android_CloseRecDevice(p);
return NULL;
}
if(openSLRecOpen(p) != SL_RESULT_SUCCESS) {
if(openSLRecOpen(p) != SL_RESULT_SUCCESS)
{
android_CloseRecDevice(p);
return NULL;
}
p->buffersize = bufferframes;
return p;
}
@ -245,11 +271,15 @@ void android_CloseRecDevice(OPENSL_STREAM *p)
if (p == NULL)
return;
while (Queue_Count(p->queue) > 0)
if (p->queue)
{
queue_element *e = Queue_Dequeue(p->queue);
free(e->data);
free(e);
while (Queue_Count(p->queue) > 0)
{
queue_element *e = Queue_Dequeue(p->queue);
free(e->data);
free(e);
}
Queue_Free(p->queue);
}
if (p->next)
@ -258,7 +288,6 @@ void android_CloseRecDevice(OPENSL_STREAM *p)
free(p->next);
}
Queue_Free(p->queue);
openSLDestroyEngine(p);
free(p);
@ -276,9 +305,9 @@ void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
assert(p);
assert(p->queue);
DEBUG_DVC("Signalled");
printf("Signalled");
e = calloc(1, sizeof(queue_element));
e->data = calloc(p->buffersize, sizeof(short));
e->data = calloc(p->buffersize, p->bits_per_sample / 8);
e->size = p->buffersize;
if (p->next)
@ -306,19 +335,21 @@ int android_RecIn(OPENSL_STREAM *p,short *buffer,int size)
bqRecorderCallback(p->recorderBufferQueue, p);
}
WaitForSingleObject(p->queue->event, INFINITE);
e = Queue_Dequeue(p->queue);
if (!e)
{
DEBUG_WARN("[ERROR] got e=%p from queue", e);
return -1;
}
rc = e->size;
rc = (e->size < size) ? e->size : size;
assert(p->buffersize == e->size);
memcpy(buffer, e->data, size > e->size ? e->size : size);
memcpy(buffer, e->data, rc * sizeof(short));
free(e->data);
free(e);
DEBUG_DVC("yay!");
return size;
return rc;
}

View File

@ -62,8 +62,9 @@ typedef struct opensl_stream {
SLAndroidSimpleBufferQueueItf recorderBufferQueue;
unsigned int inchannels;
unsigned int sr;
unsigned int sr;
unsigned int buffersize;
unsigned int bits_per_sample;
wQueue *queue;
queue_element *next;
@ -74,7 +75,7 @@ typedef struct opensl_stream {
in frames. Returns a handle to the OpenSL stream
*/
OPENSL_STREAM* android_OpenRecDevice(char *name, int sr, int inchannels,
int bufferframes);
int bufferframes, int bits_per_sample);
/*
Close the audio device
*/

View File

@ -324,8 +324,6 @@ static void audin_pulse_stream_request_callback(pa_stream* stream, size_t length
pulse->sample_spec.channels, pulse->block_size);
encoded_data = pulse->dsp_context->adpcm_buffer;
encoded_size = pulse->dsp_context->adpcm_size;
DEBUG_DVC("encoded %d to %d",
pulse->buffer_frames * pulse->bytes_per_frame, encoded_size);
}
else
{
@ -333,6 +331,9 @@ static void audin_pulse_stream_request_callback(pa_stream* stream, size_t length
encoded_size = pulse->buffer_frames * pulse->bytes_per_frame;
}
DEBUG_DVC("encoded %d [%d] to %d [%X]",
pulse->buffer_frames, pulse->bytes_per_frame, encoded_size,
pulse->format);
ret = pulse->receive(encoded_data, encoded_size, pulse->user_data);
pulse->buffer_frames = 0;
if (!ret)