Initial working microphone redirection for Android.

Added debug messages to the ALSA and PulseAudio microphone backends.
This commit is contained in:
Armin Novak 2013-09-27 11:39:04 +02:00
parent bd7845e656
commit c2d3f60ad0
5 changed files with 96 additions and 63 deletions

View File

@ -165,7 +165,12 @@ static BOOL audin_alsa_thread_receive(AudinALSADevice* alsa, BYTE* src, int size
if (WaitForSingleObject(alsa->stopEvent, 0) == WAIT_OBJECT_0) if (WaitForSingleObject(alsa->stopEvent, 0) == WAIT_OBJECT_0)
break; break;
else else
{
DEBUG_DVC("encoded %d [%d] to %d [%X]", alsa->buffer_frames,
tbytes_per_frame, encoded_size,
alsa->wformat);
ret = alsa->receive(encoded_data, encoded_size, alsa->user_data); ret = alsa->receive(encoded_data, encoded_size, alsa->user_data);
}
alsa->buffer_frames = 0; alsa->buffer_frames = 0;

View File

@ -53,7 +53,6 @@ typedef struct _AudinOpenSLESDevice
UINT32 bytes_per_channel; UINT32 bytes_per_channel;
UINT32 wformat;
UINT32 format; UINT32 format;
UINT32 block_size; UINT32 block_size;
@ -84,7 +83,7 @@ static void* audin_opensles_thread_func(void* arg)
assert(opensles->stopEvent); assert(opensles->stopEvent);
assert(opensles->stream); assert(opensles->stream);
buffer.v = calloc(sizeof(short), opensles->frames_per_packet); buffer.v = calloc(opensles->bytes_per_channel, opensles->frames_per_packet);
ZeroMemory(buffer.v, opensles->frames_per_packet); ZeroMemory(buffer.v, opensles->frames_per_packet);
freerdp_dsp_context_reset_adpcm(opensles->dsp_context); freerdp_dsp_context_reset_adpcm(opensles->dsp_context);
@ -98,23 +97,22 @@ static void* audin_opensles_thread_func(void* arg)
if (rc < 0) if (rc < 0)
{ {
DEBUG_WARN("android_RecIn %d", rc); DEBUG_WARN("android_RecIn %d", rc);
continue;
} }
DEBUG_DVC("Got %d frames from microphone", opensles->frames_per_packet);
if (opensles->format == WAVE_FORMAT_ADPCM) if (opensles->format == WAVE_FORMAT_ADPCM)
{ {
opensles->dsp_context->encode_ms_adpcm(opensles->dsp_context, opensles->dsp_context->encode_ms_adpcm(opensles->dsp_context,
buffer.b, opensles->frames_per_packet * sizeof(short), buffer.b, rc * opensles->bytes_per_channel,
opensles->channels, opensles->block_size); opensles->channels, opensles->block_size);
encoded_data = opensles->dsp_context->adpcm_buffer; encoded_data = opensles->dsp_context->adpcm_buffer;
encoded_size = opensles->dsp_context->adpcm_size; encoded_size = opensles->dsp_context->adpcm_size;
} }
else if (opensles->format == WAVE_FORMAT_DVI_ADPCM) else if (opensles->format == WAVE_FORMAT_DVI_ADPCM)
{ {
opensles->dsp_context->encode_ima_adpcm(opensles->dsp_context, opensles->dsp_context->encode_ima_adpcm(opensles->dsp_context,
buffer.b, opensles->frames_per_packet * sizeof(short), buffer.b, rc * opensles->bytes_per_channel,
opensles->channels, opensles->block_size); opensles->channels, opensles->block_size);
encoded_data = opensles->dsp_context->adpcm_buffer; encoded_data = opensles->dsp_context->adpcm_buffer;
@ -123,9 +121,11 @@ static void* audin_opensles_thread_func(void* arg)
else else
{ {
encoded_data = buffer.v; encoded_data = buffer.v;
encoded_size = opensles->frames_per_packet; encoded_size = rc * opensles->bytes_per_channel;
} }
DEBUG_DVC("encoded %d [%d] to %d [%X]", rc,
opensles->bytes_per_channel, encoded_size, opensles->format);
rc = opensles->receive(encoded_data, encoded_size, opensles->user_data); rc = opensles->receive(encoded_data, encoded_size, opensles->user_data);
if (!rc) if (!rc)
break; break;
@ -172,19 +172,17 @@ static BOOL audin_opensles_format_supported(IAudinDevice* device, audinFormat* f
switch (format->wFormatTag) switch (format->wFormatTag)
{ {
/* case WAVE_FORMAT_PCM: /* PCM */
case WAVE_FORMAT_PCM:
if (format->cbSize == 0 && if (format->cbSize == 0 &&
(format->nSamplesPerSec <= 48000) && (format->nSamplesPerSec <= 48000) &&
(format->wBitsPerSample == 8 || format->wBitsPerSample == 16) && (format->wBitsPerSample == 8 || format->wBitsPerSample == 16) &&
(format->nChannels == 1 || format->nChannels == 2)) (format->nChannels >= 1 && format->nChannels <= 2))
{ {
return TRUE; return TRUE;
} }
break; break;
*/ // case WAVE_FORMAT_ADPCM: /* IMA ADPCM */
case WAVE_FORMAT_ADPCM: case WAVE_FORMAT_DVI_ADPCM:
// case WAVE_FORMAT_DVI_ADPCM:
if ((format->nSamplesPerSec <= 48000) && if ((format->nSamplesPerSec <= 48000) &&
(format->wBitsPerSample == 4) && (format->wBitsPerSample == 4) &&
(format->nChannels == 1 || format->nChannels == 2)) (format->nChannels == 1 || format->nChannels == 2))
@ -192,6 +190,11 @@ static BOOL audin_opensles_format_supported(IAudinDevice* device, audinFormat* f
return TRUE; return TRUE;
} }
break; break;
default:
DEBUG_DVC("Encoding '%s' [%08X] not supported",
rdpsnd_get_audio_tag_string(format->wFormatTag),
format->wFormatTag);
break;
} }
return FALSE; return FALSE;
@ -220,31 +223,18 @@ static void audin_opensles_set_format(IAudinDevice* device,
switch (format->wBitsPerSample) switch (format->wBitsPerSample)
{ {
case 4: case 4:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 1; opensles->bytes_per_channel = 1;
break; break;
case 8: case 8:
opensles->format = WAVE_FORMAT_PCM;
opensles->bytes_per_channel = 1; opensles->bytes_per_channel = 1;
break; break;
case 16: case 16:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 2; opensles->bytes_per_channel = 2;
break; break;
} }
break; break;
case WAVE_FORMAT_ADPCM: case WAVE_FORMAT_ADPCM:
opensles->format = WAVE_FORMAT_ADPCM;
opensles->bytes_per_channel = 2;
bs = (format->nBlockAlign - 4 * format->nChannels) * 4;
opensles->frames_per_packet =
(FramesPerPacket * format->nChannels * 2 /
bs + 1) * bs / (format->nChannels * 2);
break;
case WAVE_FORMAT_DVI_ADPCM: case WAVE_FORMAT_DVI_ADPCM:
opensles->format = WAVE_FORMAT_DVI_ADPCM;
opensles->bytes_per_channel = 2; opensles->bytes_per_channel = 2;
bs = (format->nBlockAlign - 4 * format->nChannels) * 4; bs = (format->nBlockAlign - 4 * format->nChannels) * 4;
@ -252,26 +242,26 @@ static void audin_opensles_set_format(IAudinDevice* device,
(FramesPerPacket * format->nChannels * 2 / (FramesPerPacket * format->nChannels * 2 /
bs + 1) * bs / (format->nChannels * 2); bs + 1) * bs / (format->nChannels * 2);
break; break;
case WAVE_FORMAT_ALAW:
case WAVE_FORMAT_MULAW:
opensles->frames_per_packet = FramesPerPacket;
break;
default: default:
DEBUG_WARN("Unsupported fromat %08X requested, ignoring.", DEBUG_WARN("Encoding '%s' [%08X] not supported",
format->wFormatTag); rdpsnd_get_audio_tag_string(format->wFormatTag),
format->wFormatTag);
return; return;
} }
opensles->rate = format->nSamplesPerSec; opensles->rate = format->nSamplesPerSec;
opensles->channels = format->nChannels; opensles->channels = format->nChannels;
opensles->wformat = format->wFormatTag; opensles->format = format->wFormatTag;
opensles->block_size = format->nBlockAlign; opensles->block_size = format->nBlockAlign;
if (opensles->stream) DEBUG_DVC("aligned frames_per_packet=%d, block_size=%d",
{ opensles->frames_per_packet, opensles->block_size);
android_CloseRecDevice(opensles->stream);
opensles->stream = android_OpenRecDevice(opensles->device_name,
opensles->rate, opensles->channels,
opensles->frames_per_packet);
}
} }
static void audin_opensles_open(IAudinDevice* device, AudinReceive receive, static void audin_opensles_open(IAudinDevice* device, AudinReceive receive,
@ -291,7 +281,9 @@ static void audin_opensles_open(IAudinDevice* device, AudinReceive receive,
opensles->stream = android_OpenRecDevice( opensles->stream = android_OpenRecDevice(
opensles->device_name, opensles->device_name,
opensles->rate, opensles->rate,
opensles->channels, opensles->frames_per_packet); opensles->channels,
opensles->frames_per_packet,
opensles->bytes_per_channel * 8);
assert(opensles->stream); assert(opensles->stream);
opensles->receive = receive; opensles->receive = receive;
@ -314,7 +306,10 @@ static void audin_opensles_close(IAudinDevice* device)
/* The function may have been called out of order, /* The function may have been called out of order,
* ignore duplicate requests. */ * ignore duplicate requests. */
if (!opensles->stopEvent) if (!opensles->stopEvent)
{
DEBUG_WARN("[ERROR] function called without matching open.");
return; return;
}
assert(opensles->stopEvent); assert(opensles->stopEvent);
assert(opensles->thread); assert(opensles->thread);

View File

@ -134,9 +134,27 @@ static SLresult openSLRecOpen(OPENSL_STREAM *p){
else else
speakers = SL_SPEAKER_FRONT_CENTER; speakers = SL_SPEAKER_FRONT_CENTER;
SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2}; SLDataLocator_AndroidSimpleBufferQueue loc_bq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, channels, sr, SLDataFormat_PCM format_pcm;
SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
speakers, SL_BYTEORDER_LITTLEENDIAN}; format_pcm.formatType = SL_DATAFORMAT_PCM;
format_pcm.numChannels = channels;
format_pcm.samplesPerSec = sr;
format_pcm.channelMask = speakers;
format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
if (16 == p->bits_per_sample)
{
format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
format_pcm.containerSize = 16;
}
else if (8 == p->bits_per_sample)
{
format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_8;
format_pcm.containerSize = 8;
}
else
assert(0);
SLDataSink audioSnk = {&loc_bq, &format_pcm}; SLDataSink audioSnk = {&loc_bq, &format_pcm};
// create audio recorder // create audio recorder
@ -211,7 +229,7 @@ static void openSLDestroyEngine(OPENSL_STREAM *p)
// open the android audio device for input // open the android audio device for input
OPENSL_STREAM *android_OpenRecDevice(char *name, int sr, int inchannels, OPENSL_STREAM *android_OpenRecDevice(char *name, int sr, int inchannels,
int bufferframes) int bufferframes, int bits_per_sample)
{ {
OPENSL_STREAM *p; OPENSL_STREAM *p;
@ -221,18 +239,26 @@ OPENSL_STREAM *android_OpenRecDevice(char *name, int sr, int inchannels,
p->inchannels = inchannels; p->inchannels = inchannels;
p->sr = sr; p->sr = sr;
p->queue = Queue_New(TRUE, -1, -1); p->queue = Queue_New(TRUE, -1, -1);
if(openSLCreateEngine(p) != SL_RESULT_SUCCESS) {
android_CloseRecDevice(p);
return NULL;
}
if(openSLRecOpen(p) != SL_RESULT_SUCCESS) {
android_CloseRecDevice(p);
return NULL;
}
p->buffersize = bufferframes; p->buffersize = bufferframes;
p->bits_per_sample = bits_per_sample;
if ((p->bits_per_sample != 8) && (p->bits_per_sample != 16))
{
android_CloseRecDevice(p);
return NULL;
}
if(openSLCreateEngine(p) != SL_RESULT_SUCCESS)
{
android_CloseRecDevice(p);
return NULL;
}
if(openSLRecOpen(p) != SL_RESULT_SUCCESS)
{
android_CloseRecDevice(p);
return NULL;
}
return p; return p;
} }
@ -245,11 +271,15 @@ void android_CloseRecDevice(OPENSL_STREAM *p)
if (p == NULL) if (p == NULL)
return; return;
while (Queue_Count(p->queue) > 0) if (p->queue)
{ {
queue_element *e = Queue_Dequeue(p->queue); while (Queue_Count(p->queue) > 0)
free(e->data); {
free(e); queue_element *e = Queue_Dequeue(p->queue);
free(e->data);
free(e);
}
Queue_Free(p->queue);
} }
if (p->next) if (p->next)
@ -258,7 +288,6 @@ void android_CloseRecDevice(OPENSL_STREAM *p)
free(p->next); free(p->next);
} }
Queue_Free(p->queue);
openSLDestroyEngine(p); openSLDestroyEngine(p);
free(p); free(p);
@ -276,9 +305,9 @@ void bqRecorderCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
assert(p); assert(p);
assert(p->queue); assert(p->queue);
DEBUG_DVC("Signalled"); printf("Signalled");
e = calloc(1, sizeof(queue_element)); e = calloc(1, sizeof(queue_element));
e->data = calloc(p->buffersize, sizeof(short)); e->data = calloc(p->buffersize, p->bits_per_sample / 8);
e->size = p->buffersize; e->size = p->buffersize;
if (p->next) if (p->next)
@ -306,19 +335,21 @@ int android_RecIn(OPENSL_STREAM *p,short *buffer,int size)
bqRecorderCallback(p->recorderBufferQueue, p); bqRecorderCallback(p->recorderBufferQueue, p);
} }
WaitForSingleObject(p->queue->event, INFINITE);
e = Queue_Dequeue(p->queue); e = Queue_Dequeue(p->queue);
if (!e) if (!e)
{
DEBUG_WARN("[ERROR] got e=%p from queue", e);
return -1; return -1;
}
rc = e->size; rc = (e->size < size) ? e->size : size;
assert(p->buffersize == e->size); assert(p->buffersize == e->size);
memcpy(buffer, e->data, size > e->size ? e->size : size); memcpy(buffer, e->data, rc * sizeof(short));
free(e->data); free(e->data);
free(e); free(e);
DEBUG_DVC("yay!"); return rc;
return size;
} }

View File

@ -62,8 +62,9 @@ typedef struct opensl_stream {
SLAndroidSimpleBufferQueueItf recorderBufferQueue; SLAndroidSimpleBufferQueueItf recorderBufferQueue;
unsigned int inchannels; unsigned int inchannels;
unsigned int sr; unsigned int sr;
unsigned int buffersize; unsigned int buffersize;
unsigned int bits_per_sample;
wQueue *queue; wQueue *queue;
queue_element *next; queue_element *next;
@ -74,7 +75,7 @@ typedef struct opensl_stream {
in frames. Returns a handle to the OpenSL stream in frames. Returns a handle to the OpenSL stream
*/ */
OPENSL_STREAM* android_OpenRecDevice(char *name, int sr, int inchannels, OPENSL_STREAM* android_OpenRecDevice(char *name, int sr, int inchannels,
int bufferframes); int bufferframes, int bits_per_sample);
/* /*
Close the audio device Close the audio device
*/ */

View File

@ -324,8 +324,6 @@ static void audin_pulse_stream_request_callback(pa_stream* stream, size_t length
pulse->sample_spec.channels, pulse->block_size); pulse->sample_spec.channels, pulse->block_size);
encoded_data = pulse->dsp_context->adpcm_buffer; encoded_data = pulse->dsp_context->adpcm_buffer;
encoded_size = pulse->dsp_context->adpcm_size; encoded_size = pulse->dsp_context->adpcm_size;
DEBUG_DVC("encoded %d to %d",
pulse->buffer_frames * pulse->bytes_per_frame, encoded_size);
} }
else else
{ {
@ -333,6 +331,9 @@ static void audin_pulse_stream_request_callback(pa_stream* stream, size_t length
encoded_size = pulse->buffer_frames * pulse->bytes_per_frame; encoded_size = pulse->buffer_frames * pulse->bytes_per_frame;
} }
DEBUG_DVC("encoded %d [%d] to %d [%X]",
pulse->buffer_frames, pulse->bytes_per_frame, encoded_size,
pulse->format);
ret = pulse->receive(encoded_data, encoded_size, pulse->user_data); ret = pulse->receive(encoded_data, encoded_size, pulse->user_data);
pulse->buffer_frames = 0; pulse->buffer_frames = 0;
if (!ret) if (!ret)