Merge pull request #116 from kd7tck/newaudio

Redesign audio system to support multiple mix channels
Ray 2016-05-20 09:22:07 +02:00
commit bdb450fccb
5 changed files with 401 additions and 394 deletions

src/audio.c

@ -59,8 +59,9 @@
//----------------------------------------------------------------------------------
// Defines and Macros
//----------------------------------------------------------------------------------
#define MAX_STREAM_BUFFERS       2      // Number of buffers for each alSource
#define MAX_MIX_CHANNELS         4      // Number of open AL sources
#define MAX_MUSIC_STREAMS        2      // Number of simultaneous music sources

#if defined(PLATFORM_RPI) || defined(PLATFORM_ANDROID)
    // NOTE: On RPI and Android should be lower to avoid frame-stalls
@ -76,37 +77,32 @@
// Types and Structures Definition
//----------------------------------------------------------------------------------

// Used to create custom audio streams that are not bound to a specific file. There can be
// no more than 4 concurrent mix channels in use. This is due to each active stream being tied
// to a dedicated mix channel.
typedef struct MixChannel_t {
    unsigned short sampleRate;              // default is 48000
    unsigned char channels;                 // 1=mono, 2=stereo
    unsigned char mixChannel;               // 0-3 or mixA-mixD, each mix channel can receive up to one dedicated audio stream
    bool floatingPoint;                     // if false then the short datatype is used instead
    bool playing;                           // false if paused

    ALenum alFormat;                        // openAL format specifier
    ALuint alSource;                        // openAL source
    ALuint alBuffer[MAX_STREAM_BUFFERS];    // openAL sample buffers
} MixChannel_t;

// Music type (file streaming from memory)
// NOTE: Anything longer than ~10 seconds should be streamed into a mix channel...
typedef struct Music {
    stb_vorbis *stream;
    jar_xm_context_t *chipctx;              // Stores jar_xm context
    MixChannel_t *mixc;                     // Mix channel

    int totalSamplesLeft;
    float totalLengthSeconds;
    bool loop;
    bool chipTune;                          // True if chiptune is loaded
} Music;

#if defined(AUDIO_STANDALONE)
typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType;
@ -115,23 +111,28 @@ typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType;
//----------------------------------------------------------------------------------
// Global Variables Definition
//----------------------------------------------------------------------------------
static MixChannel_t *mixChannelsActive_g[MAX_MIX_CHANNELS];     // What mix channels are currently active
static bool musicEnabled_g = false;
static Music currentMusic[MAX_MUSIC_STREAMS];                   // Currently loaded music, up to two streams can play at the same time

//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
static Wave LoadWAV(const char *fileName);          // Load WAV file
static Wave LoadOGG(char *fileName);                // Load OGG file
static void UnloadWave(Wave wave);                  // Unload wave data

static bool BufferMusicStream(int index, int numBuffers);   // Fill music buffers with data
static void EmptyMusicStream(int index);                    // Empty music buffers

static MixChannel_t *InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint);   // For streaming into mix channels
static void CloseMixChannel(MixChannel_t *mixc);                                        // Frees a mix channel
static int BufferMixChannel(MixChannel_t *mixc, void *data, int numberElements);        // Pushes more audio data into a mix channel, if NULL is passed it pauses
static int FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer);                  // Fill buffer with zeros, returns number processed
static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len);     // Pass two arrays of the same length in
static void ResampleByteToFloat(char *chars, float *floats, unsigned short len);        // Pass two arrays of the same length in
static int IsMusicStreamReadyForBuffering(int index);                                   // Checks if music buffer is ready to be refilled

#if defined(AUDIO_STANDALONE)
const char *GetExtension(const char *fileName);     // Get the extension for a filename
@ -142,7 +143,7 @@ void TraceLog(int msgType, const char *text, ...); // Outputs a trace log messa
// Module Functions Definition - Audio Device initialization and Closing
//----------------------------------------------------------------------------------

// Initialize audio device and mix channels
void InitAudioDevice(void)
{
    // Open and initialize a device with default settings
@ -158,7 +159,7 @@ void InitAudioDevice(void)
        alcCloseDevice(device);

        TraceLog(ERROR, "Could not setup mix channel");
    }

    TraceLog(INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER));
@ -169,15 +170,19 @@ void InitAudioDevice(void)
    alListener3f(AL_ORIENTATION, 0, 0, -1);
}

// Close the audio device for all contexts
void CloseAudioDevice(void)
{
    for (int index = 0; index < MAX_MUSIC_STREAMS; index++)
    {
        if (currentMusic[index].mixc) StopMusicStream(index);   // Stop music streaming and close current stream
    }

    ALCdevice *device;
    ALCcontext *context = alcGetCurrentContext();

    if (context == NULL) TraceLog(WARNING, "Could not get current mix channel for closing");

    device = alcGetContextsDevice(context);

@ -202,187 +207,141 @@ bool IsAudioDeviceReady(void)
// Module Functions Definition - Custom audio output
//----------------------------------------------------------------------------------

// For streaming into mix channels.
// The mixChannel parameter selects which audio mixing channel to operate on; 0-3 are available and each mix channel can only be used by one stream at a time.
// Example usage: InitMixChannel(48000, 0, 2, true);   // mix channel 0, 48 kHz, stereo, floating point
static MixChannel_t *InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint)
{
    if (mixChannel >= MAX_MIX_CHANNELS) return NULL;
    if (!IsAudioDeviceReady()) InitAudioDevice();

    if (!mixChannelsActive_g[mixChannel])
    {
        MixChannel_t *mixc = (MixChannel_t *)malloc(sizeof(MixChannel_t));
        mixc->sampleRate = sampleRate;
        mixc->channels = channels;
        mixc->mixChannel = mixChannel;
        mixc->floatingPoint = floatingPoint;
        mixChannelsActive_g[mixChannel] = mixc;

        // Setup openAL format
        if (channels == 1)
        {
            if (floatingPoint) mixc->alFormat = AL_FORMAT_MONO_FLOAT32;
            else mixc->alFormat = AL_FORMAT_MONO16;
        }
        else if (channels == 2)
        {
            if (floatingPoint) mixc->alFormat = AL_FORMAT_STEREO_FLOAT32;
            else mixc->alFormat = AL_FORMAT_STEREO16;
        }

        // Create an audio source
        alGenSources(1, &mixc->alSource);
        alSourcef(mixc->alSource, AL_PITCH, 1);
        alSourcef(mixc->alSource, AL_GAIN, 1);
        alSource3f(mixc->alSource, AL_POSITION, 0, 0, 0);
        alSource3f(mixc->alSource, AL_VELOCITY, 0, 0, 0);

        // Create buffers
        alGenBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer);

        // Fill buffers with silence
        int x;
        for (x = 0; x < MAX_STREAM_BUFFERS; x++)
            FillAlBufferWithSilence(mixc, mixc->alBuffer[x]);

        alSourceQueueBuffers(mixc->alSource, MAX_STREAM_BUFFERS, mixc->alBuffer);
        mixc->playing = true;
        alSourcePlay(mixc->alSource);

        return mixc;
    }

    return NULL;
}

// Frees a mix channel: stops the source, unqueues its buffers and releases the OpenAL objects
static void CloseMixChannel(MixChannel_t *mixc)
{
    if (mixc)
    {
        alSourceStop(mixc->alSource);
        mixc->playing = false;

        // Flush out all queued buffers
        ALuint buffer = 0;
        int queued = 0;
        alGetSourcei(mixc->alSource, AL_BUFFERS_QUEUED, &queued);

        while (queued > 0)
        {
            alSourceUnqueueBuffers(mixc->alSource, 1, &buffer);
            queued--;
        }

        // Delete source and buffers
        alDeleteSources(1, &mixc->alSource);
        alDeleteBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer);
        mixChannelsActive_g[mixc->mixChannel] = NULL;
        free(mixc);
        mixc = NULL;
    }
}

// Pushes more audio data into a mix channel, only one buffer per call
// Call "BufferMixChannel(mixc, NULL, 0)" if you want to pause the audio.
// @Returns number of samples that were processed.
static int BufferMixChannel(MixChannel_t *mixc, void *data, int numberElements)
{
    if (!mixc || mixChannelsActive_g[mixc->mixChannel] != mixc) return 0;   // invalid or inactive mix channel

    if (!data || !numberElements)
    {
        // Pauses audio until data is given
        if (mixc->playing)
        {
            alSourcePause(mixc->alSource);
            mixc->playing = false;
        }
        return 0;
    }
    else if (!mixc->playing)
    {
        // Restart audio otherwise
        alSourcePlay(mixc->alSource);
        mixc->playing = true;
    }

    ALuint buffer = 0;

    alSourceUnqueueBuffers(mixc->alSource, 1, &buffer);
    if (!buffer) return 0;

    if (mixc->floatingPoint)
    {
        // Process float buffers
        float *ptr = (float *)data;
        alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(float), mixc->sampleRate);
    }
    else
    {
        // Process short buffers
        short *ptr = (short *)data;
        alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(short), mixc->sampleRate);
    }

    alSourceQueueBuffers(mixc->alSource, 1, &buffer);

    return numberElements;
}

// Fill buffer with zeros, returns number processed
static int FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer)
{
    if (mixc->floatingPoint)
    {
        float pcm[MUSIC_BUFFER_SIZE_FLOAT] = { 0.0f };
        alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), mixc->sampleRate);
        return MUSIC_BUFFER_SIZE_FLOAT;
    }
    else
    {
        short pcm[MUSIC_BUFFER_SIZE_SHORT] = { 0 };
        alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_SHORT*sizeof(short), mixc->sampleRate);
        return MUSIC_BUFFER_SIZE_SHORT;
    }
}
@ -417,6 +376,42 @@ static void ResampleByteToFloat(char *chars, float *floats, unsigned short len)
    }
}

// Used to output raw audio streams, returns negative numbers on error
// If floating point is false the data size is 16bit short, otherwise it is float 32bit
RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint)
{
    int mixIndex;
    for (mixIndex = 0; mixIndex < MAX_MIX_CHANNELS; mixIndex++)   // find empty mix channel slot
    {
        if (mixChannelsActive_g[mixIndex] == NULL) break;
        else if (mixIndex == MAX_MIX_CHANNELS - 1) return -1;     // error: no free mix channel
    }

    if (InitMixChannel(sampleRate, mixIndex, channels, floatingPoint)) return mixIndex;
    else return -2;   // error: mix channel could not be created
}

void CloseRawAudioContext(RawAudioContext ctx)
{
    if (mixChannelsActive_g[ctx]) CloseMixChannel(mixChannelsActive_g[ctx]);
}

int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements)
{
    int numBuffered = 0;

    if (ctx >= 0)
    {
        MixChannel_t *mixc = mixChannelsActive_g[ctx];
        numBuffered = BufferMixChannel(mixc, data, numberElements);
    }

    return numBuffered;
}
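As a caller-side usage sketch of the raw audio path defined above (not part of this commit): it streams a generated sine tone through the new API. The function name, tone parameters and buffer size are illustrative, and "raylib.h" (or "audio.h") is assumed to be included.

// Caller-side sketch: streams a generated tone through the raw audio API.
#include <math.h>

#define TONE_BUFFER_SIZE 4096   // arbitrary example size

void StreamTestTone(void)
{
    RawAudioContext ctx = InitRawAudioContext(48000, 1, false);   // 48 kHz, mono, 16-bit shorts
    if (ctx < 0) return;                                          // negative return values signal an error

    short samples[TONE_BUFFER_SIZE];
    float phase = 0.0f;

    for (int frame = 0; frame < 100; frame++)
    {
        for (int i = 0; i < TONE_BUFFER_SIZE; i++)
        {
            samples[i] = (short)(32000.0f*sinf(phase));           // 440 Hz sine wave
            phase += 2.0f*3.14159265f*440.0f/48000.0f;
        }

        // Only one OpenAL buffer is filled per call; a return of 0 means no buffer was free yet
        BufferRawAudioContext(ctx, samples, TONE_BUFFER_SIZE);
    }

    BufferRawAudioContext(ctx, NULL, 0);    // passing NULL pauses the stream
    CloseRawAudioContext(ctx);
}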
//----------------------------------------------------------------------------------
@ -767,205 +762,215 @@ void SetSoundPitch(Sound sound, float pitch)
//----------------------------------------------------------------------------------

// Start music playing (open stream)
// Returns 0 on success
int PlayMusicStream(int musicIndex, char *fileName)
{
    int mixIndex;

    if (currentMusic[musicIndex].stream || currentMusic[musicIndex].chipctx) return 1;   // error: slot already in use

    for (mixIndex = 0; mixIndex < MAX_MIX_CHANNELS; mixIndex++)   // find empty mix channel slot
    {
        if (mixChannelsActive_g[mixIndex] == NULL) break;
        else if (mixIndex == MAX_MIX_CHANNELS - 1) return 2;      // error: no free mix channel
    }

    if (strcmp(GetExtension(fileName), "ogg") == 0)
    {
        // Open audio stream
        currentMusic[musicIndex].stream = stb_vorbis_open_filename(fileName, NULL, NULL);

        if (currentMusic[musicIndex].stream == NULL)
        {
            TraceLog(WARNING, "[%s] OGG audio file could not be opened", fileName);
            return 3;   // error
        }
        else
        {
            // Get file info
            stb_vorbis_info info = stb_vorbis_get_info(currentMusic[musicIndex].stream);

            TraceLog(INFO, "[%s] Ogg sample rate: %i", fileName, info.sample_rate);
            TraceLog(INFO, "[%s] Ogg channels: %i", fileName, info.channels);
            TraceLog(DEBUG, "[%s] Temp memory required: %i", fileName, info.temp_memory_required);

            currentMusic[musicIndex].loop = true;   // We loop by default
            musicEnabled_g = true;

            currentMusic[musicIndex].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[musicIndex].stream)*info.channels;
            currentMusic[musicIndex].totalLengthSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[musicIndex].stream);

            if (info.channels == 2) currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 2, false);
            else currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 1, false);

            if (!currentMusic[musicIndex].mixc) return 4;   // error: mix channel could not be created

            currentMusic[musicIndex].mixc->playing = true;
        }
    }
    else if (strcmp(GetExtension(fileName), "xm") == 0)
    {
        // Only stereo is supported for xm
        if (!jar_xm_create_context_from_file(&currentMusic[musicIndex].chipctx, 48000, fileName))
        {
            currentMusic[musicIndex].chipTune = true;
            currentMusic[musicIndex].loop = true;
            jar_xm_set_max_loop_count(currentMusic[musicIndex].chipctx, 0);   // infinite number of loops

            currentMusic[musicIndex].totalSamplesLeft = jar_xm_get_remaining_samples(currentMusic[musicIndex].chipctx);
            currentMusic[musicIndex].totalLengthSeconds = ((float)currentMusic[musicIndex].totalSamplesLeft)/48000.0f;
            musicEnabled_g = true;

            TraceLog(INFO, "[%s] XM number of samples: %i", fileName, currentMusic[musicIndex].totalSamplesLeft);
            TraceLog(INFO, "[%s] XM track length: %11.6f sec", fileName, currentMusic[musicIndex].totalLengthSeconds);

            currentMusic[musicIndex].mixc = InitMixChannel(48000, mixIndex, 2, false);

            if (!currentMusic[musicIndex].mixc) return 5;   // error: mix channel could not be created

            currentMusic[musicIndex].mixc->playing = true;
        }
        else
        {
            TraceLog(WARNING, "[%s] XM file could not be opened", fileName);
            return 6;   // error
        }
    }
    else
    {
        TraceLog(WARNING, "[%s] Music extension not recognized, it can't be loaded", fileName);
        return 7;   // error
    }

    return 0;   // normal return
}
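Since PlayMusicStream() now reports failures through its return value rather than only through TraceLog, callers can branch on the result. A minimal sketch (the file path and music index are illustrative, and stdio.h is assumed for printf):

int result = PlayMusicStream(0, "resources/ambient.ogg");   // hypothetical path

// 1: music slot already in use, 2: no free mix channel, 3/6: file could not be opened,
// 4/5: mix channel could not be created, 7: extension not recognized
if (result != 0) printf("Music stream could not be started (error code %i)\n", result);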
// Stop music playing for individual music index of currentMusic array (close stream)
void StopMusicStream(int index)
{
    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
    {
        CloseMixChannel(currentMusic[index].mixc);
        currentMusic[index].mixc = NULL;    // the mix channel was freed, drop the dangling pointer

        if (currentMusic[index].chipTune)
        {
            jar_xm_free_context(currentMusic[index].chipctx);
        }
        else
        {
            stb_vorbis_close(currentMusic[index].stream);
        }

        if (!getMusicStreamCount()) musicEnabled_g = false;

        if (currentMusic[index].stream || currentMusic[index].chipctx)
        {
            currentMusic[index].stream = NULL;
            currentMusic[index].chipctx = NULL;
        }
    }
}

// Get number of music streams loaded at this time, this does not mean they are playing
int getMusicStreamCount(void)
{
    int musicCount = 0;

    for (int musicIndex = 0; musicIndex < MAX_MUSIC_STREAMS; musicIndex++)   // count loaded music slots
        if (currentMusic[musicIndex].stream != NULL || currentMusic[musicIndex].chipTune) musicCount++;

    return musicCount;
}
// Pause music playing
void PauseMusicStream(int index)
{
    // Pause music stream if music available!
    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc && musicEnabled_g)
    {
        TraceLog(INFO, "Pausing music stream");
        alSourcePause(currentMusic[index].mixc->alSource);
        currentMusic[index].mixc->playing = false;
    }
}

// Resume music playing
void ResumeMusicStream(int index)
{
    // Resume music playing... if music available!
    ALenum state;

    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
    {
        alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);

        if (state == AL_PAUSED)
        {
            TraceLog(INFO, "Resuming music stream");
            alSourcePlay(currentMusic[index].mixc->alSource);
            currentMusic[index].mixc->playing = true;
        }
    }
}

// Check if music is playing
bool IsMusicPlaying(int index)
{
    bool playing = false;
    ALint state;

    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
    {
        alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);
        if (state == AL_PLAYING) playing = true;
    }

    return playing;
}

// Set volume for music
void SetMusicVolume(int index, float volume)
{
    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
    {
        alSourcef(currentMusic[index].mixc->alSource, AL_GAIN, volume);
    }
}

// Set pitch for music
void SetMusicPitch(int index, float pitch)
{
    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
    {
        alSourcef(currentMusic[index].mixc->alSource, AL_PITCH, pitch);
    }
}

// Get current music time length (in seconds)
float GetMusicTimeLength(int index)
{
    float totalSeconds;

    if (currentMusic[index].chipTune)
    {
        totalSeconds = currentMusic[index].totalLengthSeconds;
    }
    else
    {
        totalSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[index].stream);
    }

    return totalSeconds;
}

// Get current music time played (in seconds)
float GetMusicTimePlayed(int index)
{
    float secondsPlayed = 0.0f;

    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
    {
        if (currentMusic[index].chipTune)
        {
            uint64_t samples;
            jar_xm_get_position(currentMusic[index].chipctx, NULL, NULL, NULL, &samples);
            secondsPlayed = (float)samples/(48000*currentMusic[index].mixc->channels);   // Not sure if this is the correct value
        }
        else
        {
            int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic[index].stream)*currentMusic[index].mixc->channels;
            int samplesPlayed = totalSamples - currentMusic[index].totalSamplesLeft;
            secondsPlayed = (float)samplesPlayed/(currentMusic[index].mixc->sampleRate*currentMusic[index].mixc->channels);
        }
    }
@ -977,116 +982,118 @@ float GetMusicTimePlayed(void)
//----------------------------------------------------------------------------------

// Fill music buffers with new data from music stream
static bool BufferMusicStream(int index, int numBuffers)
{
    short pcm[MUSIC_BUFFER_SIZE_SHORT];

    int size = 0;         // Total size of data streamed in L+R samples for xm, individual L or R for ogg shorts
    bool active = true;   // We can get more data from stream (not finished)

    if (currentMusic[index].chipTune)   // There is no end of stream for xm files, once the end is reached zeros are generated for non-looped chiptunes
    {
        if (currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT) size = MUSIC_BUFFER_SIZE_SHORT/2;
        else size = currentMusic[index].totalSamplesLeft/2;

        for (int x = 0; x < numBuffers; x++)
        {
            jar_xm_generate_samples_16bit(currentMusic[index].chipctx, pcm, size);   // reads 2*size shorts and moves them to the pcm buffer
            BufferMixChannel(currentMusic[index].mixc, pcm, size*2);
            currentMusic[index].totalSamplesLeft -= size*2;

            if (currentMusic[index].totalSamplesLeft <= 0)
            {
                active = false;
                break;
            }
        }
    }
    else
    {
        if (currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT) size = MUSIC_BUFFER_SIZE_SHORT;
        else size = currentMusic[index].totalSamplesLeft;

        for (int x = 0; x < numBuffers; x++)
        {
            int streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic[index].stream, currentMusic[index].mixc->channels, pcm, size);
            BufferMixChannel(currentMusic[index].mixc, pcm, streamedBytes*currentMusic[index].mixc->channels);
            currentMusic[index].totalSamplesLeft -= streamedBytes*currentMusic[index].mixc->channels;

            if (currentMusic[index].totalSamplesLeft <= 0)
            {
                active = false;
                break;
            }
        }
    }

    return active;
}

// Empty music buffers
static void EmptyMusicStream(int index)
{
    ALuint buffer = 0;
    int queued = 0;

    alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_QUEUED, &queued);

    while (queued > 0)
    {
        alSourceUnqueueBuffers(currentMusic[index].mixc->alSource, 1, &buffer);
        queued--;
    }
}

// Determine if a music stream is ready to be written to (returns number of processed buffers)
static int IsMusicStreamReadyForBuffering(int index)
{
    ALint processed = 0;
    alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_PROCESSED, &processed);
    return processed;
}
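For reference on the arithmetic above: size counts stereo frames in the XM path and individual interleaved shorts in the OGG path. If, for illustration, MUSIC_BUFFER_SIZE_SHORT were 4096, the chiptune branch would generate 2048 L+R frames per buffer and push size*2 = 4096 elements, while the OGG branch would request up to 4096 interleaved shorts and push streamedBytes*channels elements.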
// Update (re-fill) music buffers if data already processed
void UpdateMusicStream(int index)
{
    ALenum state;
    bool active = true;

    if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc && currentMusic[index].mixc->playing && musicEnabled_g)
    {
        int numBuffers = IsMusicStreamReadyForBuffering(index);

        if (!numBuffers) return;   // no processed buffers to refill yet

        active = BufferMusicStream(index, numBuffers);

        if (!active && currentMusic[index].loop)
        {
            if (currentMusic[index].chipTune)
            {
                currentMusic[index].totalSamplesLeft = currentMusic[index].totalLengthSeconds*48000;
            }
            else
            {
                stb_vorbis_seek_start(currentMusic[index].stream);
                currentMusic[index].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[index].stream)*currentMusic[index].mixc->channels;
            }

            active = true;
        }

        if (alGetError() != AL_NO_ERROR) TraceLog(WARNING, "Error buffering data...");

        alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);

        if ((state != AL_PLAYING) && active) alSourcePlay(currentMusic[index].mixc->alSource);

        if (!active) StopMusicStream(index);
    }
}
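A sketch of how the per-index streaming API fits a typical raylib frame loop, assuming the standard core-module window and drawing functions; the music index and file name are illustrative:

if (PlayMusicStream(0, "resources/ambient.ogg") == 0)   // hypothetical file
{
    while (!WindowShouldClose())
    {
        UpdateMusicStream(0);       // refill any processed buffers for stream 0, once per frame

        BeginDrawing();
            ClearBackground(RAYWHITE);
            DrawText("Streaming OGG music...", 40, 40, 20, DARKGRAY);
        EndDrawing();
    }

    StopMusicStream(0);             // closes the stream and frees its mix channel
}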
// Load WAV file into Wave structure // Load WAV file into Wave structure

src/audio.h

@ -61,10 +61,7 @@ typedef struct Wave {
    short channels;
} Wave;

typedef int RawAudioContext;

#ifdef __cplusplus
extern "C" {            // Prevents name mangling of functions
@ -82,13 +79,6 @@ void InitAudioDevice(void); // Initialize au
void CloseAudioDevice(void);                                    // Close the audio device and context (and music stream)
bool IsAudioDeviceReady(void);                                  // True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet

Sound LoadSound(char *fileName);                                // Load sound to memory
Sound LoadSoundFromWave(Wave wave);                             // Load sound to memory from wave data
Sound LoadSoundFromRES(const char *rresName, int resId);        // Load sound to memory from rRES file (raylib Resource)
@ -100,15 +90,24 @@ bool IsSoundPlaying(Sound sound); // Check if a so
void SetSoundVolume(Sound sound, float volume);                 // Set volume for a sound (1.0 is max level)
void SetSoundPitch(Sound sound, float pitch);                   // Set pitch for a sound (1.0 is base level)

int PlayMusicStream(int musicIndex, char *fileName);            // Start music playing (open stream), returns 0 on success
void UpdateMusicStream(int index);                              // Updates buffers for music streaming
void StopMusicStream(int index);                                // Stop music playing (close stream)
void PauseMusicStream(int index);                               // Pause music playing
void ResumeMusicStream(int index);                              // Resume playing paused music
bool IsMusicPlaying(int index);                                 // Check if music is playing
void SetMusicVolume(int index, float volume);                   // Set volume for music (1.0 is max level)
float GetMusicTimeLength(int index);                            // Get music time length (in seconds)
float GetMusicTimePlayed(int index);                            // Get current music time played (in seconds)
int getMusicStreamCount(void);                                  // Get number of loaded music streams
void SetMusicPitch(int index, float pitch);                     // Set pitch for music (1.0 is base level)

// Used to output raw audio streams, returns negative numbers on error
// If floating point is false the data size is 16bit short, otherwise it is float 32bit
RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint);
void CloseRawAudioContext(RawAudioContext ctx);
int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements);   // returns number of elements buffered
#ifdef __cplusplus
}
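For header consumers, a brief sketch of controlling an already playing stream through the new index-based calls (the key binding and values are illustrative):

if (IsKeyPressed(KEY_P))    // toggle pause on a hypothetical key
{
    if (IsMusicPlaying(0)) PauseMusicStream(0);
    else ResumeMusicStream(0);
}

SetMusicVolume(0, 0.8f);    // 1.0f is max level
SetMusicPitch(0, 1.0f);     // 1.0f is base pitch

float progress = GetMusicTimePlayed(0)/GetMusicTimeLength(0);   // normalized playback progress (0.0 to 1.0)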

src/easings.h

@ -18,11 +18,11 @@
*   float speed = 1.f;
*   float currentTime = 0.f;
*   float currentPos[2] = { 0, 0 };
*   float finalPos[2] = { 1, 1 };
*   float startPosition[2] = { currentPos[0], currentPos[1] };   // x,y start positions
*   while (currentPos[0] < finalPos[0])
*   {
*       currentPos[0] = EaseSineIn(currentTime, startPosition[0], finalPos[0] - startPosition[0], speed);
*       currentPos[1] = EaseSineIn(currentTime, startPosition[1], finalPos[1] - startPosition[1], speed);
*       currentTime += diffTime();
*   }
*
* A port of Robert Penner's easing equations to C (http://robertpenner.com/easing/)

src/raylib.h

@ -451,10 +451,7 @@ typedef struct Wave {
    short channels;
} Wave;

typedef int RawAudioContext;

// Texture formats
// NOTE: Support depends on OpenGL version and platform
@ -876,13 +873,6 @@ void InitAudioDevice(void); // Initialize au
void CloseAudioDevice(void);                                    // Close the audio device and context (and music stream)
bool IsAudioDeviceReady(void);                                  // True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet

Sound LoadSound(char *fileName);                                // Load sound to memory
Sound LoadSoundFromWave(Wave wave);                             // Load sound to memory from wave data
Sound LoadSoundFromRES(const char *rresName, int resId);        // Load sound to memory from rRES file (raylib Resource)
@ -894,15 +884,24 @@ bool IsSoundPlaying(Sound sound); // Check if a so
void SetSoundVolume(Sound sound, float volume);                 // Set volume for a sound (1.0 is max level)
void SetSoundPitch(Sound sound, float pitch);                   // Set pitch for a sound (1.0 is base level)

int PlayMusicStream(int musicIndex, char *fileName);            // Start music playing (open stream), returns 0 on success
void UpdateMusicStream(int index);                              // Updates buffers for music streaming
void StopMusicStream(int index);                                // Stop music playing (close stream)
void PauseMusicStream(int index);                               // Pause music playing
void ResumeMusicStream(int index);                              // Resume playing paused music
bool IsMusicPlaying(int index);                                 // Check if music is playing
void SetMusicVolume(int index, float volume);                   // Set volume for music (1.0 is max level)
float GetMusicTimeLength(int index);                            // Get current music time length (in seconds)
float GetMusicTimePlayed(int index);                            // Get current music time played (in seconds)
int getMusicStreamCount(void);                                  // Get number of loaded music streams
void SetMusicPitch(int index, float pitch);                     // Set pitch for music (1.0 is base level)

// Used to output raw audio streams, returns negative numbers on error
// If floating point is false the data size is 16bit short, otherwise it is float 32bit
RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint);
void CloseRawAudioContext(RawAudioContext ctx);
int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements);   // returns number of elements buffered

#ifdef __cplusplus
}

src/windows_compile.bat (new file)

@ -0,0 +1,2 @@
set PATH=C:\raylib\MinGW\bin;%PATH%
mingw32-make