/**********************************************************************************************
*
*   raylib.audio - Basic functionality to work with audio
*
*   FEATURES:
*       - Manage audio device (init/close)
*       - Load and unload audio files
*       - Format wave data (sample rate, size, channels)
*       - Play/Stop/Pause/Resume loaded audio
*       - Manage mixing channels
*       - Manage raw audio context
*
*   CONFIGURATION:
*
*   #define AUDIO_STANDALONE
*       Define to use the module as a standalone library (independently of raylib).
*       Required types and functions are defined in the same module.
*
*   #define USE_OPENAL_BACKEND
*       Use the OpenAL Soft audio backend instead of mini_al.
*
*   #define SUPPORT_FILEFORMAT_WAV
*   #define SUPPORT_FILEFORMAT_OGG
*   #define SUPPORT_FILEFORMAT_XM
*   #define SUPPORT_FILEFORMAT_MOD
*   #define SUPPORT_FILEFORMAT_FLAC
*       Select desired file formats to be supported for loading. Some of those formats are
*       supported by default; to remove support, just comment out the unrequired #define in this module.
*
*   LIMITATIONS (OpenAL Soft backend only):
*       Only up to two channels supported: MONO and STEREO (for additional channels, use AL_EXT_MCFORMATS)
*       Only the following sample sizes supported: 8 bit PCM, 16 bit PCM, 32-bit float PCM (using AL_EXT_FLOAT32)
*
*   DEPENDENCIES:
*       mini_al     - Audio device/context management (https://github.com/dr-soft/mini_al)
*       stb_vorbis  - OGG audio files loading (http://www.nothings.org/stb_vorbis/)
*       jar_xm      - XM module file loading
*       jar_mod     - MOD audio file loading
*       dr_flac     - FLAC audio file loading
*
*       *OpenAL Soft - Audio device management, still used on HTML5 and OSX platforms
*
*   CONTRIBUTORS:
*       David Reid (github: @mackron) (Nov. 2017):
*           - Complete port to mini_al library
*
*       Joshua Reisenauer (github: @kd7tck) (2015)
*           - XM audio module support (jar_xm)
*           - MOD audio module support (jar_mod)
*           - Mixing channels support
*           - Raw audio context support
*
*   LICENSE: zlib/libpng
*
*   Copyright (c) 2014-2018 Ramon Santamaria (@raysan5)
*
*   This software is provided "as-is", without any express or implied warranty. In no event
*   will the authors be held liable for any damages arising from the use of this software.
*
*   Permission is granted to anyone to use this software for any purpose, including commercial
*   applications, and to alter it and redistribute it freely, subject to the following restrictions:
*
*     1. The origin of this software must not be misrepresented; you must not claim that you
*     wrote the original software. If you use this software in a product, an acknowledgment
*     in the product documentation would be appreciated but is not required.
*
*     2. Altered source versions must be plainly marked as such, and must not be misrepresented
*     as being the original software.
*
*     3. This notice may not be removed or altered from any source distribution.
*
**********************************************************************************************/
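
// A minimal usage sketch of the public API implemented in this module (the file path is
// hypothetical; InitWindow()/WindowShouldClose()/BeginDrawing()/EndDrawing() come from raylib core, not from here):
//
//    #include "raylib.h"
//
//    int main(void)
//    {
//        InitWindow(800, 450, "audio example");
//        InitAudioDevice();                              // Initialize audio device and context
//
//        Sound fx = LoadSound("resources/sound.wav");    // Hypothetical file path
//        PlaySound(fx);
//
//        while (!WindowShouldClose()) { BeginDrawing(); EndDrawing(); }
//
//        UnloadSound(fx);
//        CloseAudioDevice();                             // Close audio device (and context)
//        CloseWindow();
//        return 0;
//    }
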
// Default configuration flags (supported features)
//-------------------------------------------------
#define SUPPORT_FILEFORMAT_WAV
#define SUPPORT_FILEFORMAT_OGG
#define SUPPORT_FILEFORMAT_XM
#define SUPPORT_FILEFORMAT_MOD
//-------------------------------------------------

#if !defined(USE_OPENAL_BACKEND)
    #define USE_MINI_AL 1       // Set to 1 to use mini_al; 0 to use OpenAL.
#endif

#if defined(AUDIO_STANDALONE)
    #include "audio.h"
    #include <stdarg.h>         // Required for: va_list, va_start(), vfprintf(), va_end()
#else
    #include "raylib.h"
    #include "utils.h"          // Required for: fopen() Android mapping
#endif

#include "external/mini_al.h"   // Implemented in mini_al.c. Cannot implement this here because it conflicts with Win32 APIs such as CloseWindow(), etc.

#if !defined(USE_MINI_AL) || (USE_MINI_AL == 0)
    #if defined(__APPLE__)
        #include "OpenAL/al.h"      // OpenAL basic header
        #include "OpenAL/alc.h"     // OpenAL context header (like OpenGL, OpenAL requires a context to work)
    #else
        #include "AL/al.h"          // OpenAL basic header
        #include "AL/alc.h"         // OpenAL context header (like OpenGL, OpenAL requires a context to work)
        //#include "AL/alext.h"     // OpenAL extensions header, required for AL_EXT_FLOAT32 and AL_EXT_MCFORMATS
    #endif

    // OpenAL extension: AL_EXT_FLOAT32 - Support for 32bit float samples
    // OpenAL extension: AL_EXT_MCFORMATS - Support for multi-channel formats (Quad, 5.1, 6.1, 7.1)
#endif

#include <stdlib.h>             // Required for: malloc(), free()
#include <string.h>             // Required for: strcmp(), strncmp()
#include <stdio.h>              // Required for: FILE, fopen(), fclose(), fread()

#if defined(SUPPORT_FILEFORMAT_OGG)
    //#define STB_VORBIS_HEADER_ONLY
    #include "external/stb_vorbis.h"    // OGG loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_XM)
    #define JAR_XM_IMPLEMENTATION
    #include "external/jar_xm.h"        // XM loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_MOD)
    #define JAR_MOD_IMPLEMENTATION
    #include "external/jar_mod.h"       // MOD loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_FLAC)
    #define DR_FLAC_IMPLEMENTATION
    #define DR_FLAC_NO_WIN32_IO
    #include "external/dr_flac.h"       // FLAC loading functions
#endif

#ifdef _MSC_VER
    #undef bool
#endif

//----------------------------------------------------------------------------------
// Defines and Macros
//----------------------------------------------------------------------------------
#define MAX_STREAM_BUFFERS          2    // Number of buffers for each audio stream

// NOTE: Music buffer size is defined by number of samples, independent of sample size and channels number
// After some math, considering a sampleRate of 48000, a buffer refill rate of 1/60 seconds
// and a double-buffering system, I concluded that a 4096 samples buffer should be enough
// In case of music stalls, just increase this number
#define AUDIO_BUFFER_SIZE        4096    // PCM data samples (i.e. 16bit, Mono: 8Kb)
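// Rough check of that sizing (informal estimate, using the numbers from the note above):
//     48000 samples/s / 60 refills/s = 800 samples needed per refill
//     x2 (double buffering)          = 1600 samples in flight, worst case
// so AUDIO_BUFFER_SIZE = 4096 leaves more than 2x headroom per refill cycle.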

// Support uncompressed PCM data in 32-bit float IEEE format
// NOTE: This definition is included in "AL/alext.h", but some OpenAL implementations
// might not provide the extensions header (Android), so it's defined here
#if !defined(AL_EXT_float32)
    #define AL_EXT_float32              1
    #define AL_FORMAT_MONO_FLOAT32      0x10010
    #define AL_FORMAT_STEREO_FLOAT32    0x10011
#endif

//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
typedef enum {
    MUSIC_AUDIO_OGG = 0,
    MUSIC_AUDIO_FLAC,
    MUSIC_MODULE_XM,
    MUSIC_MODULE_MOD
} MusicContextType;

// Music type (file streaming from memory)
typedef struct MusicData {
    MusicContextType ctxType;           // Type of music context (OGG, XM, MOD)
#if defined(SUPPORT_FILEFORMAT_OGG)
    stb_vorbis *ctxOgg;                 // OGG audio context
#endif
#if defined(SUPPORT_FILEFORMAT_FLAC)
    drflac *ctxFlac;                    // FLAC audio context
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
    jar_xm_context_t *ctxXm;            // XM chiptune context
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
    jar_mod_context_t ctxMod;           // MOD chiptune context
#endif

    AudioStream stream;                 // Audio stream (double buffering)

    int loopCount;                      // Loops count (times music repeats), -1 means infinite loop
    unsigned int totalSamples;          // Total number of samples
    unsigned int samplesLeft;           // Number of samples left to end
} MusicData;

#if defined(AUDIO_STANDALONE)
typedef enum {
    LOG_INFO = 0,
    LOG_ERROR,
    LOG_WARNING,
    LOG_DEBUG,
    LOG_OTHER
} TraceLogType;
#endif

//----------------------------------------------------------------------------------
// Global Variables Definition
//----------------------------------------------------------------------------------
// ...

//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
#if defined(SUPPORT_FILEFORMAT_WAV)
static Wave LoadWAV(const char *fileName);          // Load WAV file
#endif
#if defined(SUPPORT_FILEFORMAT_OGG)
static Wave LoadOGG(const char *fileName);          // Load OGG file
#endif
#if defined(SUPPORT_FILEFORMAT_FLAC)
static Wave LoadFLAC(const char *fileName);         // Load FLAC file
#endif

#if defined(AUDIO_STANDALONE)
bool IsFileExtension(const char *fileName, const char *ext);    // Check file extension
void TraceLog(int msgType, const char *text, ...);              // Show trace log messages (LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_DEBUG)
#endif

//----------------------------------------------------------------------------------
// Module Functions Definition - Audio Device initialization and Closing
//----------------------------------------------------------------------------------
#if USE_MINI_AL
#define DEVICE_FORMAT       mal_format_f32
#define DEVICE_CHANNELS     2
#define DEVICE_SAMPLE_RATE  44100

typedef enum { AUDIO_BUFFER_USAGE_STATIC = 0, AUDIO_BUFFER_USAGE_STREAM } AudioBufferUsage;

// Audio buffer structure
typedef struct AudioBuffer AudioBuffer;
struct AudioBuffer {
    mal_dsp dsp;                    // For format conversion.
    float volume;
    float pitch;
    bool playing;
    bool paused;
    bool looping;                   // Always true for AudioStreams.
    AudioBufferUsage usage;         // Slightly different logic is used when feeding data to the playback device depending on whether or not data is streamed.
    bool isSubBufferProcessed[2];
    unsigned int frameCursorPos;
    unsigned int bufferSizeInFrames;
    AudioBuffer *next;
    AudioBuffer *prev;
    unsigned char buffer[1];
};

void StopAudioBuffer(AudioBuffer *audioBuffer);

static mal_context context;
static mal_device device;
static mal_bool32 isAudioInitialized = MAL_FALSE;
static float masterVolume = 1;
static mal_mutex audioLock;

static AudioBuffer *firstAudioBuffer = NULL;    // Audio buffers are tracked in a linked list.
static AudioBuffer *lastAudioBuffer = NULL;

static void TrackAudioBuffer(AudioBuffer *audioBuffer)
{
    mal_mutex_lock(&audioLock);

    {
        if (firstAudioBuffer == NULL) firstAudioBuffer = audioBuffer;
        else
        {
            lastAudioBuffer->next = audioBuffer;
            audioBuffer->prev = lastAudioBuffer;
        }

        lastAudioBuffer = audioBuffer;
    }

    mal_mutex_unlock(&audioLock);
}

static void UntrackAudioBuffer(AudioBuffer *audioBuffer)
{
    mal_mutex_lock(&audioLock);

    {
        if (audioBuffer->prev == NULL) firstAudioBuffer = audioBuffer->next;
        else audioBuffer->prev->next = audioBuffer->next;

        if (audioBuffer->next == NULL) lastAudioBuffer = audioBuffer->prev;
        else audioBuffer->next->prev = audioBuffer->prev;

        audioBuffer->prev = NULL;
        audioBuffer->next = NULL;
    }

    mal_mutex_unlock(&audioLock);
}

static void OnLog_MAL(mal_context *pContext, mal_device *pDevice, const char *message)
{
    (void)pContext;
    (void)pDevice;

    TraceLog(LOG_ERROR, message);   // All log messages from mini_al are errors.
}

// This is the main mixing function. Mixing is pretty simple in this project - it's just an accumulation.
//
// framesOut is both an input and an output. It will be initially filled with zeros outside of this function.
static void MixFrames(float *framesOut, const float *framesIn, mal_uint32 frameCount, float localVolume)
{
    for (mal_uint32 iFrame = 0; iFrame < frameCount; ++iFrame)
    {
        for (mal_uint32 iChannel = 0; iChannel < device.channels; ++iChannel)
        {
            float *frameOut = framesOut + (iFrame*device.channels);
            const float *frameIn = framesIn + (iFrame*device.channels);

            frameOut[iChannel] += frameIn[iChannel]*masterVolume*localVolume;
        }
    }
}

static mal_uint32 OnSendAudioDataToDevice(mal_device *pDevice, mal_uint32 frameCount, void *pFramesOut)
{
    // This is where all of the mixing takes place.
    (void)pDevice;

    // Mixing is basically just an accumulation. We need to initialize the output buffer to 0.
    memset(pFramesOut, 0, frameCount*pDevice->channels*mal_get_sample_size_in_bytes(pDevice->format));

    // Using a mutex here for thread-safety which makes things not real-time. This is unlikely to be necessary
    // for this project, but you may want to consider how to avoid it.
    mal_mutex_lock(&audioLock);
    {
        for (AudioBuffer *audioBuffer = firstAudioBuffer; audioBuffer != NULL; audioBuffer = audioBuffer->next)
        {
            // Ignore stopped or paused sounds.
            if (!audioBuffer->playing || audioBuffer->paused) continue;

            mal_uint32 framesRead = 0;
            for (;;)
            {
                if (framesRead > frameCount)
                {
                    TraceLog(LOG_DEBUG, "Mixed too many frames from audio buffer");
                    break;
                }

                if (framesRead == frameCount) break;

                // Just read as much data as we can from the stream.
                mal_uint32 framesToRead = (frameCount - framesRead);
                while (framesToRead > 0)
                {
                    float tempBuffer[1024];     // 512 frames for stereo.

                    mal_uint32 framesToReadRightNow = framesToRead;
                    if (framesToReadRightNow > sizeof(tempBuffer)/sizeof(tempBuffer[0])/DEVICE_CHANNELS)
                    {
                        framesToReadRightNow = sizeof(tempBuffer)/sizeof(tempBuffer[0])/DEVICE_CHANNELS;
                    }

                    // If we're not looping, we need to make sure we flush the internal buffers of the DSP pipeline
                    // to ensure we get the last few samples.
                    mal_bool32 flushDSP = !audioBuffer->looping;

                    mal_uint32 framesJustRead = mal_dsp_read_frames_ex(&audioBuffer->dsp, framesToReadRightNow, tempBuffer, flushDSP);
                    if (framesJustRead > 0)
                    {
                        float *framesOut = (float *)pFramesOut + (framesRead*device.channels);
                        float *framesIn = tempBuffer;
                        MixFrames(framesOut, framesIn, framesJustRead, audioBuffer->volume);

                        framesToRead -= framesJustRead;
                        framesRead += framesJustRead;
                    }

                    // If we weren't able to read all the frames we requested, break.
                    if (framesJustRead < framesToReadRightNow)
                    {
                        if (!audioBuffer->looping)
                        {
                            StopAudioBuffer(audioBuffer);
                            break;
                        }
                        else
                        {
                            // Should never get here, but just for safety, move the cursor position back to the start and continue the loop.
                            audioBuffer->frameCursorPos = 0;
                            continue;
                        }
                    }
                }

                // If for some reason we weren't able to read every frame we'll need to break from the loop. Not doing this could
                // theoretically put us into an infinite loop.
                if (framesToRead > 0) break;
            }
        }
    }
    mal_mutex_unlock(&audioLock);

    return frameCount;  // We always output the same number of frames that were originally requested.
}
#endif

// Initialize audio device
void InitAudioDevice(void)
{
#if USE_MINI_AL
    // Context.
    mal_context_config contextConfig = mal_context_config_init(OnLog_MAL);
    mal_result result = mal_context_init(NULL, 0, &contextConfig, &context);
    if (result != MAL_SUCCESS)
    {
        TraceLog(LOG_ERROR, "Failed to initialize audio context");
        return;
    }

    // Device. Using the default device. Format is floating point because it simplifies mixing.
    mal_device_config deviceConfig = mal_device_config_init(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, NULL, OnSendAudioDataToDevice);

    result = mal_device_init(&context, mal_device_type_playback, NULL, &deviceConfig, NULL, &device);
    if (result != MAL_SUCCESS)
    {
        TraceLog(LOG_ERROR, "Failed to initialize audio playback device");
        mal_context_uninit(&context);
        return;
    }

    // Keep the device running the whole time. May want to consider doing something a bit smarter and only have the device running
    // while there's at least one sound being played.
    result = mal_device_start(&device);
    if (result != MAL_SUCCESS)
    {
        TraceLog(LOG_ERROR, "Failed to start audio playback device");
        mal_device_uninit(&device);
        mal_context_uninit(&context);
        return;
    }

    // Mixing happens on a separate thread which means we need to synchronize. A mutex is used here to keep things simple,
    // but something smarter may be needed later on to keep everything real-time, if that's necessary.
    if (mal_mutex_init(&context, &audioLock) != MAL_SUCCESS)
    {
        TraceLog(LOG_ERROR, "Failed to create mutex for audio mixing");
        mal_device_uninit(&device);
        mal_context_uninit(&context);
        return;
    }

    TraceLog(LOG_INFO, "Audio device initialized successfully: %s", device.name);
    TraceLog(LOG_INFO, "Audio backend: mini_al / %s", mal_get_backend_name(context.backend));
    TraceLog(LOG_INFO, "Audio format: %s -> %s", mal_get_format_name(device.format), mal_get_format_name(device.internalFormat));
    TraceLog(LOG_INFO, "Audio channels: %d -> %d", device.channels, device.internalChannels);
    TraceLog(LOG_INFO, "Audio sample rate: %d -> %d", device.sampleRate, device.internalSampleRate);
    TraceLog(LOG_INFO, "Audio buffer size: %d", device.bufferSizeInFrames);

    isAudioInitialized = MAL_TRUE;
#else
    // Open and initialize a device with default settings
    ALCdevice *device = alcOpenDevice(NULL);

    if (!device) TraceLog(LOG_ERROR, "Audio device could not be opened");
    else
    {
        ALCcontext *context = alcCreateContext(device, NULL);

        if ((context == NULL) || (alcMakeContextCurrent(context) == ALC_FALSE))
        {
            if (context != NULL) alcDestroyContext(context);

            alcCloseDevice(device);

            TraceLog(LOG_ERROR, "Could not initialize audio context");
        }
        else
        {
            TraceLog(LOG_INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER));

            // Listener definition (just for 2D)
            alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);
            alListener3f(AL_VELOCITY, 0.0f, 0.0f, 0.0f);
            alListener3f(AL_ORIENTATION, 0.0f, 0.0f, -1.0f);

            alListenerf(AL_GAIN, 1.0f);

            if (alIsExtensionPresent("AL_EXT_float32")) TraceLog(LOG_INFO, "[EXTENSION] AL_EXT_float32 supported");
            else TraceLog(LOG_INFO, "[EXTENSION] AL_EXT_float32 not supported");
        }
    }
#endif
}

// Close the audio device for all contexts
void CloseAudioDevice(void)
{
#if USE_MINI_AL
    if (!isAudioInitialized)
    {
        TraceLog(LOG_WARNING, "Could not close audio device because it is not currently initialized");
        return;
    }

    mal_mutex_uninit(&audioLock);
    mal_device_uninit(&device);
    mal_context_uninit(&context);
#else
    ALCdevice *device;
    ALCcontext *context = alcGetCurrentContext();

    if (context == NULL) TraceLog(LOG_WARNING, "Could not get current audio context for closing");

    device = alcGetContextsDevice(context);

    alcMakeContextCurrent(NULL);
    alcDestroyContext(context);
    alcCloseDevice(device);
#endif

    TraceLog(LOG_INFO, "Audio device closed successfully");
}

// Check if device has been initialized successfully
bool IsAudioDeviceReady(void)
{
#if USE_MINI_AL
    return isAudioInitialized;
#else
    ALCcontext *context = alcGetCurrentContext();

    if (context == NULL) return false;
    else
    {
        ALCdevice *device = alcGetContextsDevice(context);

        if (device == NULL) return false;
        else return true;
    }
#endif
}

// Set master volume (listener)
void SetMasterVolume(float volume)
{
    if (volume < 0.0f) volume = 0.0f;
    else if (volume > 1.0f) volume = 1.0f;

#if USE_MINI_AL
    masterVolume = volume;
#else
    alListenerf(AL_GAIN, volume);
#endif
}

//----------------------------------------------------------------------------------
// Audio Buffer
//----------------------------------------------------------------------------------
#if USE_MINI_AL
static mal_uint32 AudioBuffer_OnDSPRead(mal_dsp *pDSP, mal_uint32 frameCount, void *pFramesOut, void *pUserData)
{
    AudioBuffer *audioBuffer = (AudioBuffer *)pUserData;

    mal_uint32 subBufferSizeInFrames = audioBuffer->bufferSizeInFrames/2;
    mal_uint32 currentSubBufferIndex = audioBuffer->frameCursorPos/subBufferSizeInFrames;
    if (currentSubBufferIndex > 1)
    {
        TraceLog(LOG_DEBUG, "Frame cursor position moved too far forward in audio stream");
        return 0;
    }

    // Another thread can update the processed state of buffers so we just take a copy here to try and avoid potential synchronization problems.
    bool isSubBufferProcessed[2];
    isSubBufferProcessed[0] = audioBuffer->isSubBufferProcessed[0];
    isSubBufferProcessed[1] = audioBuffer->isSubBufferProcessed[1];

    mal_uint32 frameSizeInBytes = mal_get_sample_size_in_bytes(audioBuffer->dsp.config.formatIn)*audioBuffer->dsp.config.channelsIn;

    // Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0.
    mal_uint32 framesRead = 0;
    for (;;)
    {
        // We break from this loop differently depending on the buffer's usage. For static buffers, we simply fill as much data as we can. For
        // streaming buffers we only fill the halves of the buffer that are processed. Unprocessed halves must keep their audio data intact.
        if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
        {
            if (framesRead >= frameCount) break;
        }
        else
        {
            if (isSubBufferProcessed[currentSubBufferIndex]) break;
        }

        mal_uint32 totalFramesRemaining = (frameCount - framesRead);
        if (totalFramesRemaining == 0) break;

        mal_uint32 framesRemainingInOutputBuffer;
        if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
        {
            framesRemainingInOutputBuffer = audioBuffer->bufferSizeInFrames - audioBuffer->frameCursorPos;
        }
        else
        {
            mal_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames*currentSubBufferIndex;
            framesRemainingInOutputBuffer = subBufferSizeInFrames - (audioBuffer->frameCursorPos - firstFrameIndexOfThisSubBuffer);
        }

        mal_uint32 framesToRead = totalFramesRemaining;
        if (framesToRead > framesRemainingInOutputBuffer) framesToRead = framesRemainingInOutputBuffer;

        memcpy((unsigned char *)pFramesOut + (framesRead*frameSizeInBytes), audioBuffer->buffer + (audioBuffer->frameCursorPos*frameSizeInBytes), framesToRead*frameSizeInBytes);
        audioBuffer->frameCursorPos = (audioBuffer->frameCursorPos + framesToRead)%audioBuffer->bufferSizeInFrames;
        framesRead += framesToRead;

        // If we've read to the end of the buffer, mark it as processed.
        if (framesToRead == framesRemainingInOutputBuffer)
        {
            audioBuffer->isSubBufferProcessed[currentSubBufferIndex] = true;
            isSubBufferProcessed[currentSubBufferIndex] = true;

            currentSubBufferIndex = (currentSubBufferIndex + 1)%2;

            // We need to break from this loop if we're not looping.
            if (!audioBuffer->looping)
            {
                StopAudioBuffer(audioBuffer);
                break;
            }
        }
    }

    // Zero-fill excess.
    mal_uint32 totalFramesRemaining = (frameCount - framesRead);
    if (totalFramesRemaining > 0)
    {
        memset((unsigned char *)pFramesOut + (framesRead*frameSizeInBytes), 0, totalFramesRemaining*frameSizeInBytes);

        // For static buffers we can fill the remaining frames with silence for safety, but we don't want
        // to report those frames as "read". The reason for this is that the caller uses the return value
        // to know whether or not a non-looping sound has finished playback.
        if (audioBuffer->usage != AUDIO_BUFFER_USAGE_STATIC) framesRead += totalFramesRemaining;
    }

    return framesRead;
}

// Create a new audio buffer. Initially filled with silence.
AudioBuffer *CreateAudioBuffer(mal_format format, mal_uint32 channels, mal_uint32 sampleRate, mal_uint32 bufferSizeInFrames, AudioBufferUsage usage)
{
    AudioBuffer *audioBuffer = (AudioBuffer *)calloc(sizeof(*audioBuffer) + (bufferSizeInFrames*channels*mal_get_sample_size_in_bytes(format)), 1);
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "CreateAudioBuffer() : Failed to allocate memory for audio buffer");
        return NULL;
    }

    // We run audio data through a format converter.
    mal_dsp_config dspConfig;
    memset(&dspConfig, 0, sizeof(dspConfig));
    dspConfig.formatIn = format;
    dspConfig.formatOut = DEVICE_FORMAT;
    dspConfig.channelsIn = channels;
    dspConfig.channelsOut = DEVICE_CHANNELS;
    dspConfig.sampleRateIn = sampleRate;
    dspConfig.sampleRateOut = DEVICE_SAMPLE_RATE;

    mal_result resultMAL = mal_dsp_init(&dspConfig, AudioBuffer_OnDSPRead, audioBuffer, &audioBuffer->dsp);
    if (resultMAL != MAL_SUCCESS)
    {
        TraceLog(LOG_ERROR, "CreateAudioBuffer() : Failed to create data conversion pipeline");
        free(audioBuffer);
        return NULL;
    }

    audioBuffer->volume = 1;
    audioBuffer->pitch = 1;
    audioBuffer->playing = 0;
    audioBuffer->paused = 0;
    audioBuffer->looping = 0;
    audioBuffer->usage = usage;
    audioBuffer->bufferSizeInFrames = bufferSizeInFrames;
    audioBuffer->frameCursorPos = 0;

    // Buffers should be marked as processed by default so that a call to UpdateAudioStream() immediately after initialization works correctly.
    audioBuffer->isSubBufferProcessed[0] = true;
    audioBuffer->isSubBufferProcessed[1] = true;

    TrackAudioBuffer(audioBuffer);

    return audioBuffer;
}
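
// A minimal sketch of how this module drives one of these internal buffers (illustrative values,
// not code taken from elsewhere in the module):
//
//    AudioBuffer *ab = CreateAudioBuffer(mal_format_f32, 2, 44100, 4096, AUDIO_BUFFER_USAGE_STATIC);
//    if (ab != NULL)
//    {
//        // ...copy up to 4096 frames of f32 stereo data into ab->buffer...
//        PlayAudioBuffer(ab);        // Restarts playback from frame 0
//        // ...later...
//        StopAudioBuffer(ab);
//        DeleteAudioBuffer(ab);      // Untracks the buffer and frees it
//    }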

// Delete an audio buffer.
void DeleteAudioBuffer(AudioBuffer *audioBuffer)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "DeleteAudioBuffer() : No audio buffer");
        return;
    }

    UntrackAudioBuffer(audioBuffer);
    free(audioBuffer);
}

// Check if an audio buffer is playing.
bool IsAudioBufferPlaying(AudioBuffer *audioBuffer)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "IsAudioBufferPlaying() : No audio buffer");
        return false;
    }

    return audioBuffer->playing && !audioBuffer->paused;
}

// Play an audio buffer.
//
// This will restart the buffer from the start. Use PauseAudioBuffer() and ResumeAudioBuffer() if the playback position
// should be maintained.
void PlayAudioBuffer(AudioBuffer *audioBuffer)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "PlayAudioBuffer() : No audio buffer");
        return;
    }

    audioBuffer->playing = true;
    audioBuffer->paused = false;
    audioBuffer->frameCursorPos = 0;
}

// Stop an audio buffer.
void StopAudioBuffer(AudioBuffer *audioBuffer)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "StopAudioBuffer() : No audio buffer");
        return;
    }

    // Don't do anything if the audio buffer is already stopped.
    if (!IsAudioBufferPlaying(audioBuffer)) return;

    audioBuffer->playing = false;
    audioBuffer->paused = false;
    audioBuffer->frameCursorPos = 0;
    audioBuffer->isSubBufferProcessed[0] = true;
    audioBuffer->isSubBufferProcessed[1] = true;
}

// Pause an audio buffer.
void PauseAudioBuffer(AudioBuffer *audioBuffer)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "PauseAudioBuffer() : No audio buffer");
        return;
    }

    audioBuffer->paused = true;
}

// Resume an audio buffer.
void ResumeAudioBuffer(AudioBuffer *audioBuffer)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "ResumeAudioBuffer() : No audio buffer");
        return;
    }

    audioBuffer->paused = false;
}

// Set volume for an audio buffer.
void SetAudioBufferVolume(AudioBuffer *audioBuffer, float volume)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "SetAudioBufferVolume() : No audio buffer");
        return;
    }

    audioBuffer->volume = volume;
}

// Set pitch for an audio buffer.
void SetAudioBufferPitch(AudioBuffer *audioBuffer, float pitch)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "SetAudioBufferPitch() : No audio buffer");
        return;
    }

    audioBuffer->pitch = pitch;

    // Pitching is just an adjustment of the sample rate. Note that this changes the duration of the sound - higher pitches
    // will make the sound faster; lower pitches make it slower. The expression below simplifies to sampleRateOut/pitch.
    mal_uint32 newOutputSampleRate = (mal_uint32)((((float)audioBuffer->dsp.config.sampleRateOut/(float)audioBuffer->dsp.config.sampleRateIn)/pitch)*audioBuffer->dsp.config.sampleRateIn);
    mal_dsp_set_output_sample_rate(&audioBuffer->dsp, newOutputSampleRate);
}
#endif

//----------------------------------------------------------------------------------
// Module Functions Definition - Sounds loading and playing (.WAV)
//----------------------------------------------------------------------------------

// Load wave data from file
Wave LoadWave(const char *fileName)
{
    Wave wave = { 0 };

    if (IsFileExtension(fileName, ".wav")) wave = LoadWAV(fileName);
#if defined(SUPPORT_FILEFORMAT_OGG)
    else if (IsFileExtension(fileName, ".ogg")) wave = LoadOGG(fileName);
#endif
#if defined(SUPPORT_FILEFORMAT_FLAC)
    else if (IsFileExtension(fileName, ".flac")) wave = LoadFLAC(fileName);
#endif
    else TraceLog(LOG_WARNING, "[%s] Audio file format not supported, it can't be loaded", fileName);

    return wave;
}

// Load wave data from raw array data
Wave LoadWaveEx(void *data, int sampleCount, int sampleRate, int sampleSize, int channels)
{
    Wave wave;

    wave.data = data;
    wave.sampleCount = sampleCount;
    wave.sampleRate = sampleRate;
    wave.sampleSize = sampleSize;
    wave.channels = channels;

    // NOTE: Copy wave data to work with; the user is responsible for freeing the input data
    Wave cwave = WaveCopy(wave);

    WaveFormat(&cwave, sampleRate, sampleSize, channels);

    return cwave;
}
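
// A minimal sketch of loading a wave from raw samples (the sine-wave buffer below is just an
// illustrative example, not something this module provides):
//
//    #include <math.h>
//
//    #define RAW_SAMPLE_COUNT  22050     // 0.5 seconds at 44100 Hz, mono
//    static float rawData[RAW_SAMPLE_COUNT];
//
//    for (int i = 0; i < RAW_SAMPLE_COUNT; i++) rawData[i] = sinf(2.0f*3.14159265f*440.0f*i/44100.0f);
//
//    Wave wave = LoadWaveEx(rawData, RAW_SAMPLE_COUNT, 44100, 32, 1);   // 32-bit float samples, 1 channel
//    Sound sound = LoadSoundFromWave(wave);
//    UnloadWave(wave);       // rawData itself stays owned by the caller (LoadWaveEx works on a copy)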

// Load sound from file
// NOTE: The entire file is loaded to memory to be played (no streaming)
Sound LoadSound(const char *fileName)
{
    Wave wave = LoadWave(fileName);

    Sound sound = LoadSoundFromWave(wave);

    UnloadWave(wave);       // Sound is loaded, we can unload wave

    return sound;
}

// Load sound from wave data
// NOTE: Wave data must be freed manually
Sound LoadSoundFromWave(Wave wave)
{
    Sound sound = { 0 };

    if (wave.data != NULL)
    {
#if USE_MINI_AL
        // When using mini_al we need to do our own mixing. To simplify this we need to convert the format of each sound
        // to be consistent with the format used to open the playback device. We can do this two ways:
        //
        //   1) Convert the whole sound in one go at load time (here).
        //   2) Convert the audio data in chunks at mixing time.
        //
        // The first option has been chosen because it offloads the work required for the format conversion to the loading stage.
        // The downside to this is that it uses more memory if the original sound is u8 or s16.
        mal_format formatIn = ((wave.sampleSize == 8)? mal_format_u8 : ((wave.sampleSize == 16)? mal_format_s16 : mal_format_f32));
        mal_uint32 frameCountIn = wave.sampleCount;     // Is wave->sampleCount actually the frame count? That terminology needs to change, if so.

        mal_uint32 frameCount = mal_convert_frames(NULL, DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, NULL, formatIn, wave.channels, wave.sampleRate, frameCountIn);
        if (frameCount == 0) TraceLog(LOG_ERROR, "LoadSoundFromWave() : Failed to get frame count for format conversion");

        AudioBuffer *audioBuffer = CreateAudioBuffer(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, frameCount, AUDIO_BUFFER_USAGE_STATIC);
        if (audioBuffer == NULL) TraceLog(LOG_ERROR, "LoadSoundFromWave() : Failed to create audio buffer");

        frameCount = mal_convert_frames(audioBuffer->buffer, audioBuffer->dsp.config.formatIn, audioBuffer->dsp.config.channelsIn, audioBuffer->dsp.config.sampleRateIn, wave.data, formatIn, wave.channels, wave.sampleRate, frameCountIn);
        if (frameCount == 0) TraceLog(LOG_ERROR, "LoadSoundFromWave() : Format conversion failed");

        sound.audioBuffer = audioBuffer;
#else
        ALenum format = 0;

        // The OpenAL format is worked out by looking at the number of channels and the sample size (bits per sample)
        if (wave.channels == 1)
        {
            switch (wave.sampleSize)
            {
                case 8: format = AL_FORMAT_MONO8; break;
                case 16: format = AL_FORMAT_MONO16; break;
                case 32: format = AL_FORMAT_MONO_FLOAT32; break;    // Requires OpenAL extension: AL_EXT_FLOAT32
                default: TraceLog(LOG_WARNING, "Wave sample size not supported: %i", wave.sampleSize); break;
            }
        }
        else if (wave.channels == 2)
        {
            switch (wave.sampleSize)
            {
                case 8: format = AL_FORMAT_STEREO8; break;
                case 16: format = AL_FORMAT_STEREO16; break;
                case 32: format = AL_FORMAT_STEREO_FLOAT32; break;  // Requires OpenAL extension: AL_EXT_FLOAT32
                default: TraceLog(LOG_WARNING, "Wave sample size not supported: %i", wave.sampleSize); break;
            }
        }
        else TraceLog(LOG_WARNING, "Wave number of channels not supported: %i", wave.channels);

        // Create an audio source
        ALuint source;
        alGenSources(1, &source);       // Generate pointer to audio source

        alSourcef(source, AL_PITCH, 1.0f);
        alSourcef(source, AL_GAIN, 1.0f);
        alSource3f(source, AL_POSITION, 0.0f, 0.0f, 0.0f);
        alSource3f(source, AL_VELOCITY, 0.0f, 0.0f, 0.0f);
        alSourcei(source, AL_LOOPING, AL_FALSE);

        // Convert loaded data to OpenAL buffer
        //----------------------------------------
        ALuint buffer;
        alGenBuffers(1, &buffer);       // Generate pointer to buffer

        unsigned int dataSize = wave.sampleCount*wave.channels*wave.sampleSize/8;   // Size in bytes

        // Upload sound data to buffer
        alBufferData(buffer, format, wave.data, dataSize, wave.sampleRate);

        // Attach sound buffer to source
        alSourcei(source, AL_BUFFER, buffer);

        TraceLog(LOG_INFO, "[SND ID %i][BUFR ID %i] Sound data loaded successfully (%i Hz, %i bit, %s)", source, buffer, wave.sampleRate, wave.sampleSize, (wave.channels == 1)? "Mono" : "Stereo");

        sound.source = source;
        sound.buffer = buffer;
        sound.format = format;
#endif
    }

    return sound;
}

// Unload wave data
void UnloadWave(Wave wave)
{
    if (wave.data != NULL) free(wave.data);

    TraceLog(LOG_INFO, "Unloaded wave data from RAM");
}

// Unload sound
void UnloadSound(Sound sound)
{
#if USE_MINI_AL
    DeleteAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
    alSourceStop(sound.source);

    alDeleteSources(1, &sound.source);
    alDeleteBuffers(1, &sound.buffer);
#endif

    TraceLog(LOG_INFO, "[SND ID %i][BUFR ID %i] Unloaded sound data from RAM", sound.source, sound.buffer);
}

// Update sound buffer with new data
// NOTE: data must match sound.format
void UpdateSound(Sound sound, const void *data, int samplesCount)
{
#if USE_MINI_AL
    AudioBuffer *audioBuffer = (AudioBuffer *)sound.audioBuffer;
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "UpdateSound() : Invalid sound - no audio buffer");
        return;
    }

    StopAudioBuffer(audioBuffer);

    // TODO: May want to lock/unlock this since this data buffer is read at mixing time.
    memcpy(audioBuffer->buffer, data, samplesCount*audioBuffer->dsp.config.channelsIn*mal_get_sample_size_in_bytes(audioBuffer->dsp.config.formatIn));
#else
    ALint sampleRate, sampleSize, channels;
    alGetBufferi(sound.buffer, AL_FREQUENCY, &sampleRate);
    alGetBufferi(sound.buffer, AL_BITS, &sampleSize);       // It could also be retrieved from sound.format
    alGetBufferi(sound.buffer, AL_CHANNELS, &channels);     // It could also be retrieved from sound.format

    TraceLog(LOG_DEBUG, "UpdateSound() : AL_FREQUENCY: %i", sampleRate);
    TraceLog(LOG_DEBUG, "UpdateSound() : AL_BITS: %i", sampleSize);
    TraceLog(LOG_DEBUG, "UpdateSound() : AL_CHANNELS: %i", channels);

    unsigned int dataSize = samplesCount*channels*sampleSize/8;     // Size of data in bytes

    alSourceStop(sound.source);                 // Stop sound
    alSourcei(sound.source, AL_BUFFER, 0);      // Unbind buffer from sound to update
    //alDeleteBuffers(1, &sound.buffer);        // Delete current buffer data
    //alGenBuffers(1, &sound.buffer);           // Generate new buffer

    // Upload new data to sound buffer
    alBufferData(sound.buffer, sound.format, data, dataSize, sampleRate);

    // Attach sound buffer to source again
    alSourcei(sound.source, AL_BUFFER, sound.buffer);
#endif
}

// Play a sound
void PlaySound(Sound sound)
{
#if USE_MINI_AL
    PlayAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
    alSourcePlay(sound.source);     // Play the sound
#endif

    //TraceLog(LOG_INFO, "Playing sound");

    // Find the current position of the sound being played
    // NOTE: Only works when the entire file is in a single buffer
    //int byteOffset;
    //alGetSourcei(sound.source, AL_BYTE_OFFSET, &byteOffset);
    //
    //int sampleRate;
    //alGetBufferi(sound.buffer, AL_FREQUENCY, &sampleRate);    // AL_CHANNELS, AL_BITS (bps)

    //float seconds = (float)byteOffset/sampleRate;             // Number of seconds since the beginning of the sound
    //or
    //float result;
    //alGetSourcef(sound.source, AL_SEC_OFFSET, &result);       // AL_SAMPLE_OFFSET
}

// Pause a sound
void PauseSound(Sound sound)
{
#if USE_MINI_AL
    PauseAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
    alSourcePause(sound.source);
#endif
}

// Resume a paused sound
void ResumeSound(Sound sound)
{
#if USE_MINI_AL
    ResumeAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
    ALenum state;

    alGetSourcei(sound.source, AL_SOURCE_STATE, &state);

    if (state == AL_PAUSED) alSourcePlay(sound.source);
#endif
}

// Stop reproducing a sound
void StopSound(Sound sound)
{
#if USE_MINI_AL
    StopAudioBuffer((AudioBuffer *)sound.audioBuffer);
#else
    alSourceStop(sound.source);
#endif
}

// Check if a sound is playing
bool IsSoundPlaying(Sound sound)
{
#if USE_MINI_AL
    return IsAudioBufferPlaying((AudioBuffer *)sound.audioBuffer);
#else
    bool playing = false;
    ALint state;

    alGetSourcei(sound.source, AL_SOURCE_STATE, &state);
    if (state == AL_PLAYING) playing = true;

    return playing;
#endif
}

// Set volume for a sound
void SetSoundVolume(Sound sound, float volume)
{
#if USE_MINI_AL
    SetAudioBufferVolume((AudioBuffer *)sound.audioBuffer, volume);
#else
    alSourcef(sound.source, AL_GAIN, volume);
#endif
}

// Set pitch for a sound
void SetSoundPitch(Sound sound, float pitch)
{
#if USE_MINI_AL
    SetAudioBufferPitch((AudioBuffer *)sound.audioBuffer, pitch);
#else
    alSourcef(sound.source, AL_PITCH, pitch);
#endif
}
2016-09-08 01:20:06 +03:00
// Convert wave data to desired format
void WaveFormat ( Wave * wave , int sampleRate , int sampleSize , int channels )
{
2018-01-15 15:54:25 +03:00
# if USE_MINI_AL
2017-11-12 07:17:05 +03:00
mal_format formatIn = ( ( wave - > sampleSize = = 8 ) ? mal_format_u8 : ( ( wave - > sampleSize = = 16 ) ? mal_format_s16 : mal_format_f32 ) ) ;
mal_format formatOut = ( ( sampleSize = = 8 ) ? mal_format_u8 : ( ( sampleSize = = 16 ) ? mal_format_s16 : mal_format_f32 ) ) ;
mal_uint32 frameCountIn = wave - > sampleCount ; // Is wave->sampleCount actually the frame count? That terminology needs to change, if so.
mal_uint32 frameCount = mal_convert_frames ( NULL , formatOut , channels , sampleRate , NULL , formatIn , wave - > channels , wave - > sampleRate , frameCountIn ) ;
2017-12-20 13:37:43 +03:00
if ( frameCount = = 0 )
{
2017-11-12 07:17:05 +03:00
TraceLog ( LOG_ERROR , " WaveFormat() : Failed to get frame count for format conversion. " ) ;
return ;
}
2017-12-20 13:37:43 +03:00
void * data = malloc ( frameCount * channels * ( sampleSize / 8 ) ) ;
2017-11-12 07:17:05 +03:00
frameCount = mal_convert_frames ( data , formatOut , channels , sampleRate , wave - > data , formatIn , wave - > channels , wave - > sampleRate , frameCountIn ) ;
2017-12-20 13:37:43 +03:00
if ( frameCount = = 0 )
{
2017-11-12 07:17:05 +03:00
TraceLog ( LOG_ERROR , " WaveFormat() : Format conversion failed. " ) ;
return ;
}
wave - > sampleCount = frameCount ;
wave - > sampleSize = sampleSize ;
wave - > sampleRate = sampleRate ;
wave - > channels = channels ;
free ( wave - > data ) ;
wave - > data = data ;
2018-01-15 15:54:25 +03:00
# else
2016-12-25 03:58:56 +03:00
// Format sample rate
2017-01-19 15:18:04 +03:00
// NOTE: Only supported 22050 <--> 44100
if ( wave - > sampleRate ! = sampleRate )
{
// TODO: Resample wave data (upsampling or downsampling)
2017-01-29 01:02:30 +03:00
// NOTE 1: To downsample, you have to drop samples or average them.
2017-01-19 15:18:04 +03:00
// NOTE 2: To upsample, you have to interpolate new samples.
2017-01-29 01:02:30 +03:00
2017-01-19 15:18:04 +03:00
wave - > sampleRate = sampleRate ;
}
    // Format sample size
    // NOTE: Only supported 8 bit <--> 16 bit <--> 32 bit
    if (wave->sampleSize != sampleSize)
    {
        void *data = malloc(wave->sampleCount*wave->channels*sampleSize/8);

        for (int i = 0; i < wave->sampleCount; i++)
        {
            for (int j = 0; j < wave->channels; j++)
            {
                if (sampleSize == 8)
                {
                    if (wave->sampleSize == 16) ((unsigned char *)data)[wave->channels*i + j] = (unsigned char)(((float)(((short *)wave->data)[wave->channels*i + j])/32767.0f)*256);
                    else if (wave->sampleSize == 32) ((unsigned char *)data)[wave->channels*i + j] = (unsigned char)(((float *)wave->data)[wave->channels*i + j]*127.0f + 127);
                }
                else if (sampleSize == 16)
                {
                    if (wave->sampleSize == 8) ((short *)data)[wave->channels*i + j] = (short)(((float)(((unsigned char *)wave->data)[wave->channels*i + j] - 127)/256.0f)*32767);
                    else if (wave->sampleSize == 32) ((short *)data)[wave->channels*i + j] = (short)((((float *)wave->data)[wave->channels*i + j])*32767);
                }
                else if (sampleSize == 32)
                {
                    if (wave->sampleSize == 8) ((float *)data)[wave->channels*i + j] = (float)(((unsigned char *)wave->data)[wave->channels*i + j] - 127)/256.0f;
                    else if (wave->sampleSize == 16) ((float *)data)[wave->channels*i + j] = (float)(((short *)wave->data)[wave->channels*i + j])/32767.0f;
                }
            }
        }

        wave->sampleSize = sampleSize;
        free(wave->data);
        wave->data = data;
    }

    // Format channels (interlaced mode)
    // NOTE: Only supported mono <--> stereo
    if (wave->channels != channels)
    {
        void *data = malloc(wave->sampleCount*wave->sampleSize/8*channels);

        if ((wave->channels == 1) && (channels == 2))       // mono ---> stereo (duplicate mono information)
        {
            for (int i = 0; i < wave->sampleCount; i++)
            {
                for (int j = 0; j < channels; j++)
                {
                    if (wave->sampleSize == 8) ((unsigned char *)data)[channels*i + j] = ((unsigned char *)wave->data)[i];
                    else if (wave->sampleSize == 16) ((short *)data)[channels*i + j] = ((short *)wave->data)[i];
                    else if (wave->sampleSize == 32) ((float *)data)[channels*i + j] = ((float *)wave->data)[i];
                }
            }
        }
        else if ((wave->channels == 2) && (channels == 1))  // stereo ---> mono (mix stereo channels)
        {
            for (int i = 0, j = 0; i < wave->sampleCount; i++, j += 2)
            {
                if (wave->sampleSize == 8) ((unsigned char *)data)[i] = (((unsigned char *)wave->data)[j] + ((unsigned char *)wave->data)[j + 1])/2;
                else if (wave->sampleSize == 16) ((short *)data)[i] = (((short *)wave->data)[j] + ((short *)wave->data)[j + 1])/2;
                else if (wave->sampleSize == 32) ((float *)data)[i] = (((float *)wave->data)[j] + ((float *)wave->data)[j + 1])/2.0f;
            }
        }

        // TODO: Add/remove additional interlaced channels
        wave->channels = channels;
        free(wave->data);
        wave->data = data;
    }
#endif
}
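
// Usage sketch (illustrative, not part of the module): converting a loaded wave
// in place before creating a playable Sound from it. The file name is a placeholder.
/*
    Wave wav = LoadWave("resources/sound.wav");
    WaveFormat(&wav, 44100, 16, 2);         // Convert to 44100 Hz, 16 bit, stereo
    Sound snd = LoadSoundFromWave(wav);     // Sound gets its own copy of the data
    UnloadWave(wav);                        // Original wave no longer required
*/
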
// Copy a wave to a new wave
Wave WaveCopy(Wave wave)
{
    Wave newWave = { 0 };

    newWave.data = malloc(wave.sampleCount*wave.sampleSize/8*wave.channels);

    if (newWave.data != NULL)
    {
        // NOTE: Size must be provided in bytes
        memcpy(newWave.data, wave.data, wave.sampleCount*wave.channels*wave.sampleSize/8);

        newWave.sampleCount = wave.sampleCount;
        newWave.sampleRate = wave.sampleRate;
        newWave.sampleSize = wave.sampleSize;
        newWave.channels = wave.channels;
    }

    return newWave;
}

// Crop a wave to defined samples range
// NOTE: Security check in case of out-of-range
void WaveCrop(Wave *wave, int initSample, int finalSample)
{
    if ((initSample >= 0) && (initSample < finalSample) &&
        (finalSample > 0) && (finalSample < wave->sampleCount))
    {
        int sampleCount = finalSample - initSample;

        void *data = malloc(sampleCount*wave->sampleSize/8*wave->channels);

        memcpy(data, (unsigned char *)wave->data + (initSample*wave->channels*wave->sampleSize/8), sampleCount*wave->channels*wave->sampleSize/8);

        free(wave->data);
        wave->data = data;
    }
    else TraceLog(LOG_WARNING, "Wave crop range out of bounds");
}
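
// Usage sketch (illustrative, not part of the module): cropping a copy so the
// original wave stays intact. The sample range below is an arbitrary example.
/*
    Wave copy = WaveCopy(wave);
    WaveCrop(&copy, 0, copy.sampleCount/2);     // Keep only the first half
    Sound half = LoadSoundFromWave(copy);
    UnloadWave(copy);
*/
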
// Get samples data from wave as a floats array
// NOTE: Returned sample values are normalized to range [-1..1]
float *GetWaveData(Wave wave)
{
    float *samples = (float *)malloc(wave.sampleCount*wave.channels*sizeof(float));

    for (int i = 0; i < wave.sampleCount; i++)
    {
        for (int j = 0; j < wave.channels; j++)
        {
            if (wave.sampleSize == 8) samples[wave.channels*i + j] = (float)(((unsigned char *)wave.data)[wave.channels*i + j] - 127)/256.0f;
            else if (wave.sampleSize == 16) samples[wave.channels*i + j] = (float)((short *)wave.data)[wave.channels*i + j]/32767.0f;
            else if (wave.sampleSize == 32) samples[wave.channels*i + j] = ((float *)wave.data)[wave.channels*i + j];
        }
    }

    return samples;
}
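
// Usage sketch (illustrative, not part of the module): scanning the normalized
// samples returned by GetWaveData(), for example to find the peak amplitude.
/*
    float *samples = GetWaveData(wave);
    float peak = 0.0f;

    for (int i = 0; i < wave.sampleCount*wave.channels; i++)
    {
        float s = fabsf(samples[i]);            // Values are already in the [-1..1] range
        if (s > peak) peak = s;
    }

    free(samples);                              // Caller owns the returned buffer
*/
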
//----------------------------------------------------------------------------------
// Module Functions Definition - Music loading and stream playing
//----------------------------------------------------------------------------------

// Load music stream from file
Music LoadMusicStream(const char *fileName)
{
    Music music = (MusicData *)malloc(sizeof(MusicData));

    if (IsFileExtension(fileName, ".ogg"))
    {
        // Open ogg audio stream
        music->ctxOgg = stb_vorbis_open_filename(fileName, NULL, NULL);

        if (music->ctxOgg == NULL) TraceLog(LOG_WARNING, "[%s] OGG audio file could not be opened", fileName);
        else
        {
            stb_vorbis_info info = stb_vorbis_get_info(music->ctxOgg);  // Get Ogg file info

            // OGG bit rate defaults to 16 bit, it's enough for compressed format
            music->stream = InitAudioStream(info.sample_rate, 16, info.channels);
            music->totalSamples = (unsigned int)stb_vorbis_stream_length_in_samples(music->ctxOgg); // Independent by channel
            music->samplesLeft = music->totalSamples;
            music->ctxType = MUSIC_AUDIO_OGG;
            music->loopCount = -1;                       // Infinite loop by default

            TraceLog(LOG_DEBUG, "[%s] OGG total samples: %i", fileName, music->totalSamples);
            TraceLog(LOG_DEBUG, "[%s] OGG sample rate: %i", fileName, info.sample_rate);
            TraceLog(LOG_DEBUG, "[%s] OGG channels: %i", fileName, info.channels);
            TraceLog(LOG_DEBUG, "[%s] OGG memory required: %i", fileName, info.temp_memory_required);
        }
    }
#if defined(SUPPORT_FILEFORMAT_FLAC)
    else if (IsFileExtension(fileName, ".flac"))
    {
        music->ctxFlac = drflac_open_file(fileName);

        if (music->ctxFlac == NULL) TraceLog(LOG_WARNING, "[%s] FLAC audio file could not be opened", fileName);
        else
        {
            music->stream = InitAudioStream(music->ctxFlac->sampleRate, music->ctxFlac->bitsPerSample, music->ctxFlac->channels);
            music->totalSamples = (unsigned int)music->ctxFlac->totalSampleCount/music->ctxFlac->channels;
            music->samplesLeft = music->totalSamples;
            music->ctxType = MUSIC_AUDIO_FLAC;
            music->loopCount = -1;                       // Infinite loop by default

            TraceLog(LOG_DEBUG, "[%s] FLAC total samples: %i", fileName, music->totalSamples);
            TraceLog(LOG_DEBUG, "[%s] FLAC sample rate: %i", fileName, music->ctxFlac->sampleRate);
            TraceLog(LOG_DEBUG, "[%s] FLAC bits per sample: %i", fileName, music->ctxFlac->bitsPerSample);
            TraceLog(LOG_DEBUG, "[%s] FLAC channels: %i", fileName, music->ctxFlac->channels);
        }
    }
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
    else if (IsFileExtension(fileName, ".xm"))
    {
        int result = jar_xm_create_context_from_file(&music->ctxXm, 48000, fileName);

        if (!result)    // XM context created successfully
        {
            jar_xm_set_max_loop_count(music->ctxXm, 0); // Set infinite number of loops

            // NOTE: Only stereo is supported for XM
            music->stream = InitAudioStream(48000, 16, 2);
            music->totalSamples = (unsigned int)jar_xm_get_remaining_samples(music->ctxXm);
            music->samplesLeft = music->totalSamples;
            music->ctxType = MUSIC_MODULE_XM;
            music->loopCount = -1;                       // Infinite loop by default

            TraceLog(LOG_DEBUG, "[%s] XM number of samples: %i", fileName, music->totalSamples);
            TraceLog(LOG_DEBUG, "[%s] XM track length: %11.6f sec", fileName, (float)music->totalSamples/48000.0f);
        }
        else TraceLog(LOG_WARNING, "[%s] XM file could not be opened", fileName);
    }
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
    else if (IsFileExtension(fileName, ".mod"))
    {
        jar_mod_init(&music->ctxMod);

        if (jar_mod_load_file(&music->ctxMod, fileName))
        {
            music->stream = InitAudioStream(48000, 16, 2);
            music->totalSamples = (unsigned int)jar_mod_max_samples(&music->ctxMod);
            music->samplesLeft = music->totalSamples;
            music->ctxType = MUSIC_MODULE_MOD;
            music->loopCount = -1;                       // Infinite loop by default

            TraceLog(LOG_DEBUG, "[%s] MOD number of samples: %i", fileName, music->samplesLeft);
            TraceLog(LOG_DEBUG, "[%s] MOD track length: %11.6f sec", fileName, (float)music->totalSamples/48000.0f);
        }
        else TraceLog(LOG_WARNING, "[%s] MOD file could not be opened", fileName);
    }
#endif
    else TraceLog(LOG_WARNING, "[%s] Audio fileformat not supported, it can't be loaded", fileName);

    return music;
}
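
// Usage sketch (illustrative, not part of the module): typical music streaming loop.
// The file name is a placeholder; WindowShouldClose() comes from the core module.
/*
    Music music = LoadMusicStream("resources/music.ogg");
    PlayMusicStream(music);

    while (!WindowShouldClose())
    {
        UpdateMusicStream(music);   // Refill stream buffers with new data every frame
        // ...update and draw frame...
    }

    UnloadMusicStream(music);
*/
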
// Unload music stream
void UnloadMusicStream(Music music)
{
    CloseAudioStream(music->stream);

    if (music->ctxType == MUSIC_AUDIO_OGG) stb_vorbis_close(music->ctxOgg);
#if defined(SUPPORT_FILEFORMAT_FLAC)
    else if (music->ctxType == MUSIC_AUDIO_FLAC) drflac_free(music->ctxFlac);
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
    else if (music->ctxType == MUSIC_MODULE_XM) jar_xm_free_context(music->ctxXm);
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
    else if (music->ctxType == MUSIC_MODULE_MOD) jar_mod_unload(&music->ctxMod);
#endif

    free(music);
}

// Start music playing (open stream)
void PlayMusicStream(Music music)
{
#if USE_MINI_AL
    AudioBuffer *audioBuffer = (AudioBuffer *)music->stream.audioBuffer;
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "PlayMusicStream() : No audio buffer");
        return;
    }

    // For music streams, we need to make sure we maintain the frame cursor position.
    // This is a hack for this section of code in UpdateMusicStream():
    //     // NOTE: In case window is minimized, music stream is stopped,
    //     // just make sure to play again on window restore
    //     if (IsMusicPlaying(music)) PlayMusicStream(music);
    mal_uint32 frameCursorPos = audioBuffer->frameCursorPos;

    PlayAudioStream(music->stream);     // <-- This resets the cursor position.

    audioBuffer->frameCursorPos = frameCursorPos;
#else
    alSourcePlay(music->stream.source);
#endif
}

// Pause music playing
void PauseMusicStream(Music music)
{
#if USE_MINI_AL
    PauseAudioStream(music->stream);
#else
    alSourcePause(music->stream.source);
#endif
}

// Resume music playing
void ResumeMusicStream(Music music)
{
#if USE_MINI_AL
    ResumeAudioStream(music->stream);
#else
    ALenum state;
    alGetSourcei(music->stream.source, AL_SOURCE_STATE, &state);

    if (state == AL_PAUSED)
    {
        TraceLog(LOG_INFO, "[AUD ID %i] Resume music stream playing", music->stream.source);
        alSourcePlay(music->stream.source);
    }
#endif
}

// Stop music playing (close stream)
// TODO: To clear a buffer, make sure it has already been processed!
void StopMusicStream(Music music)
{
#if USE_MINI_AL
    StopAudioStream(music->stream);
#else
    alSourceStop(music->stream.source);

    /*
    // Clear stream buffers
    // WARNING: Queued buffers must have been processed before unqueueing and reloaded with data!!!
    void *pcm = calloc(AUDIO_BUFFER_SIZE*music->stream.sampleSize/8*music->stream.channels, 1);

    for (int i = 0; i < MAX_STREAM_BUFFERS; i++)
    {
        //UpdateAudioStream(music->stream, pcm, AUDIO_BUFFER_SIZE);    // Update one buffer at a time
        alBufferData(music->stream.buffers[i], music->stream.format, pcm, AUDIO_BUFFER_SIZE*music->stream.sampleSize/8*music->stream.channels, music->stream.sampleRate);
    }

    free(pcm);
    */
#endif

    // Restart music context
    switch (music->ctxType)
    {
        case MUSIC_AUDIO_OGG: stb_vorbis_seek_start(music->ctxOgg); break;
#if defined(SUPPORT_FILEFORMAT_FLAC)
        case MUSIC_AUDIO_FLAC: /* TODO: Restart FLAC context */ break;
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
        case MUSIC_MODULE_XM: /* TODO: Restart XM context */ break;
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
        case MUSIC_MODULE_MOD: jar_mod_seek_start(&music->ctxMod); break;
#endif
        default: break;
    }

    music->samplesLeft = music->totalSamples;
}

// Update (re-fill) music buffers if data already processed
// TODO: Make sure buffers are ready for update... check music state
void UpdateMusicStream(Music music)
{
#if USE_MINI_AL
    bool streamEnding = false;

    unsigned int subBufferSizeInFrames = ((AudioBuffer *)music->stream.audioBuffer)->bufferSizeInFrames/2;

    // NOTE: Using dynamic allocation because it could require more than 16KB
    void *pcm = calloc(subBufferSizeInFrames*music->stream.sampleSize/8*music->stream.channels, 1);

    int samplesCount = 0;    // Total size of data streamed in L+R samples for xm floats, individual L or R for ogg shorts

    while (IsAudioBufferProcessed(music->stream))
    {
        if (music->samplesLeft >= subBufferSizeInFrames) samplesCount = subBufferSizeInFrames;
        else samplesCount = music->samplesLeft;

        // TODO: Really don't like ctxType thingy...
        switch (music->ctxType)
        {
            case MUSIC_AUDIO_OGG:
            {
                // NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
                int numSamplesOgg = stb_vorbis_get_samples_short_interleaved(music->ctxOgg, music->stream.channels, (short *)pcm, samplesCount*music->stream.channels);

            } break;
#if defined(SUPPORT_FILEFORMAT_FLAC)
            case MUSIC_AUDIO_FLAC:
            {
                // NOTE: Returns the number of samples to process
                unsigned int numSamplesFlac = (unsigned int)drflac_read_s16(music->ctxFlac, samplesCount*music->stream.channels, (short *)pcm);

            } break;
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
            case MUSIC_MODULE_XM: jar_xm_generate_samples_16bit(music->ctxXm, pcm, samplesCount); break;
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
            case MUSIC_MODULE_MOD: jar_mod_fillbuffer(&music->ctxMod, pcm, samplesCount, 0); break;
#endif
            default: break;
        }

        UpdateAudioStream(music->stream, pcm, samplesCount);
        music->samplesLeft -= samplesCount;

        if (music->samplesLeft <= 0)
        {
            streamEnding = true;
            break;
        }
    }

    // Free allocated pcm data
    free(pcm);

    // Reset audio stream for looping
    if (streamEnding)
    {
        StopMusicStream(music);        // Stop music (and reset)

        // Decrease loopCount to stop when required
        if (music->loopCount > 0)
        {
            music->loopCount--;        // Decrease loop count
            PlayMusicStream(music);    // Play again
        }
        else
        {
            if (music->loopCount == -1) PlayMusicStream(music);
        }
    }
    else
    {
        // NOTE: In case window is minimized, music stream is stopped,
        // just make sure to play again on window restore
        if (IsMusicPlaying(music)) PlayMusicStream(music);
    }
#else
    ALenum state;
    ALint processed = 0;

    alGetSourcei(music->stream.source, AL_SOURCE_STATE, &state);          // Get music stream state
    alGetSourcei(music->stream.source, AL_BUFFERS_PROCESSED, &processed); // Get processed buffers

    if (processed > 0)
    {
        bool streamEnding = false;

        // NOTE: Using dynamic allocation because it could require more than 16KB
        void *pcm = calloc(AUDIO_BUFFER_SIZE*music->stream.sampleSize/8*music->stream.channels, 1);

        int numBuffersToProcess = processed;
        int samplesCount = 0;    // Total size of data streamed in L+R samples for xm floats,
                                 // individual L or R for ogg shorts

        for (int i = 0; i < numBuffersToProcess; i++)
        {
            if (music->samplesLeft >= AUDIO_BUFFER_SIZE) samplesCount = AUDIO_BUFFER_SIZE;
            else samplesCount = music->samplesLeft;

            // TODO: Really don't like ctxType thingy...
            switch (music->ctxType)
            {
                case MUSIC_AUDIO_OGG:
                {
                    // NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
                    int numSamplesOgg = stb_vorbis_get_samples_short_interleaved(music->ctxOgg, music->stream.channels, (short *)pcm, samplesCount*music->stream.channels);

                } break;
#if defined(SUPPORT_FILEFORMAT_FLAC)
                case MUSIC_AUDIO_FLAC:
                {
                    // NOTE: Returns the number of samples to process
                    unsigned int numSamplesFlac = (unsigned int)drflac_read_s16(music->ctxFlac, samplesCount*music->stream.channels, (short *)pcm);

                } break;
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
                case MUSIC_MODULE_XM: jar_xm_generate_samples_16bit(music->ctxXm, pcm, samplesCount); break;
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
                case MUSIC_MODULE_MOD: jar_mod_fillbuffer(&music->ctxMod, pcm, samplesCount, 0); break;
#endif
                default: break;
            }

            UpdateAudioStream(music->stream, pcm, samplesCount);
            music->samplesLeft -= samplesCount;

            if (music->samplesLeft <= 0)
            {
                streamEnding = true;
                break;
            }
        }

        // Free allocated pcm data
        free(pcm);

        // Reset audio stream for looping
        if (streamEnding)
        {
            StopMusicStream(music);        // Stop music (and reset)

            // Decrease loopCount to stop when required
            if (music->loopCount > 0)
            {
                music->loopCount--;        // Decrease loop count
                PlayMusicStream(music);    // Play again
            }
            else
            {
                if (music->loopCount == -1) PlayMusicStream(music);
            }
        }
        else
        {
            // NOTE: In case window is minimized, music stream is stopped,
            // just make sure to play again on window restore
            if (state != AL_PLAYING) PlayMusicStream(music);
        }
    }
#endif
}

// Check if any music is playing
bool IsMusicPlaying(Music music)
{
#if USE_MINI_AL
    return IsAudioStreamPlaying(music->stream);
#else
    bool playing = false;
    ALint state;

    alGetSourcei(music->stream.source, AL_SOURCE_STATE, &state);

    if (state == AL_PLAYING) playing = true;

    return playing;
#endif
}

// Set volume for music
void SetMusicVolume(Music music, float volume)
{
#if USE_MINI_AL
    SetAudioStreamVolume(music->stream, volume);
#else
    alSourcef(music->stream.source, AL_GAIN, volume);
#endif
}

// Set pitch for music
void SetMusicPitch(Music music, float pitch)
{
#if USE_MINI_AL
    SetAudioStreamPitch(music->stream, pitch);
#else
    alSourcef(music->stream.source, AL_PITCH, pitch);
#endif
}

// Set music loop count (loop repeats)
// NOTE: If set to -1, means infinite loop
void SetMusicLoopCount(Music music, int count)
{
    music->loopCount = count;
}

// Get music time length (in seconds)
float GetMusicTimeLength(Music music)
{
    float totalSeconds = (float)music->totalSamples/music->stream.sampleRate;

    return totalSeconds;
}

// Get current music time played (in seconds)
float GetMusicTimePlayed(Music music)
{
    float secondsPlayed = 0.0f;

    unsigned int samplesPlayed = music->totalSamples - music->samplesLeft;
    secondsPlayed = (float)samplesPlayed/music->stream.sampleRate;

    return secondsPlayed;
}
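
// Usage sketch (illustrative, not part of the module): computing playback progress
// in the 0.0f..1.0f range, for example to drive a simple progress bar.
/*
    float progress = GetMusicTimePlayed(music)/GetMusicTimeLength(music);
    DrawRectangle(20, 20, (int)(200*progress), 12, MAROON);     // DrawRectangle() comes from the shapes module
*/
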
// Init audio stream (to stream audio pcm data)
AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, unsigned int channels)
{
    AudioStream stream = { 0 };

    stream.sampleRate = sampleRate;
    stream.sampleSize = sampleSize;

    // Only mono and stereo channels are supported, more channels require AL_EXT_MCFORMATS extension
    if ((channels > 0) && (channels < 3)) stream.channels = channels;
    else
    {
        TraceLog(LOG_WARNING, "Init audio stream: Number of channels not supported: %i", channels);
        stream.channels = 1;    // Fallback to mono channel
    }

#if USE_MINI_AL
    mal_format formatIn = ((stream.sampleSize == 8) ? mal_format_u8 : ((stream.sampleSize == 16) ? mal_format_s16 : mal_format_f32));

    // The size of a streaming buffer must be at least double the size of a period.
    unsigned int periodSize = device.bufferSizeInFrames/device.periods;
    unsigned int subBufferSize = AUDIO_BUFFER_SIZE;
    if (subBufferSize < periodSize) subBufferSize = periodSize;

    AudioBuffer *audioBuffer = CreateAudioBuffer(formatIn, stream.channels, stream.sampleRate, subBufferSize*2, AUDIO_BUFFER_USAGE_STREAM);
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "InitAudioStream() : Failed to create audio buffer");
        return stream;
    }

    audioBuffer->looping = true;    // Always loop for streaming buffers.
    stream.audioBuffer = audioBuffer;
#else
    // Setup OpenAL format
    if (stream.channels == 1)
    {
        switch (sampleSize)
        {
            case 8: stream.format = AL_FORMAT_MONO8; break;
            case 16: stream.format = AL_FORMAT_MONO16; break;
            case 32: stream.format = AL_FORMAT_MONO_FLOAT32; break;      // Requires OpenAL extension: AL_EXT_FLOAT32
            default: TraceLog(LOG_WARNING, "Init audio stream: Sample size not supported: %i", sampleSize); break;
        }
    }
    else if (stream.channels == 2)
    {
        switch (sampleSize)
        {
            case 8: stream.format = AL_FORMAT_STEREO8; break;
            case 16: stream.format = AL_FORMAT_STEREO16; break;
            case 32: stream.format = AL_FORMAT_STEREO_FLOAT32; break;    // Requires OpenAL extension: AL_EXT_FLOAT32
            default: TraceLog(LOG_WARNING, "Init audio stream: Sample size not supported: %i", sampleSize); break;
        }
    }

    // Create an audio source
    alGenSources(1, &stream.source);
    alSourcef(stream.source, AL_PITCH, 1.0f);
    alSourcef(stream.source, AL_GAIN, 1.0f);
    alSource3f(stream.source, AL_POSITION, 0.0f, 0.0f, 0.0f);
    alSource3f(stream.source, AL_VELOCITY, 0.0f, 0.0f, 0.0f);

    // Create buffers (double buffering)
    alGenBuffers(MAX_STREAM_BUFFERS, stream.buffers);

    // Initialize buffer with zeros by default
    // NOTE: Using dynamic allocation because it requires more than 16KB
    void *pcm = calloc(AUDIO_BUFFER_SIZE*stream.sampleSize/8*stream.channels, 1);

    for (int i = 0; i < MAX_STREAM_BUFFERS; i++)
    {
        alBufferData(stream.buffers[i], stream.format, pcm, AUDIO_BUFFER_SIZE*stream.sampleSize/8*stream.channels, stream.sampleRate);
    }

    free(pcm);

    alSourceQueueBuffers(stream.source, MAX_STREAM_BUFFERS, stream.buffers);
#endif

    TraceLog(LOG_INFO, "[AUD ID %i] Audio stream loaded successfully (%i Hz, %i bit, %s)", stream.source, stream.sampleRate, stream.sampleSize, (stream.channels == 1) ? "Mono" : "Stereo");

    return stream;
}
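
// Usage sketch (illustrative, not part of the module): feeding raw PCM data into an
// audio stream. The buffer size and sine-wave parameters are arbitrary examples.
/*
    #define SAMPLES_PER_UPDATE   4096

    AudioStream stream = InitAudioStream(22050, 16, 1);     // 22050 Hz, 16 bit, mono
    short *buffer = (short *)malloc(SAMPLES_PER_UPDATE*sizeof(short));
    PlayAudioStream(stream);

    // Inside the game loop:
    if (IsAudioBufferProcessed(stream))
    {
        for (int i = 0; i < SAMPLES_PER_UPDATE; i++) buffer[i] = (short)(32000*sinf(2*PI*440*i/22050.0f));
        UpdateAudioStream(stream, buffer, SAMPLES_PER_UPDATE);
    }

    // On shutdown:
    free(buffer);
    CloseAudioStream(stream);
*/
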
// Close audio stream and free memory
void CloseAudioStream(AudioStream stream)
{
#if USE_MINI_AL
    DeleteAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
    // Stop playing channel
    alSourceStop(stream.source);

    // Flush out all queued buffers
    int queued = 0;
    alGetSourcei(stream.source, AL_BUFFERS_QUEUED, &queued);

    ALuint buffer = 0;

    while (queued > 0)
    {
        alSourceUnqueueBuffers(stream.source, 1, &buffer);
        queued--;
    }

    // Delete source and buffers
    alDeleteSources(1, &stream.source);
    alDeleteBuffers(MAX_STREAM_BUFFERS, stream.buffers);
#endif

    TraceLog(LOG_INFO, "[AUD ID %i] Unloaded audio stream data", stream.source);
}

// Update audio stream buffers with data
// NOTE 1: Only updates one buffer of the stream source: unqueue -> update -> queue
// NOTE 2: To unqueue a buffer it needs to be processed: IsAudioBufferProcessed()
void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
{
#if USE_MINI_AL
    AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "UpdateAudioStream() : No audio buffer");
        return;
    }

    if (audioBuffer->isSubBufferProcessed[0] || audioBuffer->isSubBufferProcessed[1])
    {
        mal_uint32 subBufferToUpdate;
        if (audioBuffer->isSubBufferProcessed[0] && audioBuffer->isSubBufferProcessed[1])
        {
            // Both buffers are available for updating. Update the first one and make sure the cursor is moved back to the front.
            subBufferToUpdate = 0;
            audioBuffer->frameCursorPos = 0;
        }
        else
        {
            // Just update whichever sub-buffer is processed.
            subBufferToUpdate = (audioBuffer->isSubBufferProcessed[0]) ? 0 : 1;
        }

        mal_uint32 subBufferSizeInFrames = audioBuffer->bufferSizeInFrames/2;
        unsigned char *subBuffer = audioBuffer->buffer + ((subBufferSizeInFrames*stream.channels*(stream.sampleSize/8))*subBufferToUpdate);

        // Does this API expect a whole buffer to be updated in one go? Assuming so, but if not will need to change this logic.
        if (subBufferSizeInFrames >= (mal_uint32)samplesCount)
        {
            mal_uint32 framesToWrite = subBufferSizeInFrames;
            if (framesToWrite > (mal_uint32)samplesCount) framesToWrite = (mal_uint32)samplesCount;

            mal_uint32 bytesToWrite = framesToWrite*stream.channels*(stream.sampleSize/8);
            memcpy(subBuffer, data, bytesToWrite);

            // Any leftover frames should be filled with zeros.
            mal_uint32 leftoverFrameCount = subBufferSizeInFrames - framesToWrite;
            if (leftoverFrameCount > 0)
            {
                memset(subBuffer + bytesToWrite, 0, leftoverFrameCount*stream.channels*(stream.sampleSize/8));
            }

            audioBuffer->isSubBufferProcessed[subBufferToUpdate] = false;
        }
        else
        {
            TraceLog(LOG_ERROR, "UpdateAudioStream() : Attempting to write too many frames to buffer");
            return;
        }
    }
    else
    {
        TraceLog(LOG_ERROR, "Audio buffer not available for updating");
        return;
    }
#else
    ALuint buffer = 0;
    alSourceUnqueueBuffers(stream.source, 1, &buffer);

    // Check if any buffer was available for unqueue
    if (alGetError() != AL_INVALID_VALUE)
    {
        alBufferData(buffer, stream.format, data, samplesCount*stream.sampleSize/8*stream.channels, stream.sampleRate);
        alSourceQueueBuffers(stream.source, 1, &buffer);
    }
    else TraceLog(LOG_WARNING, "[AUD ID %i] Audio buffer not available for unqueuing", stream.source);
#endif
}

// Check if any audio stream buffers require refill
bool IsAudioBufferProcessed(AudioStream stream)
{
#if USE_MINI_AL
    AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "IsAudioBufferProcessed() : No audio buffer");
        return false;
    }

    return audioBuffer->isSubBufferProcessed[0] || audioBuffer->isSubBufferProcessed[1];
#else
    ALint processed = 0;

    // Determine if music stream is ready to be written
    alGetSourcei(stream.source, AL_BUFFERS_PROCESSED, &processed);

    return (processed > 0);
#endif
}
2014-04-19 18:36:49 +04:00
2016-08-02 18:32:24 +03:00
// Play audio stream
void PlayAudioStream ( AudioStream stream )
2014-04-19 18:36:49 +04:00
{
2017-11-12 13:59:16 +03:00
# if USE_MINI_AL
2017-12-20 13:37:43 +03:00
PlayAudioBuffer ( ( AudioBuffer * ) stream . audioBuffer ) ;
2017-11-12 13:59:16 +03:00
# else
2016-08-02 18:32:24 +03:00
alSourcePlay ( stream . source ) ;
2017-11-12 13:59:16 +03:00
# endif
2016-08-02 18:32:24 +03:00
}
2016-07-29 22:35:57 +03:00
2016-08-02 18:32:24 +03:00
// Play audio stream
void PauseAudioStream ( AudioStream stream )
{
2017-11-12 13:59:16 +03:00
# if USE_MINI_AL
2017-12-20 13:37:43 +03:00
PauseAudioBuffer ( ( AudioBuffer * ) stream . audioBuffer ) ;
2017-11-12 13:59:16 +03:00
# else
2016-08-02 18:32:24 +03:00
alSourcePause ( stream . source ) ;
2017-11-12 13:59:16 +03:00
# endif
2016-08-02 18:32:24 +03:00
}

// Resume audio stream playing
void ResumeAudioStream(AudioStream stream)
{
#if USE_MINI_AL
    ResumeAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
    ALenum state;
    alGetSourcei(stream.source, AL_SOURCE_STATE, &state);

    if (state == AL_PAUSED) alSourcePlay(stream.source);
#endif
}

// Check if audio stream is playing
bool IsAudioStreamPlaying(AudioStream stream)
{
#if USE_MINI_AL
    return IsAudioBufferPlaying((AudioBuffer *)stream.audioBuffer);
#else
    bool playing = false;
    ALint state;

    alGetSourcei(stream.source, AL_SOURCE_STATE, &state);

    if (state == AL_PLAYING) playing = true;

    return playing;
#endif
}

// Stop audio stream
void StopAudioStream(AudioStream stream)
{
#if USE_MINI_AL
    StopAudioBuffer((AudioBuffer *)stream.audioBuffer);
#else
    alSourceStop(stream.source);
#endif
}

// Set volume for audio stream
void SetAudioStreamVolume(AudioStream stream, float volume)
{
#if USE_MINI_AL
    SetAudioBufferVolume((AudioBuffer *)stream.audioBuffer, volume);
#else
    alSourcef(stream.source, AL_GAIN, volume);
#endif
}

// Set pitch for audio stream
void SetAudioStreamPitch(AudioStream stream, float pitch)
{
#if USE_MINI_AL
    SetAudioBufferPitch((AudioBuffer *)stream.audioBuffer, pitch);
#else
    alSourcef(stream.source, AL_PITCH, pitch);
#endif
}

//----------------------------------------------------------------------------------
// Module specific Functions Definition
//----------------------------------------------------------------------------------

#if defined(SUPPORT_FILEFORMAT_WAV)
// Load WAV file into Wave structure
static Wave LoadWAV(const char *fileName)
{
    // Basic WAV headers structs
    typedef struct {
        char chunkID[4];
        int chunkSize;
        char format[4];
    } WAVRiffHeader;

    typedef struct {
        char subChunkID[4];
        int subChunkSize;
        short audioFormat;
        short numChannels;
        int sampleRate;
        int byteRate;
        short blockAlign;
        short bitsPerSample;
    } WAVFormat;

    typedef struct {
        char subChunkID[4];
        int subChunkSize;
    } WAVData;

    WAVRiffHeader wavRiffHeader;
    WAVFormat wavFormat;
    WAVData wavData;

    Wave wave = { 0 };
    FILE *wavFile;

    wavFile = fopen(fileName, "rb");

    if (wavFile == NULL)
    {
        TraceLog(LOG_WARNING, "[%s] WAV file could not be opened", fileName);
        wave.data = NULL;
    }
    else
    {
        // Read in the first chunk into the struct
        fread(&wavRiffHeader, sizeof(WAVRiffHeader), 1, wavFile);

        // Check for RIFF and WAVE tags
        if (strncmp(wavRiffHeader.chunkID, "RIFF", 4) ||
            strncmp(wavRiffHeader.format, "WAVE", 4))
        {
            TraceLog(LOG_WARNING, "[%s] Invalid RIFF or WAVE Header", fileName);
        }
        else
        {
            // Read in the 2nd chunk for the wave info
            fread(&wavFormat, sizeof(WAVFormat), 1, wavFile);

            // Check for fmt tag
            if ((wavFormat.subChunkID[0] != 'f') || (wavFormat.subChunkID[1] != 'm') ||
                (wavFormat.subChunkID[2] != 't') || (wavFormat.subChunkID[3] != ' '))
            {
                TraceLog(LOG_WARNING, "[%s] Invalid Wave format", fileName);
            }
            else
            {
                // Check for extra parameters
                if (wavFormat.subChunkSize > 16) fseek(wavFile, sizeof(short), SEEK_CUR);

                // Read in the last byte of data before the sound file
                fread(&wavData, sizeof(WAVData), 1, wavFile);

                // Check for data tag
                if ((wavData.subChunkID[0] != 'd') || (wavData.subChunkID[1] != 'a') ||
                    (wavData.subChunkID[2] != 't') || (wavData.subChunkID[3] != 'a'))
                {
                    TraceLog(LOG_WARNING, "[%s] Invalid data header", fileName);
                }
                else
                {
                    // Allocate memory for data
                    wave.data = malloc(wavData.subChunkSize);

                    // Read in the sound data into the soundData variable
                    fread(wave.data, wavData.subChunkSize, 1, wavFile);

                    // Store wave parameters
                    wave.sampleRate = wavFormat.sampleRate;
                    wave.sampleSize = wavFormat.bitsPerSample;
                    wave.channels = wavFormat.numChannels;

                    // NOTE: Only support 8 bit, 16 bit and 32 bit sample sizes
                    if ((wave.sampleSize != 8) && (wave.sampleSize != 16) && (wave.sampleSize != 32))
                    {
                        TraceLog(LOG_WARNING, "[%s] WAV sample size (%ibit) not supported, converted to 16bit", fileName, wave.sampleSize);
                        WaveFormat(&wave, wave.sampleRate, 16, wave.channels);
                    }

                    // NOTE: Only support up to 2 channels (mono, stereo)
                    if (wave.channels > 2)
                    {
                        WaveFormat(&wave, wave.sampleRate, wave.sampleSize, 2);
                        TraceLog(LOG_WARNING, "[%s] WAV channels number (%i) not supported, converted to 2 channels", fileName, wave.channels);
                    }

                    // NOTE: subChunkSize comes in bytes, we need to translate it to number of samples
                    wave.sampleCount = (wavData.subChunkSize/(wave.sampleSize/8))/wave.channels;

                    TraceLog(LOG_INFO, "[%s] WAV file loaded successfully (%i Hz, %i bit, %s)", fileName, wave.sampleRate, wave.sampleSize, (wave.channels == 1) ? "Mono" : "Stereo");
                }
            }
        }

        fclose(wavFile);
    }

    return wave;
}
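
// Reference note (not code): canonical WAV layout assumed by the loader above,
// i.e. a plain PCM file with no extra chunks between "fmt " and "data".
//
//    Offset  Size  Field
//    0       4     "RIFF"                      (WAVRiffHeader.chunkID)
//    4       4     RIFF chunk size
//    8       4     "WAVE"                      (WAVRiffHeader.format)
//    12      4     "fmt "                      (WAVFormat.subChunkID)
//    16      4     fmt chunk size (16 for PCM)
//    20      2     audio format (1 = PCM)
//    22      2     number of channels
//    24      4     sample rate
//    28      4     byte rate
//    32      2     block align
//    34      2     bits per sample
//    36      4     "data"                      (WAVData.subChunkID)
//    40      4     data size in bytes, followed by the raw sample data
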
#endif

#if defined(SUPPORT_FILEFORMAT_OGG)
// Load OGG file into Wave structure
// NOTE: Using stb_vorbis library
static Wave LoadOGG(const char *fileName)
{
    Wave wave = { 0 };

    stb_vorbis *oggFile = stb_vorbis_open_filename(fileName, NULL, NULL);

    if (oggFile == NULL) TraceLog(LOG_WARNING, "[%s] OGG file could not be opened", fileName);
    else
    {
        stb_vorbis_info info = stb_vorbis_get_info(oggFile);

        wave.sampleRate = info.sample_rate;
        wave.sampleSize = 16;                   // 16 bit per sample (short)
        wave.channels = info.channels;
        wave.sampleCount = (int)stb_vorbis_stream_length_in_samples(oggFile);   // Independent by channel

        float totalSeconds = stb_vorbis_stream_length_in_seconds(oggFile);
        if (totalSeconds > 10) TraceLog(LOG_WARNING, "[%s] Ogg audio length is larger than 10 seconds (%f), that's a big file in memory, consider music streaming", fileName, totalSeconds);

        wave.data = (short *)malloc(wave.sampleCount*wave.channels*sizeof(short));

        // NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
        int numSamplesOgg = stb_vorbis_get_samples_short_interleaved(oggFile, info.channels, (short *)wave.data, wave.sampleCount*wave.channels);

        TraceLog(LOG_DEBUG, "[%s] Samples obtained: %i", fileName, numSamplesOgg);

        TraceLog(LOG_INFO, "[%s] OGG file loaded successfully (%i Hz, %i bit, %s)", fileName, wave.sampleRate, wave.sampleSize, (wave.channels == 1) ? "Mono" : "Stereo");

        stb_vorbis_close(oggFile);
    }

    return wave;
}
#endif

#if defined(SUPPORT_FILEFORMAT_FLAC)
// Load FLAC file into Wave structure
// NOTE: Using dr_flac library
static Wave LoadFLAC(const char *fileName)
{
    Wave wave = { 0 };

    // Decode an entire FLAC file in one go
    uint64_t totalSampleCount;
    wave.data = drflac_open_and_decode_file_s16(fileName, &wave.channels, &wave.sampleRate, &totalSampleCount);

    wave.sampleCount = (int)totalSampleCount/wave.channels;
    wave.sampleSize = 16;

    // NOTE: Only support up to 2 channels (mono, stereo)
    if (wave.channels > 2) TraceLog(LOG_WARNING, "[%s] FLAC channels number (%i) not supported", fileName, wave.channels);

    if (wave.data == NULL) TraceLog(LOG_WARNING, "[%s] FLAC data could not be loaded", fileName);
    else TraceLog(LOG_INFO, "[%s] FLAC file loaded successfully (%i Hz, %i bit, %s)", fileName, wave.sampleRate, wave.sampleSize, (wave.channels == 1) ? "Mono" : "Stereo");

    return wave;
}
#endif

// Some required functions for audio standalone module version
#if defined(AUDIO_STANDALONE)
// Check file extension
bool IsFileExtension(const char *fileName, const char *ext)
{
    bool result = false;
    const char *fileExt;

    if ((fileExt = strrchr(fileName, '.')) != NULL)
    {
        if (strcmp(fileExt, ext) == 0) result = true;
    }

    return result;
}

// Show trace log messages (LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_DEBUG)
void TraceLog(int msgType, const char *text, ...)
{
    va_list args;
    va_start(args, text);

    switch (msgType)
    {
        case LOG_INFO: fprintf(stdout, "INFO: "); break;
        case LOG_ERROR: fprintf(stdout, "ERROR: "); break;
        case LOG_WARNING: fprintf(stdout, "WARNING: "); break;
        case LOG_DEBUG: fprintf(stdout, "DEBUG: "); break;
        default: break;
    }

    vfprintf(stdout, text, args);
    fprintf(stdout, "\n");

    va_end(args);

    if (msgType == LOG_ERROR) exit(1);
}
#endif