2014-09-17 00:51:31 +04:00
/**********************************************************************************************
2013-11-19 02:38:44 +04:00
*
2017-03-19 14:52:58 +03:00
*   raylib.audio - Basic functionality to work with audio
2013-11-19 02:38:44 +04:00
*
2017-03-20 22:34:44 +03:00
* FEATURES :
* - Manage audio device ( init / close )
* - Load and unload audio files
* - Format wave data ( sample rate , size , channels )
* - Play / Stop / Pause / Resume loaded audio
* - Manage mixing channels
* - Manage raw audio context
2017-02-16 02:50:02 +03:00
*
* CONFIGURATION :
2018-04-07 23:29:53 +03:00
*
2017-02-16 02:50:02 +03:00
* # define AUDIO_STANDALONE
2017-03-19 14:52:58 +03:00
* Define to use the module as standalone library ( independently of raylib ) .
2017-02-16 02:50:02 +03:00
* Required types and functions are defined in the same module .
*
2017-03-26 23:49:01 +03:00
* # define SUPPORT_FILEFORMAT_WAV
2017-02-16 02:50:02 +03:00
* # define SUPPORT_FILEFORMAT_OGG
* # define SUPPORT_FILEFORMAT_XM
* # define SUPPORT_FILEFORMAT_MOD
* # define SUPPORT_FILEFORMAT_FLAC
2018-05-04 17:59:48 +03:00
* # define SUPPORT_FILEFORMAT_MP3
2018-04-07 23:29:53 +03:00
*   Select the desired file formats to be supported for loading. Some of those formats are
2017-02-16 02:50:02 +03:00
* supported by default , to remove support , just comment unrequired # define in this module
*
2017-12-20 14:34:18 +03:00
* LIMITATIONS ( only OpenAL Soft ) :
2017-03-19 14:52:58 +03:00
* Only up to two channels supported : MONO and STEREO ( for additional channels , use AL_EXT_MCFORMATS )
* Only the following sample sizes supported : 8 bit PCM , 16 bit PCM , 32 - bit float PCM ( using AL_EXT_FLOAT32 )
2017-02-16 02:50:02 +03:00
*
* DEPENDENCIES :
2017-12-20 14:34:18 +03:00
* mini_al - Audio device / context management ( https : //github.com/dr-soft/mini_al)
2016-11-16 20:46:13 +03:00
* stb_vorbis - OGG audio files loading ( http : //www.nothings.org/stb_vorbis/)
* jar_xm - XM module file loading
* jar_mod - MOD audio file loading
* dr_flac - FLAC audio file loading
*
2017-12-20 14:34:18 +03:00
* * OpenAL Soft - Audio device management , still used on HTML5 and OSX platforms
*
2017-02-16 02:50:02 +03:00
* CONTRIBUTORS :
2017-12-20 14:34:18 +03:00
* David Reid ( github : @ mackron ) ( Nov . 2017 ) :
* - Complete port to mini_al library
*
* Joshua Reisenauer ( github : @ kd7tck ) ( 2015 )
2017-03-19 14:52:58 +03:00
* - XM audio module support ( jar_xm )
* - MOD audio module support ( jar_mod )
* - Mixing channels support
* - Raw audio context support
2017-02-16 02:50:02 +03:00
*
2016-07-15 19:16:34 +03:00
*
2017-02-16 02:50:02 +03:00
* LICENSE : zlib / libpng
2016-11-16 20:46:13 +03:00
*
2017-12-20 14:34:18 +03:00
* Copyright ( c ) 2014 - 2018 Ramon Santamaria ( @ raysan5 )
2014-09-03 18:51:28 +04:00
*
* This software is provided " as-is " , without any express or implied warranty . In no event
2013-11-23 16:30:54 +04:00
* will the authors be held liable for any damages arising from the use of this software .
2013-11-19 02:38:44 +04:00
*
2014-09-03 18:51:28 +04:00
* Permission is granted to anyone to use this software for any purpose , including commercial
2013-11-23 16:30:54 +04:00
* applications , and to alter it and redistribute it freely , subject to the following restrictions :
2013-11-19 02:38:44 +04:00
*
2014-09-03 18:51:28 +04:00
* 1. The origin of this software must not be misrepresented ; you must not claim that you
* wrote the original software . If you use this software in a product , an acknowledgment
2013-11-23 16:30:54 +04:00
* in the product documentation would be appreciated but is not required .
2013-11-19 02:38:44 +04:00
*
2013-11-23 16:30:54 +04:00
* 2. Altered source versions must be plainly marked as such , and must not be misrepresented
* as being the original software .
2013-11-19 02:38:44 +04:00
*
2013-11-23 16:30:54 +04:00
* 3. This notice may not be removed or altered from any source distribution .
2013-11-19 02:38:44 +04:00
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2015-07-29 22:41:19 +03:00
# if defined(AUDIO_STANDALONE)
# include "audio.h"
2016-11-18 15:39:57 +03:00
# include <stdarg.h> // Required for: va_list, va_start(), vfprintf(), va_end()
2015-07-29 22:41:19 +03:00
# else
2018-05-17 01:04:58 +03:00
# include "config.h" // Defines module configuration flags
# include "raylib.h" // Declares module functions
2017-04-21 01:08:00 +03:00
# include "utils.h" // Required for: fopen() Android mapping
2015-07-29 22:41:19 +03:00
# endif
2013-11-19 02:38:44 +04:00
2018-10-18 12:38:42 +03:00
# include "external/mini_al.h" // mini_al audio library
// NOTE: Cannot be implement here because it conflicts with
// Win32 APIs: Rectangle, CloseWindow(), ShowCursor(), PlaySoundA()
2013-11-19 02:38:44 +04:00
2016-06-02 18:12:31 +03:00
# include <stdlib.h> // Required for: malloc(), free()
# include <string.h> // Required for: strcmp(), strncmp()
# include <stdio.h> // Required for: FILE, fopen(), fclose(), fread()
2013-11-19 02:38:44 +04:00
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_OGG)
//#define STB_VORBIS_HEADER_ONLY
# include "external/stb_vorbis.h" // OGG loading functions
# endif
2016-04-26 04:40:19 +03:00
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_XM)
# define JAR_XM_IMPLEMENTATION
# include "external/jar_xm.h" // XM loading functions
# endif
2013-11-19 02:38:44 +04:00
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_MOD)
# define JAR_MOD_IMPLEMENTATION
# include "external/jar_mod.h" // MOD loading functions
# endif
2016-06-02 06:09:00 +03:00
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
# define DR_FLAC_IMPLEMENTATION
# define DR_FLAC_NO_WIN32_IO
# include "external/dr_flac.h" // FLAC loading functions
# endif
2016-10-10 19:22:55 +03:00
2018-05-04 17:59:48 +03:00
# if defined(SUPPORT_FILEFORMAT_MP3)
# define DR_MP3_IMPLEMENTATION
# include "external/dr_mp3.h" // MP3 loading functions
# endif
2018-10-16 11:53:01 +03:00
# if defined(_MSC_VER)
2016-07-29 14:17:50 +03:00
# undef bool
# endif
2013-11-19 02:38:44 +04:00
//----------------------------------------------------------------------------------
// Defines and Macros
//----------------------------------------------------------------------------------
2016-08-01 22:37:45 +03:00
# define MAX_STREAM_BUFFERS 2 // Number of buffers for each audio stream
2016-08-01 13:49:17 +03:00
2016-12-25 03:58:56 +03:00
// NOTE: Music buffer size is defined by number of samples, independent of sample size and channels number
2016-08-01 13:49:17 +03:00
// After some math, considering a sampleRate of 48000, a buffer refill rate of 1/60 seconds
// and double-buffering system, I concluded that a 4096 samples buffer should be enough
2016-08-01 22:37:45 +03:00
// In case of music-stalls, just increase this number
2016-12-25 03:58:56 +03:00
# define AUDIO_BUFFER_SIZE 4096 // PCM data samples (i.e. 16bit, Mono: 8Kb)
2013-11-19 02:38:44 +04:00
//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
2014-04-19 18:36:49 +04:00
2017-12-20 02:34:31 +03:00
typedef enum {
MUSIC_AUDIO_OGG = 0 ,
MUSIC_AUDIO_FLAC ,
2018-05-17 01:04:58 +03:00
MUSIC_AUDIO_MP3 ,
2017-12-20 02:34:31 +03:00
MUSIC_MODULE_XM ,
MUSIC_MODULE_MOD
} MusicContextType ;
2016-08-01 13:49:17 +03:00
2016-05-13 07:14:02 +03:00
// Music type (file streaming from memory)
2016-08-02 20:09:07 +03:00
typedef struct MusicData {
2016-08-01 13:49:17 +03:00
MusicContextType ctxType ; // Type of music context (OGG, XM, MOD)
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2016-08-01 13:49:17 +03:00
stb_vorbis * ctxOgg ; // OGG audio context
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_FLAC)
2016-10-10 19:22:55 +03:00
drflac * ctxFlac ; // FLAC audio context
2017-03-26 23:49:01 +03:00
# endif
2018-05-17 01:04:58 +03:00
# if defined(SUPPORT_FILEFORMAT_MP3)
drmp3 ctxMp3 ; // MP3 audio context
# endif
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_XM)
2016-08-01 13:49:17 +03:00
jar_xm_context_t * ctxXm ; // XM chiptune context
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2016-08-01 13:49:17 +03:00
jar_mod_context_t ctxMod ; // MOD chiptune context
2017-03-26 23:49:01 +03:00
# endif
2016-08-01 13:49:17 +03:00
2016-08-02 18:32:24 +03:00
AudioStream stream ; // Audio stream (double buffering)
2016-08-01 13:49:17 +03:00
2017-02-06 03:03:58 +03:00
int loopCount ; // Loops count (times music repeats), -1 means infinite loop
2016-08-01 13:49:17 +03:00
unsigned int totalSamples ; // Total number of samples
unsigned int samplesLeft ; // Number of samples left to end
2016-11-18 15:39:57 +03:00
} MusicData ;
2016-08-01 13:49:17 +03:00
2015-07-31 13:31:39 +03:00
# if defined(AUDIO_STANDALONE)
2017-12-20 02:34:31 +03:00
typedef enum {
LOG_INFO = 0 ,
LOG_ERROR ,
LOG_WARNING ,
LOG_DEBUG ,
LOG_OTHER
} TraceLogType ;
2015-07-31 13:31:39 +03:00
# endif
2013-11-19 02:38:44 +04:00
//----------------------------------------------------------------------------------
// Global Variables Definition
//----------------------------------------------------------------------------------
2016-08-01 13:58:30 +03:00
// ...
2016-06-02 18:12:31 +03:00
2013-11-19 02:38:44 +04:00
//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_WAV)
2016-09-08 01:20:06 +03:00
static Wave LoadWAV ( const char * fileName ) ; // Load WAV file
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_OGG)
2016-09-08 01:20:06 +03:00
static Wave LoadOGG ( const char * fileName ) ; // Load OGG file
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_FLAC)
2016-10-10 19:22:55 +03:00
static Wave LoadFLAC ( const char * fileName ) ; // Load FLAC file
2017-03-26 23:49:01 +03:00
# endif
2018-09-19 16:57:46 +03:00
# if defined(SUPPORT_FILEFORMAT_MP3)
static Wave LoadMP3 ( const char * fileName ) ; // Load MP3 file
# endif
2014-04-09 22:25:26 +04:00
2015-07-31 13:31:39 +03:00
# if defined(AUDIO_STANDALONE)
2017-03-29 01:35:42 +03:00
bool IsFileExtension ( const char * fileName , const char * ext ) ; // Check file extension
2017-07-02 13:35:13 +03:00
void TraceLog ( int msgType , const char * text , . . . ) ; // Show trace log messages (LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_DEBUG)
2015-07-31 13:31:39 +03:00
# endif
2013-11-19 02:38:44 +04:00
//----------------------------------------------------------------------------------
2018-02-11 03:12:16 +03:00
// mini_al AudioBuffer Functionality
2013-11-19 02:38:44 +04:00
//----------------------------------------------------------------------------------
2017-11-12 07:17:05 +03:00
# define DEVICE_FORMAT mal_format_f32
# define DEVICE_CHANNELS 2
# define DEVICE_SAMPLE_RATE 44100
2017-11-18 01:42:14 +03:00
typedef enum { AUDIO_BUFFER_USAGE_STATIC = 0 , AUDIO_BUFFER_USAGE_STREAM } AudioBufferUsage ;
2017-11-12 07:17:05 +03:00
2017-12-20 02:34:31 +03:00
// Audio buffer structure
2018-02-11 03:12:16 +03:00
// NOTE: Slightly different logic is used when feeding data to the playback device depending on whether or not data is streamed
2017-12-20 13:37:43 +03:00
typedef struct AudioBuffer AudioBuffer ;
struct AudioBuffer {
2018-02-11 03:12:16 +03:00
mal_dsp dsp ; // Required for format conversion
2017-11-14 14:44:57 +03:00
float volume ;
float pitch ;
bool playing ;
bool paused ;
2018-02-11 03:12:16 +03:00
bool looping ; // Always true for AudioStreams
int usage ; // AudioBufferUsage type
2017-11-14 14:44:57 +03:00
bool isSubBufferProcessed [ 2 ] ;
unsigned int frameCursorPos ;
unsigned int bufferSizeInFrames ;
2017-12-20 13:37:43 +03:00
AudioBuffer * next ;
AudioBuffer * prev ;
2017-11-14 14:44:57 +03:00
unsigned char buffer [ 1 ] ;
2017-12-20 13:37:43 +03:00
} ;
2017-11-18 01:42:14 +03:00
2018-02-11 03:12:16 +03:00
// mini_al global variables
2017-11-12 07:17:05 +03:00
static mal_context context ;
static mal_device device ;
2017-11-18 05:15:48 +03:00
static mal_mutex audioLock ;
2018-02-11 03:12:16 +03:00
static bool isAudioInitialized = MAL_FALSE ;
static float masterVolume = 1.0f ;
2017-11-12 13:59:16 +03:00
2018-02-11 03:12:16 +03:00
// Audio buffers are tracked in a linked list
static AudioBuffer * firstAudioBuffer = NULL ;
static AudioBuffer * lastAudioBuffer = NULL ;
2017-11-12 13:59:16 +03:00
2018-02-11 03:12:16 +03:00
// mini_al functions declaration
static void OnLog ( mal_context * pContext , mal_device * pDevice , const char * message ) ;
static mal_uint32 OnSendAudioDataToDevice ( mal_device * pDevice , mal_uint32 frameCount , void * pFramesOut ) ;
static mal_uint32 OnAudioBufferDSPRead ( mal_dsp * pDSP , mal_uint32 frameCount , void * pFramesOut , void * pUserData ) ;
static void MixAudioFrames ( float * framesOut , const float * framesIn , mal_uint32 frameCount , float localVolume ) ;
// AudioBuffer management functions declaration
// NOTE: Those functions are not exposed by raylib... for the moment
AudioBuffer * CreateAudioBuffer ( mal_format format , mal_uint32 channels , mal_uint32 sampleRate , mal_uint32 bufferSizeInFrames , AudioBufferUsage usage ) ;
void DeleteAudioBuffer ( AudioBuffer * audioBuffer ) ;
bool IsAudioBufferPlaying ( AudioBuffer * audioBuffer ) ;
void PlayAudioBuffer ( AudioBuffer * audioBuffer ) ;
void StopAudioBuffer ( AudioBuffer * audioBuffer ) ;
void PauseAudioBuffer ( AudioBuffer * audioBuffer ) ;
void ResumeAudioBuffer ( AudioBuffer * audioBuffer ) ;
void SetAudioBufferVolume ( AudioBuffer * audioBuffer , float volume ) ;
void SetAudioBufferPitch ( AudioBuffer * audioBuffer , float pitch ) ;
void TrackAudioBuffer ( AudioBuffer * audioBuffer ) ;
void UntrackAudioBuffer ( AudioBuffer * audioBuffer ) ;
2017-11-15 15:04:23 +03:00
2017-11-12 13:59:16 +03:00
2018-02-11 03:12:16 +03:00
// Log callback function
static void OnLog ( mal_context * pContext , mal_device * pDevice , const char * message )
2017-11-12 07:17:05 +03:00
{
( void ) pContext ;
( void ) pDevice ;
2018-02-11 03:12:16 +03:00
TraceLog ( LOG_ERROR , message ) ; // All log messages from mini_al are errors
2017-11-15 15:04:23 +03:00
}
2018-02-11 03:12:16 +03:00
// Sending audio data to device callback function
2017-12-20 13:37:43 +03:00
static mal_uint32 OnSendAudioDataToDevice ( mal_device * pDevice , mal_uint32 frameCount , void * pFramesOut )
2017-11-12 07:17:05 +03:00
{
// This is where all of the mixing takes place.
( void ) pDevice ;
// Mixing is basically just an accumulation. We need to initialize the output buffer to 0.
2018-04-21 10:34:56 +03:00
memset ( pFramesOut , 0 , frameCount * pDevice - > channels * mal_get_bytes_per_sample ( pDevice - > format ) ) ;
2017-11-12 07:17:05 +03:00
// Using a mutex here for thread-safety which makes things not real-time. This is unlikely to be necessary for this project, but may
// want to consider how you might want to avoid this.
2017-11-18 05:15:48 +03:00
mal_mutex_lock ( & audioLock ) ;
2017-11-12 07:17:05 +03:00
{
2018-02-11 03:12:16 +03:00
for ( AudioBuffer * audioBuffer = firstAudioBuffer ; audioBuffer ! = NULL ; audioBuffer = audioBuffer - > next )
2017-11-12 07:17:05 +03:00
{
// Ignore stopped or paused sounds.
2017-12-20 13:37:43 +03:00
if ( ! audioBuffer - > playing | | audioBuffer - > paused ) continue ;
2017-11-12 07:17:05 +03:00
mal_uint32 framesRead = 0 ;
2017-12-20 13:37:43 +03:00
for ( ; ; )
{
if ( framesRead > frameCount )
{
2017-11-18 01:42:14 +03:00
TraceLog ( LOG_DEBUG , " Mixed too many frames from audio buffer " ) ;
2017-11-12 07:17:05 +03:00
break ;
}
2017-12-20 13:37:43 +03:00
if ( framesRead = = frameCount ) break ;
2017-11-12 07:17:05 +03:00
2017-11-18 01:42:14 +03:00
// Just read as much data as we can from the stream.
2017-11-12 07:17:05 +03:00
mal_uint32 framesToRead = ( frameCount - framesRead ) ;
2017-12-20 13:37:43 +03:00
while ( framesToRead > 0 )
{
2017-11-15 15:04:23 +03:00
float tempBuffer [ 1024 ] ; // 512 frames for stereo.
2017-11-12 07:17:05 +03:00
2017-11-15 15:04:23 +03:00
mal_uint32 framesToReadRightNow = framesToRead ;
2017-12-20 13:37:43 +03:00
if ( framesToReadRightNow > sizeof ( tempBuffer ) / sizeof ( tempBuffer [ 0 ] ) / DEVICE_CHANNELS )
{
2017-11-22 11:36:48 +03:00
framesToReadRightNow = sizeof ( tempBuffer ) / sizeof ( tempBuffer [ 0 ] ) / DEVICE_CHANNELS ;
2017-11-12 07:17:05 +03:00
}
2018-05-21 13:39:19 +03:00
mal_uint32 framesJustRead = ( mal_uint32 ) mal_dsp_read ( & audioBuffer - > dsp , framesToReadRightNow , tempBuffer , audioBuffer - > dsp . pUserData ) ;
2017-12-20 13:37:43 +03:00
if ( framesJustRead > 0 )
{
float * framesOut = ( float * ) pFramesOut + ( framesRead * device . channels ) ;
float * framesIn = tempBuffer ;
2018-02-11 03:12:16 +03:00
MixAudioFrames ( framesOut , framesIn , framesJustRead , audioBuffer - > volume ) ;
2017-11-15 15:04:23 +03:00
framesToRead - = framesJustRead ;
framesRead + = framesJustRead ;
2017-11-12 07:17:05 +03:00
}
2017-11-15 15:04:23 +03:00
// If we weren't able to read all the frames we requested, break.
2017-12-20 13:37:43 +03:00
if ( framesJustRead < framesToReadRightNow )
{
if ( ! audioBuffer - > looping )
{
2017-11-18 01:42:14 +03:00
StopAudioBuffer ( audioBuffer ) ;
2017-11-15 15:04:23 +03:00
break ;
2017-12-20 13:37:43 +03:00
}
else
{
2018-02-11 03:12:16 +03:00
// Should never get here, but just for safety,
// move the cursor position back to the start and continue the loop.
2017-11-18 01:42:14 +03:00
audioBuffer - > frameCursorPos = 0 ;
2017-11-15 15:04:23 +03:00
continue ;
}
}
}
2018-02-11 03:12:16 +03:00
// If for some reason we weren't able to read every frame we'll need to break from the loop.
// Not doing this could theoretically put us into an infinite loop.
2017-12-20 13:37:43 +03:00
if ( framesToRead > 0 ) break ;
2017-11-12 07:17:05 +03:00
}
}
}
2017-12-20 13:37:43 +03:00
2017-11-18 05:15:48 +03:00
mal_mutex_unlock ( & audioLock ) ;
2017-11-12 07:17:05 +03:00
return frameCount ; // We always output the same number of frames that were originally requested.
}
2018-02-11 03:12:16 +03:00
// DSP read from audio buffer callback function
static mal_uint32 OnAudioBufferDSPRead ( mal_dsp * pDSP , mal_uint32 frameCount , void * pFramesOut , void * pUserData )
{
AudioBuffer * audioBuffer = ( AudioBuffer * ) pUserData ;
mal_uint32 subBufferSizeInFrames = audioBuffer - > bufferSizeInFrames / 2 ;
mal_uint32 currentSubBufferIndex = audioBuffer - > frameCursorPos / subBufferSizeInFrames ;
if ( currentSubBufferIndex > 1 )
{
TraceLog ( LOG_DEBUG , " Frame cursor position moved too far forward in audio stream " ) ;
return 0 ;
}
// Another thread can update the processed state of buffers so we just take a copy here to try and avoid potential synchronization problems.
bool isSubBufferProcessed [ 2 ] ;
isSubBufferProcessed [ 0 ] = audioBuffer - > isSubBufferProcessed [ 0 ] ;
isSubBufferProcessed [ 1 ] = audioBuffer - > isSubBufferProcessed [ 1 ] ;
2018-04-21 10:34:56 +03:00
mal_uint32 frameSizeInBytes = mal_get_bytes_per_sample ( audioBuffer - > dsp . formatConverterIn . config . formatIn ) * audioBuffer - > dsp . formatConverterIn . config . channels ;
2018-02-11 03:12:16 +03:00
// Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0.
mal_uint32 framesRead = 0 ;
for ( ; ; )
{
// We break from this loop differently depending on the buffer's usage. For static buffers, we simply fill as much data as we can. For
// streaming buffers we only fill the halves of the buffer that are processed. Unprocessed halves must keep their audio data in-tact.
if ( audioBuffer - > usage = = AUDIO_BUFFER_USAGE_STATIC )
{
if ( framesRead > = frameCount ) break ;
}
else
{
if ( isSubBufferProcessed [ currentSubBufferIndex ] ) break ;
}
mal_uint32 totalFramesRemaining = ( frameCount - framesRead ) ;
if ( totalFramesRemaining = = 0 ) break ;
mal_uint32 framesRemainingInOutputBuffer ;
if ( audioBuffer - > usage = = AUDIO_BUFFER_USAGE_STATIC )
{
framesRemainingInOutputBuffer = audioBuffer - > bufferSizeInFrames - audioBuffer - > frameCursorPos ;
}
else
{
mal_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames * currentSubBufferIndex ;
framesRemainingInOutputBuffer = subBufferSizeInFrames - ( audioBuffer - > frameCursorPos - firstFrameIndexOfThisSubBuffer ) ;
}
mal_uint32 framesToRead = totalFramesRemaining ;
if ( framesToRead > framesRemainingInOutputBuffer ) framesToRead = framesRemainingInOutputBuffer ;
memcpy ( ( unsigned char * ) pFramesOut + ( framesRead * frameSizeInBytes ) , audioBuffer - > buffer + ( audioBuffer - > frameCursorPos * frameSizeInBytes ) , framesToRead * frameSizeInBytes ) ;
audioBuffer - > frameCursorPos = ( audioBuffer - > frameCursorPos + framesToRead ) % audioBuffer - > bufferSizeInFrames ;
framesRead + = framesToRead ;
// If we've read to the end of the buffer, mark it as processed.
if ( framesToRead = = framesRemainingInOutputBuffer )
{
audioBuffer - > isSubBufferProcessed [ currentSubBufferIndex ] = true ;
isSubBufferProcessed [ currentSubBufferIndex ] = true ;
currentSubBufferIndex = ( currentSubBufferIndex + 1 ) % 2 ;
// We need to break from this loop if we're not looping.
if ( ! audioBuffer - > looping )
{
StopAudioBuffer ( audioBuffer ) ;
break ;
}
}
}
// Zero-fill excess.
mal_uint32 totalFramesRemaining = ( frameCount - framesRead ) ;
if ( totalFramesRemaining > 0 )
{
2018-10-18 17:00:11 +03:00
memset ( ( unsigned char * ) pFramesOut + ( framesRead * frameSizeInBytes ) , 0 , totalFramesRemaining * frameSizeInBytes ) ;
2018-02-11 03:12:16 +03:00
// For static buffers we can fill the remaining frames with silence for safety, but we don't want
// to report those frames as "read". The reason for this is that the caller uses the return value
// to know whether or not a non-looping sound has finished playback.
if ( audioBuffer - > usage ! = AUDIO_BUFFER_USAGE_STATIC ) framesRead + = totalFramesRemaining ;
}
return framesRead ;
}
// This is the main mixing function. Mixing is pretty simple in this project - it's just an accumulation.
// NOTE: framesOut is both an input and an output. It will be initially filled with zeros outside of this function.
static void MixAudioFrames ( float * framesOut , const float * framesIn , mal_uint32 frameCount , float localVolume )
{
for ( mal_uint32 iFrame = 0 ; iFrame < frameCount ; + + iFrame )
{
for ( mal_uint32 iChannel = 0 ; iChannel < device . channels ; + + iChannel )
{
float * frameOut = framesOut + ( iFrame * device . channels ) ;
const float * frameIn = framesIn + ( iFrame * device . channels ) ;
frameOut [ iChannel ] + = frameIn [ iChannel ] * masterVolume * localVolume ;
}
}
}
2013-11-19 02:38:44 +04:00
2018-02-11 03:12:16 +03:00
//----------------------------------------------------------------------------------
// Module Functions Definition - Audio Device initialization and Closing
//----------------------------------------------------------------------------------
2016-08-01 13:49:17 +03:00
// Initialize audio device
2014-09-03 19:06:10 +04:00
void InitAudioDevice ( void )
2013-11-19 02:38:44 +04:00
{
2017-11-12 07:17:05 +03:00
// Context.
2018-02-11 03:12:16 +03:00
mal_context_config contextConfig = mal_context_config_init ( OnLog ) ;
2017-11-12 07:17:05 +03:00
mal_result result = mal_context_init ( NULL , 0 , & contextConfig , & context ) ;
if ( result ! = MAL_SUCCESS )
{
2017-11-24 15:13:33 +03:00
TraceLog ( LOG_ERROR , " Failed to initialize audio context " ) ;
2017-11-12 07:17:05 +03:00
return ;
}
// Device. Using the default device. Format is floating point because it simplifies mixing.
mal_device_config deviceConfig = mal_device_config_init ( DEVICE_FORMAT , DEVICE_CHANNELS , DEVICE_SAMPLE_RATE , NULL , OnSendAudioDataToDevice ) ;
2017-11-24 15:13:33 +03:00
2017-11-12 07:17:05 +03:00
result = mal_device_init ( & context , mal_device_type_playback , NULL , & deviceConfig , NULL , & device ) ;
if ( result ! = MAL_SUCCESS )
{
2017-11-24 15:13:33 +03:00
TraceLog ( LOG_ERROR , " Failed to initialize audio playback device " ) ;
2017-11-12 07:17:05 +03:00
mal_context_uninit ( & context ) ;
return ;
}
// Keep the device running the whole time. May want to consider doing something a bit smarter and only have the device running
// while there's at least one sound being played.
result = mal_device_start ( & device ) ;
if ( result ! = MAL_SUCCESS )
{
2017-11-24 15:13:33 +03:00
TraceLog ( LOG_ERROR , " Failed to start audio playback device " ) ;
2017-11-12 07:17:05 +03:00
mal_device_uninit ( & device ) ;
mal_context_uninit ( & context ) ;
return ;
}
// Mixing happens on a seperate thread which means we need to synchronize. I'm using a mutex here to make things simple, but may
// want to look at something a bit smarter later on to keep everything real-time, if that's necessary.
2017-11-18 05:15:48 +03:00
if ( mal_mutex_init ( & context , & audioLock ) ! = MAL_SUCCESS )
2017-11-12 07:17:05 +03:00
{
TraceLog ( LOG_ERROR , " Failed to create mutex for audio mixing " ) ;
mal_device_uninit ( & device ) ;
mal_context_uninit ( & context ) ;
return ;
}
2017-11-19 09:36:24 +03:00
TraceLog ( LOG_INFO , " Audio device initialized successfully: %s " , device . name ) ;
2017-11-24 14:54:00 +03:00
TraceLog ( LOG_INFO , " Audio backend: mini_al / %s " , mal_get_backend_name ( context . backend ) ) ;
TraceLog ( LOG_INFO , " Audio format: %s -> %s " , mal_get_format_name ( device . format ) , mal_get_format_name ( device . internalFormat ) ) ;
TraceLog ( LOG_INFO , " Audio channels: %d -> %d " , device . channels , device . internalChannels ) ;
TraceLog ( LOG_INFO , " Audio sample rate: %d -> %d " , device . sampleRate , device . internalSampleRate ) ;
TraceLog ( LOG_INFO , " Audio buffer size: %d " , device . bufferSizeInFrames ) ;
2017-11-12 07:17:05 +03:00
isAudioInitialized = MAL_TRUE ;
2013-11-19 02:38:44 +04:00
}
2016-05-14 10:25:40 +03:00
// Close the audio device for all contexts
2014-09-03 19:06:10 +04:00
void CloseAudioDevice ( void )
2013-11-19 02:38:44 +04:00
{
2017-12-20 13:37:43 +03:00
if ( ! isAudioInitialized )
{
2017-11-12 07:17:05 +03:00
TraceLog ( LOG_WARNING , " Could not close audio device because it is not currently initialized " ) ;
return ;
}
2017-11-18 05:15:48 +03:00
mal_mutex_uninit ( & audioLock ) ;
2017-11-12 07:17:05 +03:00
mal_device_uninit ( & device ) ;
mal_context_uninit ( & context ) ;
2016-08-16 12:09:55 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_INFO , " Audio device closed successfully " ) ;
2013-11-19 02:38:44 +04:00
}
2016-07-15 19:16:34 +03:00
// Check if device has been initialized successfully
2016-05-01 02:05:43 +03:00
bool IsAudioDeviceReady ( void )
2016-04-30 09:00:12 +03:00
{
2017-11-12 07:17:05 +03:00
return isAudioInitialized ;
2016-04-30 09:00:12 +03:00
}
2017-02-06 02:44:54 +03:00
// Set master volume (listener)
void SetMasterVolume ( float volume )
{
if ( volume < 0.0f ) volume = 0.0f ;
else if ( volume > 1.0f ) volume = 1.0f ;
2017-11-12 07:17:05 +03:00
2018-02-11 03:28:30 +03:00
masterVolume = volume ;
2017-02-06 02:44:54 +03:00
}
2017-11-18 01:42:14 +03:00
//----------------------------------------------------------------------------------
2018-02-11 03:12:16 +03:00
// Module Functions Definition - Audio Buffer management
2017-11-18 01:42:14 +03:00
//----------------------------------------------------------------------------------
2018-10-18 12:38:42 +03:00
2018-02-11 03:12:16 +03:00
// Create a new audio buffer. Initially filled with silence
2017-12-20 13:37:43 +03:00
AudioBuffer * CreateAudioBuffer ( mal_format format , mal_uint32 channels , mal_uint32 sampleRate , mal_uint32 bufferSizeInFrames , AudioBufferUsage usage )
2017-11-18 01:42:14 +03:00
{
2018-04-21 10:34:56 +03:00
AudioBuffer * audioBuffer = ( AudioBuffer * ) calloc ( sizeof ( * audioBuffer ) + ( bufferSizeInFrames * channels * mal_get_bytes_per_sample ( format ) ) , 1 ) ;
2017-11-18 01:42:14 +03:00
if ( audioBuffer = = NULL )
{
TraceLog ( LOG_ERROR , " CreateAudioBuffer() : Failed to allocate memory for audio buffer " ) ;
return NULL ;
}
// We run audio data through a format converter.
mal_dsp_config dspConfig ;
memset ( & dspConfig , 0 , sizeof ( dspConfig ) ) ;
dspConfig . formatIn = format ;
dspConfig . formatOut = DEVICE_FORMAT ;
dspConfig . channelsIn = channels ;
dspConfig . channelsOut = DEVICE_CHANNELS ;
dspConfig . sampleRateIn = sampleRate ;
dspConfig . sampleRateOut = DEVICE_SAMPLE_RATE ;
2018-04-21 10:34:56 +03:00
dspConfig . onRead = OnAudioBufferDSPRead ;
dspConfig . pUserData = audioBuffer ;
dspConfig . allowDynamicSampleRate = MAL_TRUE ; // <-- Required for pitch shifting.
mal_result resultMAL = mal_dsp_init ( & dspConfig , & audioBuffer - > dsp ) ;
2017-12-20 13:37:43 +03:00
if ( resultMAL ! = MAL_SUCCESS )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " CreateAudioBuffer() : Failed to create data conversion pipeline " ) ;
2017-11-18 01:42:14 +03:00
free ( audioBuffer ) ;
return NULL ;
}
audioBuffer - > volume = 1 ;
audioBuffer - > pitch = 1 ;
audioBuffer - > playing = 0 ;
audioBuffer - > paused = 0 ;
audioBuffer - > looping = 0 ;
audioBuffer - > usage = usage ;
audioBuffer - > bufferSizeInFrames = bufferSizeInFrames ;
audioBuffer - > frameCursorPos = 0 ;
// Buffers should be marked as processed by default so that a call to UpdateAudioStream() immediately after initialization works correctly.
audioBuffer - > isSubBufferProcessed [ 0 ] = true ;
audioBuffer - > isSubBufferProcessed [ 1 ] = true ;
TrackAudioBuffer ( audioBuffer ) ;
return audioBuffer ;
}
2018-02-11 03:12:16 +03:00
// Delete an audio buffer
2017-12-20 13:37:43 +03:00
void DeleteAudioBuffer ( AudioBuffer * audioBuffer )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " DeleteAudioBuffer() : No audio buffer " ) ;
2017-11-18 01:42:14 +03:00
return ;
}
UntrackAudioBuffer ( audioBuffer ) ;
free ( audioBuffer ) ;
}
2018-02-11 03:12:16 +03:00
// Check if an audio buffer is playing
2017-12-20 13:37:43 +03:00
bool IsAudioBufferPlaying ( AudioBuffer * audioBuffer )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " IsAudioBufferPlaying() : No audio buffer " ) ;
2017-11-18 01:42:14 +03:00
return false ;
}
return audioBuffer - > playing & & ! audioBuffer - > paused ;
}
2018-02-11 03:12:16 +03:00
// Play an audio buffer
// NOTE: Buffer is restarted to the start.
// Use PauseAudioBuffer() and ResumeAudioBuffer() if the playback position should be maintained.
2017-12-20 13:37:43 +03:00
void PlayAudioBuffer ( AudioBuffer * audioBuffer )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
TraceLog ( LOG_ERROR , " PlayAudioBuffer() : No audio buffer " ) ;
return ;
}
audioBuffer - > playing = true ;
audioBuffer - > paused = false ;
audioBuffer - > frameCursorPos = 0 ;
}
2018-02-11 03:12:16 +03:00
// Stop an audio buffer
2017-12-20 13:37:43 +03:00
void StopAudioBuffer ( AudioBuffer * audioBuffer )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " StopAudioBuffer() : No audio buffer " ) ;
2017-11-18 01:42:14 +03:00
return ;
}
// Don't do anything if the audio buffer is already stopped.
2017-12-20 13:37:43 +03:00
if ( ! IsAudioBufferPlaying ( audioBuffer ) ) return ;
2017-11-18 01:42:14 +03:00
audioBuffer - > playing = false ;
audioBuffer - > paused = false ;
audioBuffer - > frameCursorPos = 0 ;
audioBuffer - > isSubBufferProcessed [ 0 ] = true ;
audioBuffer - > isSubBufferProcessed [ 1 ] = true ;
}
2018-02-11 03:12:16 +03:00
// Pause an audio buffer
2017-12-20 13:37:43 +03:00
void PauseAudioBuffer ( AudioBuffer * audioBuffer )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " PauseAudioBuffer() : No audio buffer " ) ;
2017-11-18 01:42:14 +03:00
return ;
}
audioBuffer - > paused = true ;
}
2018-02-11 03:12:16 +03:00
// Resume an audio buffer
2017-12-20 13:37:43 +03:00
void ResumeAudioBuffer ( AudioBuffer * audioBuffer )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " ResumeAudioBuffer() : No audio buffer " ) ;
2017-11-18 01:42:14 +03:00
return ;
}
audioBuffer - > paused = false ;
}
2018-02-11 03:12:16 +03:00
// Set volume for an audio buffer
2017-12-20 13:37:43 +03:00
void SetAudioBufferVolume ( AudioBuffer * audioBuffer , float volume )
2017-11-18 01:42:14 +03:00
{
if ( audioBuffer = = NULL )
{
2018-06-29 01:40:15 +03:00
TraceLog ( LOG_ERROR , " SetAudioBufferVolume() : No audio buffer " ) ;
2017-11-18 01:42:14 +03:00
return ;
}
audioBuffer - > volume = volume ;
}
2018-02-11 03:12:16 +03:00
// Set pitch for an audio buffer
// Pitch is implemented by changing the resampler's output sample rate (see below),
// so it also changes playback speed/duration.
void SetAudioBufferPitch(AudioBuffer *audioBuffer, float pitch)
{
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "SetAudioBufferPitch() : No audio buffer");
        return;
    }

    audioBuffer->pitch = pitch;

    // Pitching is just an adjustment of the sample rate. Note that this changes the duration of the sound - higher pitches
    // will make the sound faster; lower pitches make it slower.
    // NOTE(review): the expression simplifies to (sampleRateOut/pitch), i.e. it scales the CURRENT
    // output rate rather than the original one, so repeated calls compound — confirm this is intended.
    mal_uint32 newOutputSampleRate = (mal_uint32)((((float)audioBuffer->dsp.src.config.sampleRateOut/(float)audioBuffer->dsp.src.config.sampleRateIn)/pitch)*audioBuffer->dsp.src.config.sampleRateIn);

    mal_dsp_set_output_sample_rate(&audioBuffer->dsp, newOutputSampleRate);
}
2018-02-11 03:12:16 +03:00
// Track audio buffer: append it at the tail of the global doubly-linked buffer list
void TrackAudioBuffer(AudioBuffer *audioBuffer)
{
    mal_mutex_lock(&audioLock);

    if (firstAudioBuffer == NULL) firstAudioBuffer = audioBuffer;   // List was empty
    else
    {
        lastAudioBuffer->next = audioBuffer;
        audioBuffer->prev = lastAudioBuffer;
    }

    lastAudioBuffer = audioBuffer;      // New buffer is always the new tail

    mal_mutex_unlock(&audioLock);
}
// Untrack audio buffer: unlink it from the global doubly-linked buffer list
void UntrackAudioBuffer(AudioBuffer *audioBuffer)
{
    mal_mutex_lock(&audioLock);

    // Fix up the previous node (or the list head)
    if (audioBuffer->prev == NULL) firstAudioBuffer = audioBuffer->next;
    else audioBuffer->prev->next = audioBuffer->next;

    // Fix up the next node (or the list tail)
    if (audioBuffer->next == NULL) lastAudioBuffer = audioBuffer->prev;
    else audioBuffer->next->prev = audioBuffer->prev;

    audioBuffer->prev = NULL;
    audioBuffer->next = NULL;

    mal_mutex_unlock(&audioLock);
}
2017-11-18 01:42:14 +03:00
2014-04-19 18:36:49 +04:00
//----------------------------------------------------------------------------------
// Module Functions Definition - Sounds loading and playing (.WAV)
//----------------------------------------------------------------------------------
2016-12-25 03:58:56 +03:00
// Load wave data from file
2016-09-08 01:20:06 +03:00
Wave LoadWave ( const char * fileName )
2013-11-19 02:38:44 +04:00
{
2016-01-23 15:22:13 +03:00
Wave wave = { 0 } ;
2014-09-17 00:51:31 +04:00
2017-03-29 01:35:42 +03:00
if ( IsFileExtension ( fileName , " .wav " ) ) wave = LoadWAV ( fileName ) ;
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2017-03-29 01:35:42 +03:00
else if ( IsFileExtension ( fileName , " .ogg " ) ) wave = LoadOGG ( fileName ) ;
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_FLAC)
2017-03-29 01:35:42 +03:00
else if ( IsFileExtension ( fileName , " .flac " ) ) wave = LoadFLAC ( fileName ) ;
2018-09-19 16:57:46 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_MP3)
else if ( IsFileExtension ( fileName , " .mp3 " ) ) wave = LoadMP3 ( fileName ) ;
2017-03-26 23:49:01 +03:00
# endif
2017-07-02 13:35:13 +03:00
else TraceLog ( LOG_WARNING , " [%s] Audio fileformat not supported, it can't be loaded " , fileName ) ;
2014-04-19 18:36:49 +04:00
2016-09-08 01:20:06 +03:00
return wave ;
}
2016-08-16 12:09:55 +03:00
2016-12-25 03:58:56 +03:00
// Load wave data from raw array data
Wave LoadWaveEx ( void * data , int sampleCount , int sampleRate , int sampleSize , int channels )
2016-09-08 01:20:06 +03:00
{
Wave wave ;
2017-01-29 01:02:30 +03:00
2016-09-08 01:20:06 +03:00
wave . data = data ;
2016-09-08 02:03:05 +03:00
wave . sampleCount = sampleCount ;
wave . sampleRate = sampleRate ;
2016-12-25 03:58:56 +03:00
wave . sampleSize = sampleSize ;
2016-09-08 02:03:05 +03:00
wave . channels = channels ;
2017-01-29 01:02:30 +03:00
2016-12-25 03:58:56 +03:00
// NOTE: Copy wave data to work with, user is responsible of input data to free
2016-09-15 12:53:16 +03:00
Wave cwave = WaveCopy ( wave ) ;
2017-01-29 01:02:30 +03:00
2016-09-15 12:53:16 +03:00
WaveFormat ( & cwave , sampleRate , sampleSize , channels ) ;
2017-01-29 01:02:30 +03:00
2016-09-15 12:53:16 +03:00
return cwave ;
2016-09-08 01:20:06 +03:00
}
2016-12-25 03:58:56 +03:00
// Load sound from file
2016-09-08 01:20:06 +03:00
// NOTE: The entire file is loaded to memory to be played (no-streaming)
Sound LoadSound ( const char * fileName )
{
Wave wave = LoadWave ( fileName ) ;
2017-01-29 01:02:30 +03:00
2016-09-08 01:20:06 +03:00
Sound sound = LoadSoundFromWave ( wave ) ;
2017-01-29 01:02:30 +03:00
2016-09-08 01:20:06 +03:00
UnloadWave ( wave ) ; // Sound is loaded, we can unload wave
2014-09-03 18:51:28 +04:00
2013-11-23 16:30:54 +04:00
return sound ;
2013-11-19 02:38:44 +04:00
}
2014-12-15 03:08:30 +03:00
// Load sound from wave data
2016-08-01 13:49:17 +03:00
// NOTE: Wave data must be unallocated manually
2014-12-15 03:08:30 +03:00
Sound LoadSoundFromWave ( Wave wave )
{
2016-01-23 15:22:13 +03:00
Sound sound = { 0 } ;
2014-12-15 03:08:30 +03:00
if ( wave . data ! = NULL )
{
2017-11-12 07:17:05 +03:00
// When using mini_al we need to do our own mixing. To simplify this we need convert the format of each sound to be consistent with
// the format used to open the playback device. We can do this two ways:
//
// 1) Convert the whole sound in one go at load time (here).
// 2) Convert the audio data in chunks at mixing time.
//
// I have decided on the first option because it offloads work required for the format conversion to the to the loading stage. The
// downside to this is that it uses more memory if the original sound is u8 or s16.
mal_format formatIn = ( ( wave . sampleSize = = 8 ) ? mal_format_u8 : ( ( wave . sampleSize = = 16 ) ? mal_format_s16 : mal_format_f32 ) ) ;
mal_uint32 frameCountIn = wave . sampleCount ; // Is wave->sampleCount actually the frame count? That terminology needs to change, if so.
2018-05-21 13:39:19 +03:00
mal_uint32 frameCount = ( mal_uint32 ) mal_convert_frames ( NULL , DEVICE_FORMAT , DEVICE_CHANNELS , DEVICE_SAMPLE_RATE , NULL , formatIn , wave . channels , wave . sampleRate , frameCountIn ) ;
2018-02-11 03:12:16 +03:00
if ( frameCount = = 0 ) TraceLog ( LOG_WARNING , " LoadSoundFromWave() : Failed to get frame count for format conversion " ) ;
2017-11-12 07:17:05 +03:00
2017-11-18 01:42:14 +03:00
AudioBuffer * audioBuffer = CreateAudioBuffer ( DEVICE_FORMAT , DEVICE_CHANNELS , DEVICE_SAMPLE_RATE , frameCount , AUDIO_BUFFER_USAGE_STATIC ) ;
2018-02-11 03:12:16 +03:00
if ( audioBuffer = = NULL ) TraceLog ( LOG_WARNING , " LoadSoundFromWave() : Failed to create audio buffer " ) ;
2017-11-15 15:04:23 +03:00
2018-05-21 13:39:19 +03:00
frameCount = ( mal_uint32 ) mal_convert_frames ( audioBuffer - > buffer , audioBuffer - > dsp . formatConverterIn . config . formatIn , audioBuffer - > dsp . formatConverterIn . config . channels , audioBuffer - > dsp . src . config . sampleRateIn , wave . data , formatIn , wave . channels , wave . sampleRate , frameCountIn ) ;
2018-02-11 03:12:16 +03:00
if ( frameCount = = 0 ) TraceLog ( LOG_WARNING , " LoadSoundFromWave() : Format conversion failed " ) ;
2017-11-12 07:17:05 +03:00
2017-11-18 01:42:14 +03:00
sound . audioBuffer = audioBuffer ;
2014-12-15 03:08:30 +03:00
}
return sound ;
}
2016-12-25 03:58:56 +03:00
// Unload wave data
2016-09-08 01:20:06 +03:00
void UnloadWave ( Wave wave )
{
2017-01-18 21:14:39 +03:00
if ( wave . data ! = NULL ) free ( wave . data ) ;
2016-09-08 01:20:06 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_INFO , " Unloaded wave data from RAM " ) ;
2016-09-08 01:20:06 +03:00
}
2013-11-19 02:38:44 +04:00
// Unload sound: releases the internal audio buffer
void UnloadSound(Sound sound)
{
    DeleteAudioBuffer((AudioBuffer *)sound.audioBuffer);    // Handles NULL internally (logs an error)

    // NOTE(review): this log prints sound.source and sound.buffer, which look like
    // legacy OpenAL ids rather than mini_al state — confirm these fields still exist/mean anything
    TraceLog(LOG_INFO, "[SND ID %i][BUFR ID %i] Unloaded sound data from RAM", sound.source, sound.buffer);
}
2016-08-29 12:17:58 +03:00
// Update sound buffer with new data
// NOTE: data must match sound.format
2017-02-10 00:19:48 +03:00
void UpdateSound ( Sound sound , const void * data , int samplesCount )
2016-08-29 12:17:58 +03:00
{
2017-12-20 13:37:43 +03:00
AudioBuffer * audioBuffer = ( AudioBuffer * ) sound . audioBuffer ;
2018-10-18 12:38:42 +03:00
2017-11-18 01:42:14 +03:00
if ( audioBuffer = = NULL )
2017-11-12 07:17:05 +03:00
{
2017-11-18 01:42:14 +03:00
TraceLog ( LOG_ERROR , " UpdateSound() : Invalid sound - no audio buffer " ) ;
2017-11-12 07:17:05 +03:00
return ;
}
2017-11-18 01:42:14 +03:00
StopAudioBuffer ( audioBuffer ) ;
2017-11-12 07:17:05 +03:00
2017-11-15 15:04:23 +03:00
// TODO: May want to lock/unlock this since this data buffer is read at mixing time.
2018-04-21 10:34:56 +03:00
memcpy ( audioBuffer - > buffer , data , samplesCount * audioBuffer - > dsp . formatConverterIn . config . channels * mal_get_bytes_per_sample ( audioBuffer - > dsp . formatConverterIn . config . formatIn ) ) ;
2016-08-29 12:17:58 +03:00
}
2018-09-17 17:56:02 +03:00
// Export wave data to file (.wav supported; .raw is a TODO)
void ExportWave(Wave wave, const char *fileName)
{
    bool success = false;

    if (IsFileExtension(fileName, ".wav"))
    {
        // Basic WAV headers structs
        typedef struct {
            char chunkID[4];
            int chunkSize;
            char format[4];
        } RiffHeader;
        typedef struct {
            char subChunkID[4];
            int subChunkSize;
            short audioFormat;
            short numChannels;
            int sampleRate;
            int byteRate;
            short blockAlign;
            short bitsPerSample;
        } WaveFormat;
        typedef struct {
            char subChunkID[4];
            int subChunkSize;
        } WaveData;

        RiffHeader riffHeader;
        WaveFormat waveFormat;
        WaveData waveData;

        // Total size of the raw sample data in bytes (sampleSize is in bits)
        int dataSize = wave.sampleCount*wave.channels*wave.sampleSize/8;

        // Fill structs with data
        riffHeader.chunkID[0] = 'R';
        riffHeader.chunkID[1] = 'I';
        riffHeader.chunkID[2] = 'F';
        riffHeader.chunkID[3] = 'F';
        riffHeader.chunkSize = 36 + dataSize;   // FIX: file size minus 8; previous value ignored channels and miscounted header bytes
        riffHeader.format[0] = 'W';
        riffHeader.format[1] = 'A';
        riffHeader.format[2] = 'V';
        riffHeader.format[3] = 'E';

        waveFormat.subChunkID[0] = 'f';
        waveFormat.subChunkID[1] = 'm';
        waveFormat.subChunkID[2] = 't';
        waveFormat.subChunkID[3] = ' ';
        waveFormat.subChunkSize = 16;
        waveFormat.audioFormat = 1;             // PCM
        waveFormat.numChannels = wave.channels;
        waveFormat.sampleRate = wave.sampleRate;
        waveFormat.byteRate = wave.sampleRate*wave.channels*wave.sampleSize/8;  // FIX: bytes per second must include channel count
        waveFormat.blockAlign = wave.channels*wave.sampleSize/8;                // FIX: one block covers all channels of a sample
        waveFormat.bitsPerSample = wave.sampleSize;

        waveData.subChunkID[0] = 'd';
        waveData.subChunkID[1] = 'a';
        waveData.subChunkID[2] = 't';
        waveData.subChunkID[3] = 'a';
        waveData.subChunkSize = dataSize;

        FILE *wavFile = fopen(fileName, "wb");
        if (wavFile != NULL)    // FIX: failure now falls through to the warning log instead of returning silently
        {
            fwrite(&riffHeader, 1, sizeof(RiffHeader), wavFile);
            fwrite(&waveFormat, 1, sizeof(WaveFormat), wavFile);
            fwrite(&waveData, 1, sizeof(WaveData), wavFile);
            fwrite(wave.data, 1, dataSize, wavFile);

            fclose(wavFile);
            success = true;
        }
    }
    else if (IsFileExtension(fileName, ".raw")) { }   // TODO: Support additional file formats to export wave sample data

    if (success) TraceLog(LOG_INFO, "Wave exported successfully: %s", fileName);
    else TraceLog(LOG_WARNING, "Wave could not be exported.");
}
2013-11-19 02:38:44 +04:00
// Play a sound (restarts from the beginning)
void PlaySound(Sound sound)
{
    AudioBuffer *buffer = (AudioBuffer *)sound.audioBuffer;
    PlayAudioBuffer(buffer);    // NULL buffers are handled (and reported) by the buffer layer
}
// Pause a sound (position is kept for ResumeSound())
void PauseSound(Sound sound)
{
    AudioBuffer *buffer = (AudioBuffer *)sound.audioBuffer;
    PauseAudioBuffer(buffer);   // NULL buffers are handled (and reported) by the buffer layer
}
2016-08-01 13:49:17 +03:00
// Resume a paused sound
void ResumeSound(Sound sound)
{
    AudioBuffer *buffer = (AudioBuffer *)sound.audioBuffer;
    ResumeAudioBuffer(buffer);  // NULL buffers are handled (and reported) by the buffer layer
}
2013-11-19 02:38:44 +04:00
// Stop reproducing a sound
void StopSound(Sound sound)
{
    AudioBuffer *buffer = (AudioBuffer *)sound.audioBuffer;
    StopAudioBuffer(buffer);    // NULL buffers are handled (and reported) by the buffer layer
}
2014-01-23 15:36:18 +04:00
// Check if a sound is playing
2016-05-03 19:04:21 +03:00
bool IsSoundPlaying ( Sound sound )
2014-01-23 15:36:18 +04:00
{
2017-12-20 13:37:43 +03:00
return IsAudioBufferPlaying ( ( AudioBuffer * ) sound . audioBuffer ) ;
2014-01-23 15:36:18 +04:00
}
2014-04-19 18:36:49 +04:00
// Set volume for a sound
void SetSoundVolume(Sound sound, float volume)
{
    AudioBuffer *buffer = (AudioBuffer *)sound.audioBuffer;
    SetAudioBufferVolume(buffer, volume);   // NULL buffers are handled (and reported) by the buffer layer
}
// Set pitch for a sound (also changes playback speed)
void SetSoundPitch(Sound sound, float pitch)
{
    AudioBuffer *buffer = (AudioBuffer *)sound.audioBuffer;
    SetAudioBufferPitch(buffer, pitch);     // NULL buffers are handled (and reported) by the buffer layer
}
2016-09-08 01:20:06 +03:00
// Convert wave data to desired sample rate, sample size and channel count (in place)
// On any failure the wave is left untouched
void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
{
    mal_format formatIn = ((wave->sampleSize == 8)? mal_format_u8 : ((wave->sampleSize == 16)? mal_format_s16 : mal_format_f32));
    mal_format formatOut = ((sampleSize == 8)? mal_format_u8 : ((sampleSize == 16)? mal_format_s16 : mal_format_f32));
    mal_uint32 frameCountIn = wave->sampleCount;    // Is wave->sampleCount actually the frame count? That terminology needs to change, if so.

    // First pass (NULL buffers) only computes the converted frame count
    mal_uint32 frameCount = (mal_uint32)mal_convert_frames(NULL, formatOut, channels, sampleRate, NULL, formatIn, wave->channels, wave->sampleRate, frameCountIn);
    if (frameCount == 0)
    {
        TraceLog(LOG_ERROR, "WaveFormat() : Failed to get frame count for format conversion.");
        return;
    }

    void *data = malloc(frameCount*channels*(sampleSize/8));
    if (data == NULL)
    {
        // FIX: allocation result was previously used unchecked
        TraceLog(LOG_ERROR, "WaveFormat() : Failed to allocate memory for format conversion.");
        return;
    }

    frameCount = (mal_uint32)mal_convert_frames(data, formatOut, channels, sampleRate, wave->data, formatIn, wave->channels, wave->sampleRate, frameCountIn);
    if (frameCount == 0)
    {
        TraceLog(LOG_ERROR, "WaveFormat() : Format conversion failed.");
        free(data);     // FIX: this buffer was previously leaked on conversion failure
        return;
    }

    wave->sampleCount = frameCount;
    wave->sampleSize = sampleSize;
    wave->sampleRate = sampleRate;
    wave->channels = channels;

    free(wave->data);
    wave->data = data;
}
// Copy a wave into a newly allocated wave
// NOTE: Returns an empty wave (data == NULL) if allocation fails
Wave WaveCopy(Wave wave)
{
    Wave newWave = { 0 };

    // Size of the sample data in bytes (sampleSize is expressed in bits)
    unsigned int dataSize = wave.sampleCount*wave.channels*wave.sampleSize/8;

    newWave.data = malloc(dataSize);

    if (newWave.data != NULL)
    {
        memcpy(newWave.data, wave.data, dataSize);

        newWave.sampleCount = wave.sampleCount;
        newWave.sampleRate = wave.sampleRate;
        newWave.sampleSize = wave.sampleSize;
        newWave.channels = wave.channels;
    }

    return newWave;
}
// Crop a wave to the defined samples range (in place)
// NOTE: Security check in case of out-of-range
void WaveCrop(Wave *wave, int initSample, int finalSample)
{
    if ((initSample >= 0) && (initSample < finalSample) &&
        (finalSample > 0) && ((unsigned int)finalSample < wave->sampleCount))
    {
        int sampleCount = finalSample - initSample;

        void *data = malloc(sampleCount*wave->sampleSize/8*wave->channels);
        if (data == NULL)
        {
            // FIX: allocation result was previously used unchecked
            TraceLog(LOG_WARNING, "WaveCrop() : Failed to allocate memory for cropped data");
            return;
        }

        memcpy(data, (unsigned char *)wave->data + (initSample*wave->channels*wave->sampleSize/8), sampleCount*wave->channels*wave->sampleSize/8);

        free(wave->data);
        wave->data = data;
        wave->sampleCount = sampleCount;    // FIX: sample count was previously left at the old (larger) value
    }
    else TraceLog(LOG_WARNING, "Wave crop range out of bounds");
}
// Get samples data from wave as a floats array
2016-09-09 02:34:30 +03:00
// NOTE: Returned sample values are normalized to range [-1..1]
2016-09-08 01:20:06 +03:00
float * GetWaveData ( Wave wave )
{
2016-12-25 03:58:56 +03:00
float * samples = ( float * ) malloc ( wave . sampleCount * wave . channels * sizeof ( float ) ) ;
2017-01-29 01:02:30 +03:00
2018-05-21 13:46:22 +03:00
for ( unsigned int i = 0 ; i < wave . sampleCount ; i + + )
2016-09-08 01:20:06 +03:00
{
2018-05-21 13:46:22 +03:00
for ( unsigned int j = 0 ; j < wave . channels ; j + + )
2016-12-25 03:58:56 +03:00
{
if ( wave . sampleSize = = 8 ) samples [ wave . channels * i + j ] = ( float ) ( ( ( unsigned char * ) wave . data ) [ wave . channels * i + j ] - 127 ) / 256.0f ;
else if ( wave . sampleSize = = 16 ) samples [ wave . channels * i + j ] = ( float ) ( ( short * ) wave . data ) [ wave . channels * i + j ] / 32767.0f ;
else if ( wave . sampleSize = = 32 ) samples [ wave . channels * i + j ] = ( ( float * ) wave . data ) [ wave . channels * i + j ] ;
}
2016-09-08 01:20:06 +03:00
}
2017-01-29 01:02:30 +03:00
2016-09-08 01:20:06 +03:00
return samples ;
}
2014-04-19 18:36:49 +04:00
//----------------------------------------------------------------------------------
// Module Functions Definition - Music loading and stream playing (.OGG)
//----------------------------------------------------------------------------------
2016-08-01 13:49:17 +03:00
// Load music stream from file
2016-09-08 01:20:06 +03:00
Music LoadMusicStream ( const char * fileName )
2016-07-29 22:35:57 +03:00
{
2016-08-01 13:49:17 +03:00
Music music = ( MusicData * ) malloc ( sizeof ( MusicData ) ) ;
2018-07-28 19:07:06 +03:00
bool musicLoaded = true ;
2016-07-29 22:35:57 +03:00
2017-03-29 01:35:42 +03:00
if ( IsFileExtension ( fileName , " .ogg " ) )
2016-07-29 22:35:57 +03:00
{
2016-08-01 13:49:17 +03:00
// Open ogg audio stream
music - > ctxOgg = stb_vorbis_open_filename ( fileName , NULL , NULL ) ;
2016-07-29 22:35:57 +03:00
2018-07-28 19:07:06 +03:00
if ( music - > ctxOgg = = NULL ) musicLoaded = false ;
2014-04-19 18:36:49 +04:00
else
{
2016-08-01 13:49:17 +03:00
stb_vorbis_info info = stb_vorbis_get_info ( music - > ctxOgg ) ; // Get Ogg file info
2016-07-29 22:35:57 +03:00
2016-12-25 03:58:56 +03:00
// OGG bit rate defaults to 16 bit, it's enough for compressed format
2016-08-01 13:49:17 +03:00
music - > stream = InitAudioStream ( info . sample_rate , 16 , info . channels ) ;
2017-01-18 19:04:20 +03:00
music - > totalSamples = ( unsigned int ) stb_vorbis_stream_length_in_samples ( music - > ctxOgg ) ; // Independent by channel
2016-08-01 13:49:17 +03:00
music - > samplesLeft = music - > totalSamples ;
music - > ctxType = MUSIC_AUDIO_OGG ;
2017-02-06 03:03:58 +03:00
music - > loopCount = - 1 ; // Infinite loop by default
2016-08-01 22:37:45 +03:00
2018-05-21 13:39:42 +03:00
TraceLog ( LOG_DEBUG , " [%s] OGG total samples: %i " , fileName , music - > totalSamples ) ;
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_DEBUG , " [%s] OGG sample rate: %i " , fileName , info . sample_rate ) ;
TraceLog ( LOG_DEBUG , " [%s] OGG channels: %i " , fileName , info . channels ) ;
TraceLog ( LOG_DEBUG , " [%s] OGG memory required: %i " , fileName , info . temp_memory_required ) ;
2014-04-19 18:36:49 +04:00
}
}
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2017-03-29 01:35:42 +03:00
else if ( IsFileExtension ( fileName , " .flac " ) )
2016-10-10 19:22:55 +03:00
{
music - > ctxFlac = drflac_open_file ( fileName ) ;
2017-01-29 01:02:30 +03:00
2018-07-28 19:07:06 +03:00
if ( music - > ctxFlac = = NULL ) musicLoaded = false ;
2016-10-10 19:22:55 +03:00
else
{
music - > stream = InitAudioStream ( music - > ctxFlac - > sampleRate , music - > ctxFlac - > bitsPerSample , music - > ctxFlac - > channels ) ;
2016-12-26 12:52:57 +03:00
music - > totalSamples = ( unsigned int ) music - > ctxFlac - > totalSampleCount / music - > ctxFlac - > channels ;
2016-10-10 19:22:55 +03:00
music - > samplesLeft = music - > totalSamples ;
music - > ctxType = MUSIC_AUDIO_FLAC ;
2017-02-06 03:03:58 +03:00
music - > loopCount = - 1 ; // Infinite loop by default
2017-01-29 01:02:30 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_DEBUG , " [%s] FLAC total samples: %i " , fileName , music - > totalSamples ) ;
TraceLog ( LOG_DEBUG , " [%s] FLAC sample rate: %i " , fileName , music - > ctxFlac - > sampleRate ) ;
TraceLog ( LOG_DEBUG , " [%s] FLAC bits per sample: %i " , fileName , music - > ctxFlac - > bitsPerSample ) ;
TraceLog ( LOG_DEBUG , " [%s] FLAC channels: %i " , fileName , music - > ctxFlac - > channels ) ;
2016-10-10 19:22:55 +03:00
}
}
2017-03-26 23:49:01 +03:00
# endif
2018-05-17 01:04:58 +03:00
# if defined(SUPPORT_FILEFORMAT_MP3)
else if ( IsFileExtension ( fileName , " .mp3 " ) )
{
2018-10-17 20:39:16 +03:00
int result = drmp3_init_file ( & music - > ctxMp3 , fileName , NULL ) ;
2018-05-17 01:04:58 +03:00
2018-10-17 20:39:16 +03:00
if ( ! result ) musicLoaded = false ;
2018-05-17 01:04:58 +03:00
else
{
2018-10-17 20:39:16 +03:00
TraceLog ( LOG_INFO , " [%s] MP3 sample rate: %i " , fileName , music - > ctxMp3 . sampleRate ) ;
TraceLog ( LOG_INFO , " [%s] MP3 bits per sample: %i " , fileName , 32 ) ;
TraceLog ( LOG_INFO , " [%s] MP3 channels: %i " , fileName , music - > ctxMp3 . channels ) ;
TraceLog ( LOG_INFO , " [%s] MP3 frames remaining: %i " , fileName , ( unsigned int ) music - > ctxMp3 . framesRemaining ) ;
music - > stream = InitAudioStream ( music - > ctxMp3 . sampleRate , 32 , music - > ctxMp3 . channels ) ;
// TODO: It seems the total number of samples is not obtained correctly...
2018-07-20 00:15:46 +03:00
music - > totalSamples = ( unsigned int ) music - > ctxMp3 . framesRemaining * music - > ctxMp3 . channels ;
2018-05-17 01:04:58 +03:00
music - > samplesLeft = music - > totalSamples ;
music - > ctxType = MUSIC_AUDIO_MP3 ;
music - > loopCount = - 1 ; // Infinite loop by default
2018-10-17 20:39:16 +03:00
TraceLog ( LOG_INFO , " [%s] MP3 total samples: %i " , fileName , music - > totalSamples ) ;
2018-05-17 01:04:58 +03:00
}
}
# endif
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_XM)
2017-03-29 01:35:42 +03:00
else if ( IsFileExtension ( fileName , " .xm " ) )
2016-04-25 04:18:18 +03:00
{
2016-08-01 13:49:17 +03:00
int result = jar_xm_create_context_from_file ( & music - > ctxXm , 48000 , fileName ) ;
2016-08-16 12:09:55 +03:00
2016-08-01 13:49:17 +03:00
if ( ! result ) // XM context created successfully
2016-04-25 04:18:18 +03:00
{
2017-02-06 03:03:58 +03:00
jar_xm_set_max_loop_count ( music - > ctxXm , 0 ) ; // Set infinite number of loops
2016-08-01 13:49:17 +03:00
// NOTE: Only stereo is supported for XM
2016-12-25 03:58:56 +03:00
music - > stream = InitAudioStream ( 48000 , 16 , 2 ) ;
2016-08-01 22:37:45 +03:00
music - > totalSamples = ( unsigned int ) jar_xm_get_remaining_samples ( music - > ctxXm ) ;
music - > samplesLeft = music - > totalSamples ;
2016-08-01 13:49:17 +03:00
music - > ctxType = MUSIC_MODULE_XM ;
2017-02-06 03:03:58 +03:00
music - > loopCount = - 1 ; // Infinite loop by default
2016-08-16 12:09:55 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_DEBUG , " [%s] XM number of samples: %i " , fileName , music - > totalSamples ) ;
TraceLog ( LOG_DEBUG , " [%s] XM track length: %11.6f sec " , fileName , ( float ) music - > totalSamples / 48000.0f ) ;
2016-05-12 06:15:37 +03:00
}
2018-07-28 19:07:06 +03:00
else musicLoaded = false ;
2016-05-12 06:15:37 +03:00
}
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2017-03-29 01:35:42 +03:00
else if ( IsFileExtension ( fileName , " .mod " ) )
2016-06-02 06:09:00 +03:00
{
2016-08-01 13:49:17 +03:00
jar_mod_init ( & music - > ctxMod ) ;
2016-07-29 22:35:57 +03:00
2016-08-01 13:49:17 +03:00
if ( jar_mod_load_file ( & music - > ctxMod , fileName ) )
2016-06-02 06:09:00 +03:00
{
2016-08-01 22:37:45 +03:00
music - > stream = InitAudioStream ( 48000 , 16 , 2 ) ;
2016-08-01 13:49:17 +03:00
music - > totalSamples = ( unsigned int ) jar_mod_max_samples ( & music - > ctxMod ) ;
music - > samplesLeft = music - > totalSamples ;
2016-08-01 22:37:45 +03:00
music - > ctxType = MUSIC_MODULE_MOD ;
2017-02-06 03:03:58 +03:00
music - > loopCount = - 1 ; // Infinite loop by default
2016-07-29 22:35:57 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_DEBUG , " [%s] MOD number of samples: %i " , fileName , music - > samplesLeft ) ;
TraceLog ( LOG_DEBUG , " [%s] MOD track length: %11.6f sec " , fileName , ( float ) music - > totalSamples / 48000.0f ) ;
2016-06-02 06:09:00 +03:00
}
2018-07-28 19:07:06 +03:00
else musicLoaded = false ;
2016-06-02 06:09:00 +03:00
}
2017-03-26 23:49:01 +03:00
# endif
2018-07-28 19:07:06 +03:00
else musicLoaded = false ;
if ( ! musicLoaded )
{
2018-07-28 19:19:53 +03:00
if ( music - > ctxType = = MUSIC_AUDIO_OGG ) stb_vorbis_close ( music - > ctxOgg ) ;
2018-07-28 19:07:06 +03:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2018-07-28 19:19:53 +03:00
else if ( music - > ctxType = = MUSIC_AUDIO_FLAC ) drflac_free ( music - > ctxFlac ) ;
2018-07-28 19:07:06 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_MP3)
2018-07-28 19:19:53 +03:00
else if ( music - > ctxType = = MUSIC_AUDIO_MP3 ) drmp3_uninit ( & music - > ctxMp3 ) ;
2018-07-28 19:07:06 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_XM)
2018-07-28 19:19:53 +03:00
else if ( music - > ctxType = = MUSIC_MODULE_XM ) jar_xm_free_context ( music - > ctxXm ) ;
2018-07-28 19:07:06 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2018-07-28 19:19:53 +03:00
else if ( music - > ctxType = = MUSIC_MODULE_MOD ) jar_mod_unload ( & music - > ctxMod ) ;
2018-07-28 19:07:06 +03:00
# endif
2018-07-29 10:52:18 +03:00
free ( music ) ;
music = NULL ;
2018-07-28 19:07:06 +03:00
TraceLog ( LOG_WARNING , " [%s] Music file could not be opened " , fileName ) ;
}
2016-07-29 22:35:57 +03:00
2016-08-01 13:49:17 +03:00
return music ;
2014-04-19 18:36:49 +04:00
}
2016-08-01 13:49:17 +03:00
// Unload music stream: close the audio stream, release the decoder context and the music struct
void UnloadMusicStream(Music music)
{
    if (music == NULL) return;      // FIX: LoadMusicStream() can return NULL on failure

    CloseAudioStream(music->stream);

    if (music->ctxType == MUSIC_AUDIO_OGG) stb_vorbis_close(music->ctxOgg);
#if defined(SUPPORT_FILEFORMAT_FLAC)
    else if (music->ctxType == MUSIC_AUDIO_FLAC) drflac_free(music->ctxFlac);
#endif
#if defined(SUPPORT_FILEFORMAT_MP3)
    else if (music->ctxType == MUSIC_AUDIO_MP3) drmp3_uninit(&music->ctxMp3);
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
    else if (music->ctxType == MUSIC_MODULE_XM) jar_xm_free_context(music->ctxXm);
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
    else if (music->ctxType == MUSIC_MODULE_MOD) jar_mod_unload(&music->ctxMod);
#endif

    free(music);
}
2016-07-29 22:35:57 +03:00
2016-08-01 13:49:17 +03:00
// Start music playing (open stream)
void PlayMusicStream(Music music)
{
    if (music == NULL)
    {
        // FIX: LoadMusicStream() can return NULL on failure; guard before dereferencing
        TraceLog(LOG_ERROR, "PlayMusicStream() : No music loaded");
        return;
    }

    AudioBuffer *audioBuffer = (AudioBuffer *)music->stream.audioBuffer;

    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "PlayMusicStream() : No audio buffer");
        return;
    }

    // For music streams, we need to make sure we maintain the frame cursor position. This is hack for this section of code in UpdateMusicStream()
    //     // NOTE: In case window is minimized, music stream is stopped,
    //     // just make sure to play again on window restore
    //     if (IsMusicPlaying(music)) PlayMusicStream(music);
    mal_uint32 frameCursorPos = audioBuffer->frameCursorPos;

    PlayAudioStream(music->stream);     // <-- This resets the cursor position.

    audioBuffer->frameCursorPos = frameCursorPos;
}
2016-08-01 13:49:17 +03:00
// Pause music playing
void PauseMusicStream(Music music)
{
    // FIX: guard against NULL music (LoadMusicStream() can return NULL on failure)
    if (music != NULL) PauseAudioStream(music->stream);
}
2016-07-29 22:35:57 +03:00
2016-08-01 13:49:17 +03:00
// Resume music playing
void ResumeMusicStream(Music music)
{
    // FIX: guard against NULL music (LoadMusicStream() can return NULL on failure)
    if (music != NULL) ResumeAudioStream(music->stream);
}
2016-07-29 22:35:57 +03:00
2016-08-01 13:49:17 +03:00
// Stop music playing (close stream)
2017-05-10 20:34:57 +03:00
// TODO: To clear a buffer, make sure they have been already processed!
2016-08-01 13:49:17 +03:00
void StopMusicStream ( Music music )
{
2017-11-12 14:55:24 +03:00
StopAudioStream ( music - > stream ) ;
2017-03-05 12:55:58 +03:00
// Restart music context
2016-09-15 12:53:16 +03:00
switch ( music - > ctxType )
{
case MUSIC_AUDIO_OGG : stb_vorbis_seek_start ( music - > ctxOgg ) ; break ;
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2018-07-05 15:57:17 +03:00
case MUSIC_AUDIO_FLAC : /* TODO: Restart FLAC context */ break ;
2017-03-26 23:49:01 +03:00
# endif
2018-09-19 16:57:46 +03:00
# if defined(SUPPORT_FILEFORMAT_MP3)
case MUSIC_AUDIO_MP3 : /* TODO: Restart MP3 context */ break ;
# endif
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_XM)
2016-12-25 03:58:56 +03:00
case MUSIC_MODULE_XM : /* TODO: Restart XM context */ break ;
2017-03-26 23:49:01 +03:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2016-09-15 12:53:16 +03:00
case MUSIC_MODULE_MOD : jar_mod_seek_start ( & music - > ctxMod ) ; break ;
2017-03-26 23:49:01 +03:00
# endif
2016-09-15 12:53:16 +03:00
default : break ;
}
2017-01-29 01:02:30 +03:00
2016-09-15 12:53:16 +03:00
music - > samplesLeft = music - > totalSamples ;
2016-05-11 10:37:10 +03:00
}
2014-09-03 18:51:28 +04:00
2016-07-15 19:16:34 +03:00
// Update (re-fill) music buffers if data already processed
// TODO: Make sure buffers are ready for update... check music state
void UpdateMusicStream(Music music)
{
    bool streamEnding = false;

    // One sub-buffer is half the stream buffer: refills happen one sub-buffer at a time
    unsigned int subBufferSizeInFrames = ((AudioBuffer *)music->stream.audioBuffer)->bufferSizeInFrames/2;

    // NOTE: Using dynamic allocation because it could require more than 16KB
    void *pcm = calloc(subBufferSizeInFrames*music->stream.sampleSize/8*music->stream.channels, 1);

    int samplesCount = 0;    // Total size of data steamed in L+R samples for xm floats, individual L or R for ogg shorts

    // Keep refilling while there is a processed sub-buffer available
    while (IsAudioBufferProcessed(music->stream))
    {
        // Clamp the request to whatever is left in the music file
        if (music->samplesLeft >= subBufferSizeInFrames) samplesCount = subBufferSizeInFrames;
        else samplesCount = music->samplesLeft;

        // Decode the next chunk with the decoder matching the music file format
        // TODO: Really don't like ctxType thingy...
        switch (music->ctxType)
        {
            case MUSIC_AUDIO_OGG:
            {
                // NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
                stb_vorbis_get_samples_short_interleaved(music->ctxOgg, music->stream.channels, (short *)pcm, samplesCount*music->stream.channels);

            } break;
#if defined(SUPPORT_FILEFORMAT_FLAC)
            case MUSIC_AUDIO_FLAC:
            {
                // NOTE: Returns the number of samples to process
                unsigned int numSamplesFlac = (unsigned int)drflac_read_s16(music->ctxFlac, samplesCount*music->stream.channels, (short *)pcm);

            } break;
#endif
#if defined(SUPPORT_FILEFORMAT_MP3)
            case MUSIC_AUDIO_MP3:
            {
                // NOTE: Returns the number of samples to process
                unsigned int numSamplesMp3 = (unsigned int)drmp3_read_f32(&music->ctxMp3, samplesCount*music->stream.channels, (float *)pcm);

            } break;
#endif
#if defined(SUPPORT_FILEFORMAT_XM)
            case MUSIC_MODULE_XM: jar_xm_generate_samples_16bit(music->ctxXm, pcm, samplesCount); break;
#endif
#if defined(SUPPORT_FILEFORMAT_MOD)
            case MUSIC_MODULE_MOD: jar_mod_fillbuffer(&music->ctxMod, pcm, samplesCount, 0); break;
#endif
            default: break;
        }

        // Push decoded data into the processed sub-buffer and consume from the counter
        UpdateAudioStream(music->stream, pcm, samplesCount);
        music->samplesLeft -= samplesCount;

        if (music->samplesLeft <= 0)
        {
            streamEnding = true;
            break;
        }
    }

    // Free allocated pcm data
    free(pcm);

    // Reset audio stream for looping
    if (streamEnding)
    {
        StopMusicStream(music);        // Stop music (and reset)

        // Decrease loopCount to stop when required
        if (music->loopCount > 0)
        {
            music->loopCount--;        // Decrease loop count
            PlayMusicStream(music);    // Play again
        }
        else
        {
            // loopCount == -1 means infinite looping (see SetMusicLoopCount())
            if (music->loopCount == -1) PlayMusicStream(music);
        }
    }
    else
    {
        // NOTE: In case window is minimized, music stream is stopped,
        // just make sure to play again on window restore
        if (IsMusicPlaying(music)) PlayMusicStream(music);
    }
}
2016-05-12 04:14:59 +03:00
// Check if any music is playing
2016-08-01 13:49:17 +03:00
bool IsMusicPlaying ( Music music )
2014-04-09 22:25:26 +04:00
{
2017-11-12 14:55:24 +03:00
return IsAudioStreamPlaying ( music - > stream ) ;
2014-04-09 22:25:26 +04:00
}
2014-04-19 18:36:49 +04:00
// Set volume for music
2016-08-01 13:49:17 +03:00
void SetMusicVolume ( Music music , float volume )
2014-01-23 15:36:18 +04:00
{
2017-11-14 14:15:50 +03:00
SetAudioStreamVolume ( music - > stream , volume ) ;
2016-05-12 04:14:59 +03:00
}
2016-06-02 18:12:31 +03:00
// Set pitch for music
2016-08-01 13:49:17 +03:00
void SetMusicPitch ( Music music , float pitch )
2016-05-12 04:14:59 +03:00
{
2017-11-14 14:15:50 +03:00
SetAudioStreamPitch ( music - > stream , pitch ) ;
2014-01-23 15:36:18 +04:00
}
2017-01-24 02:32:16 +03:00
2017-02-06 03:03:58 +03:00
// Set music loop count (loop repeats)
// NOTE: If set to -1, means infinite loop
// The counter is consumed by UpdateMusicStream() each time the stream ends
void SetMusicLoopCount(Music music, int count)
{
    music->loopCount = count;
}
2016-06-02 06:09:00 +03:00
// Get music time length (in seconds)
2016-08-01 13:49:17 +03:00
float GetMusicTimeLength ( Music music )
2014-01-23 15:36:18 +04:00
{
2016-08-01 13:49:17 +03:00
float totalSeconds = ( float ) music - > totalSamples / music - > stream . sampleRate ;
2016-08-16 12:09:55 +03:00
2014-04-19 18:36:49 +04:00
return totalSeconds ;
}
// Get current music time played (in seconds)
2016-08-01 13:49:17 +03:00
float GetMusicTimePlayed ( Music music )
2014-04-19 18:36:49 +04:00
{
2016-05-21 19:08:09 +03:00
float secondsPlayed = 0.0f ;
2016-07-29 22:35:57 +03:00
2016-08-02 18:32:24 +03:00
unsigned int samplesPlayed = music - > totalSamples - music - > samplesLeft ;
2016-12-25 03:58:56 +03:00
secondsPlayed = ( float ) samplesPlayed / music - > stream . sampleRate ;
2016-08-01 13:49:17 +03:00
return secondsPlayed ;
}
2017-11-12 13:59:16 +03:00
2016-08-01 13:49:17 +03:00
// Init audio stream (to stream audio pcm data)
AudioStream InitAudioStream(unsigned int sampleRate, unsigned int sampleSize, unsigned int channels)
{
    AudioStream stream = { 0 };

    stream.sampleRate = sampleRate;
    stream.sampleSize = sampleSize;

    // Only mono and stereo channels are supported, more channels require AL_EXT_MCFORMATS extension
    if ((channels > 0) && (channels < 3)) stream.channels = channels;
    else
    {
        TraceLog(LOG_WARNING, "Init audio stream: Number of channels not supported: %i", channels);
        stream.channels = 1;    // Fallback to mono channel
    }

    // Map requested sample size to a mini_al format
    // NOTE: Any sample size other than 8 or 16 falls through to 32-bit float
    mal_format formatIn = ((stream.sampleSize == 8)? mal_format_u8 : ((stream.sampleSize == 16)? mal_format_s16 : mal_format_f32));

    // The size of a streaming buffer must be at least double the size of a period.
    // NOTE(review): 'device' appears to be the module-global playback device — confirm it is initialized before this call
    unsigned int periodSize = device.bufferSizeInFrames/device.periods;
    unsigned int subBufferSize = AUDIO_BUFFER_SIZE;

    if (subBufferSize < periodSize) subBufferSize = periodSize;

    // Allocate two sub-buffers worth of space (double buffering: one plays while the other refills)
    AudioBuffer *audioBuffer = CreateAudioBuffer(formatIn, stream.channels, stream.sampleRate, subBufferSize*2, AUDIO_BUFFER_USAGE_STREAM);
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "InitAudioStream() : Failed to create audio buffer");
        return stream;    // NOTE: stream.audioBuffer stays NULL; callers should handle this
    }

    audioBuffer->looping = true;    // Always loop for streaming buffers.
    stream.audioBuffer = audioBuffer;

    // NOTE(review): stream.source is never assigned in this function (zero from initializer),
    // so the logged ID is always 0 — confirm whether this field is still meaningful post-OpenAL
    TraceLog(LOG_INFO, "[AUD ID %i] Audio stream loaded successfully (%i Hz, %i bit, %s)", stream.source, stream.sampleRate, stream.sampleSize, (stream.channels == 1)? "Mono" : "Stereo");

    return stream;
}
2016-08-01 13:49:17 +03:00
// Close audio stream and free memory
2016-08-02 18:32:24 +03:00
void CloseAudioStream ( AudioStream stream )
2016-08-01 13:49:17 +03:00
{
2017-12-20 13:37:43 +03:00
DeleteAudioBuffer ( ( AudioBuffer * ) stream . audioBuffer ) ;
2017-11-12 13:59:16 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_INFO , " [AUD ID %i] Unloaded audio stream data " , stream . source ) ;
2016-08-01 13:49:17 +03:00
}
2016-08-02 18:32:24 +03:00
// Update audio stream buffers with data
// NOTE 1: Only updates one buffer of the stream source: unqueue -> update -> queue
// NOTE 2: To unqueue a buffer it needs to be processed: IsAudioBufferProcessed()
void UpdateAudioStream(AudioStream stream, const void *data, int samplesCount)
{
    AudioBuffer *audioBuffer = (AudioBuffer *)stream.audioBuffer;
    if (audioBuffer == NULL)
    {
        TraceLog(LOG_ERROR, "UpdateAudioStream() : No audio buffer");
        return;
    }

    // At least one of the two sub-buffers must have been fully played (processed)
    if (audioBuffer->isSubBufferProcessed[0] || audioBuffer->isSubBufferProcessed[1])
    {
        mal_uint32 subBufferToUpdate;

        if (audioBuffer->isSubBufferProcessed[0] && audioBuffer->isSubBufferProcessed[1])
        {
            // Both buffers are available for updating. Update the first one and make sure the cursor is moved back to the front.
            subBufferToUpdate = 0;
            audioBuffer->frameCursorPos = 0;
        }
        else
        {
            // Just update whichever sub-buffer is processed.
            subBufferToUpdate = (audioBuffer->isSubBufferProcessed[0])? 0 : 1;
        }

        // Each sub-buffer is half the total buffer; compute its byte offset
        mal_uint32 subBufferSizeInFrames = audioBuffer->bufferSizeInFrames/2;
        unsigned char *subBuffer = audioBuffer->buffer + ((subBufferSizeInFrames*stream.channels*(stream.sampleSize/8))*subBufferToUpdate);

        // Does this API expect a whole buffer to be updated in one go? Assuming so, but if not will need to change this logic.
        if (subBufferSizeInFrames >= (mal_uint32)samplesCount)
        {
            mal_uint32 framesToWrite = subBufferSizeInFrames;

            // Never copy more frames than the caller provided
            if (framesToWrite > (mal_uint32)samplesCount) framesToWrite = (mal_uint32)samplesCount;

            mal_uint32 bytesToWrite = framesToWrite*stream.channels*(stream.sampleSize/8);
            memcpy(subBuffer, data, bytesToWrite);

            // Any leftover frames should be filled with zeros.
            mal_uint32 leftoverFrameCount = subBufferSizeInFrames - framesToWrite;

            if (leftoverFrameCount > 0)
            {
                memset(subBuffer + bytesToWrite, 0, leftoverFrameCount*stream.channels*(stream.sampleSize/8));
            }

            // Mark the sub-buffer as filled (not yet played)
            audioBuffer->isSubBufferProcessed[subBufferToUpdate] = false;
        }
        else
        {
            TraceLog(LOG_ERROR, "UpdateAudioStream() : Attempting to write too many frames to buffer");
            return;
        }
    }
    else
    {
        TraceLog(LOG_ERROR, "Audio buffer not available for updating");
        return;
    }
}
// Check if any audio stream buffers requires refill
bool IsAudioBufferProcessed(AudioStream stream)
{
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;

    // A stream needs refilling when either of its two sub-buffers has been played out
    if (buffer != NULL) return (buffer->isSubBufferProcessed[0] || buffer->isSubBufferProcessed[1]);

    TraceLog(LOG_ERROR, "IsAudioBufferProcessed() : No audio buffer");
    return false;
}
2014-04-19 18:36:49 +04:00
2016-08-02 18:32:24 +03:00
// Play audio stream
void PlayAudioStream(AudioStream stream)
{
    // Forward the request to the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    PlayAudioBuffer(buffer);
}
2016-07-29 22:35:57 +03:00
2016-08-02 18:32:24 +03:00
// Pause audio stream
void PauseAudioStream(AudioStream stream)
{
    // Forward the request to the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    PauseAudioBuffer(buffer);
}
2016-07-29 22:35:57 +03:00
2016-08-02 18:32:24 +03:00
// Resume audio stream playing
void ResumeAudioStream(AudioStream stream)
{
    // Forward the request to the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    ResumeAudioBuffer(buffer);
}
2017-11-12 14:55:24 +03:00
// Check if audio stream is playing.
bool IsAudioStreamPlaying(AudioStream stream)
{
    // The playing state lives in the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    return IsAudioBufferPlaying(buffer);
}
2016-08-02 18:32:24 +03:00
// Stop audio stream
void StopAudioStream(AudioStream stream)
{
    // Forward the request to the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    StopAudioBuffer(buffer);
}
2017-11-14 14:15:50 +03:00
// Set volume for audio stream
void SetAudioStreamVolume(AudioStream stream, float volume)
{
    // Volume is a property of the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    SetAudioBufferVolume(buffer, volume);
}
// Set pitch for audio stream
void SetAudioStreamPitch(AudioStream stream, float pitch)
{
    // Pitch is a property of the stream's internal buffer
    AudioBuffer *buffer = (AudioBuffer *)stream.audioBuffer;
    SetAudioBufferPitch(buffer, pitch);
}
2016-08-02 18:32:24 +03:00
//----------------------------------------------------------------------------------
// Module specific Functions Definition
//----------------------------------------------------------------------------------
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_WAV)
2013-11-19 02:38:44 +04:00
// Load WAV file into Wave structure
2014-04-19 18:36:49 +04:00
static Wave LoadWAV ( const char * fileName )
2013-11-19 02:38:44 +04:00
{
2013-12-01 15:34:31 +04:00
// Basic WAV headers structs
typedef struct {
char chunkID [ 4 ] ;
2014-11-09 10:06:58 +03:00
int chunkSize ;
2013-12-01 15:34:31 +04:00
char format [ 4 ] ;
2016-12-27 19:37:35 +03:00
} WAVRiffHeader ;
2013-12-01 15:34:31 +04:00
typedef struct {
char subChunkID [ 4 ] ;
2014-11-09 10:06:58 +03:00
int subChunkSize ;
2013-12-01 15:34:31 +04:00
short audioFormat ;
short numChannels ;
2014-11-09 10:06:58 +03:00
int sampleRate ;
int byteRate ;
2013-12-01 15:34:31 +04:00
short blockAlign ;
short bitsPerSample ;
2016-12-27 19:37:35 +03:00
} WAVFormat ;
2013-12-01 15:34:31 +04:00
typedef struct {
char subChunkID [ 4 ] ;
2014-11-09 10:06:58 +03:00
int subChunkSize ;
2016-12-27 19:37:35 +03:00
} WAVData ;
2014-09-03 18:51:28 +04:00
2016-12-27 19:37:35 +03:00
WAVRiffHeader wavRiffHeader ;
WAVFormat wavFormat ;
WAVData wavData ;
2014-09-03 18:51:28 +04:00
2016-01-23 15:22:13 +03:00
Wave wave = { 0 } ;
2013-12-01 15:34:31 +04:00
FILE * wavFile ;
2014-09-03 18:51:28 +04:00
2013-11-19 02:38:44 +04:00
wavFile = fopen ( fileName , " rb " ) ;
2014-09-03 18:51:28 +04:00
2014-12-31 20:03:32 +03:00
if ( wavFile = = NULL )
2013-11-23 16:30:54 +04:00
{
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_WARNING , " [%s] WAV file could not be opened " , fileName ) ;
2015-07-31 13:31:39 +03:00
wave . data = NULL ;
2013-11-23 16:30:54 +04:00
}
2014-04-09 22:25:26 +04:00
else
{
// Read in the first chunk into the struct
2016-12-27 19:37:35 +03:00
fread ( & wavRiffHeader , sizeof ( WAVRiffHeader ) , 1 , wavFile ) ;
2014-09-03 18:51:28 +04:00
2014-04-09 22:25:26 +04:00
// Check for RIFF and WAVE tags
2016-12-25 03:58:56 +03:00
if ( strncmp ( wavRiffHeader . chunkID , " RIFF " , 4 ) | |
strncmp ( wavRiffHeader . format , " WAVE " , 4 ) )
2014-04-09 22:25:26 +04:00
{
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_WARNING , " [%s] Invalid RIFF or WAVE Header " , fileName ) ;
2014-04-09 22:25:26 +04:00
}
else
{
// Read in the 2nd chunk for the wave info
2016-12-27 19:37:35 +03:00
fread ( & wavFormat , sizeof ( WAVFormat ) , 1 , wavFile ) ;
2014-09-03 18:51:28 +04:00
2014-04-09 22:25:26 +04:00
// Check for fmt tag
2016-12-25 03:58:56 +03:00
if ( ( wavFormat . subChunkID [ 0 ] ! = ' f ' ) | | ( wavFormat . subChunkID [ 1 ] ! = ' m ' ) | |
( wavFormat . subChunkID [ 2 ] ! = ' t ' ) | | ( wavFormat . subChunkID [ 3 ] ! = ' ' ) )
2014-04-09 22:25:26 +04:00
{
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_WARNING , " [%s] Invalid Wave format " , fileName ) ;
2014-04-09 22:25:26 +04:00
}
else
{
// Check for extra parameters;
2016-12-25 03:58:56 +03:00
if ( wavFormat . subChunkSize > 16 ) fseek ( wavFile , sizeof ( short ) , SEEK_CUR ) ;
2014-09-03 18:51:28 +04:00
2014-04-09 22:25:26 +04:00
// Read in the the last byte of data before the sound file
2016-12-27 19:37:35 +03:00
fread ( & wavData , sizeof ( WAVData ) , 1 , wavFile ) ;
2014-09-03 18:51:28 +04:00
2014-04-09 22:25:26 +04:00
// Check for data tag
2016-12-25 03:58:56 +03:00
if ( ( wavData . subChunkID [ 0 ] ! = ' d ' ) | | ( wavData . subChunkID [ 1 ] ! = ' a ' ) | |
( wavData . subChunkID [ 2 ] ! = ' t ' ) | | ( wavData . subChunkID [ 3 ] ! = ' a ' ) )
2014-04-09 22:25:26 +04:00
{
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_WARNING , " [%s] Invalid data header " , fileName ) ;
2014-04-09 22:25:26 +04:00
}
else
{
// Allocate memory for data
2017-01-19 15:18:04 +03:00
wave . data = malloc ( wavData . subChunkSize ) ;
2014-09-03 18:51:28 +04:00
2014-04-09 22:25:26 +04:00
// Read in the sound data into the soundData variable
2017-02-12 01:17:56 +03:00
fread ( wave . data , wavData . subChunkSize , 1 , wavFile ) ;
2014-09-03 18:51:28 +04:00
2016-12-20 02:33:45 +03:00
// Store wave parameters
2016-12-25 03:58:56 +03:00
wave . sampleRate = wavFormat . sampleRate ;
wave . sampleSize = wavFormat . bitsPerSample ;
wave . channels = wavFormat . numChannels ;
2017-01-29 01:02:30 +03:00
2017-01-19 15:18:04 +03:00
// NOTE: Only support 8 bit, 16 bit and 32 bit sample sizes
if ( ( wave . sampleSize ! = 8 ) & & ( wave . sampleSize ! = 16 ) & & ( wave . sampleSize ! = 32 ) )
2016-12-25 03:58:56 +03:00
{
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_WARNING , " [%s] WAV sample size (%ibit) not supported, converted to 16bit " , fileName , wave . sampleSize ) ;
2017-01-19 15:18:04 +03:00
WaveFormat ( & wave , wave . sampleRate , 16 , wave . channels ) ;
2016-12-25 03:58:56 +03:00
}
2017-01-19 15:18:04 +03:00
2016-12-25 03:58:56 +03:00
// NOTE: Only support up to 2 channels (mono, stereo)
2017-01-29 01:02:30 +03:00
if ( wave . channels > 2 )
2016-12-25 03:58:56 +03:00
{
WaveFormat ( & wave , wave . sampleRate , wave . sampleSize , 2 ) ;
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_WARNING , " [%s] WAV channels number (%i) not supported, converted to 2 channels " , fileName , wave . channels ) ;
2016-12-25 03:58:56 +03:00
}
2017-01-29 01:02:30 +03:00
2016-12-20 02:33:45 +03:00
// NOTE: subChunkSize comes in bytes, we need to translate it to number of samples
2016-12-25 03:58:56 +03:00
wave . sampleCount = ( wavData . subChunkSize / ( wave . sampleSize / 8 ) ) / wave . channels ;
2014-09-03 18:51:28 +04:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_INFO , " [%s] WAV file loaded successfully (%i Hz, %i bit, %s) " , fileName , wave . sampleRate , wave . sampleSize , ( wave . channels = = 1 ) ? " Mono " : " Stereo " ) ;
2014-04-09 22:25:26 +04:00
}
}
}
2013-11-23 16:30:54 +04:00
2014-04-09 22:25:26 +04:00
fclose ( wavFile ) ;
}
2014-09-03 18:51:28 +04:00
2013-11-23 16:30:54 +04:00
return wave ;
2013-12-01 15:34:31 +04:00
}
2017-03-26 23:49:01 +03:00
# endif
2013-11-19 02:38:44 +04:00
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2014-04-19 18:36:49 +04:00
// Load OGG file into Wave structure
2014-09-17 00:51:31 +04:00
// NOTE: Using stb_vorbis library
2016-09-08 01:20:06 +03:00
static Wave LoadOGG ( const char * fileName )
2013-11-19 02:38:44 +04:00
{
2017-05-03 15:16:53 +03:00
Wave wave = { 0 } ;
2014-09-03 18:51:28 +04:00
2014-04-19 18:36:49 +04:00
stb_vorbis * oggFile = stb_vorbis_open_filename ( fileName , NULL , NULL ) ;
2014-09-03 18:51:28 +04:00
2017-07-02 13:35:13 +03:00
if ( oggFile = = NULL ) TraceLog ( LOG_WARNING , " [%s] OGG file could not be opened " , fileName ) ;
2015-07-31 13:31:39 +03:00
else
{
stb_vorbis_info info = stb_vorbis_get_info ( oggFile ) ;
2017-05-03 15:16:53 +03:00
2015-07-31 13:31:39 +03:00
wave . sampleRate = info . sample_rate ;
2016-08-15 17:35:11 +03:00
wave . sampleSize = 16 ; // 16 bit per sample (short)
2015-07-31 13:31:39 +03:00
wave . channels = info . channels ;
2017-05-03 15:16:53 +03:00
wave . sampleCount = ( int ) stb_vorbis_stream_length_in_samples ( oggFile ) ; // Independent by channel
2017-01-29 01:02:30 +03:00
2015-07-31 13:31:39 +03:00
float totalSeconds = stb_vorbis_stream_length_in_seconds ( oggFile ) ;
2017-07-02 13:35:13 +03:00
if ( totalSeconds > 10 ) TraceLog ( LOG_WARNING , " [%s] Ogg audio length is larger than 10 seconds (%f), that's a big file in memory, consider music streaming " , fileName , totalSeconds ) ;
2014-09-03 18:51:28 +04:00
2017-01-18 19:04:20 +03:00
wave . data = ( short * ) malloc ( wave . sampleCount * wave . channels * sizeof ( short ) ) ;
2014-09-03 18:51:28 +04:00
2017-01-18 19:04:20 +03:00
// NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
int numSamplesOgg = stb_vorbis_get_samples_short_interleaved ( oggFile , info . channels , ( short * ) wave . data , wave . sampleCount * wave . channels ) ;
2015-02-02 02:53:49 +03:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_DEBUG , " [%s] Samples obtained: %i " , fileName , numSamplesOgg ) ;
2014-04-19 18:36:49 +04:00
2017-07-02 13:35:13 +03:00
TraceLog ( LOG_INFO , " [%s] OGG file loaded successfully (%i Hz, %i bit, %s) " , fileName , wave . sampleRate , wave . sampleSize , ( wave . channels = = 1 ) ? " Mono " : " Stereo " ) ;
2015-07-31 13:31:39 +03:00
stb_vorbis_close ( oggFile ) ;
}
2014-09-03 18:51:28 +04:00
2014-04-19 18:36:49 +04:00
return wave ;
2014-04-09 22:25:26 +04:00
}
2017-03-26 23:49:01 +03:00
# endif
2013-11-19 02:38:44 +04:00
2017-03-26 23:49:01 +03:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2016-10-10 19:22:55 +03:00
// Load FLAC file into Wave structure
// NOTE: Using dr_flac library
static Wave LoadFLAC ( const char * fileName )
{
Wave wave ;
// Decode an entire FLAC file in one go
uint64_t totalSampleCount ;
2016-12-26 12:52:57 +03:00
wave . data = drflac_open_and_decode_file_s16 ( fileName , & wave . channels , & wave . sampleRate , & totalSampleCount ) ;
2017-01-29 01:02:30 +03:00
2016-12-26 12:52:57 +03:00
wave . sampleCount = ( int ) totalSampleCount / wave . channels ;
wave . sampleSize = 16 ;
2017-01-29 01:02:30 +03:00
2016-12-25 03:58:56 +03:00
// NOTE: Only support up to 2 channels (mono, stereo)
2017-07-02 13:35:13 +03:00
if ( wave . channels > 2 ) TraceLog ( LOG_WARNING , " [%s] FLAC channels number (%i) not supported " , fileName , wave . channels ) ;
2016-12-26 12:52:57 +03:00
2017-07-02 13:35:13 +03:00
if ( wave . data = = NULL ) TraceLog ( LOG_WARNING , " [%s] FLAC data could not be loaded " , fileName ) ;
else TraceLog ( LOG_INFO , " [%s] FLAC file loaded successfully (%i Hz, %i bit, %s) " , fileName, wave.sampleRate, wave.sampleSize, (wave.channels == 1) ? " Mono " : " Stereo " ) ;
2018-09-19 16:57:46 +03:00
return wave ;
}
# endif
# if defined(SUPPORT_FILEFORMAT_MP3)
// Load MP3 file into Wave structure
// NOTE: Using dr_mp3 library
static Wave LoadMP3(const char *fileName)
{
    Wave wave = { 0 };

    // Decode an entire MP3 file in one go
    uint64_t totalSampleCount = 0;
    drmp3_config config = { 0 };
    wave.data = drmp3_open_and_decode_file_f32(fileName, &config, &totalSampleCount);

    wave.channels = config.outputChannels;
    wave.sampleRate = config.outputSampleRate;
    // NOTE(review): unlike LoadFLAC, the count is NOT divided by channels here —
    // confirm dr_mp3's totalSampleCount semantics (frames vs interleaved samples)
    wave.sampleCount = (int)totalSampleCount;
    wave.sampleSize = 32;    // f32 decoding: 32-bit float samples

    // NOTE: Only support up to 2 channels (mono, stereo)
    if (wave.channels > 2) TraceLog(LOG_WARNING, "[%s] MP3 channels number (%i) not supported", fileName, wave.channels);

    if (wave.data == NULL) TraceLog(LOG_WARNING, "[%s] MP3 data could not be loaded", fileName);
    else TraceLog(LOG_INFO, "[%s] MP3 file loaded successfully (%i Hz, %i bit, %s)", fileName, wave.sampleRate, wave.sampleSize, (wave.channels == 1)? "Mono" : "Stereo");

    return wave;
}
2017-03-26 23:49:01 +03:00
# endif
2016-10-10 19:22:55 +03:00
2015-07-31 13:31:39 +03:00
// Some required functions for audio standalone module version
# if defined(AUDIO_STANDALONE)
2017-03-29 01:35:42 +03:00
// Check file extension
// NOTE: 'ext' must include the dot (e.g. ".wav"); comparison is case-sensitive
bool IsFileExtension(const char *fileName, const char *ext)
{
    // Locate the last '.' in the name; everything from it (dot included) must match exactly
    const char *fileExt = strrchr(fileName, '.');

    return (fileExt != NULL) && (strcmp(fileExt, ext) == 0);
}
2017-07-02 13:35:13 +03:00
// Show trace log messages (LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_DEBUG)
2015-07-31 13:31:39 +03:00
void TraceLog ( int msgType , const char * text , . . . )
{
va_list args ;
2017-07-02 13:35:13 +03:00
va_start ( args , text ) ;
2015-07-31 13:31:39 +03:00
2016-08-31 11:27:29 +03:00
switch ( msgType )
2015-07-31 13:31:39 +03:00
{
2017-07-02 13:35:13 +03:00
case LOG_INFO : fprintf ( stdout , " INFO: " ) ; break ;
case LOG_ERROR : fprintf ( stdout , " ERROR: " ) ; break ;
case LOG_WARNING : fprintf ( stdout , " WARNING: " ) ; break ;
case LOG_DEBUG : fprintf ( stdout , " DEBUG: " ) ; break ;
2015-07-31 13:31:39 +03:00
default : break ;
}
2017-07-02 13:35:13 +03:00
vfprintf ( stdout , text , args ) ;
fprintf ( stdout , " \n " ) ;
2015-07-31 13:31:39 +03:00
2017-07-02 13:35:13 +03:00
va_end ( args ) ;
2015-07-31 13:31:39 +03:00
2017-07-02 13:35:13 +03:00
if ( msgType = = LOG_ERROR ) exit ( 1 ) ;
2015-07-31 13:31:39 +03:00
}
2017-02-12 01:34:41 +03:00
# endif