GCC Code Coverage Report


Directory: ./
File: submodules/raylib/src/raudio.c
Date: 2023-09-29 04:53:15
           Exec   Total   Coverage
Lines:        0     806       0.0%
Branches:     0     483       0.0%

Line Branch Exec Source
1 /**********************************************************************************************
2 *
3 * raudio v1.1 - A simple and easy-to-use audio library based on miniaudio
4 *
5 * FEATURES:
6 * - Manage audio device (init/close)
7 * - Manage raw audio context
8 * - Manage mixing channels
9 * - Load and unload audio files
10 * - Format wave data (sample rate, size, channels)
11 * - Play/Stop/Pause/Resume loaded audio
12 *
13 * CONFIGURATION:
14 * #define SUPPORT_MODULE_RAUDIO
15 * raudio module is included in the build
16 *
17 * #define RAUDIO_STANDALONE
18 * Define to use the module as a standalone library (independently of raylib).
19 * Required types and functions are defined in the same module.
20 *
21 * #define SUPPORT_FILEFORMAT_WAV
22 * #define SUPPORT_FILEFORMAT_OGG
23 * #define SUPPORT_FILEFORMAT_MP3
24 * #define SUPPORT_FILEFORMAT_QOA
25 * #define SUPPORT_FILEFORMAT_FLAC
26 * #define SUPPORT_FILEFORMAT_XM
27 * #define SUPPORT_FILEFORMAT_MOD
28 * Select the desired file formats to be supported for loading. Some of those formats are
29 * supported by default; to remove support, just comment out the unrequired #define in this module
30 *
31 * DEPENDENCIES:
32 * miniaudio.h - Audio device management lib (https://github.com/mackron/miniaudio)
33 * stb_vorbis.h - Ogg audio files loading (http://www.nothings.org/stb_vorbis/)
34 * dr_wav.h - WAV audio files loading (http://github.com/mackron/dr_libs)
35 * dr_mp3.h - MP3 audio file loading (https://github.com/mackron/dr_libs)
36 * dr_flac.h - FLAC audio file loading (https://github.com/mackron/dr_libs)
37 * jar_xm.h - XM module file loading
38 * jar_mod.h - MOD audio file loading
39 *
40 * CONTRIBUTORS:
41 * David Reid (github: @mackron) (Nov. 2017):
42 * - Complete port to miniaudio library
43 *
44 * Joshua Reisenauer (github: @kd7tck) (2015):
45 * - XM audio module support (jar_xm)
46 * - MOD audio module support (jar_mod)
47 * - Mixing channels support
48 * - Raw audio context support
49 *
50 *
51 * LICENSE: zlib/libpng
52 *
53 * Copyright (c) 2013-2023 Ramon Santamaria (@raysan5)
54 *
55 * This software is provided "as-is", without any express or implied warranty. In no event
56 * will the authors be held liable for any damages arising from the use of this software.
57 *
58 * Permission is granted to anyone to use this software for any purpose, including commercial
59 * applications, and to alter it and redistribute it freely, subject to the following restrictions:
60 *
61 * 1. The origin of this software must not be misrepresented; you must not claim that you
62 * wrote the original software. If you use this software in a product, an acknowledgment
63 * in the product documentation would be appreciated but is not required.
64 *
65 * 2. Altered source versions must be plainly marked as such, and must not be misrepresented
66 * as being the original software.
67 *
68 * 3. This notice may not be removed or altered from any source distribution.
69 *
70 **********************************************************************************************/
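// A minimal usage sketch of the public API listed under FEATURES above, assuming the default
// raylib build (SUPPORT_MODULE_RAUDIO and SUPPORT_FILEFORMAT_WAV enabled); the file path is
// illustrative only:
//
//     InitAudioDevice();                            // Open the default playback device
//     Sound fx = LoadSound("resources/coin.wav");   // Whole file decoded into memory (no streaming)
//     PlaySound(fx);                                // Fire-and-forget playback
//     while (IsSoundPlaying(fx)) { /* wait or do other work */ }
//     UnloadSound(fx);                              // Free the decoded sample data
//     CloseAudioDevice();                           // Stop and close the device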
71
72 #if defined(RAUDIO_STANDALONE)
73 #include "raudio.h"
74 #else
75 #include "raylib.h" // Declares module functions
76
77 // Check if config flags have been externally provided on compilation line
78 #if !defined(EXTERNAL_CONFIG_FLAGS)
79 #include "config.h" // Defines module configuration flags
80 #endif
81 #include "utils.h" // Required for: fopen() Android mapping
82 #endif
83
84 #if defined(SUPPORT_MODULE_RAUDIO)
85
86 #if defined(_WIN32)
87 // To avoid windows.h symbols conflicting with raylib, some flags are defined
88 // WARNING: Those flags prevent the inclusion of some Win32 headers that the user could
89 // require at some point, and they won't be included...
90 //-------------------------------------------------------------------------------------
91
92 // If defined, the following flags inhibit definition of the indicated items.
93 #define NOGDICAPMASKS // CC_*, LC_*, PC_*, CP_*, TC_*, RC_
94 #define NOVIRTUALKEYCODES // VK_*
95 #define NOWINMESSAGES // WM_*, EM_*, LB_*, CB_*
96 #define NOWINSTYLES // WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_*
97 #define NOSYSMETRICS // SM_*
98 #define NOMENUS // MF_*
99 #define NOICONS // IDI_*
100 #define NOKEYSTATES // MK_*
101 #define NOSYSCOMMANDS // SC_*
102 #define NORASTEROPS // Binary and Tertiary raster ops
103 #define NOSHOWWINDOW // SW_*
104 #define OEMRESOURCE // OEM Resource values
105 #define NOATOM // Atom Manager routines
106 #define NOCLIPBOARD // Clipboard routines
107 #define NOCOLOR // Screen colors
108 #define NOCTLMGR // Control and Dialog routines
109 #define NODRAWTEXT // DrawText() and DT_*
110 #define NOGDI // All GDI defines and routines
111 #define NOKERNEL // All KERNEL defines and routines
112 #define NOUSER // All USER defines and routines
113 //#define NONLS // All NLS defines and routines
114 #define NOMB // MB_* and MessageBox()
115 #define NOMEMMGR // GMEM_*, LMEM_*, GHND, LHND, associated routines
116 #define NOMETAFILE // typedef METAFILEPICT
117 #define NOMINMAX // Macros min(a,b) and max(a,b)
118 #define NOMSG // typedef MSG and associated routines
119 #define NOOPENFILE // OpenFile(), OemToAnsi, AnsiToOem, and OF_*
120 #define NOSCROLL // SB_* and scrolling routines
121 #define NOSERVICE // All Service Controller routines, SERVICE_ equates, etc.
122 #define NOSOUND // Sound driver routines
123 #define NOTEXTMETRIC // typedef TEXTMETRIC and associated routines
124 #define NOWH // SetWindowsHook and WH_*
125 #define NOWINOFFSETS // GWL_*, GCL_*, associated routines
126 #define NOCOMM // COMM driver routines
127 #define NOKANJI // Kanji support stuff.
128 #define NOHELP // Help engine interface.
129 #define NOPROFILER // Profiler interface.
130 #define NODEFERWINDOWPOS // DeferWindowPos routines
131 #define NOMCX // Modem Configuration Extensions
132
133 // Type required before windows.h inclusion
134 typedef struct tagMSG *LPMSG;
135
136 #include <windows.h> // Windows functionality (miniaudio)
137
138 // Type required by some unused function...
139 typedef struct tagBITMAPINFOHEADER {
140 DWORD biSize;
141 LONG biWidth;
142 LONG biHeight;
143 WORD biPlanes;
144 WORD biBitCount;
145 DWORD biCompression;
146 DWORD biSizeImage;
147 LONG biXPelsPerMeter;
148 LONG biYPelsPerMeter;
149 DWORD biClrUsed;
150 DWORD biClrImportant;
151 } BITMAPINFOHEADER, *PBITMAPINFOHEADER;
152
153 #include <objbase.h> // Component Object Model (COM) header
154 #include <mmreg.h> // Windows Multimedia, defines some WAVE structs
155 #include <mmsystem.h> // Windows Multimedia, used by Windows GDI, defines DIBINDEX macro
156
157 // Some required types defined for MSVC/TinyC compiler
158 #if defined(_MSC_VER) || defined(__TINYC__)
159 #include "propidl.h"
160 #endif
161 #endif
162
163 #define MA_MALLOC RL_MALLOC
164 #define MA_FREE RL_FREE
165
166 #define MA_NO_JACK
167 #define MA_NO_WAV
168 #define MA_NO_FLAC
169 #define MA_NO_MP3
170
171 // Threading model: Default: [0] COINIT_MULTITHREADED: COM calls objects on any thread (free threading)
172 #define MA_COINIT_VALUE 2 // [2] COINIT_APARTMENTTHREADED: Each object has its own thread (apartment model)
173
174 #define MINIAUDIO_IMPLEMENTATION
175 //#define MA_DEBUG_OUTPUT
176 #include "external/miniaudio.h" // Audio device initialization and management
177 #undef PlaySound // Win32 API: windows.h > mmsystem.h defines PlaySound macro
178
179 #include <stdlib.h> // Required for: malloc(), free()
180 #include <stdio.h> // Required for: FILE, fopen(), fclose(), fread()
181 #include <string.h> // Required for: strcmp() [Used in IsFileExtension(), LoadWaveFromMemory(), LoadMusicStreamFromMemory()]
182
183 #if defined(RAUDIO_STANDALONE)
184 #ifndef TRACELOG
185 #define TRACELOG(level, ...) printf(__VA_ARGS__)
186 #endif
187
188 // Allow custom memory allocators
189 #ifndef RL_MALLOC
190 #define RL_MALLOC(sz) malloc(sz)
191 #endif
192 #ifndef RL_CALLOC
193 #define RL_CALLOC(n,sz) calloc(n,sz)
194 #endif
195 #ifndef RL_REALLOC
196 #define RL_REALLOC(ptr,sz) realloc(ptr,sz)
197 #endif
198 #ifndef RL_FREE
199 #define RL_FREE(ptr) free(ptr)
200 #endif
201 #endif
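// In a standalone build (RAUDIO_STANDALONE), these defaults can be overridden by defining the
// macros before this file is compiled, e.g. with a hypothetical custom allocator:
//
//     #define RL_MALLOC(sz)  my_audio_alloc(sz)
//     #define RL_FREE(ptr)   my_audio_free(ptr)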
202
203 #if defined(SUPPORT_FILEFORMAT_WAV)
204 #define DRWAV_MALLOC RL_MALLOC
205 #define DRWAV_REALLOC RL_REALLOC
206 #define DRWAV_FREE RL_FREE
207
208 #define DR_WAV_IMPLEMENTATION
209 #include "external/dr_wav.h" // WAV loading functions
210 #endif
211
212 #if defined(SUPPORT_FILEFORMAT_OGG)
213 // TODO: Remap stb_vorbis malloc()/free() calls to RL_MALLOC/RL_FREE
214 #include "external/stb_vorbis.c" // OGG loading functions
215 #endif
216
217 #if defined(SUPPORT_FILEFORMAT_MP3)
218 #define DRMP3_MALLOC RL_MALLOC
219 #define DRMP3_REALLOC RL_REALLOC
220 #define DRMP3_FREE RL_FREE
221
222 #define DR_MP3_IMPLEMENTATION
223 #include "external/dr_mp3.h" // MP3 loading functions
224 #endif
225
226 #if defined(SUPPORT_FILEFORMAT_QOA)
227 #define QOA_MALLOC RL_MALLOC
228 #define QOA_FREE RL_FREE
229
230 #if defined(_MSC_VER) // Disable some MSVC warning
231 #pragma warning(push)
232 #pragma warning(disable : 4018)
233 #pragma warning(disable : 4267)
234 #pragma warning(disable : 4244)
235 #endif
236
237 #define QOA_IMPLEMENTATION
238 #include "external/qoa.h" // QOA loading and saving functions
239 #include "external/qoaplay.c" // QOA stream playing helper functions
240
241 #if defined(_MSC_VER)
242 #pragma warning(pop) // Disable MSVC warning suppression
243 #endif
244 #endif
245
246 #if defined(SUPPORT_FILEFORMAT_FLAC)
247 #define DRFLAC_MALLOC RL_MALLOC
248 #define DRFLAC_REALLOC RL_REALLOC
249 #define DRFLAC_FREE RL_FREE
250
251 #define DR_FLAC_IMPLEMENTATION
252 #define DR_FLAC_NO_WIN32_IO
253 #include "external/dr_flac.h" // FLAC loading functions
254 #endif
255
256 #if defined(SUPPORT_FILEFORMAT_XM)
257 #define JARXM_MALLOC RL_MALLOC
258 #define JARXM_FREE RL_FREE
259
260 #if defined(_MSC_VER) // Disable some MSVC warning
261 #pragma warning(push)
262 #pragma warning(disable : 4244)
263 #endif
264
265 #define JAR_XM_IMPLEMENTATION
266 #include "external/jar_xm.h" // XM loading functions
267
268 #if defined(_MSC_VER)
269 #pragma warning(pop) // Disable MSVC warning suppression
270 #endif
271 #endif
272
273 #if defined(SUPPORT_FILEFORMAT_MOD)
274 #define JARMOD_MALLOC RL_MALLOC
275 #define JARMOD_FREE RL_FREE
276
277 #define JAR_MOD_IMPLEMENTATION
278 #include "external/jar_mod.h" // MOD loading functions
279 #endif
280
281 //----------------------------------------------------------------------------------
282 // Defines and Macros
283 //----------------------------------------------------------------------------------
284 #ifndef AUDIO_DEVICE_FORMAT
285 #define AUDIO_DEVICE_FORMAT ma_format_f32 // Device output format (float-32bit)
286 #endif
287 #ifndef AUDIO_DEVICE_CHANNELS
288 #define AUDIO_DEVICE_CHANNELS 2 // Device output channels: stereo
289 #endif
290 #ifndef AUDIO_DEVICE_SAMPLE_RATE
291 #define AUDIO_DEVICE_SAMPLE_RATE 0 // Device output sample rate (0 = use the device default rate)
292 #endif
293
294 #ifndef MAX_AUDIO_BUFFER_POOL_CHANNELS
295 #define MAX_AUDIO_BUFFER_POOL_CHANNELS 16 // Audio pool channels
296 #endif
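// Thanks to the #ifndef guards, any of the values above can be tuned at build time without
// editing this file, e.g. (illustrative compiler invocation):
//
//     cc -c raudio.c -DAUDIO_DEVICE_SAMPLE_RATE=44100 -DMAX_AUDIO_BUFFER_POOL_CHANNELS=32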
297
298 //----------------------------------------------------------------------------------
299 // Types and Structures Definition
300 //----------------------------------------------------------------------------------
301 #if defined(RAUDIO_STANDALONE)
302 // Trace log level
303 // NOTE: Organized by priority level
304 typedef enum {
305 LOG_ALL = 0, // Display all logs
306 LOG_TRACE, // Trace logging, intended for internal use only
307 LOG_DEBUG, // Debug logging, used for internal debugging, it should be disabled on release builds
308 LOG_INFO, // Info logging, used for program execution info
309 LOG_WARNING, // Warning logging, used on recoverable failures
310 LOG_ERROR, // Error logging, used on unrecoverable failures
311 LOG_FATAL, // Fatal logging, used to abort program: exit(EXIT_FAILURE)
312 LOG_NONE // Disable logging
313 } TraceLogLevel;
314 #endif
315
316 // Music context type
317 // NOTE: Depends on data structure provided by the library
318 // in charge of reading the different file types
319 typedef enum {
320 MUSIC_AUDIO_NONE = 0, // No audio context loaded
321 MUSIC_AUDIO_WAV, // WAV audio context
322 MUSIC_AUDIO_OGG, // OGG audio context
323 MUSIC_AUDIO_FLAC, // FLAC audio context
324 MUSIC_AUDIO_MP3, // MP3 audio context
325 MUSIC_AUDIO_QOA, // QOA audio context
326 MUSIC_MODULE_XM, // XM module audio context
327 MUSIC_MODULE_MOD // MOD module audio context
328 } MusicContextType;
329
330 // NOTE: Different logic is used when feeding data to the playback device
331 // depending on whether data is streamed (Music vs Sound)
332 typedef enum {
333 AUDIO_BUFFER_USAGE_STATIC = 0,
334 AUDIO_BUFFER_USAGE_STREAM
335 } AudioBufferUsage;
336
337 // Audio buffer struct
338 struct rAudioBuffer {
339 ma_data_converter converter; // Audio data converter
340
341 AudioCallback callback; // Audio buffer callback for buffer filling on audio threads
342 rAudioProcessor *processor; // Audio processor
343
344 float volume; // Audio buffer volume
345 float pitch; // Audio buffer pitch
346 float pan; // Audio buffer pan (0.0f to 1.0f)
347
348 bool playing; // Audio buffer state: AUDIO_PLAYING
349 bool paused; // Audio buffer state: AUDIO_PAUSED
350 bool looping; // Audio buffer looping, default to true for AudioStreams
351 int usage; // Audio buffer usage mode: STATIC or STREAM
352
353 bool isSubBufferProcessed[2]; // SubBuffer processed (virtual double buffer)
354 unsigned int sizeInFrames; // Total buffer size in frames
355 unsigned int frameCursorPos; // Frame cursor position
356 unsigned int framesProcessed; // Total frames processed in this buffer (required for play timing)
357
358 unsigned char *data; // Data buffer; for music streams it keeps being refilled
359
360 rAudioBuffer *next; // Next audio buffer on the list
361 rAudioBuffer *prev; // Previous audio buffer on the list
362 };
363
364 // Audio processor struct
365 // NOTE: Useful to apply effects to an AudioBuffer
366 struct rAudioProcessor {
367 AudioCallback process; // Processor callback function
368 rAudioProcessor *next; // Next audio processor on the list
369 rAudioProcessor *prev; // Previous audio processor on the list
370 };
371
372 #define AudioBuffer rAudioBuffer // HACK: To avoid CoreAudio (macOS) symbol collision
373
374 // Audio data context
375 typedef struct AudioData {
376 struct {
377 ma_context context; // miniaudio context data
378 ma_device device; // miniaudio device
379 ma_mutex lock; // miniaudio mutex lock
380 bool isReady; // Check if audio device is ready
381 size_t pcmBufferSize; // Pre-allocated buffer size
382 void *pcmBuffer; // Pre-allocated buffer to read audio data from file/memory
383 } System;
384 struct {
385 AudioBuffer *first; // Pointer to first AudioBuffer in the list
386 AudioBuffer *last; // Pointer to last AudioBuffer in the list
387 int defaultSize; // Default audio buffer size for audio streams
388 } Buffer;
389 rAudioProcessor *mixedProcessor;
390 } AudioData;
391
392 //----------------------------------------------------------------------------------
393 // Global Variables Definition
394 //----------------------------------------------------------------------------------
395 static AudioData AUDIO = { // Global AUDIO context
396
397 // NOTE: Music buffer size is defined by the number of samples, independent of sample size and number of channels
398 // After some math, considering a sample rate of 48000, a buffer refill rate of 1/60 second and a
399 // standard double-buffering system, a 4096-sample buffer has been chosen; it should be enough
400 // In case of music stalls, just increase this number
401 .Buffer.defaultSize = 0,
402 .mixedProcessor = NULL
403 };
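// Working the numbers in the note above through: at 48000 Hz with a 1/60 s refill period, one
// refill consumes 48000/60 = 800 frames; with double buffering that is 2*800 = 1600 frames per
// cycle, so a 4096-sample buffer leaves roughly a 2.5x margin against stalls.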
404
405 //----------------------------------------------------------------------------------
406 // Module specific Functions Declaration
407 //----------------------------------------------------------------------------------
408 static void OnLog(void *pUserData, ma_uint32 level, const char *pMessage);
409 static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const void *pFramesInput, ma_uint32 frameCount);
410 static void MixAudioFrames(float *framesOut, const float *framesIn, ma_uint32 frameCount, AudioBuffer *buffer);
411
412 #if defined(RAUDIO_STANDALONE)
413 static bool IsFileExtension(const char *fileName, const char *ext); // Check file extension
414 static const char *GetFileExtension(const char *fileName); // Get pointer to extension for a filename string (includes the dot: .png)
415
416 static unsigned char *LoadFileData(const char *fileName, unsigned int *bytesRead); // Load file data as byte array (read)
417 static bool SaveFileData(const char *fileName, void *data, unsigned int bytesToWrite); // Save data to file from byte array (write)
418 static bool SaveFileText(const char *fileName, char *text); // Save text data to file (write), string must be '\0' terminated
419 #endif
420
421 //----------------------------------------------------------------------------------
422 // AudioBuffer management functions declaration
423 // NOTE: Those functions are not exposed by raylib... for the moment
424 //----------------------------------------------------------------------------------
425 AudioBuffer *LoadAudioBuffer(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 sizeInFrames, int usage);
426 void UnloadAudioBuffer(AudioBuffer *buffer);
427
428 bool IsAudioBufferPlaying(AudioBuffer *buffer);
429 void PlayAudioBuffer(AudioBuffer *buffer);
430 void StopAudioBuffer(AudioBuffer *buffer);
431 void PauseAudioBuffer(AudioBuffer *buffer);
432 void ResumeAudioBuffer(AudioBuffer *buffer);
433 void SetAudioBufferVolume(AudioBuffer *buffer, float volume);
434 void SetAudioBufferPitch(AudioBuffer *buffer, float pitch);
435 void SetAudioBufferPan(AudioBuffer *buffer, float pan);
436 void TrackAudioBuffer(AudioBuffer *buffer);
437 void UntrackAudioBuffer(AudioBuffer *buffer);
438
439 //----------------------------------------------------------------------------------
440 // Module Functions Definition - Audio Device initialization and Closing
441 //----------------------------------------------------------------------------------
442 // Initialize audio device
443 void InitAudioDevice(void)
444 {
445 // Init audio context
446 ma_context_config ctxConfig = ma_context_config_init();
447 ma_log_callback_init(OnLog, NULL);
448
449 ma_result result = ma_context_init(NULL, 0, &ctxConfig, &AUDIO.System.context);
450 if (result != MA_SUCCESS)
451 {
452 TRACELOG(LOG_WARNING, "AUDIO: Failed to initialize context");
453 return;
454 }
455
456 // Init audio device
457 // NOTE: Using the default device. Format is floating point because it simplifies mixing.
458 ma_device_config config = ma_device_config_init(ma_device_type_playback);
459 config.playback.pDeviceID = NULL; // NULL for the default playback AUDIO.System.device.
460 config.playback.format = AUDIO_DEVICE_FORMAT;
461 config.playback.channels = AUDIO_DEVICE_CHANNELS;
462 config.capture.pDeviceID = NULL; // NULL for the default capture AUDIO.System.device.
463 config.capture.format = ma_format_s16;
464 config.capture.channels = 1;
465 config.sampleRate = AUDIO_DEVICE_SAMPLE_RATE;
466 config.dataCallback = OnSendAudioDataToDevice;
467 config.pUserData = NULL;
468
469 result = ma_device_init(&AUDIO.System.context, &config, &AUDIO.System.device);
470 if (result != MA_SUCCESS)
471 {
472 TRACELOG(LOG_WARNING, "AUDIO: Failed to initialize playback device");
473 ma_context_uninit(&AUDIO.System.context);
474 return;
475 }
476
477 // Keep the device running the whole time. May want to consider doing something a bit smarter and only have the device running
478 // while there's at least one sound being played.
479 result = ma_device_start(&AUDIO.System.device);
480 if (result != MA_SUCCESS)
481 {
482 TRACELOG(LOG_WARNING, "AUDIO: Failed to start playback device");
483 ma_device_uninit(&AUDIO.System.device);
484 ma_context_uninit(&AUDIO.System.context);
485 return;
486 }
487
488 // Mixing happens on a separate thread which means we need to synchronize. I'm using a mutex here to make things simple, but may
489 // want to look at something a bit smarter later on to keep everything real-time, if that's necessary.
490 if (ma_mutex_init(&AUDIO.System.lock) != MA_SUCCESS)
491 {
492 TRACELOG(LOG_WARNING, "AUDIO: Failed to create mutex for mixing");
493 ma_device_uninit(&AUDIO.System.device);
494 ma_context_uninit(&AUDIO.System.context);
495 return;
496 }
497
498 TRACELOG(LOG_INFO, "AUDIO: Device initialized successfully");
499 TRACELOG(LOG_INFO, " > Backend: miniaudio / %s", ma_get_backend_name(AUDIO.System.context.backend));
500 TRACELOG(LOG_INFO, " > Format: %s -> %s", ma_get_format_name(AUDIO.System.device.playback.format), ma_get_format_name(AUDIO.System.device.playback.internalFormat));
501 TRACELOG(LOG_INFO, " > Channels: %d -> %d", AUDIO.System.device.playback.channels, AUDIO.System.device.playback.internalChannels);
502 TRACELOG(LOG_INFO, " > Sample rate: %d -> %d", AUDIO.System.device.sampleRate, AUDIO.System.device.playback.internalSampleRate);
503 TRACELOG(LOG_INFO, " > Periods size: %d", AUDIO.System.device.playback.internalPeriodSizeInFrames*AUDIO.System.device.playback.internalPeriods);
504
505 AUDIO.System.isReady = true;
506 }
507
508 // Close the audio device for all contexts
509 void CloseAudioDevice(void)
510 {
511 if (AUDIO.System.isReady)
512 {
513 ma_mutex_uninit(&AUDIO.System.lock);
514 ma_device_uninit(&AUDIO.System.device);
515 ma_context_uninit(&AUDIO.System.context);
516
517 AUDIO.System.isReady = false;
518 RL_FREE(AUDIO.System.pcmBuffer);
519 AUDIO.System.pcmBuffer = NULL;
520 AUDIO.System.pcmBufferSize = 0;
521
522 TRACELOG(LOG_INFO, "AUDIO: Device closed successfully");
523 }
524 else TRACELOG(LOG_WARNING, "AUDIO: Device could not be closed, not currently initialized");
525 }
526
527 // Check if device has been initialized successfully
528 bool IsAudioDeviceReady(void)
529 {
530 return AUDIO.System.isReady;
531 }
532
533 // Set master volume (listener)
534 void SetMasterVolume(float volume)
535 {
536 ma_device_set_master_volume(&AUDIO.System.device, volume);
537 }
538
539 //----------------------------------------------------------------------------------
540 // Module Functions Definition - Audio Buffer management
541 //----------------------------------------------------------------------------------
542
543 // Initialize a new audio buffer (filled with silence)
544 AudioBuffer *LoadAudioBuffer(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 sizeInFrames, int usage)
545 {
546 AudioBuffer *audioBuffer = (AudioBuffer *)RL_CALLOC(1, sizeof(AudioBuffer));
547
548 if (audioBuffer == NULL)
549 {
550 TRACELOG(LOG_WARNING, "AUDIO: Failed to allocate memory for buffer");
551 return NULL;
552 }
553
554 if (sizeInFrames > 0) audioBuffer->data = RL_CALLOC(sizeInFrames*channels*ma_get_bytes_per_sample(format), 1);
555
556 // Audio data runs through a format converter
557 ma_data_converter_config converterConfig = ma_data_converter_config_init(format, AUDIO_DEVICE_FORMAT, channels, AUDIO_DEVICE_CHANNELS, sampleRate, AUDIO.System.device.sampleRate);
558 converterConfig.allowDynamicSampleRate = true;
559
560 ma_result result = ma_data_converter_init(&converterConfig, NULL, &audioBuffer->converter);
561
562 if (result != MA_SUCCESS)
563 {
564 TRACELOG(LOG_WARNING, "AUDIO: Failed to create data conversion pipeline");
565 RL_FREE(audioBuffer);
566 return NULL;
567 }
568
569 // Init audio buffer values
570 audioBuffer->volume = 1.0f;
571 audioBuffer->pitch = 1.0f;
572 audioBuffer->pan = 0.5f;
573
574 audioBuffer->callback = NULL;
575 audioBuffer->processor = NULL;
576
577 audioBuffer->playing = false;
578 audioBuffer->paused = false;
579 audioBuffer->looping = false;
580
581 audioBuffer->usage = usage;
582 audioBuffer->frameCursorPos = 0;
583 audioBuffer->sizeInFrames = sizeInFrames;
584
585 // Buffers should be marked as processed by default so that a call to
586 // UpdateAudioStream() immediately after initialization works correctly
587 audioBuffer->isSubBufferProcessed[0] = true;
588 audioBuffer->isSubBufferProcessed[1] = true;
589
590 // Track audio buffer to linked list next position
591 TrackAudioBuffer(audioBuffer);
592
593 return audioBuffer;
594 }
595
596 // Delete an audio buffer
597 void UnloadAudioBuffer(AudioBuffer *buffer)
598 {
599 if (buffer != NULL)
600 {
601 ma_data_converter_uninit(&buffer->converter, NULL);
602 UntrackAudioBuffer(buffer);
603 RL_FREE(buffer->data);
604 RL_FREE(buffer);
605 }
606 }
607
608 // Check if an audio buffer is playing
609 bool IsAudioBufferPlaying(AudioBuffer *buffer)
610 {
611 bool result = false;
612
613 if (buffer != NULL) result = (buffer->playing && !buffer->paused);
614
615 return result;
616 }
617
618 // Play an audio buffer
619 // NOTE: Playback is restarted from the beginning of the buffer.
620 // Use PauseAudioBuffer() and ResumeAudioBuffer() if the playback position should be maintained.
621 void PlayAudioBuffer(AudioBuffer *buffer)
622 {
623 if (buffer != NULL)
624 {
625 buffer->playing = true;
626 buffer->paused = false;
627 buffer->frameCursorPos = 0;
628 }
629 }
630
631 // Stop an audio buffer
632 void StopAudioBuffer(AudioBuffer *buffer)
633 {
634 if (buffer != NULL)
635 {
636 if (IsAudioBufferPlaying(buffer))
637 {
638 buffer->playing = false;
639 buffer->paused = false;
640 buffer->frameCursorPos = 0;
641 buffer->framesProcessed = 0;
642 buffer->isSubBufferProcessed[0] = true;
643 buffer->isSubBufferProcessed[1] = true;
644 }
645 }
646 }
647
648 // Pause an audio buffer
649 void PauseAudioBuffer(AudioBuffer *buffer)
650 {
651 if (buffer != NULL) buffer->paused = true;
652 }
653
654 // Resume an audio buffer
655 void ResumeAudioBuffer(AudioBuffer *buffer)
656 {
657 if (buffer != NULL) buffer->paused = false;
658 }
659
660 // Set volume for an audio buffer
661 void SetAudioBufferVolume(AudioBuffer *buffer, float volume)
662 {
663 if (buffer != NULL) buffer->volume = volume;
664 }
665
666 // Set pitch for an audio buffer
667 void SetAudioBufferPitch(AudioBuffer *buffer, float pitch)
668 {
669 if ((buffer != NULL) && (pitch > 0.0f))
670 {
671 // Pitching is just an adjustment of the sample rate.
672 // Note that this changes the duration of the sound:
673 // - higher pitches will make the sound faster
674 // - lower pitches make it slower
675 ma_uint32 outputSampleRate = (ma_uint32)((float)buffer->converter.sampleRateOut/pitch);
676 ma_data_converter_set_rate(&buffer->converter, buffer->converter.sampleRateIn, outputSampleRate);
677
678 buffer->pitch = pitch;
679 }
680 }
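// A quick worked example of the rate math above, assuming a 48000 Hz device output for
// illustration: SetAudioBufferPitch(buffer, 2.0f) sets the converter output rate to
// 48000/2 = 24000 Hz, so while the device keeps pulling frames at 48000 Hz the source is
// consumed twice as fast: the sound plays in half the time and one octave higher; a pitch
// of 0.5f does the opposite.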
681
682 // Set pan for an audio buffer
683 void SetAudioBufferPan(AudioBuffer *buffer, float pan)
684 {
685 if (pan < 0.0f) pan = 0.0f;
686 else if (pan > 1.0f) pan = 1.0f;
687
688 if (buffer != NULL) buffer->pan = pan;
689 }
690
691 // Track audio buffer to linked list next position
692 void TrackAudioBuffer(AudioBuffer *buffer)
693 {
694 ma_mutex_lock(&AUDIO.System.lock);
695 {
696 if (AUDIO.Buffer.first == NULL) AUDIO.Buffer.first = buffer;
697 else
698 {
699 AUDIO.Buffer.last->next = buffer;
700 buffer->prev = AUDIO.Buffer.last;
701 }
702
703 AUDIO.Buffer.last = buffer;
704 }
705 ma_mutex_unlock(&AUDIO.System.lock);
706 }
707
708 // Untrack audio buffer from linked list
709 void UntrackAudioBuffer(AudioBuffer *buffer)
710 {
711 ma_mutex_lock(&AUDIO.System.lock);
712 {
713 if (buffer->prev == NULL) AUDIO.Buffer.first = buffer->next;
714 else buffer->prev->next = buffer->next;
715
716 if (buffer->next == NULL) AUDIO.Buffer.last = buffer->prev;
717 else buffer->next->prev = buffer->prev;
718
719 buffer->prev = NULL;
720 buffer->next = NULL;
721 }
722 ma_mutex_unlock(&AUDIO.System.lock);
723 }
724
725 //----------------------------------------------------------------------------------
726 // Module Functions Definition - Sounds loading and playing (.WAV)
727 //----------------------------------------------------------------------------------
728
729 // Load wave data from file
730 Wave LoadWave(const char *fileName)
731 {
732 Wave wave = { 0 };
733
734 // Loading file to memory
735 unsigned int fileSize = 0;
736 unsigned char *fileData = LoadFileData(fileName, &fileSize);
737
738 // Loading wave from memory data
739 if (fileData != NULL) wave = LoadWaveFromMemory(GetFileExtension(fileName), fileData, fileSize);
740
741 RL_FREE(fileData);
742
743 return wave;
744 }
745
746 // Load wave from memory buffer, fileType refers to the extension, e.g. ".wav"
747 // WARNING: File extension must be provided in lower-case
748 Wave LoadWaveFromMemory(const char *fileType, const unsigned char *fileData, int dataSize)
749 {
750 Wave wave = { 0 };
751
752 if (false) { }
753 #if defined(SUPPORT_FILEFORMAT_WAV)
754 else if ((strcmp(fileType, ".wav") == 0) || (strcmp(fileType, ".WAV") == 0))
755 {
756 drwav wav = { 0 };
757 bool success = drwav_init_memory(&wav, fileData, dataSize, NULL);
758
759 if (success)
760 {
761 wave.frameCount = (unsigned int)wav.totalPCMFrameCount;
762 wave.sampleRate = wav.sampleRate;
763 wave.sampleSize = 16;
764 wave.channels = wav.channels;
765 wave.data = (short *)RL_MALLOC(wave.frameCount*wave.channels*sizeof(short));
766
767 // NOTE: We are forcing conversion to 16bit sample size on reading
768 drwav_read_pcm_frames_s16(&wav, wav.totalPCMFrameCount, wave.data);
769 }
770 else TRACELOG(LOG_WARNING, "WAVE: Failed to load WAV data");
771
772 drwav_uninit(&wav);
773 }
774 #endif
775 #if defined(SUPPORT_FILEFORMAT_OGG)
776 else if ((strcmp(fileType, ".ogg") == 0) || (strcmp(fileType, ".OGG") == 0))
777 {
778 stb_vorbis *oggData = stb_vorbis_open_memory((unsigned char *)fileData, dataSize, NULL, NULL);
779
780 if (oggData != NULL)
781 {
782 stb_vorbis_info info = stb_vorbis_get_info(oggData);
783
784 wave.sampleRate = info.sample_rate;
785 wave.sampleSize = 16; // By default, ogg data is 16 bit per sample (short)
786 wave.channels = info.channels;
787 wave.frameCount = (unsigned int)stb_vorbis_stream_length_in_samples(oggData); // NOTE: It returns frames!
788 wave.data = (short *)RL_MALLOC(wave.frameCount*wave.channels*sizeof(short));
789
790 // NOTE: Get the number of samples to process (be careful! we ask for number of shorts, not bytes!)
791 stb_vorbis_get_samples_short_interleaved(oggData, info.channels, (short *)wave.data, wave.frameCount*wave.channels);
792 stb_vorbis_close(oggData);
793 }
794 else TRACELOG(LOG_WARNING, "WAVE: Failed to load OGG data");
795 }
796 #endif
797 #if defined(SUPPORT_FILEFORMAT_MP3)
798 else if ((strcmp(fileType, ".mp3") == 0) || (strcmp(fileType, ".MP3") == 0))
799 {
800 drmp3_config config = { 0 };
801 unsigned long long int totalFrameCount = 0;
802
803 // NOTE: We are forcing conversion to 32bit float sample size on reading
804 wave.data = drmp3_open_memory_and_read_pcm_frames_f32(fileData, dataSize, &config, &totalFrameCount, NULL);
805 wave.sampleSize = 32;
806
807 if (wave.data != NULL)
808 {
809 wave.channels = config.channels;
810 wave.sampleRate = config.sampleRate;
811 wave.frameCount = (int)totalFrameCount;
812 }
813 else TRACELOG(LOG_WARNING, "WAVE: Failed to load MP3 data");
814
815 }
816 #endif
817 #if defined(SUPPORT_FILEFORMAT_QOA)
818 else if ((strcmp(fileType, ".qoa") == 0) || (strcmp(fileType, ".QOA") == 0))
819 {
820 qoa_desc qoa = { 0 };
821
822 // NOTE: Returned sample data is always 16 bit?
823 wave.data = qoa_decode(fileData, dataSize, &qoa);
824 wave.sampleSize = 16;
825
826 if (wave.data != NULL)
827 {
828 wave.channels = qoa.channels;
829 wave.sampleRate = qoa.samplerate;
830 wave.frameCount = qoa.samples;
831 }
832 else TRACELOG(LOG_WARNING, "WAVE: Failed to load QOA data");
833
834 }
835 #endif
836 #if defined(SUPPORT_FILEFORMAT_FLAC)
837 else if ((strcmp(fileType, ".flac") == 0) || (strcmp(fileType, ".FLAC") == 0))
838 {
839 unsigned long long int totalFrameCount = 0;
840
841 // NOTE: We are forcing conversion to 16bit sample size on reading
842 wave.data = drflac_open_memory_and_read_pcm_frames_s16(fileData, dataSize, &wave.channels, &wave.sampleRate, &totalFrameCount, NULL);
843 wave.sampleSize = 16;
844
845 if (wave.data != NULL) wave.frameCount = (unsigned int)totalFrameCount;
846 else TRACELOG(LOG_WARNING, "WAVE: Failed to load FLAC data");
847 }
848 #endif
849 else TRACELOG(LOG_WARNING, "WAVE: Data format not supported");
850
851 TRACELOG(LOG_INFO, "WAVE: Data loaded successfully (%i Hz, %i bit, %i channels)", wave.sampleRate, wave.sampleSize, wave.channels);
852
853 return wave;
854 }
855
856 // Checks if wave data is ready
857 bool IsWaveReady(Wave wave)
858 {
859 return ((wave.data != NULL) && // Validate wave data available
860 (wave.frameCount > 0) && // Validate frame count
861 (wave.sampleRate > 0) && // Validate sample rate is supported
862 (wave.sampleSize > 0) && // Validate sample size is supported
863 (wave.channels > 0)); // Validate number of channels supported
864 }
865
866 // Load sound from file
867 // NOTE: The entire file is loaded to memory to be played (no-streaming)
868 Sound LoadSound(const char *fileName)
869 {
870 Wave wave = LoadWave(fileName);
871
872 Sound sound = LoadSoundFromWave(wave);
873
874 UnloadWave(wave); // Sound is loaded, we can unload wave
875
876 return sound;
877 }
878
879 // Load sound from wave data
880 // NOTE: Wave data must be unallocated manually
881 Sound LoadSoundFromWave(Wave wave)
882 {
883 Sound sound = { 0 };
884
885 if (wave.data != NULL)
886 {
887 // When using miniaudio we need to do our own mixing.
888 // To simplify this we need to convert the format of each sound to be consistent with
889 // the format used to open the playback AUDIO.System.device. We can do this two ways:
890 //
891 // 1) Convert the whole sound in one go at load time (here).
892 // 2) Convert the audio data in chunks at mixing time.
893 //
894 // The first option has been selected: format conversion is done at the loading stage.
895 // The downside is that it uses more memory if the original sound is u8 or s16.
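// For example, one second of mono s16 audio at 44100 Hz occupies 44100*1*2 = ~86 KB, but once
// converted to the f32 stereo device format at 48000 Hz it takes 48000*2*4 = ~375 KB (rates and
// sizes here are illustrative; the actual figures depend on the device configuration).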
896 ma_format formatIn = ((wave.sampleSize == 8)? ma_format_u8 : ((wave.sampleSize == 16)? ma_format_s16 : ma_format_f32));
897 ma_uint32 frameCountIn = wave.frameCount;
898
899 ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, 0, AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, NULL, frameCountIn, formatIn, wave.channels, wave.sampleRate);
900 if (frameCount == 0) TRACELOG(LOG_WARNING, "SOUND: Failed to get frame count for format conversion");
901
902 AudioBuffer *audioBuffer = LoadAudioBuffer(AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, frameCount, AUDIO_BUFFER_USAGE_STATIC);
903 if (audioBuffer == NULL)
904 {
905 TRACELOG(LOG_WARNING, "SOUND: Failed to create buffer");
906 return sound; // early return to avoid dereferencing the audioBuffer null pointer
907 }
908
909 frameCount = (ma_uint32)ma_convert_frames(audioBuffer->data, frameCount, AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, wave.data, frameCountIn, formatIn, wave.channels, wave.sampleRate);
910 if (frameCount == 0) TRACELOG(LOG_WARNING, "SOUND: Failed format conversion");
911
912 sound.frameCount = frameCount;
913 sound.stream.sampleRate = AUDIO.System.device.sampleRate;
914 sound.stream.sampleSize = 32;
915 sound.stream.channels = AUDIO_DEVICE_CHANNELS;
916 sound.stream.buffer = audioBuffer;
917 }
918
919 return sound;
920 }
921
922 // Clone sound from existing sound data; the clone does not own the wave data
923 // NOTE: Wave data must be unallocated manually and will be shared across all clones
925 Sound LoadSoundAlias(Sound source)
926 {
927 Sound sound = { 0 };
928
929 if (source.stream.buffer->data != NULL)
930 {
931 AudioBuffer* audioBuffer = LoadAudioBuffer(AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, source.frameCount, AUDIO_BUFFER_USAGE_STATIC);
932 if (audioBuffer == NULL)
933 {
934 TRACELOG(LOG_WARNING, "SOUND: Failed to create buffer");
935 return sound; // early return to avoid dereferencing the audioBuffer null pointer
936 }
937 audioBuffer->data = source.stream.buffer->data;
938 sound.frameCount = source.frameCount;
939 sound.stream.sampleRate = AUDIO.System.device.sampleRate;
940 sound.stream.sampleSize = 32;
941 sound.stream.channels = AUDIO_DEVICE_CHANNELS;
942 sound.stream.buffer = audioBuffer;
943 }
944
945 return sound;
946 }
947
948 // Checks if a sound is ready
949 bool IsSoundReady(Sound sound)
950 {
951 return ((sound.frameCount > 0) && // Validate frame count
952 (sound.stream.buffer != NULL) && // Validate stream buffer
953 (sound.stream.sampleRate > 0) && // Validate sample rate is supported
954 (sound.stream.sampleSize > 0) && // Validate sample size is supported
955 (sound.stream.channels > 0)); // Validate number of channels supported
956 }
957
958 // Unload wave data
959 void UnloadWave(Wave wave)
960 {
961 RL_FREE(wave.data);
962 //TRACELOG(LOG_INFO, "WAVE: Unloaded wave data from RAM");
963 }
964
965 // Unload sound
966 void UnloadSound(Sound sound)
967 {
968 UnloadAudioBuffer(sound.stream.buffer);
969 //TRACELOG(LOG_INFO, "SOUND: Unloaded sound data from RAM");
970 }
971
972 void UnloadSoundAlias(Sound alias)
973 {
974 // Untrack and unload just the sound buffer, not the sample data; it is shared with the alias's source sound
975 if (alias.stream.buffer != NULL)
976 {
977 ma_data_converter_uninit(&alias.stream.buffer->converter, NULL);
978 UntrackAudioBuffer(alias.stream.buffer);
979 RL_FREE(alias.stream.buffer);
980 }
981 }
982
983 // Update sound buffer with new data
984 void UpdateSound(Sound sound, const void *data, int sampleCount)
985 {
986 if (sound.stream.buffer != NULL)
987 {
988 StopAudioBuffer(sound.stream.buffer);
989
990 // TODO: May want to lock/unlock this since this data buffer is read at mixing time
991 memcpy(sound.stream.buffer->data, data, sampleCount*ma_get_bytes_per_frame(sound.stream.buffer->converter.formatIn, sound.stream.buffer->converter.channelsIn));
992 }
993 }
994
995 // Export wave data to file
996 bool ExportWave(Wave wave, const char *fileName)
997 {
998 bool success = false;
999
1000 if (false) { }
1001 #if defined(SUPPORT_FILEFORMAT_WAV)
1002 else if (IsFileExtension(fileName, ".wav"))
1003 {
1004 drwav wav = { 0 };
1005 drwav_data_format format = { 0 };
1006 format.container = drwav_container_riff;
1007 if (wave.sampleSize == 32) format.format = DR_WAVE_FORMAT_IEEE_FLOAT;
1008 else format.format = DR_WAVE_FORMAT_PCM;
1009 format.channels = wave.channels;
1010 format.sampleRate = wave.sampleRate;
1011 format.bitsPerSample = wave.sampleSize;
1012
1013 void *fileData = NULL;
1014 size_t fileDataSize = 0;
1015 success = drwav_init_memory_write(&wav, &fileData, &fileDataSize, &format, NULL);
1016 if (success) success = (int)drwav_write_pcm_frames(&wav, wave.frameCount, wave.data);
1017 drwav_result result = drwav_uninit(&wav);
1018
1019 if (result == DRWAV_SUCCESS) success = SaveFileData(fileName, (unsigned char *)fileData, (unsigned int)fileDataSize);
1020
1021 drwav_free(fileData, NULL);
1022 }
1023 #endif
1024 #if defined(SUPPORT_FILEFORMAT_QOA)
1025 else if (IsFileExtension(fileName, ".qoa"))
1026 {
1027 if (wave.sampleSize == 16)
1028 {
1029 qoa_desc qoa = { 0 };
1030 qoa.channels = wave.channels;
1031 qoa.samplerate = wave.sampleRate;
1032 qoa.samples = wave.frameCount;
1033
1034 int bytesWritten = qoa_write(fileName, wave.data, &qoa);
1035 if (bytesWritten > 0) success = true;
1036 }
1037 else TRACELOG(LOG_WARNING, "AUDIO: Wave data must be 16 bit per sample for QOA format export");
1038 }
1039 #endif
1040 else if (IsFileExtension(fileName, ".raw"))
1041 {
1042 // Export raw sample data (without header)
1043 // NOTE: It's up to the user to track wave parameters
1044 success = SaveFileData(fileName, wave.data, wave.frameCount*wave.channels*wave.sampleSize/8);
1045 }
1046
1047 if (success) TRACELOG(LOG_INFO, "FILEIO: [%s] Wave data exported successfully", fileName);
1048 else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to export wave data", fileName);
1049
1050 return success;
1051 }
1052
1053 // Export wave sample data to code (.h)
1054 bool ExportWaveAsCode(Wave wave, const char *fileName)
1055 {
1056 bool success = false;
1057
1058 #ifndef TEXT_BYTES_PER_LINE
1059 #define TEXT_BYTES_PER_LINE 20
1060 #endif
1061
1062 int waveDataSize = wave.frameCount*wave.channels*wave.sampleSize/8;
1063
1064 // NOTE: Text data buffer size is estimated considering wave data size in bytes
1065 // and requiring 6 char bytes for every byte: "0x00, "
1066 char *txtData = (char *)RL_CALLOC(waveDataSize*6 + 2000, sizeof(char));
1067
1068 int byteCount = 0;
1069 byteCount += sprintf(txtData + byteCount, "\n//////////////////////////////////////////////////////////////////////////////////\n");
1070 byteCount += sprintf(txtData + byteCount, "// //\n");
1071 byteCount += sprintf(txtData + byteCount, "// WaveAsCode exporter v1.1 - Wave data exported as an array of bytes //\n");
1072 byteCount += sprintf(txtData + byteCount, "// //\n");
1073 byteCount += sprintf(txtData + byteCount, "// more info and bugs-report: github.com/raysan5/raylib //\n");
1074 byteCount += sprintf(txtData + byteCount, "// feedback and support: ray[at]raylib.com //\n");
1075 byteCount += sprintf(txtData + byteCount, "// //\n");
1076 byteCount += sprintf(txtData + byteCount, "// Copyright (c) 2018-2023 Ramon Santamaria (@raysan5) //\n");
1077 byteCount += sprintf(txtData + byteCount, "// //\n");
1078 byteCount += sprintf(txtData + byteCount, "//////////////////////////////////////////////////////////////////////////////////\n\n");
1079
1080 // Get file name from path and convert variable name to uppercase
1081 char varFileName[256] = { 0 };
1082 strcpy(varFileName, GetFileNameWithoutExt(fileName));
1083 for (int i = 0; varFileName[i] != '\0'; i++) if (varFileName[i] >= 'a' && varFileName[i] <= 'z') { varFileName[i] = varFileName[i] - 32; }
1084
1085 // Add wave information
1086 byteCount += sprintf(txtData + byteCount, "// Wave data information\n");
1087 byteCount += sprintf(txtData + byteCount, "#define %s_FRAME_COUNT %u\n", varFileName, wave.frameCount);
1088 byteCount += sprintf(txtData + byteCount, "#define %s_SAMPLE_RATE %u\n", varFileName, wave.sampleRate);
1089 byteCount += sprintf(txtData + byteCount, "#define %s_SAMPLE_SIZE %u\n", varFileName, wave.sampleSize);
1090 byteCount += sprintf(txtData + byteCount, "#define %s_CHANNELS %u\n\n", varFileName, wave.channels);
1091
1092 // Write wave data as an array of values
1093 // Wave data is exported as byte array for 8/16bit and float array for 32bit float data
1094 // NOTE: Frame data exported is channel-interleaved: frame01[sampleChannel1, sampleChannel2, ...], frame02[], frame03[]
1095 if (wave.sampleSize == 32)
1096 {
1097 byteCount += sprintf(txtData + byteCount, "static float %s_DATA[%i] = {\n", varFileName, waveDataSize/4);
1098 for (int i = 1; i < waveDataSize/4; i++) byteCount += sprintf(txtData + byteCount, ((i%TEXT_BYTES_PER_LINE == 0)? "%.4ff,\n " : "%.4ff, "), ((float *)wave.data)[i - 1]);
1099 byteCount += sprintf(txtData + byteCount, "%.4ff };\n", ((float *)wave.data)[waveDataSize/4 - 1]);
1100 }
1101 else
1102 {
1103 byteCount += sprintf(txtData + byteCount, "static unsigned char %s_DATA[%i] = { ", varFileName, waveDataSize);
1104 for (int i = 1; i < waveDataSize; i++) byteCount += sprintf(txtData + byteCount, ((i%TEXT_BYTES_PER_LINE == 0)? "0x%x,\n " : "0x%x, "), ((unsigned char *)wave.data)[i - 1]);
1105 byteCount += sprintf(txtData + byteCount, "0x%x };\n", ((unsigned char *)wave.data)[waveDataSize - 1]);
1106 }
1107
1108 // NOTE: Text data length exported is determined by '\0' (NULL) character
1109 success = SaveFileText(fileName, txtData);
1110
1111 RL_FREE(txtData);
1112
1113 if (success != 0) TRACELOG(LOG_INFO, "FILEIO: [%s] Wave as code exported successfully", fileName);
1114 else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to export wave as code", fileName);
1115
1116 return success;
1117 }
1118
1119 // Play a sound
1120 void PlaySound(Sound sound)
1121 {
1122 PlayAudioBuffer(sound.stream.buffer);
1123 }
1124
1125 // Pause a sound
1126 void PauseSound(Sound sound)
1127 {
1128 PauseAudioBuffer(sound.stream.buffer);
1129 }
1130
1131 // Resume a paused sound
1132 void ResumeSound(Sound sound)
1133 {
1134 ResumeAudioBuffer(sound.stream.buffer);
1135 }
1136
1137 // Stop playing a sound
1138 void StopSound(Sound sound)
1139 {
1140 StopAudioBuffer(sound.stream.buffer);
1141 }
1142
1143 // Check if a sound is playing
1144 bool IsSoundPlaying(Sound sound)
1145 {
1146 return IsAudioBufferPlaying(sound.stream.buffer);
1147 }
1148
1149 // Set volume for a sound
1150 void SetSoundVolume(Sound sound, float volume)
1151 {
1152 SetAudioBufferVolume(sound.stream.buffer, volume);
1153 }
1154
1155 // Set pitch for a sound
1156 void SetSoundPitch(Sound sound, float pitch)
1157 {
1158 SetAudioBufferPitch(sound.stream.buffer, pitch);
1159 }
1160
1161 // Set pan for a sound
1162 void SetSoundPan(Sound sound, float pan)
1163 {
1164 SetAudioBufferPan(sound.stream.buffer, pan);
1165 }
1166
1167 // Convert wave data to desired format
1168 void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
1169 {
1170 ma_format formatIn = ((wave->sampleSize == 8)? ma_format_u8 : ((wave->sampleSize == 16)? ma_format_s16 : ma_format_f32));
1171 ma_format formatOut = ((sampleSize == 8)? ma_format_u8 : ((sampleSize == 16)? ma_format_s16 : ma_format_f32));
1172
1173 ma_uint32 frameCountIn = wave->frameCount;
1174 ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, 0, formatOut, channels, sampleRate, NULL, frameCountIn, formatIn, wave->channels, wave->sampleRate);
1175
1176 if (frameCount == 0)
1177 {
1178 TRACELOG(LOG_WARNING, "WAVE: Failed to get frame count for format conversion");
1179 return;
1180 }
1181
1182 void *data = RL_MALLOC(frameCount*channels*(sampleSize/8));
1183
1184 frameCount = (ma_uint32)ma_convert_frames(data, frameCount, formatOut, channels, sampleRate, wave->data, frameCountIn, formatIn, wave->channels, wave->sampleRate);
1185 if (frameCount == 0)
1186 {
1187 TRACELOG(LOG_WARNING, "WAVE: Failed format conversion");
1188 return;
1189 }
1190
1191 wave->frameCount = frameCount;
1192 wave->sampleSize = sampleSize;
1193 wave->sampleRate = sampleRate;
1194 wave->channels = channels;
1195
1196 RL_FREE(wave->data);
1197 wave->data = data;
1198 }
1199
1200 // Copy a wave to a new wave
1201 Wave WaveCopy(Wave wave)
1202 {
1203 Wave newWave = { 0 };
1204
1205 newWave.data = RL_MALLOC(wave.frameCount*wave.channels*wave.sampleSize/8);
1206
1207 if (newWave.data != NULL)
1208 {
1209 // NOTE: Size must be provided in bytes
1210 memcpy(newWave.data, wave.data, wave.frameCount*wave.channels*wave.sampleSize/8);
1211
1212 newWave.frameCount = wave.frameCount;
1213 newWave.sampleRate = wave.sampleRate;
1214 newWave.sampleSize = wave.sampleSize;
1215 newWave.channels = wave.channels;
1216 }
1217
1218 return newWave;
1219 }
1220
1221 // Crop a wave to a defined sample range
1222 // NOTE: Safety check in case the range is out of bounds
1223 void WaveCrop(Wave *wave, int initSample, int finalSample)
1224 {
1225 if ((initSample >= 0) && (initSample < finalSample) && ((unsigned int)finalSample < (wave->frameCount*wave->channels)))
1226 {
1227 int sampleCount = finalSample - initSample;
1228
1229 void *data = RL_MALLOC(sampleCount*wave->sampleSize/8);
1230
1231 memcpy(data, (unsigned char *)wave->data + (initSample*wave->channels*wave->sampleSize/8), sampleCount*wave->sampleSize/8);
1232
1233 RL_FREE(wave->data);
1234 wave->data = data;
1235 }
1236 else TRACELOG(LOG_WARNING, "WAVE: Crop range out of bounds");
1237 }
1238
1239 // Load samples data from wave as a floats array
1240 // NOTE 1: Returned sample values are normalized to range [-1..1]
1241 // NOTE 2: Sample data allocated should be freed with UnloadWaveSamples()
1242 float *LoadWaveSamples(Wave wave)
1243 {
1244 float *samples = (float *)RL_MALLOC(wave.frameCount*wave.channels*sizeof(float));
1245
1246 // NOTE: sampleCount is the total number of interleaved samples (including all channels)
1247
1248 for (unsigned int i = 0; i < wave.frameCount*wave.channels; i++)
1249 {
1250 if (wave.sampleSize == 8) samples[i] = (float)(((unsigned char *)wave.data)[i] - 127)/256.0f;
1251 else if (wave.sampleSize == 16) samples[i] = (float)(((short *)wave.data)[i])/32767.0f;
1252 else if (wave.sampleSize == 32) samples[i] = ((float *)wave.data)[i];
1253 }
1254
1255 return samples;
1256 }
1257
1258 // Unload samples data loaded with LoadWaveSamples()
1259 void UnloadWaveSamples(float *samples)
1260 {
1261 RL_FREE(samples);
1262 }
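// Typical use of the two functions above (sketch; 'wave' is any previously loaded Wave):
//
//     float *samples = LoadWaveSamples(wave);
//     for (unsigned int i = 0; i < wave.frameCount*wave.channels; i++)
//     {
//         // samples[i] is normalized to [-1..1]; inspect or process it here
//     }
//     UnloadWaveSamples(samples);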
1263
1264 //----------------------------------------------------------------------------------
1265 // Module Functions Definition - Music loading and stream playing
1266 //----------------------------------------------------------------------------------
1267
1268 // Load music stream from file
1269 Music LoadMusicStream(const char *fileName)
1270 {
1271 Music music = { 0 };
1272 bool musicLoaded = false;
1273
1274 if (false) { }
1275 #if defined(SUPPORT_FILEFORMAT_WAV)
1276 else if (IsFileExtension(fileName, ".wav"))
1277 {
1278 drwav *ctxWav = RL_CALLOC(1, sizeof(drwav));
1279 bool success = drwav_init_file(ctxWav, fileName, NULL);
1280
1281 music.ctxType = MUSIC_AUDIO_WAV;
1282 music.ctxData = ctxWav;
1283
1284 if (success)
1285 {
1286 int sampleSize = ctxWav->bitsPerSample;
1287 if (ctxWav->bitsPerSample == 24) sampleSize = 16; // Forcing conversion to s16 on UpdateMusicStream()
1288
1289 music.stream = LoadAudioStream(ctxWav->sampleRate, sampleSize, ctxWav->channels);
1290 music.frameCount = (unsigned int)ctxWav->totalPCMFrameCount;
1291 music.looping = true; // Looping enabled by default
1292 musicLoaded = true;
1293 }
1294 }
1295 #endif
1296 #if defined(SUPPORT_FILEFORMAT_OGG)
1297 else if (IsFileExtension(fileName, ".ogg"))
1298 {
1299 // Open ogg audio stream
1300 music.ctxType = MUSIC_AUDIO_OGG;
1301 music.ctxData = stb_vorbis_open_filename(fileName, NULL, NULL);
1302
1303 if (music.ctxData != NULL)
1304 {
1305 stb_vorbis_info info = stb_vorbis_get_info((stb_vorbis *)music.ctxData); // Get Ogg file info
1306
1307 // OGG sample size defaults to 16 bit; it's enough for a compressed format
1308 music.stream = LoadAudioStream(info.sample_rate, 16, info.channels);
1309
1310 // WARNING: Despite its name, this function returns the length in frames (samples per channel), so no channel multiplication is needed
1311 music.frameCount = (unsigned int)stb_vorbis_stream_length_in_samples((stb_vorbis *)music.ctxData);
1312 music.looping = true; // Looping enabled by default
1313 musicLoaded = true;
1314 }
1315 }
1316 #endif
1317 #if defined(SUPPORT_FILEFORMAT_MP3)
1318 else if (IsFileExtension(fileName, ".mp3"))
1319 {
1320 drmp3 *ctxMp3 = RL_CALLOC(1, sizeof(drmp3));
1321 int result = drmp3_init_file(ctxMp3, fileName, NULL);
1322
1323 music.ctxType = MUSIC_AUDIO_MP3;
1324 music.ctxData = ctxMp3;
1325
1326 if (result > 0)
1327 {
1328 music.stream = LoadAudioStream(ctxMp3->sampleRate, 32, ctxMp3->channels);
1329 music.frameCount = (unsigned int)drmp3_get_pcm_frame_count(ctxMp3);
1330 music.looping = true; // Looping enabled by default
1331 musicLoaded = true;
1332 }
1333 }
1334 #endif
1335 #if defined(SUPPORT_FILEFORMAT_QOA)
1336 else if (IsFileExtension(fileName, ".qoa"))
1337 {
1338 qoaplay_desc *ctxQoa = qoaplay_open(fileName);
1339 music.ctxType = MUSIC_AUDIO_QOA;
1340 music.ctxData = ctxQoa;
1341
1342 if (ctxQoa->file != NULL)
1343 {
1344 // NOTE: We are loading samples as 32bit float normalized data, so
1345 // we configure the output audio stream to also use float 32bit
1346 music.stream = LoadAudioStream(ctxQoa->info.samplerate, 32, ctxQoa->info.channels);
1347 music.frameCount = ctxQoa->info.samples;
1348 music.looping = true; // Looping enabled by default
1349 musicLoaded = true;
1350 }
1351 }
1352 #endif
1353 #if defined(SUPPORT_FILEFORMAT_FLAC)
1354 else if (IsFileExtension(fileName, ".flac"))
1355 {
1356 music.ctxType = MUSIC_AUDIO_FLAC;
1357 music.ctxData = drflac_open_file(fileName, NULL);
1358
1359 if (music.ctxData != NULL)
1360 {
1361 drflac *ctxFlac = (drflac *)music.ctxData;
1362
1363 music.stream = LoadAudioStream(ctxFlac->sampleRate, ctxFlac->bitsPerSample, ctxFlac->channels);
1364 music.frameCount = (unsigned int)ctxFlac->totalPCMFrameCount;
1365 music.looping = true; // Looping enabled by default
1366 musicLoaded = true;
1367 }
1368 }
1369 #endif
1370 #if defined(SUPPORT_FILEFORMAT_XM)
1371 else if (IsFileExtension(fileName, ".xm"))
1372 {
1373 jar_xm_context_t *ctxXm = NULL;
1374 int result = jar_xm_create_context_from_file(&ctxXm, AUDIO.System.device.sampleRate, fileName);
1375
1376 music.ctxType = MUSIC_MODULE_XM;
1377 music.ctxData = ctxXm;
1378
1379 if (result == 0) // XM AUDIO.System.context created successfully
1380 {
1381 jar_xm_set_max_loop_count(ctxXm, 0); // Set infinite number of loops
1382
1383 unsigned int bits = 32;
1384 if (AUDIO_DEVICE_FORMAT == ma_format_s16) bits = 16;
1385 else if (AUDIO_DEVICE_FORMAT == ma_format_u8) bits = 8;
1386
1387 // NOTE: Only stereo is supported for XM
1388 music.stream = LoadAudioStream(AUDIO.System.device.sampleRate, bits, AUDIO_DEVICE_CHANNELS);
1389 music.frameCount = (unsigned int)jar_xm_get_remaining_samples(ctxXm); // NOTE: Always 2 channels (stereo)
1390 music.looping = true; // Looping enabled by default
1391 jar_xm_reset(ctxXm); // make sure we start at the beginning of the song
1392 musicLoaded = true;
1393 }
1394 }
1395 #endif
1396 #if defined(SUPPORT_FILEFORMAT_MOD)
1397 else if (IsFileExtension(fileName, ".mod"))
1398 {
1399 jar_mod_context_t *ctxMod = RL_CALLOC(1, sizeof(jar_mod_context_t));
1400 jar_mod_init(ctxMod);
1401 int result = jar_mod_load_file(ctxMod, fileName);
1402
1403 music.ctxType = MUSIC_MODULE_MOD;
1404 music.ctxData = ctxMod;
1405
1406 if (result > 0)
1407 {
1408 // NOTE: Only stereo is supported for MOD
1409 music.stream = LoadAudioStream(AUDIO.System.device.sampleRate, 16, AUDIO_DEVICE_CHANNELS);
1410 music.frameCount = (unsigned int)jar_mod_max_samples(ctxMod); // NOTE: Always 2 channels (stereo)
1411 music.looping = true; // Looping enabled by default
1412 musicLoaded = true;
1413 }
1414 }
1415 #endif
1416 else TRACELOG(LOG_WARNING, "STREAM: [%s] File format not supported", fileName);
1417
1418 if (!musicLoaded)
1419 {
1420 if (false) { }
1421 #if defined(SUPPORT_FILEFORMAT_WAV)
1422 else if (music.ctxType == MUSIC_AUDIO_WAV) drwav_uninit((drwav *)music.ctxData);
1423 #endif
1424 #if defined(SUPPORT_FILEFORMAT_OGG)
1425 else if (music.ctxType == MUSIC_AUDIO_OGG) stb_vorbis_close((stb_vorbis *)music.ctxData);
1426 #endif
1427 #if defined(SUPPORT_FILEFORMAT_MP3)
1428 else if (music.ctxType == MUSIC_AUDIO_MP3) { drmp3_uninit((drmp3 *)music.ctxData); RL_FREE(music.ctxData); }
1429 #endif
1430 #if defined(SUPPORT_FILEFORMAT_QOA)
1431 else if (music.ctxType == MUSIC_AUDIO_QOA) qoaplay_close((qoaplay_desc *)music.ctxData);
1432 #endif
1433 #if defined(SUPPORT_FILEFORMAT_FLAC)
1434 else if (music.ctxType == MUSIC_AUDIO_FLAC) drflac_free((drflac *)music.ctxData, NULL);
1435 #endif
1436 #if defined(SUPPORT_FILEFORMAT_XM)
1437 else if (music.ctxType == MUSIC_MODULE_XM) jar_xm_free_context((jar_xm_context_t *)music.ctxData);
1438 #endif
1439 #if defined(SUPPORT_FILEFORMAT_MOD)
1440 else if (music.ctxType == MUSIC_MODULE_MOD) { jar_mod_unload((jar_mod_context_t *)music.ctxData); RL_FREE(music.ctxData); }
1441 #endif
1442
1443 music.ctxData = NULL;
1444 TRACELOG(LOG_WARNING, "FILEIO: [%s] Music file could not be opened", fileName);
1445 }
1446 else
1447 {
1448 // Show some music stream info
1449 TRACELOG(LOG_INFO, "FILEIO: [%s] Music file loaded successfully", fileName);
1450 TRACELOG(LOG_INFO, " > Sample rate: %i Hz", music.stream.sampleRate);
1451 TRACELOG(LOG_INFO, " > Sample size: %i bits", music.stream.sampleSize);
1452 TRACELOG(LOG_INFO, " > Channels: %i (%s)", music.stream.channels, (music.stream.channels == 1)? "Mono" : (music.stream.channels == 2)? "Stereo" : "Multi");
1453 TRACELOG(LOG_INFO, " > Total frames: %i", music.frameCount);
1454 }
1455
1456 return music;
1457 }
1458
1459 // Load music stream from memory buffer, fileType refers to the extension, e.g. ".wav"
1460 // WARNING: File extension must be provided in lower-case
1461 Music LoadMusicStreamFromMemory(const char *fileType, const unsigned char *data, int dataSize)
1462 {
1463 Music music = { 0 };
1464 bool musicLoaded = false;
1465
1466 if (false) { }
1467 #if defined(SUPPORT_FILEFORMAT_WAV)
1468 else if ((strcmp(fileType, ".wav") == 0) || (strcmp(fileType, ".WAV") == 0))
1469 {
1470 drwav *ctxWav = RL_CALLOC(1, sizeof(drwav));
1471
1472 bool success = drwav_init_memory(ctxWav, (const void *)data, dataSize, NULL);
1473
1474 music.ctxType = MUSIC_AUDIO_WAV;
1475 music.ctxData = ctxWav;
1476
1477 if (success)
1478 {
1479 int sampleSize = ctxWav->bitsPerSample;
1480 if (ctxWav->bitsPerSample == 24) sampleSize = 16; // Forcing conversion to s16 on UpdateMusicStream()
1481
1482 music.stream = LoadAudioStream(ctxWav->sampleRate, sampleSize, ctxWav->channels);
1483 music.frameCount = (unsigned int)ctxWav->totalPCMFrameCount;
1484 music.looping = true; // Looping enabled by default
1485 musicLoaded = true;
1486 }
1487 }
1488 #endif
1489 #if defined(SUPPORT_FILEFORMAT_OGG)
1490 else if ((strcmp(fileType, ".ogg") == 0) || (strcmp(fileType, ".OGG") == 0))
1491 {
1492 // Open ogg audio stream
1493 music.ctxType = MUSIC_AUDIO_OGG;
1494 //music.ctxData = stb_vorbis_open_filename(fileName, NULL, NULL);
1495 music.ctxData = stb_vorbis_open_memory((const unsigned char *)data, dataSize, NULL, NULL);
1496
1497 if (music.ctxData != NULL)
1498 {
1499 stb_vorbis_info info = stb_vorbis_get_info((stb_vorbis *)music.ctxData); // Get Ogg file info
1500
1501 // OGG sample size defaults to 16 bit, which is enough for this compressed format
1502 music.stream = LoadAudioStream(info.sample_rate, 16, info.channels);
1503
1504 // NOTE: Despite its name, stb_vorbis_stream_length_in_samples() returns the length in frames (samples per channel), so it can be used directly
1505 music.frameCount = (unsigned int)stb_vorbis_stream_length_in_samples((stb_vorbis *)music.ctxData);
1506 music.looping = true; // Looping enabled by default
1507 musicLoaded = true;
1508 }
1509 }
1510 #endif
1511 #if defined(SUPPORT_FILEFORMAT_MP3)
1512 else if ((strcmp(fileType, ".mp3") == 0) || (strcmp(fileType, ".MP3") == 0))
1513 {
1514 drmp3 *ctxMp3 = RL_CALLOC(1, sizeof(drmp3));
1515 int success = drmp3_init_memory(ctxMp3, (const void*)data, dataSize, NULL);
1516
1517 music.ctxType = MUSIC_AUDIO_MP3;
1518 music.ctxData = ctxMp3;
1519
1520 if (success)
1521 {
1522 music.stream = LoadAudioStream(ctxMp3->sampleRate, 32, ctxMp3->channels);
1523 music.frameCount = (unsigned int)drmp3_get_pcm_frame_count(ctxMp3);
1524 music.looping = true; // Looping enabled by default
1525 musicLoaded = true;
1526 }
1527 }
1528 #endif
1529 #if defined(SUPPORT_FILEFORMAT_QOA)
1530 else if ((strcmp(fileType, ".qoa") == 0) || (strcmp(fileType, ".QOA") == 0))
1531 {
1532 qoaplay_desc *ctxQoa = qoaplay_open_memory(data, dataSize);
1533 music.ctxType = MUSIC_AUDIO_QOA;
1534 music.ctxData = ctxQoa;
1535
1536 if ((ctxQoa != NULL) && (ctxQoa->file_data != NULL) && (ctxQoa->file_data_size != 0))
1537 {
1538 // NOTE: Samples are loaded as 32 bit float normalized data, so
1539 // we configure the output audio stream to also use 32 bit float
1540 music.stream = LoadAudioStream(ctxQoa->info.samplerate, 32, ctxQoa->info.channels);
1541 music.frameCount = ctxQoa->info.samples;
1542 music.looping = true; // Looping enabled by default
1543 musicLoaded = true;
1544 }
1545 }
1546 #endif
1547 #if defined(SUPPORT_FILEFORMAT_FLAC)
1548 else if ((strcmp(fileType, ".flac") == 0) || (strcmp(fileType, ".FLAC") == 0))
1549 {
1550 music.ctxType = MUSIC_AUDIO_FLAC;
1551 music.ctxData = drflac_open_memory((const void*)data, dataSize, NULL);
1552
1553 if (music.ctxData != NULL)
1554 {
1555 drflac *ctxFlac = (drflac *)music.ctxData;
1556
1557 music.stream = LoadAudioStream(ctxFlac->sampleRate, ctxFlac->bitsPerSample, ctxFlac->channels);
1558 music.frameCount = (unsigned int)ctxFlac->totalPCMFrameCount;
1559 music.looping = true; // Looping enabled by default
1560 musicLoaded = true;
1561 }
1562 }
1563 #endif
1564 #if defined(SUPPORT_FILEFORMAT_XM)
1565 else if ((strcmp(fileType, ".xm") == 0) || (strcmp(fileType, ".XM") == 0))
1566 {
1567 jar_xm_context_t *ctxXm = NULL;
1568 int result = jar_xm_create_context_safe(&ctxXm, (const char *)data, dataSize, AUDIO.System.device.sampleRate);
1569 if (result == 0) // XM context created successfully
1570 {
1571 music.ctxType = MUSIC_MODULE_XM;
1572 jar_xm_set_max_loop_count(ctxXm, 0); // Set infinite number of loops
1573
1574 unsigned int bits = 32;
1575 if (AUDIO_DEVICE_FORMAT == ma_format_s16) bits = 16;
1576 else if (AUDIO_DEVICE_FORMAT == ma_format_u8) bits = 8;
1577
1578 // NOTE: Only stereo is supported for XM
1579 music.stream = LoadAudioStream(AUDIO.System.device.sampleRate, bits, 2);
1580 music.frameCount = (unsigned int)jar_xm_get_remaining_samples(ctxXm); // NOTE: Always 2 channels (stereo)
1581 music.looping = true; // Looping enabled by default
1582 jar_xm_reset(ctxXm); // make sure we start at the beginning of the song
1583
1584 music.ctxData = ctxXm;
1585 musicLoaded = true;
1586 }
1587 }
1588 #endif
1589 #if defined(SUPPORT_FILEFORMAT_MOD)
1590 else if ((strcmp(fileType, ".mod") == 0) || (strcmp(fileType, ".MOD") == 0))
1591 {
1592 jar_mod_context_t *ctxMod = (jar_mod_context_t *)RL_MALLOC(sizeof(jar_mod_context_t));
1593 int result = 0;
1594
1595 jar_mod_init(ctxMod);
1596
1597 // Copy data to allocated memory for default UnloadMusicStream
1598 unsigned char *newData = (unsigned char *)RL_MALLOC(dataSize);
1599 memcpy(newData, data, dataSize);
1601
1602 // Memory loaded version for jar_mod_load_file()
1603 if (dataSize && (dataSize < 32*1024*1024))
1604 {
1605 ctxMod->modfilesize = dataSize;
1606 ctxMod->modfile = newData;
1607 if (jar_mod_load(ctxMod, (void *)ctxMod->modfile, dataSize)) result = dataSize;
1608 }
1609
1610 if (result > 0)
1611 {
1612 music.ctxType = MUSIC_MODULE_MOD;
1613
1614 // NOTE: Only stereo is supported for MOD
1615 music.stream = LoadAudioStream(AUDIO.System.device.sampleRate, 16, 2);
1616 music.frameCount = (unsigned int)jar_mod_max_samples(ctxMod); // NOTE: Always 2 channels (stereo)
1617 music.looping = true; // Looping enabled by default
1618 musicLoaded = true;
1619
1620 music.ctxData = ctxMod;
1622 }
1623 }
1624 #endif
1625 else TRACELOG(LOG_WARNING, "STREAM: Data format not supported");
1626
1627 if (!musicLoaded)
1628 {
1629 if (false) { }
1630 #if defined(SUPPORT_FILEFORMAT_WAV)
1631 else if (music.ctxType == MUSIC_AUDIO_WAV) drwav_uninit((drwav *)music.ctxData);
1632 #endif
1633 #if defined(SUPPORT_FILEFORMAT_OGG)
1634 else if (music.ctxType == MUSIC_AUDIO_OGG) stb_vorbis_close((stb_vorbis *)music.ctxData);
1635 #endif
1636 #if defined(SUPPORT_FILEFORMAT_MP3)
1637 else if (music.ctxType == MUSIC_AUDIO_MP3) { drmp3_uninit((drmp3 *)music.ctxData); RL_FREE(music.ctxData); }
1638 #endif
1639 #if defined(SUPPORT_FILEFORMAT_QOA)
1640 else if (music.ctxType == MUSIC_AUDIO_QOA) qoaplay_close((qoaplay_desc *)music.ctxData);
1641 #endif
1642 #if defined(SUPPORT_FILEFORMAT_FLAC)
1643 else if (music.ctxType == MUSIC_AUDIO_FLAC) drflac_free((drflac *)music.ctxData, NULL);
1644 #endif
1645 #if defined(SUPPORT_FILEFORMAT_XM)
1646 else if (music.ctxType == MUSIC_MODULE_XM) jar_xm_free_context((jar_xm_context_t *)music.ctxData);
1647 #endif
1648 #if defined(SUPPORT_FILEFORMAT_MOD)
1649 else if (music.ctxType == MUSIC_MODULE_MOD) { jar_mod_unload((jar_mod_context_t *)music.ctxData); RL_FREE(music.ctxData); }
1650 #endif
1651
1652 music.ctxData = NULL;
1653 TRACELOG(LOG_WARNING, "FILEIO: Music data could not be loaded");
1654 }
1655 else
1656 {
1657 // Show some music stream info
1658 TRACELOG(LOG_INFO, "FILEIO: Music data loaded successfully");
1659 TRACELOG(LOG_INFO, " > Sample rate: %i Hz", music.stream.sampleRate);
1660 TRACELOG(LOG_INFO, " > Sample size: %i bits", music.stream.sampleSize);
1661 TRACELOG(LOG_INFO, " > Channels: %i (%s)", music.stream.channels, (music.stream.channels == 1)? "Mono" : (music.stream.channels == 2)? "Stereo" : "Multi");
1662 TRACELOG(LOG_INFO, " > Total frames: %i", music.frameCount);
1663 }
1664
1665 return music;
1666 }
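// Usage sketch (illustrative only): streaming music from a memory buffer.
// LoadFileData() comes from raylib core (or the standalone helper below); the path is hypothetical.
//
//      unsigned int dataSize = 0;
//      unsigned char *fileData = LoadFileData("resources/ambient.ogg", &dataSize);
//      Music music = LoadMusicStreamFromMemory(".ogg", fileData, (int)dataSize);
//      PlayMusicStream(music);
//      // WARNING: Most decoders keep reading from 'fileData' while streaming,
//      // so keep the buffer allocated until UnloadMusicStream() is called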
1667
1668 // Checks if a music stream is ready
1669 bool IsMusicReady(Music music)
1670 {
1671 return ((music.ctxData != NULL) && // Validate context loaded
1672 (music.frameCount > 0) && // Validate audio frame count
1673 (music.stream.sampleRate > 0) && // Validate sample rate is supported
1674 (music.stream.sampleSize > 0) && // Validate sample size is supported
1675 (music.stream.channels > 0)); // Validate number of channels supported
1676 }
1677
1678 // Unload music stream
1679 void UnloadMusicStream(Music music)
1680 {
1681 UnloadAudioStream(music.stream);
1682
1683 if (music.ctxData != NULL)
1684 {
1685 if (false) { }
1686 #if defined(SUPPORT_FILEFORMAT_WAV)
1687 else if (music.ctxType == MUSIC_AUDIO_WAV) drwav_uninit((drwav *)music.ctxData);
1688 #endif
1689 #if defined(SUPPORT_FILEFORMAT_OGG)
1690 else if (music.ctxType == MUSIC_AUDIO_OGG) stb_vorbis_close((stb_vorbis *)music.ctxData);
1691 #endif
1692 #if defined(SUPPORT_FILEFORMAT_MP3)
1693 else if (music.ctxType == MUSIC_AUDIO_MP3) { drmp3_uninit((drmp3 *)music.ctxData); RL_FREE(music.ctxData); }
1694 #endif
1695 #if defined(SUPPORT_FILEFORMAT_QOA)
1696 else if (music.ctxType == MUSIC_AUDIO_QOA) qoaplay_close((qoaplay_desc *)music.ctxData);
1697 #endif
1698 #if defined(SUPPORT_FILEFORMAT_FLAC)
1699 else if (music.ctxType == MUSIC_AUDIO_FLAC) drflac_free((drflac *)music.ctxData, NULL);
1700 #endif
1701 #if defined(SUPPORT_FILEFORMAT_XM)
1702 else if (music.ctxType == MUSIC_MODULE_XM) jar_xm_free_context((jar_xm_context_t *)music.ctxData);
1703 #endif
1704 #if defined(SUPPORT_FILEFORMAT_MOD)
1705 else if (music.ctxType == MUSIC_MODULE_MOD) { jar_mod_unload((jar_mod_context_t *)music.ctxData); RL_FREE(music.ctxData); }
1706 #endif
1707 }
1708 }
1709
1710 // Start music playing (open stream)
1711 void PlayMusicStream(Music music)
1712 {
1713 if (music.stream.buffer != NULL)
1714 {
1715 // For music streams we need to make sure the frame cursor position is maintained
1716 // This is a workaround for the window-minimized handling in UpdateMusicStream()
1717 // NOTE: In case the window is minimized, the music stream is stopped; just make sure to
1718 // play it again on window restore: if (IsMusicStreamPlaying(music)) PlayMusicStream(music);
1719 ma_uint32 frameCursorPos = music.stream.buffer->frameCursorPos;
1720 PlayAudioStream(music.stream); // WARNING: This resets the cursor position.
1721 music.stream.buffer->frameCursorPos = frameCursorPos;
1722 }
1723 }
1724
1725 // Pause music playing
1726 void PauseMusicStream(Music music)
1727 {
1728 PauseAudioStream(music.stream);
1729 }
1730
1731 // Resume music playing
1732 void ResumeMusicStream(Music music)
1733 {
1734 ResumeAudioStream(music.stream);
1735 }
1736
1737 // Stop music playing (close stream)
1738 void StopMusicStream(Music music)
1739 {
1740 StopAudioStream(music.stream);
1741
1742 switch (music.ctxType)
1743 {
1744 #if defined(SUPPORT_FILEFORMAT_WAV)
1745 case MUSIC_AUDIO_WAV: drwav_seek_to_first_pcm_frame((drwav *)music.ctxData); break;
1746 #endif
1747 #if defined(SUPPORT_FILEFORMAT_OGG)
1748 case MUSIC_AUDIO_OGG: stb_vorbis_seek_start((stb_vorbis *)music.ctxData); break;
1749 #endif
1750 #if defined(SUPPORT_FILEFORMAT_MP3)
1751 case MUSIC_AUDIO_MP3: drmp3_seek_to_start_of_stream((drmp3 *)music.ctxData); break;
1752 #endif
1753 #if defined(SUPPORT_FILEFORMAT_QOA)
1754 case MUSIC_AUDIO_QOA: qoaplay_rewind((qoaplay_desc *)music.ctxData); break;
1755 #endif
1756 #if defined(SUPPORT_FILEFORMAT_FLAC)
1757 case MUSIC_AUDIO_FLAC: drflac__seek_to_first_frame((drflac *)music.ctxData); break;
1758 #endif
1759 #if defined(SUPPORT_FILEFORMAT_XM)
1760 case MUSIC_MODULE_XM: jar_xm_reset((jar_xm_context_t *)music.ctxData); break;
1761 #endif
1762 #if defined(SUPPORT_FILEFORMAT_MOD)
1763 case MUSIC_MODULE_MOD: jar_mod_seek_start((jar_mod_context_t *)music.ctxData); break;
1764 #endif
1765 default: break;
1766 }
1767 }
1768
1769 // Seek music to a certain position (in seconds)
1770 void SeekMusicStream(Music music, float position)
1771 {
1772 // Seeking is not supported in module formats
1773 if ((music.ctxType == MUSIC_MODULE_XM) || (music.ctxType == MUSIC_MODULE_MOD)) return;
1774
1775 unsigned int positionInFrames = (unsigned int)(position*music.stream.sampleRate);
1776
1777 switch (music.ctxType)
1778 {
1779 #if defined(SUPPORT_FILEFORMAT_WAV)
1780 case MUSIC_AUDIO_WAV: drwav_seek_to_pcm_frame((drwav *)music.ctxData, positionInFrames); break;
1781 #endif
1782 #if defined(SUPPORT_FILEFORMAT_OGG)
1783 case MUSIC_AUDIO_OGG: stb_vorbis_seek_frame((stb_vorbis *)music.ctxData, positionInFrames); break;
1784 #endif
1785 #if defined(SUPPORT_FILEFORMAT_MP3)
1786 case MUSIC_AUDIO_MP3: drmp3_seek_to_pcm_frame((drmp3 *)music.ctxData, positionInFrames); break;
1787 #endif
1788 #if defined(SUPPORT_FILEFORMAT_QOA)
1789 case MUSIC_AUDIO_QOA: qoaplay_seek_frame((qoaplay_desc *)music.ctxData, positionInFrames); break;
1790 #endif
1791 #if defined(SUPPORT_FILEFORMAT_FLAC)
1792 case MUSIC_AUDIO_FLAC: drflac_seek_to_pcm_frame((drflac *)music.ctxData, positionInFrames); break;
1793 #endif
1794 default: break;
1795 }
1796
1797 music.stream.buffer->framesProcessed = positionInFrames;
1798 }
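// Example (illustrative): the seek position is converted to frames as position*sampleRate,
// so SeekMusicStream(music, 30.0f) on a 44100 Hz stream seeks to frame 30*44100 = 1323000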
1799
1800 // Update (re-fill) music buffers if data already processed
1801 void UpdateMusicStream(Music music)
1802 {
1803 if (music.stream.buffer == NULL) return;
1804
1805 unsigned int subBufferSizeInFrames = music.stream.buffer->sizeInFrames/2;
1806
1807 // On first call of this function we lazily pre-allocate a temp buffer to read audio file/memory data into
1808 int frameSize = music.stream.channels*music.stream.sampleSize/8;
1809 unsigned int pcmSize = subBufferSizeInFrames*frameSize;
1810
1811 if (AUDIO.System.pcmBufferSize < pcmSize)
1812 {
1813 RL_FREE(AUDIO.System.pcmBuffer);
1814 AUDIO.System.pcmBuffer = RL_CALLOC(1, pcmSize);
1815 AUDIO.System.pcmBufferSize = pcmSize;
1816 }
1817
1818 // Check both sub-buffers to see if they require refilling
1819 for (int i = 0; i < 2; i++)
1820 {
1821 if ((music.stream.buffer != NULL) && !music.stream.buffer->isSubBufferProcessed[i]) continue; // No refilling required, move to next sub-buffer
1822
1823 unsigned int framesLeft = music.frameCount - music.stream.buffer->framesProcessed; // Frames left to be processed
1824 unsigned int framesToStream = 0; // Total frames to be streamed
1825
1826 if ((framesLeft >= subBufferSizeInFrames) || music.looping) framesToStream = subBufferSizeInFrames;
1827 else framesToStream = framesLeft;
1828
1829 int frameCountStillNeeded = framesToStream;
1830 int frameCountReadTotal = 0;
1831
1832 switch (music.ctxType)
1833 {
1834 #if defined(SUPPORT_FILEFORMAT_WAV)
1835 case MUSIC_AUDIO_WAV:
1836 {
1837 if (music.stream.sampleSize == 16)
1838 {
1839 while (true)
1840 {
1841 int frameCountRead = (int)drwav_read_pcm_frames_s16((drwav *)music.ctxData, frameCountStillNeeded, (short *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize));
1842 frameCountReadTotal += frameCountRead;
1843 frameCountStillNeeded -= frameCountRead;
1844 if (frameCountStillNeeded == 0) break;
1845 else drwav_seek_to_first_pcm_frame((drwav *)music.ctxData);
1846 }
1847 }
1848 else if (music.stream.sampleSize == 32)
1849 {
1850 while (true)
1851 {
1852 int frameCountRead = (int)drwav_read_pcm_frames_f32((drwav *)music.ctxData, frameCountStillNeeded, (float *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize));
1853 frameCountReadTotal += frameCountRead;
1854 frameCountStillNeeded -= frameCountRead;
1855 if (frameCountStillNeeded == 0) break;
1856 else drwav_seek_to_first_pcm_frame((drwav *)music.ctxData);
1857 }
1858 }
1859 } break;
1860 #endif
1861 #if defined(SUPPORT_FILEFORMAT_OGG)
1862 case MUSIC_AUDIO_OGG:
1863 {
1864 while (true)
1865 {
1866 int frameCountRead = stb_vorbis_get_samples_short_interleaved((stb_vorbis *)music.ctxData, music.stream.channels, (short *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize), frameCountStillNeeded*music.stream.channels);
1867 frameCountReadTotal += frameCountRead;
1868 frameCountStillNeeded -= frameCountRead;
1869 if (frameCountStillNeeded == 0) break;
1870 else stb_vorbis_seek_start((stb_vorbis *)music.ctxData);
1871 }
1872 } break;
1873 #endif
1874 #if defined(SUPPORT_FILEFORMAT_MP3)
1875 case MUSIC_AUDIO_MP3:
1876 {
1877 while (true)
1878 {
1879 int frameCountRead = (int)drmp3_read_pcm_frames_f32((drmp3 *)music.ctxData, frameCountStillNeeded, (float *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize));
1880 frameCountReadTotal += frameCountRead;
1881 frameCountStillNeeded -= frameCountRead;
1882 if (frameCountStillNeeded == 0) break;
1883 else drmp3_seek_to_start_of_stream((drmp3 *)music.ctxData);
1884 }
1885 } break;
1886 #endif
1887 #if defined(SUPPORT_FILEFORMAT_QOA)
1888 case MUSIC_AUDIO_QOA:
1889 {
1890 unsigned int frameCountRead = qoaplay_decode((qoaplay_desc *)music.ctxData, (float *)AUDIO.System.pcmBuffer, framesToStream);
1891 frameCountReadTotal += frameCountRead;
1892 /*
1893 while (true)
1894 {
1895 int frameCountRead = (int)qoaplay_decode((qoaplay_desc *)music.ctxData, (float *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize), frameCountStillNeeded);
1896 frameCountReadTotal += frameCountRead;
1897 frameCountStillNeeded -= frameCountRead;
1898 if (frameCountStillNeeded == 0) break;
1899 else qoaplay_rewind((qoaplay_desc *)music.ctxData);
1900 }
1901 */
1902 } break;
1903 #endif
1904 #if defined(SUPPORT_FILEFORMAT_FLAC)
1905 case MUSIC_AUDIO_FLAC:
1906 {
1907 while (true)
1908 {
1909 int frameCountRead = drflac_read_pcm_frames_s16((drflac *)music.ctxData, frameCountStillNeeded, (short *)((char *)AUDIO.System.pcmBuffer + frameCountReadTotal*frameSize));
1910 frameCountReadTotal += frameCountRead;
1911 frameCountStillNeeded -= frameCountRead;
1912 if (frameCountStillNeeded == 0) break;
1913 else drflac__seek_to_first_frame((drflac *)music.ctxData);
1914 }
1915 } break;
1916 #endif
1917 #if defined(SUPPORT_FILEFORMAT_XM)
1918 case MUSIC_MODULE_XM:
1919 {
1920 // NOTE: The XM module always generates 2 channels (stereo) internally
1921 if (AUDIO_DEVICE_FORMAT == ma_format_f32) jar_xm_generate_samples((jar_xm_context_t *)music.ctxData, (float *)AUDIO.System.pcmBuffer, framesToStream);
1922 else if (AUDIO_DEVICE_FORMAT == ma_format_s16) jar_xm_generate_samples_16bit((jar_xm_context_t *)music.ctxData, (short *)AUDIO.System.pcmBuffer, framesToStream);
1923 else if (AUDIO_DEVICE_FORMAT == ma_format_u8) jar_xm_generate_samples_8bit((jar_xm_context_t *)music.ctxData, (char *)AUDIO.System.pcmBuffer, framesToStream);
1924 //jar_xm_reset((jar_xm_context_t *)music.ctxData);
1925
1926 } break;
1927 #endif
1928 #if defined(SUPPORT_FILEFORMAT_MOD)
1929 case MUSIC_MODULE_MOD:
1930 {
1931 // NOTE: 3rd parameter (nbsample) specifies the number of stereo 16 bit samples desired
1932 jar_mod_fillbuffer((jar_mod_context_t *)music.ctxData, (short *)AUDIO.System.pcmBuffer, framesToStream, 0);
1933 //jar_mod_seek_start((jar_mod_context_t *)music.ctxData);
1934
1935 } break;
1936 #endif
1937 default: break;
1938 }
1939
1940 UpdateAudioStream(music.stream, AUDIO.System.pcmBuffer, framesToStream);
1941
1942 music.stream.buffer->framesProcessed = music.stream.buffer->framesProcessed%music.frameCount;
1943
1944 if (framesLeft <= subBufferSizeInFrames)
1945 {
1946 if (!music.looping)
1947 {
1948 // Streaming is ending, we have filled the last frames from the input
1949 StopMusicStream(music);
1950 return;
1951 }
1952 }
1953 }
1954
1955 // NOTE: In case the window is minimized, the music stream is stopped;
1956 // just make sure to play it again on window restore
1957 if (IsMusicStreamPlaying(music)) PlayMusicStream(music);
1958 }
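// Usage sketch (illustrative only): typical per-frame update of a playing music stream,
// assuming a standard raylib game loop (WindowShouldClose() is provided by raylib core):
//
//      PlayMusicStream(music);
//      while (!WindowShouldClose())
//      {
//          UpdateMusicStream(music);   // Refill processed sub-buffers once per frame
//          // ... rest of the frame ...
//      }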
1959
1960 // Check if any music is playing
1961 bool IsMusicStreamPlaying(Music music)
1962 {
1963 return IsAudioStreamPlaying(music.stream);
1964 }
1965
1966 // Set volume for music
1967 void SetMusicVolume(Music music, float volume)
1968 {
1969 SetAudioStreamVolume(music.stream, volume);
1970 }
1971
1972 // Set pitch for music
1973 void SetMusicPitch(Music music, float pitch)
1974 {
1975 SetAudioBufferPitch(music.stream.buffer, pitch);
1976 }
1977
1978 // Set pan for a music
1979 void SetMusicPan(Music music, float pan)
1980 {
1981 SetAudioBufferPan(music.stream.buffer, pan);
1982 }
1983
1984 // Get music time length (in seconds)
1985 float GetMusicTimeLength(Music music)
1986 {
1987 float totalSeconds = 0.0f;
1988
1989 totalSeconds = (float)music.frameCount/music.stream.sampleRate;
1990
1991 return totalSeconds;
1992 }
1993
1994 // Get current music time played (in seconds)
1995 float GetMusicTimePlayed(Music music)
1996 {
1997 float secondsPlayed = 0.0f;
1998 if (music.stream.buffer != NULL)
1999 {
2000 #if defined(SUPPORT_FILEFORMAT_XM)
2001 if (music.ctxType == MUSIC_MODULE_XM)
2002 {
2003 uint64_t framesPlayed = 0;
2004
2005 jar_xm_get_position(music.ctxData, NULL, NULL, NULL, &framesPlayed);
2006 secondsPlayed = (float)framesPlayed/music.stream.sampleRate;
2007 }
2008 else
2009 #endif
2010 {
2011 //ma_uint32 frameSizeInBytes = ma_get_bytes_per_sample(music.stream.buffer->dsp.formatConverterIn.config.formatIn)*music.stream.buffer->dsp.formatConverterIn.config.channels;
2012 int framesProcessed = (int)music.stream.buffer->framesProcessed;
2013 int subBufferSize = (int)music.stream.buffer->sizeInFrames/2;
2014 int framesInFirstBuffer = music.stream.buffer->isSubBufferProcessed[0]? 0 : subBufferSize;
2015 int framesInSecondBuffer = music.stream.buffer->isSubBufferProcessed[1]? 0 : subBufferSize;
2016 int framesSentToMix = music.stream.buffer->frameCursorPos%subBufferSize;
2017 int framesPlayed = (framesProcessed - framesInFirstBuffer - framesInSecondBuffer + framesSentToMix)%(int)music.frameCount;
2018 if (framesPlayed < 0) framesPlayed += music.frameCount;
2019 secondsPlayed = (float)framesPlayed/music.stream.sampleRate;
2020 }
2021 }
2022
2023 return secondsPlayed;
2024 }
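// Example (illustrative): normalized playback progress, e.g. for a progress bar (assumes a valid, non-empty music)
//
//      float progress = GetMusicTimePlayed(music)/GetMusicTimeLength(music);   // 0.0f..1.0f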
2025
2026 // Load audio stream (to stream audio pcm data)
2027 AudioStream LoadAudioStream(unsigned int sampleRate, unsigned int sampleSize, unsigned int channels)
2028 {
2029 AudioStream stream = { 0 };
2030
2031 stream.sampleRate = sampleRate;
2032 stream.sampleSize = sampleSize;
2033 stream.channels = channels;
2034
2035 ma_format formatIn = ((stream.sampleSize == 8)? ma_format_u8 : ((stream.sampleSize == 16)? ma_format_s16 : ma_format_f32));
2036
2037 // The size of a streaming buffer must be at least double the size of a period
2038 unsigned int periodSize = AUDIO.System.device.playback.internalPeriodSizeInFrames;
2039
2040 // If the buffer is not set, compute one that would give us a buffer good enough for a decent frame rate
2041 unsigned int subBufferSize = (AUDIO.Buffer.defaultSize == 0)? AUDIO.System.device.sampleRate/30 : AUDIO.Buffer.defaultSize;
2042
2043 if (subBufferSize < periodSize) subBufferSize = periodSize;
2044
2045 // Create a double audio buffer of defined size
2046 stream.buffer = LoadAudioBuffer(formatIn, stream.channels, stream.sampleRate, subBufferSize*2, AUDIO_BUFFER_USAGE_STREAM);
2047
2048 if (stream.buffer != NULL)
2049 {
2050 stream.buffer->looping = true; // Always loop for streaming buffers
2051 TRACELOG(LOG_INFO, "STREAM: Initialized successfully (%i Hz, %i bit, %s)", stream.sampleRate, stream.sampleSize, (stream.channels == 1)? "Mono" : "Stereo");
2052 }
2053 else TRACELOG(LOG_WARNING, "STREAM: Failed to load audio buffer, stream could not be created");
2054
2055 return stream;
2056 }
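// Usage sketch (illustrative only): a raw 44100 Hz, 16 bit, mono stream fed from user code.
// 'samples' and 'frameCount' are hypothetical user-side names.
//
//      AudioStream stream = LoadAudioStream(44100, 16, 1);
//      PlayAudioStream(stream);
//      // Each frame:
//      if (IsAudioStreamProcessed(stream)) UpdateAudioStream(stream, samples, frameCount);   // 'samples': short buffer holding 'frameCount' mono frames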
2057
2058 // Checks if an audio stream is ready
2059 bool IsAudioStreamReady(AudioStream stream)
2060 {
2061 return ((stream.buffer != NULL) && // Validate stream buffer
2062 (stream.sampleRate > 0) && // Validate sample rate is supported
2063 (stream.sampleSize > 0) && // Validate sample size is supported
2064 (stream.channels > 0)); // Validate number of channels supported
2065 }
2066
2067 // Unload audio stream and free memory
2068 void UnloadAudioStream(AudioStream stream)
2069 {
2070 UnloadAudioBuffer(stream.buffer);
2071
2072 TRACELOG(LOG_INFO, "STREAM: Unloaded audio stream data from RAM");
2073 }
2074
2075 // Update audio stream buffers with data
2076 // NOTE 1: Only updates one buffer of the stream source: dequeue -> update -> queue
2077 // NOTE 2: To dequeue a buffer it needs to be processed: IsAudioStreamProcessed()
2078 void UpdateAudioStream(AudioStream stream, const void *data, int frameCount)
2079 {
2080 if (stream.buffer != NULL)
2081 {
2082 if (stream.buffer->isSubBufferProcessed[0] || stream.buffer->isSubBufferProcessed[1])
2083 {
2084 ma_uint32 subBufferToUpdate = 0;
2085
2086 if (stream.buffer->isSubBufferProcessed[0] && stream.buffer->isSubBufferProcessed[1])
2087 {
2088 // Both buffers are available for updating.
2089 // Update the first one and make sure the cursor is moved back to the front.
2090 subBufferToUpdate = 0;
2091 stream.buffer->frameCursorPos = 0;
2092 }
2093 else
2094 {
2095 // Just update whichever sub-buffer is processed.
2096 subBufferToUpdate = (stream.buffer->isSubBufferProcessed[0])? 0 : 1;
2097 }
2098
2099 ma_uint32 subBufferSizeInFrames = stream.buffer->sizeInFrames/2;
2100 unsigned char *subBuffer = stream.buffer->data + ((subBufferSizeInFrames*stream.channels*(stream.sampleSize/8))*subBufferToUpdate);
2101
2102 // Total frames processed in buffer is always the complete size, filled with 0 if required
2103 stream.buffer->framesProcessed += subBufferSizeInFrames;
2104
2105 // NOTE: Only one sub-buffer is updated per call, so the provided frame count
2106 // must not exceed the sub-buffer size (larger writes are rejected below)
2107 if (subBufferSizeInFrames >= (ma_uint32)frameCount)
2108 {
2109 ma_uint32 framesToWrite = (ma_uint32)frameCount;
2110
2111 ma_uint32 bytesToWrite = framesToWrite*stream.channels*(stream.sampleSize/8);
2112 memcpy(subBuffer, data, bytesToWrite);
2113
2114 // Any leftover frames should be filled with zeros.
2115 ma_uint32 leftoverFrameCount = subBufferSizeInFrames - framesToWrite;
2116
2117 if (leftoverFrameCount > 0) memset(subBuffer + bytesToWrite, 0, leftoverFrameCount*stream.channels*(stream.sampleSize/8));
2118
2119 stream.buffer->isSubBufferProcessed[subBufferToUpdate] = false;
2120 }
2121 else TRACELOG(LOG_WARNING, "STREAM: Attempting to write too many frames to buffer");
2122 }
2123 else TRACELOG(LOG_WARNING, "STREAM: Buffer not available for updating");
2124 }
2125 }
2126
2127 // Check if any audio stream buffer requires refill
2128 bool IsAudioStreamProcessed(AudioStream stream)
2129 {
2130 if (stream.buffer == NULL) return false;
2131
2132 return (stream.buffer->isSubBufferProcessed[0] || stream.buffer->isSubBufferProcessed[1]);
2133 }
2134
2135 // Play audio stream
2136 void PlayAudioStream(AudioStream stream)
2137 {
2138 PlayAudioBuffer(stream.buffer);
2139 }
2140
2141 // Pause audio stream
2142 void PauseAudioStream(AudioStream stream)
2143 {
2144 PauseAudioBuffer(stream.buffer);
2145 }
2146
2147 // Resume audio stream playing
2148 void ResumeAudioStream(AudioStream stream)
2149 {
2150 ResumeAudioBuffer(stream.buffer);
2151 }
2152
2153 // Check if audio stream is playing.
2154 bool IsAudioStreamPlaying(AudioStream stream)
2155 {
2156 return IsAudioBufferPlaying(stream.buffer);
2157 }
2158
2159 // Stop audio stream
2160 void StopAudioStream(AudioStream stream)
2161 {
2162 StopAudioBuffer(stream.buffer);
2163 }
2164
2165 // Set volume for audio stream (1.0 is max level)
2166 void SetAudioStreamVolume(AudioStream stream, float volume)
2167 {
2168 SetAudioBufferVolume(stream.buffer, volume);
2169 }
2170
2171 // Set pitch for audio stream (1.0 is base level)
2172 void SetAudioStreamPitch(AudioStream stream, float pitch)
2173 {
2174 SetAudioBufferPitch(stream.buffer, pitch);
2175 }
2176
2177 // Set pan for audio stream
2178 void SetAudioStreamPan(AudioStream stream, float pan)
2179 {
2180 SetAudioBufferPan(stream.buffer, pan);
2181 }
2182
2183 // Default size for new audio streams
2184 void SetAudioStreamBufferSizeDefault(int size)
2185 {
2186 AUDIO.Buffer.defaultSize = size;
2187 }
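// Usage sketch (illustrative): call before creating streams to override the computed sub-buffer size
//
//      SetAudioStreamBufferSizeDefault(4096);   // 4096 frames per sub-buffer (arbitrary example value)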
2188
2189 // Audio thread callback to request new data
2190 void SetAudioStreamCallback(AudioStream stream, AudioCallback callback)
2191 {
2192 if (stream.buffer != NULL) stream.buffer->callback = callback;
2193 }
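// Usage sketch (illustrative only): filling a 16 bit mono stream from a callback with a 440 Hz sine wave.
// 'sineIdx' is a hypothetical user-side variable; sinf() requires <math.h> and PI is defined by raylib.
//
//      static float sineIdx = 0.0f;
//      void AudioInputCallback(void *buffer, unsigned int frames)
//      {
//          short *d = (short *)buffer;
//          for (unsigned int i = 0; i < frames; i++)
//          {
//              sineIdx += 440.0f/44100.0f;                     // Stream created with LoadAudioStream(44100, 16, 1)
//              if (sineIdx > 1.0f) sineIdx -= 1.0f;
//              d[i] = (short)(32000.0f*sinf(2.0f*PI*sineIdx));
//          }
//      }
//      ...
//      SetAudioStreamCallback(stream, AudioInputCallback);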
2194
2195 // Add processor to audio stream. Unlike buffers, the order of processors is important.
2196 // The new processor must be added at the end. As there aren't supposed to be many processors attached to
2197 // a given stream, we iterate through the list to find the end. That way we don't need a pointer to the last element.
2198 void AttachAudioStreamProcessor(AudioStream stream, AudioCallback process)
2199 {
2200 ma_mutex_lock(&AUDIO.System.lock);
2201
2202 rAudioProcessor *processor = (rAudioProcessor *)RL_CALLOC(1, sizeof(rAudioProcessor));
2203 processor->process = process;
2204
2205 rAudioProcessor *last = stream.buffer->processor;
2206
2207 while (last && last->next)
2208 {
2209 last = last->next;
2210 }
2211 if (last)
2212 {
2213 processor->prev = last;
2214 last->next = processor;
2215 }
2216 else stream.buffer->processor = processor;
2217
2218 ma_mutex_unlock(&AUDIO.System.lock);
2219 }
2220
2221 // Remove processor from audio stream
2222 void DetachAudioStreamProcessor(AudioStream stream, AudioCallback process)
2223 {
2224 ma_mutex_lock(&AUDIO.System.lock);
2225
2226 rAudioProcessor *processor = stream.buffer->processor;
2227
2228 while (processor)
2229 {
2230 rAudioProcessor *next = processor->next;
2231 rAudioProcessor *prev = processor->prev;
2232
2233 if (processor->process == process)
2234 {
2235 if (stream.buffer->processor == processor) stream.buffer->processor = next;
2236 if (prev) prev->next = next;
2237 if (next) next->prev = prev;
2238
2239 RL_FREE(processor);
2240 }
2241
2242 processor = next;
2243 }
2244
2245 ma_mutex_unlock(&AUDIO.System.lock);
2246 }
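// Usage sketch (illustrative only): a simple gain processor attached to a music stream.
// Processors receive 32 bit float samples in the device channel layout (stereo by default).
//
//      void GainProcessor(void *buffer, unsigned int frames)   // Hypothetical user function
//      {
//          float *samples = (float *)buffer;
//          for (unsigned int i = 0; i < frames*2; i++) samples[i] *= 0.5f;   // 2 channels assumed
//      }
//      AttachAudioStreamProcessor(music.stream, GainProcessor);
//      // ... later ...
//      DetachAudioStreamProcessor(music.stream, GainProcessor);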
2247
2248 // Add processor to audio pipeline. Order of processors is important
2249 // Works the same way as {Attach,Detach}AudioStreamProcessor() functions, except
2250 // these two work on the already mixed output just before sending it to the sound hardware
2251 void AttachAudioMixedProcessor(AudioCallback process)
2252 {
2253 ma_mutex_lock(&AUDIO.System.lock);
2254
2255 rAudioProcessor *processor = (rAudioProcessor *)RL_CALLOC(1, sizeof(rAudioProcessor));
2256 processor->process = process;
2257
2258 rAudioProcessor *last = AUDIO.mixedProcessor;
2259
2260 while (last && last->next)
2261 {
2262 last = last->next;
2263 }
2264 if (last)
2265 {
2266 processor->prev = last;
2267 last->next = processor;
2268 }
2269 else AUDIO.mixedProcessor = processor;
2270
2271 ma_mutex_unlock(&AUDIO.System.lock);
2272 }
2273
2274 // Remove processor from audio pipeline
2275 void DetachAudioMixedProcessor(AudioCallback process)
2276 {
2277 ma_mutex_lock(&AUDIO.System.lock);
2278
2279 rAudioProcessor *processor = AUDIO.mixedProcessor;
2280
2281 while (processor)
2282 {
2283 rAudioProcessor *next = processor->next;
2284 rAudioProcessor *prev = processor->prev;
2285
2286 if (processor->process == process)
2287 {
2288 if (AUDIO.mixedProcessor == processor) AUDIO.mixedProcessor = next;
2289 if (prev) prev->next = next;
2290 if (next) next->prev = prev;
2291
2292 RL_FREE(processor);
2293 }
2294
2295 processor = next;
2296 }
2297
2298 ma_mutex_unlock(&AUDIO.System.lock);
2299 }
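// Usage sketch (illustrative): the same callback signature can be attached to the final mix,
// e.g. AttachAudioMixedProcessor(GainProcessor) using the hypothetical processor sketched above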
2300
2301
2302 //----------------------------------------------------------------------------------
2303 // Module specific Functions Definition
2304 //----------------------------------------------------------------------------------
2305
2306 // Log callback function
2307 static void OnLog(void *pUserData, ma_uint32 level, const char *pMessage)
2308 {
2309 TRACELOG(LOG_WARNING, "miniaudio: %s", pMessage); // All log messages from miniaudio are errors
2310 }
2311
2312 // Reads audio data from an AudioBuffer object in internal format.
2313 static ma_uint32 ReadAudioBufferFramesInInternalFormat(AudioBuffer *audioBuffer, void *framesOut, ma_uint32 frameCount)
2314 {
2315 // Using audio buffer callback
2316 if (audioBuffer->callback)
2317 {
2318 audioBuffer->callback(framesOut, frameCount);
2319 audioBuffer->framesProcessed += frameCount;
2320
2321 return frameCount;
2322 }
2323
2324 ma_uint32 subBufferSizeInFrames = (audioBuffer->sizeInFrames > 1)? audioBuffer->sizeInFrames/2 : audioBuffer->sizeInFrames;
2325 ma_uint32 currentSubBufferIndex = audioBuffer->frameCursorPos/subBufferSizeInFrames;
2326
2327 if (currentSubBufferIndex > 1) return 0;
2328
2329 // Another thread can update the processed state of buffers, so
2330 // we just take a copy here to try and avoid potential synchronization problems
2331 bool isSubBufferProcessed[2] = { 0 };
2332 isSubBufferProcessed[0] = audioBuffer->isSubBufferProcessed[0];
2333 isSubBufferProcessed[1] = audioBuffer->isSubBufferProcessed[1];
2334
2335 ma_uint32 frameSizeInBytes = ma_get_bytes_per_frame(audioBuffer->converter.formatIn, audioBuffer->converter.channelsIn);
2336
2337 // Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0
2338 ma_uint32 framesRead = 0;
2339 while (1)
2340 {
2341 // We break from this loop differently depending on the buffer's usage
2342 // - For static buffers, we simply fill as much data as we can
2343 // - For streaming buffers we only fill the halves of the buffer that are processed
2344 // Unprocessed halves must keep their audio data intact
2345 if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
2346 {
2347 if (framesRead >= frameCount) break;
2348 }
2349 else
2350 {
2351 if (isSubBufferProcessed[currentSubBufferIndex]) break;
2352 }
2353
2354 ma_uint32 totalFramesRemaining = (frameCount - framesRead);
2355 if (totalFramesRemaining == 0) break;
2356
2357 ma_uint32 framesRemainingInOutputBuffer;
2358 if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
2359 {
2360 framesRemainingInOutputBuffer = audioBuffer->sizeInFrames - audioBuffer->frameCursorPos;
2361 }
2362 else
2363 {
2364 ma_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames*currentSubBufferIndex;
2365 framesRemainingInOutputBuffer = subBufferSizeInFrames - (audioBuffer->frameCursorPos - firstFrameIndexOfThisSubBuffer);
2366 }
2367
2368 ma_uint32 framesToRead = totalFramesRemaining;
2369 if (framesToRead > framesRemainingInOutputBuffer) framesToRead = framesRemainingInOutputBuffer;
2370
2371 memcpy((unsigned char *)framesOut + (framesRead*frameSizeInBytes), audioBuffer->data + (audioBuffer->frameCursorPos*frameSizeInBytes), framesToRead*frameSizeInBytes);
2372 audioBuffer->frameCursorPos = (audioBuffer->frameCursorPos + framesToRead)%audioBuffer->sizeInFrames;
2373 framesRead += framesToRead;
2374
2375 // If we've read to the end of the buffer, mark it as processed
2376 if (framesToRead == framesRemainingInOutputBuffer)
2377 {
2378 audioBuffer->isSubBufferProcessed[currentSubBufferIndex] = true;
2379 isSubBufferProcessed[currentSubBufferIndex] = true;
2380
2381 currentSubBufferIndex = (currentSubBufferIndex + 1)%2;
2382
2383 // We need to break from this loop if we're not looping
2384 if (!audioBuffer->looping)
2385 {
2386 StopAudioBuffer(audioBuffer);
2387 break;
2388 }
2389 }
2390 }
2391
2392 // Zero-fill excess
2393 ma_uint32 totalFramesRemaining = (frameCount - framesRead);
2394 if (totalFramesRemaining > 0)
2395 {
2396 memset((unsigned char *)framesOut + (framesRead*frameSizeInBytes), 0, totalFramesRemaining*frameSizeInBytes);
2397
2398 // For static buffers we can fill the remaining frames with silence for safety, but we don't want
2399 // to report those frames as "read". The reason for this is that the caller uses the return value
2400 // to know whether a non-looping sound has finished playback.
2401 if (audioBuffer->usage != AUDIO_BUFFER_USAGE_STATIC) framesRead += totalFramesRemaining;
2402 }
2403
2404 return framesRead;
2405 }
2406
2407 // Reads audio data from an AudioBuffer object in device format. Returned data will be in a format appropriate for mixing.
2408 static ma_uint32 ReadAudioBufferFramesInMixingFormat(AudioBuffer *audioBuffer, float *framesOut, ma_uint32 frameCount)
2409 {
2410 // What's going on here is that we're continuously converting data from the AudioBuffer's internal format to the mixing format, which
2411 // should be defined by the output format of the data converter. We do this until frameCount frames have been output. The important
2412 // detail to remember here is that we never, ever attempt to read more input data than is required for the specified number of output
2413 // frames. This can be achieved with ma_data_converter_get_required_input_frame_count().
2414 ma_uint8 inputBuffer[4096] = { 0 };
2415 ma_uint32 inputBufferFrameCap = sizeof(inputBuffer)/ma_get_bytes_per_frame(audioBuffer->converter.formatIn, audioBuffer->converter.channelsIn);
2416
2417 ma_uint32 totalOutputFramesProcessed = 0;
2418 while (totalOutputFramesProcessed < frameCount)
2419 {
2420 ma_uint64 outputFramesToProcessThisIteration = frameCount - totalOutputFramesProcessed;
2421 ma_uint64 inputFramesToProcessThisIteration = 0;
2422
2423 (void)ma_data_converter_get_required_input_frame_count(&audioBuffer->converter, outputFramesToProcessThisIteration, &inputFramesToProcessThisIteration);
2424 if (inputFramesToProcessThisIteration > inputBufferFrameCap)
2425 {
2426 inputFramesToProcessThisIteration = inputBufferFrameCap;
2427 }
2428
2429 float *runningFramesOut = framesOut + (totalOutputFramesProcessed*audioBuffer->converter.channelsOut);
2430
2431 /* At this point we can convert the data to our mixing format. */
2432 ma_uint64 inputFramesProcessedThisIteration = ReadAudioBufferFramesInInternalFormat(audioBuffer, inputBuffer, (ma_uint32)inputFramesToProcessThisIteration); /* Safe cast. */
2433 ma_uint64 outputFramesProcessedThisIteration = outputFramesToProcessThisIteration;
2434 ma_data_converter_process_pcm_frames(&audioBuffer->converter, inputBuffer, &inputFramesProcessedThisIteration, runningFramesOut, &outputFramesProcessedThisIteration);
2435
2436 totalOutputFramesProcessed += (ma_uint32)outputFramesProcessedThisIteration; /* Safe cast. */
2437
2438 if (inputFramesProcessedThisIteration < inputFramesToProcessThisIteration)
2439 {
2440 break; /* Ran out of input data. */
2441 }
2442
2443 /* This should never be hit, but will add it here for safety. Ensures we get out of the loop when no input nor output frames are processed. */
2444 if (inputFramesProcessedThisIteration == 0 && outputFramesProcessedThisIteration == 0)
2445 {
2446 break;
2447 }
2448 }
2449
2450 return totalOutputFramesProcessed;
2451 }
2452
2453 // Sending audio data to device callback function
2454 // This function will be called when miniaudio needs more data
2455 // NOTE: All the mixing takes place here
2456 static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const void *pFramesInput, ma_uint32 frameCount)
2457 {
2458 (void)pDevice;
2459
2460 // Mixing is basically just an accumulation, we need to initialize the output buffer to 0
2461 memset(pFramesOut, 0, frameCount*pDevice->playback.channels*ma_get_bytes_per_sample(pDevice->playback.format));
2462
2463 // Using a mutex here for thread-safety, which makes the callback not real-time safe
2464 // This is unlikely to be a problem for this project, but you may want to consider how to avoid the lock
2465 ma_mutex_lock(&AUDIO.System.lock);
2466 {
2467 for (AudioBuffer *audioBuffer = AUDIO.Buffer.first; audioBuffer != NULL; audioBuffer = audioBuffer->next)
2468 {
2469 // Ignore stopped or paused sounds
2470 if (!audioBuffer->playing || audioBuffer->paused) continue;
2471
2472 ma_uint32 framesRead = 0;
2473
2474 while (1)
2475 {
2476 if (framesRead >= frameCount) break;
2477
2478 // Just read as much data as we can from the stream
2479 ma_uint32 framesToRead = (frameCount - framesRead);
2480
2481 while (framesToRead > 0)
2482 {
2483 float tempBuffer[1024] = { 0 }; // Frames for stereo
2484
2485 ma_uint32 framesToReadRightNow = framesToRead;
2486 if (framesToReadRightNow > sizeof(tempBuffer)/sizeof(tempBuffer[0])/AUDIO_DEVICE_CHANNELS)
2487 {
2488 framesToReadRightNow = sizeof(tempBuffer)/sizeof(tempBuffer[0])/AUDIO_DEVICE_CHANNELS;
2489 }
2490
2491 ma_uint32 framesJustRead = ReadAudioBufferFramesInMixingFormat(audioBuffer, tempBuffer, framesToReadRightNow);
2492 if (framesJustRead > 0)
2493 {
2494 float *framesOut = (float *)pFramesOut + (framesRead*AUDIO.System.device.playback.channels);
2495 float *framesIn = tempBuffer;
2496
2497 // Apply processors chain if defined
2498 rAudioProcessor *processor = audioBuffer->processor;
2499 while (processor)
2500 {
2501 processor->process(framesIn, framesJustRead);
2502 processor = processor->next;
2503 }
2504
2505 MixAudioFrames(framesOut, framesIn, framesJustRead, audioBuffer);
2506
2507 framesToRead -= framesJustRead;
2508 framesRead += framesJustRead;
2509 }
2510
2511 if (!audioBuffer->playing)
2512 {
2513 framesRead = frameCount;
2514 break;
2515 }
2516
2517 // If we weren't able to read all the frames we requested, break
2518 if (framesJustRead < framesToReadRightNow)
2519 {
2520 if (!audioBuffer->looping)
2521 {
2522 StopAudioBuffer(audioBuffer);
2523 break;
2524 }
2525 else
2526 {
2527 // Should never get here, but just for safety,
2528 // move the cursor position back to the start and continue the loop
2529 audioBuffer->frameCursorPos = 0;
2530 continue;
2531 }
2532 }
2533 }
2534
2535 // If for some reason we weren't able to read every frame we'll need to break from the loop
2536 // Not doing this could theoretically put us into an infinite loop
2537 if (framesToRead > 0) break;
2538 }
2539 }
2540 }
2541
2542 rAudioProcessor *processor = AUDIO.mixedProcessor;
2543 while (processor)
2544 {
2545 processor->process(pFramesOut, frameCount);
2546 processor = processor->next;
2547 }
2548
2549 ma_mutex_unlock(&AUDIO.System.lock);
2550 }
2551
2552 // Main mixing function, pretty simple in this project, just an accumulation
2553 // NOTE: framesOut is both an input and an output, it is initially filled with zeros outside of this function
2554 static void MixAudioFrames(float *framesOut, const float *framesIn, ma_uint32 frameCount, AudioBuffer *buffer)
2555 {
2556 const float localVolume = buffer->volume;
2557 const ma_uint32 channels = AUDIO.System.device.playback.channels;
2558
2559 if (channels == 2) // We consider panning
2560 {
2561 const float left = buffer->pan;
2562 const float right = 1.0f - left;
2563
2564 // Fast sine approximation in [0..1] for pan law: y = 0.5f*x*(3 - x*x);
2565 const float levels[2] = { localVolume*0.5f*left*(3.0f - left*left), localVolume*0.5f*right*(3.0f - right*right) };
2566
2567 float *frameOut = framesOut;
2568 const float *frameIn = framesIn;
2569
2570 for (ma_uint32 frame = 0; frame < frameCount; frame++)
2571 {
2572 frameOut[0] += (frameIn[0]*levels[0]);
2573 frameOut[1] += (frameIn[1]*levels[1]);
2574
2575 frameOut += 2;
2576 frameIn += 2;
2577 }
2578 }
2579 else // We do not consider panning
2580 {
2581 for (ma_uint32 frame = 0; frame < frameCount; frame++)
2582 {
2583 for (ma_uint32 c = 0; c < channels; c++)
2584 {
2585 float *frameOut = framesOut + (frame*channels);
2586 const float *frameIn = framesIn + (frame*channels);
2587
2588 // Output accumulates input multiplied by volume to provided output (usually 0)
2589 frameOut[c] += (frameIn[c]*localVolume);
2590 }
2591 }
2592 }
2593 }
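// Worked example of the pan law used above (illustrative): with pan = 0.5f (centered) each side gets
// 0.5f*0.5f*(3.0f - 0.25f) = 0.6875f of the volume (close to the constant-power target sin(45 deg) ~= 0.707f);
// with pan = 1.0f one side gets 0.5f*1.0f*(3.0f - 1.0f) = 1.0f and the other gets 0.0f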
2594
2595 // Some required functions for audio standalone module version
2596 #if defined(RAUDIO_STANDALONE)
2597 // Check file extension
2598 static bool IsFileExtension(const char *fileName, const char *ext)
2599 {
2600 bool result = false;
2601 const char *fileExt;
2602
2603 if ((fileExt = strrchr(fileName, '.')) != NULL)
2604 {
2605 if (strcmp(fileExt, ext) == 0) result = true;
2606 }
2607
2608 return result;
2609 }
2610
2611 // Get pointer to extension for a filename string (includes the dot: .png)
2612 static const char *GetFileExtension(const char *fileName)
2613 {
2614 const char *dot = strrchr(fileName, '.');
2615
2616 if (!dot || dot == fileName) return NULL;
2617
2618 return dot;
2619 }
2620
2621 // Load data from file into a buffer
2622 static unsigned char *LoadFileData(const char *fileName, unsigned int *bytesRead)
2623 {
2624 unsigned char *data = NULL;
2625 *bytesRead = 0;
2626
2627 if (fileName != NULL)
2628 {
2629 FILE *file = fopen(fileName, "rb");
2630
2631 if (file != NULL)
2632 {
2633 // WARNING: On binary streams SEEK_END may not be supported,
2634 // so using fseek() and ftell() may not work in some (rare) cases
2635 fseek(file, 0, SEEK_END);
2636 int size = ftell(file);
2637 fseek(file, 0, SEEK_SET);
2638
2639 if (size > 0)
2640 {
2641 data = (unsigned char *)RL_MALLOC(size*sizeof(unsigned char));
2642
2643 // NOTE: fread() returns number of read elements instead of bytes, so we read [1 byte, size elements]
2644 unsigned int count = (unsigned int)fread(data, sizeof(unsigned char), size, file);
2645 *bytesRead = count;
2646
2647 if (count != size) TRACELOG(LOG_WARNING, "FILEIO: [%s] File partially loaded", fileName);
2648 else TRACELOG(LOG_INFO, "FILEIO: [%s] File loaded successfully", fileName);
2649 }
2650 else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to read file", fileName);
2651
2652 fclose(file);
2653 }
2654 else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to open file", fileName);
2655 }
2656 else TRACELOG(LOG_WARNING, "FILEIO: File name provided is not valid");
2657
2658 return data;
2659 }
2660
2661 // Save data to file from buffer
2662 static bool SaveFileData(const char *fileName, void *data, unsigned int bytesToWrite)
2663 {
2664 bool success = false;
2665 if (fileName != NULL)
2666 {
2667 FILE *file = fopen(fileName, "wb");
2668 if (file != NULL)
2669 {
2670 unsigned int count = (unsigned int)fwrite(data, sizeof(unsigned char), bytesToWrite, file);
2671 if (count == 0) TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to write file", fileName);
2672 else if (count != bytesToWrite) TRACELOG(LOG_WARNING, "FILEIO: [%s] File partially written", fileName);
2673 else { TRACELOG(LOG_INFO, "FILEIO: [%s] File saved successfully", fileName); success = true; }
2674 fclose(file);
2675 }
2676 else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to open file", fileName);
2677 }
2678 else TRACELOG(LOG_WARNING, "FILEIO: File name provided is not valid");
2679
2680 return success;
2681 }
2682
2683 // Save text data to file (write), string must be '\0' terminated
2684 static bool SaveFileText(const char *fileName, char *text)
2685 {
2686 bool success = false;
2687 if (fileName != NULL)
2688 {
2689 FILE *file = fopen(fileName, "wt");
2690 if (file != NULL)
2691 {
2692 int count = fprintf(file, "%s", text);
2693 if (count == 0) TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to write text file", fileName);
2694 else { TRACELOG(LOG_INFO, "FILEIO: [%s] Text file saved successfully", fileName); success = true; }
2695 fclose(file);
2696 }
2697 else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to open text file", fileName);
2698 }
2699 else TRACELOG(LOG_WARNING, "FILEIO: File name provided is not valid");
2700
2701 return success;
2702 }
2703 #endif
2704
2705 #undef AudioBuffer
2706
2707 #endif // SUPPORT_MODULE_RAUDIO
2708