Merge remote-tracking branch 'refs/remotes/raysan5/develop' into develop

Author: victorfisac
Date:   2016-05-20 14:03:23 +02:00
Commit: 4f1bee3165
119 changed files with 41193 additions and 3061 deletions

.gitignore
View File

@@ -61,4 +61,7 @@ xcschememanagement.plist
 xcuserdata/
 DerivedData/
 *.dll
 src/libraylib.a
+
+# oculus example
+!examples/oculus_glfw_sample/LibOVRRT32_1.dll

View File

@@ -85,13 +85,13 @@ ifeq ($(PLATFORM),PLATFORM_DESKTOP)
     # add standard directories for GNU/Linux
     ifeq ($(PLATFORM_OS),LINUX)
         INCLUDES = -I. -I../src -I/usr/local/include/raylib/
+    else ifeq ($(PLATFORM_OS),OSX)
+        INCLUDES = -I. -I../src
     else
         INCLUDES = -I. -I../../src -IC:/raylib/raylib/src
         # external libraries headers
         # GLFW3
         INCLUDES += -I../../external/glfw3/include
-        # GLEW - Not required any more, replaced by GLAD
-        #INCLUDES += -I../external/glew/include
         # OpenAL Soft
         INCLUDES += -I../../external/openal_soft/include
     endif
@@ -105,6 +105,8 @@ ifeq ($(PLATFORM),PLATFORM_DESKTOP)
     # add standard directories for GNU/Linux
     ifeq ($(PLATFORM_OS),LINUX)
         LFLAGS = -L. -L../../src
+    else ifeq ($(PLATFORM_OS),OSX)
+        LFLAGS = -L. -L../src
     else
         LFLAGS = -L. -L../../src -LC:/raylib/raylib/src
         # external libraries to link with
@@ -113,30 +115,6 @@ ifeq ($(PLATFORM),PLATFORM_DESKTOP)
         ifneq ($(PLATFORM_OS),OSX)
             # OpenAL Soft
             LFLAGS += -L../../external/openal_soft/lib/$(LIBPATH)
-            # GLEW: Not used, replaced by GLAD
-            #LFLAGS += -L../../external/glew/lib/$(LIBPATH)
-        endif
-    endif
-endif
-
-# define library paths containing required libs
-ifeq ($(PLATFORM),PLATFORM_RPI)
-    LFLAGS = -L. -L../../src -L/opt/vc/lib
-endif
-ifeq ($(PLATFORM),PLATFORM_DESKTOP)
-    # add standard directories for GNU/Linux
-    ifeq ($(PLATFORM_OS),LINUX)
-        LFLAGS = -L. -L../../src
-    else
-        LFLAGS = -L. -L../../src -LC:/raylib/raylib/src
-        # external libraries to link with
-        # GLFW3
-        LFLAGS += -L../../external/glfw3/lib/$(LIBPATH)
-        ifneq ($(PLATFORM_OS),OSX)
-            # OpenAL Soft
-            LFLAGS += -L../../external/openal_soft/lib/$(LIBPATH)
-            # GLEW
-            LFLAGS += -L../../external/glew/lib/$(LIBPATH)
         endif
     endif
 endif
@@ -147,15 +125,15 @@ ifeq ($(PLATFORM),PLATFORM_DESKTOP)
     ifeq ($(PLATFORM_OS),LINUX)
         # libraries for Debian GNU/Linux desktop compiling
         # requires the following packages:
-        # libglfw3-dev libopenal-dev libglew-dev libegl1-mesa-dev
+        # libglfw3-dev libopenal-dev libegl1-mesa-dev
         LIBS = -lraylib -lglfw3 -lGL -lopenal -lm -pthread -ldl -lX11 \
                -lXrandr -lXinerama -lXi -lXxf86vm -lXcursor
     else
         ifeq ($(PLATFORM_OS),OSX)
             # libraries for OS X 10.9 desktop compiling
             # requires the following packages:
-            # libglfw3-dev libopenal-dev libglew-dev libegl1-mesa-dev
-            LIBS = -lraylib -lglfw -framework OpenGL -framework OpenAl -framework Cocoa
+            # libglfw3-dev libopenal-dev libegl1-mesa-dev
+            LIBS = -lraylib -lglfw3 -framework OpenGL -framework OpenAl -framework Cocoa
         else
             # libraries for Windows desktop compiling
             # NOTE: GLFW3 and OpenAL Soft libraries should be installed

View File

@@ -22,11 +22,14 @@ int main()
     Camera2D camera;
-    camera.position = (Vector2){ 0, 0 };
-    camera.origin = (Vector2){ 100, 100 };
+    camera.offset = (Vector2){ 0, 0 };
+    camera.target = (Vector2){ 400, 200 };
     camera.rotation = 0.0f;
     camera.zoom = 1.0f;
+
+    Rectangle player = { 400, 200, 40, 40 };
+    camera.target = (Vector2){ player.x + 20, player.y + 20 };

     SetTargetFPS(60);
     //--------------------------------------------------------------------------------------
@@ -35,16 +38,28 @@ int main()
     {
         // Update
         //----------------------------------------------------------------------------------
-        if (IsKeyDown(KEY_RIGHT)) camera.position.x--;
-        else if (IsKeyDown(KEY_LEFT)) camera.position.x++;
-        else if (IsKeyDown(KEY_UP)) camera.position.y++;
-        else if (IsKeyDown(KEY_DOWN)) camera.position.y--;
-
-        if (IsKeyDown(KEY_R)) camera.rotation--;
-        else if (IsKeyDown(KEY_F)) camera.rotation++;
-
-        if (IsKeyDown(KEY_W)) camera.zoom += 0.005f;
-        if (IsKeyDown(KEY_S)) camera.zoom -= 0.005f;
+        if (IsKeyDown(KEY_RIGHT)) player.x -= 2;
+        else if (IsKeyDown(KEY_LEFT)) player.x += 2;
+        else if (IsKeyDown(KEY_UP)) player.y -= 2;
+        else if (IsKeyDown(KEY_DOWN)) player.y += 2;
+
+        // Camera target follows player
+        camera.target = (Vector2){ player.x + 20, player.y + 20 };
+
+        // Camera controls
+        if (IsKeyDown(KEY_R)) camera.rotation--;
+        else if (IsKeyDown(KEY_F)) camera.rotation++;
+
+        camera.zoom += ((float)GetMouseWheelMove()*0.05f);
+
+        if (IsKeyPressed(KEY_Z))
+        {
+            camera.zoom = 1.0f;
+            camera.rotation = 0.0f;
+        }

         //----------------------------------------------------------------------------------
         // Draw
@@ -56,7 +71,10 @@ int main()
             DrawText("2D CAMERA TEST", 20, 20, 20, GRAY);
             DrawRectangle(0, 300, screenWidth, 50, GRAY);
-            DrawRectangle(400, 250, 40, 40, RED);
+            DrawRectangleRec(player, RED);
+
+            DrawRectangle(camera.origin.x, 0, 1, screenHeight, GREEN);
+            DrawRectangle(0, camera.origin.y, screenWidth, 1, GREEN);

         EndDrawing();
         //----------------------------------------------------------------------------------
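
For reference, a minimal self-contained sketch of the Camera2D pattern this change introduces: the camera gets an offset/target pair and the target is updated every frame so it follows the player, while the mouse wheel drives zoom. This is only a sketch assuming the raylib 1.5-era API (Begin2dMode/End2dMode naming); the window size and movement step are placeholders.

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "camera2d sketch");

    Rectangle player = { 400, 200, 40, 40 };

    Camera2D camera;
    camera.target = (Vector2){ player.x + 20, player.y + 20 };  // world point the camera follows
    camera.offset = (Vector2){ 0, 0 };                          // screen-space shift applied to the target
    camera.rotation = 0.0f;
    camera.zoom = 1.0f;

    SetTargetFPS(60);

    while (!WindowShouldClose())
    {
        if (IsKeyDown(KEY_RIGHT)) player.x += 2;                // move the player...
        else if (IsKeyDown(KEY_LEFT)) player.x -= 2;
        camera.target = (Vector2){ player.x + 20, player.y + 20 };  // ...and keep the camera locked on it
        camera.zoom += ((float)GetMouseWheelMove()*0.05f);      // mouse wheel controls zoom

        BeginDrawing();
            ClearBackground(RAYWHITE);
            Begin2dMode(camera);                                // raylib 1.5-era name (later BeginMode2D)
                DrawRectangleRec(player, RED);
            End2dMode();
        EndDrawing();
    }

    CloseWindow();
    return 0;
}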

View File

@@ -34,7 +34,7 @@ int main()
     //SetGesturesEnabled(0b0000000000001001);   // Enable only some gestures to be detected

-    SetTargetFPS(30);
+    SetTargetFPS(60);
     //--------------------------------------------------------------------------------------

     // Main game loop
@@ -43,12 +43,11 @@ int main()
         // Update
         //----------------------------------------------------------------------------------
         lastGesture = currentGesture;
+        currentGesture = GetGestureDetected();
         touchPosition = GetTouchPosition(0);

-        if (CheckCollisionPointRec(touchPosition, touchArea) && IsGestureDetected())
+        if (CheckCollisionPointRec(touchPosition, touchArea) && (currentGesture != GESTURE_NONE))
         {
-            currentGesture = GetGestureType();
-
             if (currentGesture != lastGesture)
             {
                 // Store gesture string
@@ -62,6 +61,8 @@ int main()
                     case GESTURE_SWIPE_LEFT: strcpy(gestureStrings[gesturesCount], "GESTURE SWIPE LEFT"); break;
                     case GESTURE_SWIPE_UP: strcpy(gestureStrings[gesturesCount], "GESTURE SWIPE UP"); break;
                     case GESTURE_SWIPE_DOWN: strcpy(gestureStrings[gesturesCount], "GESTURE SWIPE DOWN"); break;
+                    case GESTURE_PINCH_IN: strcpy(gestureStrings[gesturesCount], "GESTURE PINCH IN"); break;
+                    case GESTURE_PINCH_OUT: strcpy(gestureStrings[gesturesCount], "GESTURE PINCH OUT"); break;
                     default: break;
                 }
@@ -76,7 +77,6 @@ int main()
             }
         }
     }
-    else currentGesture = GESTURE_NONE;

     //----------------------------------------------------------------------------------
     // Draw
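
For reference, a short sketch of the polling flow after this change: GetGestureDetected() is read once per frame and compared against GESTURE_NONE, replacing the old IsGestureDetected()/GetGestureType() pair. The wrapper function and its variable names here are only illustrative, not part of the example.

#include "raylib.h"

// Hypothetical wrapper, only to illustrate the per-frame polling order
static int lastGesture = GESTURE_NONE;
static int currentGesture = GESTURE_NONE;

static void UpdateGestureLog(Rectangle touchArea)
{
    lastGesture = currentGesture;
    currentGesture = GetGestureDetected();          // GESTURE_NONE when nothing was detected
    Vector2 touchPosition = GetTouchPosition(0);

    if (CheckCollisionPointRec(touchPosition, touchArea) && (currentGesture != GESTURE_NONE))
    {
        if (currentGesture != lastGesture)
        {
            // A new gesture started this frame: react here (the example stores its name)
        }
    }
}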

View File

@@ -43,7 +43,7 @@ int main()
         UpdateCamera(&camera);      // Update internal camera and our camera

         // Calculate cube screen space position (with a little offset to be in top)
-        cubeScreenPosition = WorldToScreen((Vector3){cubePosition.x, cubePosition.y + 2.5f, cubePosition.z}, camera);
+        cubeScreenPosition = GetWorldToScreen((Vector3){cubePosition.x, cubePosition.y + 2.5f, cubePosition.z}, camera);
         //----------------------------------------------------------------------------------

         // Draw
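
For reference, a hedged sketch of the renamed helper: GetWorldToScreen() projects a 3D world point through the current camera and returns a 2D screen coordinate, which can then be used with regular 2D drawing outside the 3D mode block. The label text and the vertical offset here are placeholders, not taken from the example.

// Sketch: project a 3D point and draw a 2D label at the resulting screen position
Vector2 cubeScreenPosition = GetWorldToScreen(
        (Vector3){ cubePosition.x, cubePosition.y + 2.5f, cubePosition.z }, camera);

DrawText("CUBE", (int)cubeScreenPosition.x - MeasureText("CUBE", 20)/2,
         (int)cubeScreenPosition.y, 20, BLACK);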

Binary file not shown (before: image, 49 KiB).

Binary file not shown.

View File

@@ -0,0 +1,196 @@
/********************************************************************************//**
\file OVR_CAPI_Util.h
\brief This header provides LibOVR utility function declarations
\copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
*************************************************************************************/
#ifndef OVR_CAPI_Util_h
#define OVR_CAPI_Util_h
#include "../OVR_CAPI.h"
#ifdef __cplusplus
extern "C" {
#endif
/// Enumerates modifications to the projection matrix based on the application's needs.
///
/// \see ovrMatrix4f_Projection
///
typedef enum ovrProjectionModifier_
{
/// Use for generating a default projection matrix that is:
/// * Right-handed.
/// * Near depth values stored in the depth buffer are smaller than far depth values.
/// * Both near and far are explicitly defined.
/// * With a clipping range that is (0 to w).
ovrProjection_None = 0x00,
/// Enable if using left-handed transformations in your application.
ovrProjection_LeftHanded = 0x01,
/// After the projection transform is applied, far values stored in the depth buffer will be less than closer depth values.
/// NOTE: Enable only if the application is using a floating-point depth buffer for proper precision.
ovrProjection_FarLessThanNear = 0x02,
/// When this flag is used, the zfar value pushed into ovrMatrix4f_Projection() will be ignored
/// NOTE: Enable only if ovrProjection_FarLessThanNear is also enabled where the far clipping plane will be pushed to infinity.
ovrProjection_FarClipAtInfinity = 0x04,
/// Enable if the application is rendering with OpenGL and expects a projection matrix with a clipping range of (-w to w).
/// Ignore this flag if your application already handles the conversion from D3D range (0 to w) to OpenGL.
ovrProjection_ClipRangeOpenGL = 0x08,
} ovrProjectionModifier;
/// Return values for ovr_Detect.
///
/// \see ovr_Detect
///
typedef struct OVR_ALIGNAS(8) ovrDetectResult_
{
/// Is ovrFalse when the Oculus Service is not running.
/// This means that the Oculus Service is either uninstalled or stopped.
/// IsOculusHMDConnected will be ovrFalse in this case.
/// Is ovrTrue when the Oculus Service is running.
/// This means that the Oculus Service is installed and running.
/// IsOculusHMDConnected will reflect the state of the HMD.
ovrBool IsOculusServiceRunning;
/// Is ovrFalse when an Oculus HMD is not detected.
/// If the Oculus Service is not running, this will be ovrFalse.
/// Is ovrTrue when an Oculus HMD is detected.
/// This implies that the Oculus Service is also installed and running.
ovrBool IsOculusHMDConnected;
OVR_UNUSED_STRUCT_PAD(pad0, 6) ///< \internal struct padding
} ovrDetectResult;
OVR_STATIC_ASSERT(sizeof(ovrDetectResult) == 8, "ovrDetectResult size mismatch");
/// Detects Oculus Runtime and Device Status
///
/// Checks for Oculus Runtime and Oculus HMD device status without loading the LibOVRRT
/// shared library. This may be called before ovr_Initialize() to help decide whether or
/// not to initialize LibOVR.
///
/// \param[in] timeoutMilliseconds Specifies a timeout to wait for HMD to be attached or 0 to poll.
///
/// \return Returns an ovrDetectResult object indicating the result of detection.
///
/// \see ovrDetectResult
///
OVR_PUBLIC_FUNCTION(ovrDetectResult) ovr_Detect(int timeoutMilliseconds);
// On the Windows platform,
#ifdef _WIN32
/// This is the Windows Named Event name that is used to check for HMD connected state.
#define OVR_HMD_CONNECTED_EVENT_NAME L"OculusHMDConnected"
#endif // _WIN32
/// Used to generate projection from ovrEyeDesc::Fov.
///
/// \param[in] fov Specifies the ovrFovPort to use.
/// \param[in] znear Distance to near Z limit.
/// \param[in] zfar Distance to far Z limit.
/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.
///
/// \return Returns the calculated projection matrix.
///
/// \see ovrProjectionModifier
///
OVR_PUBLIC_FUNCTION(ovrMatrix4f) ovrMatrix4f_Projection(ovrFovPort fov, float znear, float zfar, unsigned int projectionModFlags);
/// Extracts the required data from the result of ovrMatrix4f_Projection.
///
/// \param[in] projection Specifies the project matrix from which to extract ovrTimewarpProjectionDesc.
/// \param[in] projectionModFlags A combination of the ovrProjectionModifier flags.
/// \return Returns the extracted ovrTimewarpProjectionDesc.
/// \see ovrTimewarpProjectionDesc
///
OVR_PUBLIC_FUNCTION(ovrTimewarpProjectionDesc) ovrTimewarpProjectionDesc_FromProjection(ovrMatrix4f projection, unsigned int projectionModFlags);
/// Generates an orthographic sub-projection.
///
/// Used for 2D rendering, Y is down.
///
/// \param[in] projection The perspective matrix that the orthographic matrix is derived from.
/// \param[in] orthoScale Equal to 1.0f / pixelsPerTanAngleAtCenter.
/// \param[in] orthoDistance Equal to the distance from the camera in meters, such as 0.8m.
/// \param[in] HmdToEyeOffsetX Specifies the offset of the eye from the center.
///
/// \return Returns the calculated projection matrix.
///
OVR_PUBLIC_FUNCTION(ovrMatrix4f) ovrMatrix4f_OrthoSubProjection(ovrMatrix4f projection, ovrVector2f orthoScale,
float orthoDistance, float HmdToEyeOffsetX);
/// Computes offset eye poses based on headPose returned by ovrTrackingState.
///
/// \param[in] headPose Indicates the HMD position and orientation to use for the calculation.
/// \param[in] HmdToEyeOffset Can be ovrEyeRenderDesc.HmdToEyeOffset returned from
/// ovr_GetRenderDesc. For monoscopic rendering, use a vector that is the average
/// of the two vectors for both eyes.
/// \param[out] outEyePoses If outEyePoses are used for rendering, they should be passed to
/// ovr_SubmitFrame in ovrLayerEyeFov::RenderPose or ovrLayerEyeFovDepth::RenderPose.
///
OVR_PUBLIC_FUNCTION(void) ovr_CalcEyePoses(ovrPosef headPose,
const ovrVector3f HmdToEyeOffset[2],
ovrPosef outEyePoses[2]);
/// Returns the predicted head pose in outHmdTrackingState and offset eye poses in outEyePoses.
///
/// This is a thread-safe function where caller should increment frameIndex with every frame
/// and pass that index where applicable to functions called on the rendering thread.
/// Assuming outEyePoses are used for rendering, it should be passed as a part of ovrLayerEyeFov.
/// The caller does not need to worry about applying HmdToEyeOffset to the returned outEyePoses variables.
///
/// \param[in] hmd Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] frameIndex Specifies the targeted frame index, or 0 to refer to one frame after
/// the last time ovr_SubmitFrame was called.
/// \param[in] HmdToEyeOffset Can be ovrEyeRenderDesc.HmdToEyeOffset returned from
/// ovr_GetRenderDesc. For monoscopic rendering, use a vector that is the average
/// of the two vectors for both eyes.
/// \param[in] latencyMarker Specifies that this call is the point in time where
/// the "App-to-Mid-Photon" latency timer starts from. If a given ovrLayer
/// provides "SensorSampleTimestamp", that will override the value stored here.
/// \param[out] outEyePoses The predicted eye poses.
/// \param[out] outSensorSampleTime The time when this function was called. May be NULL, in which case it is ignored.
///
OVR_PUBLIC_FUNCTION(void) ovr_GetEyePoses(ovrSession session, long long frameIndex, ovrBool latencyMarker,
const ovrVector3f HmdToEyeOffset[2],
ovrPosef outEyePoses[2],
double* outSensorSampleTime);
/// Tracking poses provided by the SDK come in a right-handed coordinate system. If an application
/// is passing in ovrProjection_LeftHanded into ovrMatrix4f_Projection, then it should also use
/// this function to flip the HMD tracking poses to be left-handed.
///
/// While this utility function is intended to convert a left-handed ovrPosef into a right-handed
/// coordinate system, it will also work for converting right-handed to left-handed since the
/// flip operation is the same for both cases.
///
/// \param[in] inPose that is right-handed
/// \param[out] outPose that is requested to be left-handed (can be the same pointer to inPose)
///
OVR_PUBLIC_FUNCTION(void) ovrPosef_FlipHandedness(const ovrPosef* inPose, ovrPosef* outPose);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif // Header include guard
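
As a rough sketch of how these utility declarations are combined in practice (the raylib oculus_glfw_sample later in this commit follows the same flow): per-eye render descriptions feed both the projection matrix and the eye offsets, then ovr_GetEyePoses() is called once per frame. The variables session, hmdDesc and frameIndex are assumed to be set up elsewhere.

// Assumes: ovrSession session, ovrHmdDesc hmdDesc, long long frameIndex already initialized
ovrEyeRenderDesc eyeRenderDescs[2];
ovrVector3f hmdToEyeOffset[2];
ovrPosef eyePoses[2];
double sensorSampleTime = 0.0;

for (int eye = 0; eye < 2; eye++)
{
    eyeRenderDescs[eye] = ovr_GetRenderDesc(session, eye, hmdDesc.DefaultEyeFov[eye]);
    hmdToEyeOffset[eye] = eyeRenderDescs[eye].HmdToEyeOffset;

    // Right-handed projection with an OpenGL-style (-w..w) clip range
    ovrMatrix4f proj = ovrMatrix4f_Projection(eyeRenderDescs[eye].Fov, 0.01f, 1000.0f,
                                              ovrProjection_ClipRangeOpenGL);
    (void)proj;   // upload to the shader / convert to the app's matrix type here
}

// Once per frame: predicted head and eye poses for the frame being rendered
ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyePoses, &sensorSampleTime);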

File diff suppressed because it is too large.

View File

@@ -0,0 +1,70 @@
/************************************************************************************
Filename : OVR_StereoProjection.h
Content : Stereo projection functions
Created : November 30, 2013
Authors : Tom Fosyth
Copyright : Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
Licensed under the Oculus VR Rift SDK License Version 3.3 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.
You may obtain a copy of the License at
http://www.oculusvr.com/licenses/LICENSE-3.3
Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*************************************************************************************/
#ifndef OVR_StereoProjection_h
#define OVR_StereoProjection_h
#include "Extras/OVR_Math.h"
namespace OVR {
//-----------------------------------------------------------------------------------
// ***** Stereo Enumerations
// StereoEye specifies which eye we are rendering for; it is used to
// retrieve StereoEyeParams.
enum StereoEye
{
StereoEye_Left,
StereoEye_Right,
StereoEye_Center
};
//-----------------------------------------------------------------------------------
// ***** Projection functions
Matrix4f CreateProjection ( bool rightHanded, bool isOpenGL, FovPort fov, StereoEye eye,
float zNear = 0.01f, float zFar = 10000.0f,
bool flipZ = false, bool farAtInfinity = false);
Matrix4f CreateOrthoSubProjection ( bool rightHanded, StereoEye eyeType,
float tanHalfFovX, float tanHalfFovY,
float unitsX, float unitsY, float distanceFromCamera,
float interpupillaryDistance, Matrix4f const &projection,
float zNear = 0.0f, float zFar = 0.0f,
bool flipZ = false, bool farAtInfinity = false);
ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov ( FovPort fov );
} //namespace OVR
#endif // OVR_StereoProjection_h

File diff suppressed because it is too large.

View File

@@ -0,0 +1,76 @@
/********************************************************************************//**
\file OVR_CAPI_Audio.h
\brief CAPI audio functions.
\copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.
************************************************************************************/
#ifndef OVR_CAPI_Audio_h
#define OVR_CAPI_Audio_h
#ifdef _WIN32
#include <windows.h>
#include "OVR_CAPI.h"
#define OVR_AUDIO_MAX_DEVICE_STR_SIZE 128
/// Gets the ID of the preferred VR audio output device.
///
/// \param[out] deviceOutId The ID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be WAVE_MAPPER.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutWaveId(UINT* deviceOutId);
/// Gets the ID of the preferred VR audio input device.
///
/// \param[out] deviceInId The ID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be WAVE_MAPPER.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInWaveId(UINT* deviceInId);
/// Gets the GUID of the preferred VR audio device as a string.
///
/// \param[out] deviceOutStrBuffer A buffer where the GUID string for the device will copied to.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuidStr(WCHAR deviceOutStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);
/// Gets the GUID of the preferred VR audio device.
///
/// \param[out] deviceOutGuid The GUID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be NULL.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceOutGuid(GUID* deviceOutGuid);
/// Gets the GUID of the preferred VR microphone device as a string.
///
/// \param[out] deviceInStrBuffer A buffer where the GUID string for the device will copied to.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuidStr(WCHAR deviceInStrBuffer[OVR_AUDIO_MAX_DEVICE_STR_SIZE]);
/// Gets the GUID of the preferred VR microphone device.
///
/// \param[out] deviceInGuid The GUID of the user's preferred VR audio device to use, which will be valid upon a successful return value, else it will be NULL.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetAudioDeviceInGuid(GUID* deviceInGuid);
#endif //OVR_OS_MS
#endif // OVR_CAPI_Audio_h
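
A minimal sketch of using the output-device query (Windows only, as the surrounding #ifdef implies). WAVE_MAPPER and printf come from the standard Windows/C headers; falling back to the default device on failure is an assumption about typical usage, not something this header mandates.

// Assumes: <windows.h>, <stdio.h>, "OVR_CAPI_Audio.h" included and LibOVR initialized
UINT deviceOutId = WAVE_MAPPER;
ovrResult r = ovr_GetAudioDeviceOutWaveId(&deviceOutId);
if (OVR_FAILURE(r))
{
    ovrErrorInfo info;
    ovr_GetLastErrorInfo(&info);
    printf("OVR audio: %s\n", info.ErrorString);
    // deviceOutId stays WAVE_MAPPER, i.e. use the system default output
}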

View File

@@ -0,0 +1,131 @@
/********************************************************************************//**
\file OVR_CAPI_D3D.h
\brief D3D specific structures used by the CAPI interface.
\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
************************************************************************************/
#ifndef OVR_CAPI_D3D_h
#define OVR_CAPI_D3D_h
#include "OVR_CAPI.h"
#include "OVR_Version.h"
#if defined (_WIN32)
#include <Unknwn.h>
//-----------------------------------------------------------------------------------
// ***** Direct3D Specific
/// Create Texture Swap Chain suitable for use with Direct3D 11 and 12.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] d3dPtr Specifies the application's D3D11Device to create resources with or the D3D12CommandQueue
/// which must be the same one the application renders to the eye textures with.
/// \param[in] desc Specifies requested texture properties. See notes for more info about texture format.
/// \param[in] bindFlags Specifies what ovrTextureBindFlags the application requires for this texture chain.
/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid upon a successful return value, else it will be NULL.
/// This texture chain must be eventually destroyed via ovr_DestroyTextureSwapChain before destroying the HMD with ovr_Destroy.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The texture format provided in \a desc should be thought of as the format the distortion-compositor will use for the
/// ShaderResourceView when reading the contents of the texture. To that end, it is highly recommended that the application
/// requests texture swapchain formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor
/// does sRGB-correct rendering. As such, the compositor relies on the GPU's hardware sampler to do the sRGB-to-linear
/// conversion. If the application still prefers to render to a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) while handling the
/// linear-to-gamma conversion via HLSL code, then the application must still request the corresponding sRGB format and also use
/// the \a ovrTextureMisc_DX_Typeless flag in the ovrTextureSwapChainDesc's Flag field. This will allow the application to create
/// a RenderTargetView that is the desired linear format while the compositor continues to treat it as sRGB. Failure to do so
/// will cause the compositor to apply unexpected gamma conversions leading to gamma-curve artifacts. The \a ovrTextureMisc_DX_Typeless
/// flag for depth buffer formats (e.g. OVR_FORMAT_D32_FLOAT) is ignored as they are always converted to be typeless.
///
/// \see ovr_GetTextureSwapChainLength
/// \see ovr_GetTextureSwapChainCurrentIndex
/// \see ovr_GetTextureSwapChainDesc
/// \see ovr_GetTextureSwapChainBufferDX
/// \see ovr_DestroyTextureSwapChain
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateTextureSwapChainDX(ovrSession session,
IUnknown* d3dPtr,
const ovrTextureSwapChainDesc* desc,
ovrTextureSwapChain* out_TextureSwapChain);
/// Get a specific buffer within the chain as any compatible COM interface (similar to QueryInterface)
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] chain Specifies an ovrTextureSwapChain previously returned by ovr_CreateTextureSwapChainDX
/// \param[in] index Specifies the index within the chain to retrieve. Must be between 0 and length (see ovr_GetTextureSwapChainLength),
/// or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to GetTextureSwapChainCurrentIndex)
/// \param[in] iid Specifies the interface ID of the interface pointer to query the buffer for.
/// \param[out] out_Buffer Returns the COM interface pointer retrieved.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// <b>Example code</b>
/// \code{.cpp}
/// ovr_GetTextureSwapChainBuffer(session, chain, 0, IID_ID3D11Texture2D, &d3d11Texture);
/// ovr_GetTextureSwapChainBuffer(session, chain, 1, IID_PPV_ARGS(&dxgiResource));
/// \endcode
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainBufferDX(ovrSession session,
ovrTextureSwapChain chain,
int index,
IID iid,
void** out_Buffer);
/// Create Mirror Texture which is auto-refreshed to mirror Rift contents produced by this application.
///
/// A second call to ovr_CreateMirrorTextureDX for a given ovrSession before destroying the first one
/// is not supported and will result in an error return.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] d3dPtr Specifies the application's D3D11Device to create resources with or the D3D12CommandQueue
/// which must be the same one the application renders to the textures with.
/// \param[in] desc Specifies requested texture properties. See notes for more info about texture format.
/// \param[out] out_MirrorTexture Returns the created ovrMirrorTexture, which will be valid upon a successful return value, else it will be NULL.
/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before destroying the HMD with ovr_Destroy.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The texture format provided in \a desc should be thought of as the format the compositor will use for the RenderTargetView when
/// writing into mirror texture. To that end, it is highly recommended that the application requests a mirror texture format that is
/// in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the compositor does sRGB-correct rendering. If however the application wants
/// to still read the mirror texture as a linear format (e.g. OVR_FORMAT_R8G8B8A8_UNORM) and handle the sRGB-to-linear conversion in
/// HLSL code, then it is recommended the application still requests an sRGB format and also use the \a ovrTextureMisc_DX_Typeless flag in the
/// ovrMirrorTextureDesc's Flags field. This will allow the application to bind a ShaderResourceView that is a linear format while the
/// compositor continues to treat is as sRGB. Failure to do so will cause the compositor to apply unexpected gamma conversions leading to
/// gamma-curve artifacts.
///
/// \see ovr_GetMirrorTextureBufferDX
/// \see ovr_DestroyMirrorTexture
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateMirrorTextureDX(ovrSession session,
IUnknown* d3dPtr,
const ovrMirrorTextureDesc* desc,
ovrMirrorTexture* out_MirrorTexture);
/// Get a the underlying buffer as any compatible COM interface (similar to QueryInterface)
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned by ovr_CreateMirrorTextureDX
/// \param[in] iid Specifies the interface ID of the interface pointer to query the buffer for.
/// \param[out] out_Buffer Returns the COM interface pointer retrieved.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetMirrorTextureBufferDX(ovrSession session,
ovrMirrorTexture mirrorTexture,
IID iid,
void** out_Buffer);
#endif // _WIN32
#endif // OVR_CAPI_D3D_h
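
A hedged sketch of the D3D11 path described above: fill an ovrTextureSwapChainDesc (sRGB format, render-target bind flag), create the chain, then query each buffer as an ID3D11Texture2D. The variables session, device, eyeWidth and eyeHeight are assumed to exist; error handling and COM releases are trimmed.

// Assumes: ovrSession session, ID3D11Device *device, int eyeWidth/eyeHeight
ovrTextureSwapChainDesc desc = {0};
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
desc.Width = eyeWidth;
desc.Height = eyeHeight;
desc.MipLevels = 1;
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;       // sRGB, as recommended above
desc.SampleCount = 1;
desc.StaticImage = ovrFalse;
desc.BindFlags = ovrTextureBind_DX_RenderTarget;

ovrTextureSwapChain chain = NULL;
if (OVR_SUCCESS(ovr_CreateTextureSwapChainDX(session, (IUnknown *)device, &desc, &chain)))
{
    int length = 0;
    ovr_GetTextureSwapChainLength(session, chain, &length);
    for (int i = 0; i < length; i++)
    {
        ID3D11Texture2D *tex = NULL;
        ovr_GetTextureSwapChainBufferDX(session, chain, i, IID_ID3D11Texture2D, (void **)&tex);
        // create a render target view from 'tex', then release the COM reference
    }
}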

View File

@@ -0,0 +1,99 @@
/********************************************************************************//**
\file OVR_CAPI_GL.h
\brief OpenGL-specific structures used by the CAPI interface.
\copyright Copyright 2015 Oculus VR, LLC. All Rights reserved.
************************************************************************************/
#ifndef OVR_CAPI_GL_h
#define OVR_CAPI_GL_h
#include "OVR_CAPI.h"
/// Creates a TextureSwapChain suitable for use with OpenGL.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] desc Specifies the requested texture properties. See notes for more info about texture format.
/// \param[out] out_TextureSwapChain Returns the created ovrTextureSwapChain, which will be valid upon
/// a successful return value, else it will be NULL. This texture swap chain must be eventually
/// destroyed via ovr_DestroyTextureSwapChain before destroying the HMD with ovr_Destroy.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The \a format provided should be thought of as the format the distortion compositor will use when reading
/// the contents of the texture. To that end, it is highly recommended that the application requests texture swap chain
/// formats that are in sRGB-space (e.g. OVR_FORMAT_R8G8B8A8_UNORM_SRGB) as the distortion compositor does sRGB-correct
/// rendering. Furthermore, the app should then make sure "glEnable(GL_FRAMEBUFFER_SRGB);" is called before rendering
/// into these textures. Even though it is not recommended, if the application would like to treat the texture as a linear
/// format and do linear-to-gamma conversion in GLSL, then the application can avoid calling "glEnable(GL_FRAMEBUFFER_SRGB);",
/// but should still pass in an sRGB variant for the \a format. Failure to do so will cause the distortion compositor
/// to apply incorrect gamma conversions leading to gamma-curve artifacts.
///
/// \see ovr_GetTextureSwapChainLength
/// \see ovr_GetTextureSwapChainCurrentIndex
/// \see ovr_GetTextureSwapChainDesc
/// \see ovr_GetTextureSwapChainBufferGL
/// \see ovr_DestroyTextureSwapChain
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateTextureSwapChainGL(ovrSession session,
const ovrTextureSwapChainDesc* desc,
ovrTextureSwapChain* out_TextureSwapChain);
/// Get a specific buffer within the chain as a GL texture name
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] chain Specifies an ovrTextureSwapChain previously returned by ovr_CreateTextureSwapChainGL
/// \param[in] index Specifies the index within the chain to retrieve. Must be between 0 and length (see ovr_GetTextureSwapChainLength)
/// or may pass -1 to get the buffer at the CurrentIndex location. (Saving a call to GetTextureSwapChainCurrentIndex)
/// \param[out] out_TexId Returns the GL texture object name associated with the specific index requested
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetTextureSwapChainBufferGL(ovrSession session,
ovrTextureSwapChain chain,
int index,
unsigned int* out_TexId);
/// Creates a Mirror Texture which is auto-refreshed to mirror Rift contents produced by this application.
///
/// A second call to ovr_CreateMirrorTextureGL for a given ovrSession before destroying the first one
/// is not supported and will result in an error return.
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] desc Specifies the requested mirror texture description.
/// \param[out] out_MirrorTexture Specifies the created ovrMirrorTexture, which will be valid upon a successful return value, else it will be NULL.
/// This texture must be eventually destroyed via ovr_DestroyMirrorTexture before destroying the HMD with ovr_Destroy.
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
/// \note The \a format provided should be thought of as the format the distortion compositor will use when writing into the mirror
/// texture. It is highly recommended that mirror textures are requested as sRGB formats because the distortion compositor
/// does sRGB-correct rendering. If the application requests a non-sRGB format (e.g. R8G8B8A8_UNORM) as the mirror texture,
/// then the application might have to apply a manual linear-to-gamma conversion when reading from the mirror texture.
/// Failure to do so can result in incorrect gamma conversions leading to gamma-curve artifacts and color banding.
///
/// \see ovr_GetMirrorTextureBufferGL
/// \see ovr_DestroyMirrorTexture
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_CreateMirrorTextureGL(ovrSession session,
const ovrMirrorTextureDesc* desc,
ovrMirrorTexture* out_MirrorTexture);
/// Get a the underlying buffer as a GL texture name
///
/// \param[in] session Specifies an ovrSession previously returned by ovr_Create.
/// \param[in] mirrorTexture Specifies an ovrMirrorTexture previously returned by ovr_CreateMirrorTextureGL
/// \param[out] out_TexId Specifies the GL texture object name associated with the mirror texture
///
/// \return Returns an ovrResult indicating success or failure. In the case of failure, use
/// ovr_GetLastErrorInfo to get more information.
///
OVR_PUBLIC_FUNCTION(ovrResult) ovr_GetMirrorTextureBufferGL(ovrSession session,
ovrMirrorTexture mirrorTexture,
unsigned int* out_TexId);
#endif // OVR_CAPI_GL_h
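
A sketch of the mirror-texture side of the GL API above: fetch the GL texture behind the ovrMirrorTexture, attach it to a read framebuffer, and blit it to the window's default framebuffer each frame. Variable names follow the raylib sample later in this commit; the vertical flip and the blit parameters are assumptions typical of GL mirror setups, not taken from this header.

// Assumes: session, mirrorTexture, mirrorFbo, mirrorSize and the GLFW 'window' from the sample
GLuint mirrorTexId = 0;
ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTexId);

glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFbo);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTexId, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);

// The mirror is delivered top-down, so flip vertically while blitting
glBlitFramebuffer(0, (int)mirrorSize.y, (int)mirrorSize.x, 0,
                  0, 0, (int)mirrorSize.x, (int)mirrorSize.y,
                  GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);

glfwSwapBuffers(window);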

View File

@@ -0,0 +1,53 @@
/********************************************************************************//**
\file OVR_CAPI.h
\brief Keys for CAPI property function calls
\copyright Copyright 2015 Oculus VR, LLC All Rights reserved.
************************************************************************************/
#ifndef OVR_CAPI_Keys_h
#define OVR_CAPI_Keys_h
#include "OVR_Version.h"
#define OVR_KEY_USER "User" // string
#define OVR_KEY_NAME "Name" // string
#define OVR_KEY_GENDER "Gender" // string "Male", "Female", or "Unknown"
#define OVR_DEFAULT_GENDER "Unknown"
#define OVR_KEY_PLAYER_HEIGHT "PlayerHeight" // float meters
#define OVR_DEFAULT_PLAYER_HEIGHT 1.778f
#define OVR_KEY_EYE_HEIGHT "EyeHeight" // float meters
#define OVR_DEFAULT_EYE_HEIGHT 1.675f
#define OVR_KEY_NECK_TO_EYE_DISTANCE "NeckEyeDistance" // float[2] meters
#define OVR_DEFAULT_NECK_TO_EYE_HORIZONTAL 0.0805f
#define OVR_DEFAULT_NECK_TO_EYE_VERTICAL 0.075f
#define OVR_KEY_EYE_TO_NOSE_DISTANCE "EyeToNoseDist" // float[2] meters
#define OVR_PERF_HUD_MODE "PerfHudMode" // int, allowed values are defined in enum ovrPerfHudMode
#define OVR_LAYER_HUD_MODE "LayerHudMode" // int, allowed values are defined in enum ovrLayerHudMode
#define OVR_LAYER_HUD_CURRENT_LAYER "LayerHudCurrentLayer" // int, The layer to show
#define OVR_LAYER_HUD_SHOW_ALL_LAYERS "LayerHudShowAll" // bool, Hide other layers when the hud is enabled
#define OVR_DEBUG_HUD_STEREO_MODE "DebugHudStereoMode" // int, allowed values are defined in enum ovrDebugHudStereoMode
#define OVR_DEBUG_HUD_STEREO_GUIDE_INFO_ENABLE "DebugHudStereoGuideInfoEnable" // bool
#define OVR_DEBUG_HUD_STEREO_GUIDE_SIZE "DebugHudStereoGuideSize2f" // float[2]
#define OVR_DEBUG_HUD_STEREO_GUIDE_POSITION "DebugHudStereoGuidePosition3f" // float[3]
#define OVR_DEBUG_HUD_STEREO_GUIDE_YAWPITCHROLL "DebugHudStereoGuideYawPitchRoll3f" // float[3]
#define OVR_DEBUG_HUD_STEREO_GUIDE_COLOR "DebugHudStereoGuideColor4f" // float[4]
#endif // OVR_CAPI_Keys_h
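
These keys are consumed by the generic property getters and setters declared in OVR_CAPI.h. A brief hedged sketch follows; ovr_GetFloat, ovr_GetString, ovr_SetInt and ovrPerfHud_PerfSummary are assumed from that header, and 'session' is assumed to exist.

// Read profile values with a fallback default, and toggle the runtime performance HUD
float height = ovr_GetFloat(session, OVR_KEY_PLAYER_HEIGHT, OVR_DEFAULT_PLAYER_HEIGHT);
const char *name = ovr_GetString(session, OVR_KEY_NAME, "");

ovr_SetInt(session, OVR_PERF_HUD_MODE, (int)ovrPerfHud_PerfSummary);   // value from enum ovrPerfHudMode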

View File

@@ -0,0 +1,191 @@
/********************************************************************************//**
\file OVR_ErrorCode.h
\brief This header provides LibOVR error code declarations.
\copyright Copyright 2015-2016 Oculus VR, LLC All Rights reserved.
*************************************************************************************/
#ifndef OVR_ErrorCode_h
#define OVR_ErrorCode_h
#include "OVR_Version.h"
#include <stdint.h>
#ifndef OVR_RESULT_DEFINED
#define OVR_RESULT_DEFINED ///< Allows ovrResult to be independently defined.
/// API call results are represented at the highest level by a single ovrResult.
typedef int32_t ovrResult;
#endif
/// \brief Indicates if an ovrResult indicates success.
///
/// Some functions return additional successful values other than ovrSuccess and
/// require usage of this macro to indicate success.
///
#if !defined(OVR_SUCCESS)
#define OVR_SUCCESS(result) (result >= 0)
#endif
/// \brief Indicates if an ovrResult indicates an unqualified success.
///
/// This is useful for indicating that the code intentionally wants to
/// check for result == ovrSuccess as opposed to OVR_SUCCESS(), which
/// checks for result >= ovrSuccess.
///
#if !defined(OVR_UNQUALIFIED_SUCCESS)
#define OVR_UNQUALIFIED_SUCCESS(result) (result == ovrSuccess)
#endif
/// \brief Indicates if an ovrResult indicates failure.
///
#if !defined(OVR_FAILURE)
#define OVR_FAILURE(result) (!OVR_SUCCESS(result))
#endif
// Success is a value greater or equal to 0, while all error types are negative values.
#ifndef OVR_SUCCESS_DEFINED
#define OVR_SUCCESS_DEFINED ///< Allows ovrResult to be independently defined.
typedef enum ovrSuccessType_
{
/// This is a general success result. Use OVR_SUCCESS to test for success.
ovrSuccess = 0,
/// Returned from a call to SubmitFrame. The call succeeded, but what the app
/// rendered will not be visible on the HMD. Ideally the app should continue
/// calling SubmitFrame, but not do any rendering. When the result becomes
/// ovrSuccess, rendering should continue as usual.
ovrSuccess_NotVisible = 1000,
ovrSuccess_HMDFirmwareMismatch = 4100, ///< The HMD Firmware is out of date but is acceptable.
ovrSuccess_TrackerFirmwareMismatch = 4101, ///< The Tracker Firmware is out of date but is acceptable.
ovrSuccess_ControllerFirmwareMismatch = 4104, ///< The controller firmware is out of date but is acceptable.
ovrSuccess_TrackerDriverNotFound = 4105, ///< The tracker driver interface was not found. Can be a temporary error
} ovrSuccessType;
#endif
typedef enum ovrErrorType_
{
/* General errors */
ovrError_MemoryAllocationFailure = -1000, ///< Failure to allocate memory.
ovrError_SocketCreationFailure = -1001, ///< Failure to create a socket.
ovrError_InvalidSession = -1002, ///< Invalid ovrSession parameter provided.
ovrError_Timeout = -1003, ///< The operation timed out.
ovrError_NotInitialized = -1004, ///< The system or component has not been initialized.
ovrError_InvalidParameter = -1005, ///< Invalid parameter provided. See error info or log for details.
ovrError_ServiceError = -1006, ///< Generic service error. See error info or log for details.
ovrError_NoHmd = -1007, ///< The given HMD doesn't exist.
ovrError_Unsupported = -1009, ///< Function call is not supported on this hardware/software
ovrError_DeviceUnavailable = -1010, ///< Specified device type isn't available.
ovrError_InvalidHeadsetOrientation = -1011, ///< The headset was in an invalid orientation for the requested operation (e.g. vertically oriented during ovr_RecenterPose).
ovrError_ClientSkippedDestroy = -1012, ///< The client failed to call ovr_Destroy on an active session before calling ovr_Shutdown. Or the client crashed.
ovrError_ClientSkippedShutdown = -1013, ///< The client failed to call ovr_Shutdown or the client crashed.
/* Audio error range, reserved for Audio errors. */
ovrError_AudioReservedBegin = -2000, ///< First Audio error.
ovrError_AudioDeviceNotFound = -2001, ///< Failure to find the specified audio device.
ovrError_AudioComError = -2002, ///< Generic COM error.
ovrError_AudioReservedEnd = -2999, ///< Last Audio error.
/* Initialization errors. */
ovrError_Initialize = -3000, ///< Generic initialization error.
ovrError_LibLoad = -3001, ///< Couldn't load LibOVRRT.
ovrError_LibVersion = -3002, ///< LibOVRRT version incompatibility.
ovrError_ServiceConnection = -3003, ///< Couldn't connect to the OVR Service.
ovrError_ServiceVersion = -3004, ///< OVR Service version incompatibility.
ovrError_IncompatibleOS = -3005, ///< The operating system version is incompatible.
ovrError_DisplayInit = -3006, ///< Unable to initialize the HMD display.
ovrError_ServerStart = -3007, ///< Unable to start the server. Is it already running?
ovrError_Reinitialization = -3008, ///< Attempting to re-initialize with a different version.
ovrError_MismatchedAdapters = -3009, ///< Chosen rendering adapters between client and service do not match
ovrError_LeakingResources = -3010, ///< Calling application has leaked resources
ovrError_ClientVersion = -3011, ///< Client version too old to connect to service
ovrError_OutOfDateOS = -3012, ///< The operating system is out of date.
ovrError_OutOfDateGfxDriver = -3013, ///< The graphics driver is out of date.
ovrError_IncompatibleGPU = -3014, ///< The graphics hardware is not supported
ovrError_NoValidVRDisplaySystem = -3015, ///< No valid VR display system found.
ovrError_Obsolete = -3016, ///< Feature or API is obsolete and no longer supported.
ovrError_DisabledOrDefaultAdapter = -3017, ///< No supported VR display system found, but disabled or driverless adapter found.
ovrError_HybridGraphicsNotSupported = -3018, ///< The system is using hybrid graphics (Optimus, etc...), which is not supported.
ovrError_DisplayManagerInit = -3019, ///< Initialization of the DisplayManager failed.
ovrError_TrackerDriverInit = -3020, ///< Failed to get the interface for an attached tracker
/* Hardware errors */
ovrError_InvalidBundleAdjustment = -4000, ///< Headset has no bundle adjustment data.
ovrError_USBBandwidth = -4001, ///< The USB hub cannot handle the camera frame bandwidth.
ovrError_USBEnumeratedSpeed = -4002, ///< The USB camera is not enumerating at the correct device speed.
ovrError_ImageSensorCommError = -4003, ///< Unable to communicate with the image sensor.
ovrError_GeneralTrackerFailure = -4004, ///< We use this to report various sensor issues that don't fit in an easily classifiable bucket.
ovrError_ExcessiveFrameTruncation = -4005, ///< A more than acceptable number of frames are coming back truncated.
ovrError_ExcessiveFrameSkipping = -4006, ///< A more than acceptable number of frames have been skipped.
ovrError_SyncDisconnected = -4007, ///< The sensor is not receiving the sync signal (cable disconnected?).
ovrError_TrackerMemoryReadFailure = -4008, ///< Failed to read memory from the sensor.
ovrError_TrackerMemoryWriteFailure = -4009, ///< Failed to write memory from the sensor.
ovrError_TrackerFrameTimeout = -4010, ///< Timed out waiting for a camera frame.
ovrError_TrackerTruncatedFrame = -4011, ///< Truncated frame returned from sensor.
ovrError_TrackerDriverFailure = -4012, ///< The sensor driver has encountered a problem.
ovrError_TrackerNRFFailure = -4013, ///< The sensor wireless subsystem has encountered a problem.
ovrError_HardwareGone = -4014, ///< The hardware has been unplugged
ovrError_NordicEnabledNoSync = -4015, ///< The nordic indicates that sync is enabled but it is not sending sync pulses
ovrError_NordicSyncNoFrames = -4016, ///< It looks like we're getting a sync signal, but no camera frames have been received
ovrError_CatastrophicFailure = -4017, ///< A catastrophic failure has occurred. We will attempt to recover by resetting the device
ovrError_HMDFirmwareMismatch = -4100, ///< The HMD Firmware is out of date and is unacceptable.
ovrError_TrackerFirmwareMismatch = -4101, ///< The sensor Firmware is out of date and is unacceptable.
ovrError_BootloaderDeviceDetected = -4102, ///< A bootloader HMD is detected by the service.
ovrError_TrackerCalibrationError = -4103, ///< The sensor calibration is missing or incorrect.
ovrError_ControllerFirmwareMismatch = -4104, ///< The controller firmware is out of date and is unacceptable.
ovrError_IMUTooManyLostSamples = -4200, ///< Too many lost IMU samples.
ovrError_IMURateError = -4201, ///< IMU rate is outside of the expected range.
ovrError_FeatureReportFailure = -4202, ///< A feature report has failed.
/* Synchronization errors */
ovrError_Incomplete = -5000, ///<Requested async work not yet complete.
ovrError_Abandoned = -5001, ///<Requested async work was abandoned and result is incomplete.
/* Rendering errors */
ovrError_DisplayLost = -6000, ///<In the event of a system-wide graphics reset or cable unplug this is returned to the app.
ovrError_TextureSwapChainFull = -6001, ///<ovr_CommitTextureSwapChain was called too many times on a texture swapchain without calling submit to use the chain.
ovrError_TextureSwapChainInvalid = -6002, ///<The ovrTextureSwapChain is in an incomplete or inconsistent state. Ensure ovr_CommitTextureSwapChain was called at least once first.
/* Fatal errors */
ovrError_RuntimeException = -7000, ///< A runtime exception occurred. The application is required to shutdown LibOVR and re-initialize it before this error state will be cleared.
ovrError_MetricsUnknownApp = -90000,
ovrError_MetricsDuplicateApp = -90001,
ovrError_MetricsNoEvents = -90002,
ovrError_MetricsRuntime = -90003,
ovrError_MetricsFile = -90004,
ovrError_MetricsNoClientInfo = -90005,
ovrError_MetricsNoAppMetaData = -90006,
ovrError_MetricsNoApp = -90007,
ovrError_MetricsOafFailure = -90008,
ovrError_MetricsSessionAlreadyActive = -90009,
ovrError_MetricsSessionNotActive = -90010,
} ovrErrorType;
/// Provides information about the last error.
/// \see ovr_GetLastErrorInfo
typedef struct ovrErrorInfo_
{
ovrResult Result; ///< The result from the last API call that generated an error ovrResult.
char ErrorString[512]; ///< A UTF8-encoded null-terminated English string describing the problem. The format of this string is subject to change in future versions.
} ovrErrorInfo;
#endif /* OVR_ErrorCode_h */
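
A short sketch of the intended check-and-report pattern built from these macros and ovrErrorInfo, assuming <stdio.h> and the session-creation call from OVR_CAPI.h:

ovrSession session = NULL;
ovrGraphicsLuid luid;
ovrResult result = ovr_Create(&session, &luid);

if (OVR_FAILURE(result))
{
    ovrErrorInfo errorInfo;
    ovr_GetLastErrorInfo(&errorInfo);                // fills Result and ErrorString
    printf("ovr_Create failed (%d): %s\n", errorInfo.Result, errorInfo.ErrorString);
}
else if (!OVR_UNQUALIFIED_SUCCESS(result))
{
    // Qualified successes such as ovrSuccess_NotVisible land here
}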

View File

@@ -0,0 +1,60 @@
/********************************************************************************//**
\file OVR_Version.h
\brief This header provides LibOVR version identification.
\copyright Copyright 2014-2016 Oculus VR, LLC All Rights reserved.
*************************************************************************************/
#ifndef OVR_Version_h
#define OVR_Version_h
/// Conventional string-ification macro.
#if !defined(OVR_STRINGIZE)
#define OVR_STRINGIZEIMPL(x) #x
#define OVR_STRINGIZE(x) OVR_STRINGIZEIMPL(x)
#endif
// Master version numbers
#define OVR_PRODUCT_VERSION 1 // Product version doesn't participate in semantic versioning.
#define OVR_MAJOR_VERSION 1 // If you change these values then you need to also make sure to change LibOVR/Projects/Windows/LibOVR.props in parallel.
#define OVR_MINOR_VERSION 3 //
#define OVR_PATCH_VERSION 0
#define OVR_BUILD_NUMBER 0
// This is the ((product * 100) + major) version of the service that the DLL is compatible with.
// When we backport changes to old versions of the DLL we update the old DLLs
// to move this version number up to the latest version.
// The DLL is responsible for checking that the service is the version it supports
// and returning an appropriate error message if it has not been made compatible.
#define OVR_DLL_COMPATIBLE_VERSION 101
#define OVR_FEATURE_VERSION 0
/// "Major.Minor.Patch"
#if !defined(OVR_VERSION_STRING)
#define OVR_VERSION_STRING OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION)
#endif
/// "Major.Minor.Patch.Build"
#if !defined(OVR_DETAILED_VERSION_STRING)
#define OVR_DETAILED_VERSION_STRING OVR_STRINGIZE(OVR_MAJOR_VERSION.OVR_MINOR_VERSION.OVR_PATCH_VERSION.OVR_BUILD_NUMBER)
#endif
/// \brief file description for version info
/// This appears in the user-visible file properties. It is intended to convey publicly
/// available additional information such as feature builds.
#if !defined(OVR_FILE_DESCRIPTION_STRING)
#if defined(_DEBUG)
#define OVR_FILE_DESCRIPTION_STRING "dev build debug"
#else
#define OVR_FILE_DESCRIPTION_STRING "dev build"
#endif
#endif
#endif // OVR_Version_h
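
A tiny sketch of how these macros are typically surfaced, alongside the runtime counterpart ovr_GetVersionString() declared in OVR_CAPI.h (assumed here); the header version and the loaded DLL version can differ:

// Compile-time header version vs. runtime DLL version
printf("LibOVR headers: %s (%s)\n", OVR_VERSION_STRING, OVR_FILE_DESCRIPTION_STRING);
printf("LibOVR runtime: %s\n", ovr_GetVersionString());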

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,504 @@
/*******************************************************************************************
*
* raylib Oculus minimum sample (OpenGL 3.3 Core)
*
* NOTE: This example requires raylib module [rlgl]
*
* Compile rlgl using:
* gcc -c rlgl.c -Wall -std=c99 -DRLGL_STANDALONE -DRAYMATH_IMPLEMENTATION -DGRAPHICS_API_OPENGL_33
*
* Compile example using:
* gcc -o oculus_glfw_sample.exe oculus_glfw_sample.c rlgl.o glad.o -L. -lLibOVRRT32_1 -lglfw3 -lopengl32 -lgdi32 -std=c99
*
* This example has been created using raylib 1.5 (www.raylib.com)
* raylib is licensed under an unmodified zlib/libpng license (View raylib.h for details)
*
* Copyright (c) 2015 Ramon Santamaria (@raysan5)
*
********************************************************************************************/
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "glad.h" // Extensions loading library
#include <GLFW/glfw3.h> // Windows/Context and inputs management
#include "OculusSDK/LibOVR/Include/OVR_CAPI_GL.h" // Oculus SDK for OpenGL
#define RLGL_STANDALONE
#include "rlgl.h"
// OVR device variables
ovrSession session;
ovrHmdDesc hmdDesc;
ovrGraphicsLuid luid;
// OVR OpenGL required variables
GLuint fbo = 0;
GLuint depthBuffer = 0;
ovrTextureSwapChain eyeTexture;
GLuint mirrorFbo = 0;
ovrMirrorTexture mirrorTexture;
ovrEyeRenderDesc eyeRenderDescs[2];
Matrix eyeProjections[2];
ovrLayerEyeFov eyeLayer;
ovrViewScaleDesc viewScaleDesc;
Vector2 renderTargetSize = { 0, 0 };
Vector2 mirrorSize;
unsigned int frame = 0;
// GLFW variables
GLFWwindow *window = NULL;
//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
typedef enum { LOG_INFO = 0, LOG_ERROR, LOG_WARNING, LOG_DEBUG, LOG_OTHER } TraceLogType;
//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
static void ErrorCallback(int error, const char* description)
{
fputs(description, stderr);
}
static void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
{
glfwSetWindowShouldClose(window, GL_TRUE);
}
}
static void DrawRectangleV(Vector2 position, Vector2 size, Color color);
static void TraceLog(int msgType, const char *text, ...);
static Matrix FromOvrMatrix(ovrMatrix4f ovrM);
void DrawGrid(int slices, float spacing);
void DrawCube(Vector3 position, float width, float height, float length, Color color);
//----------------------------------------------------------------------------------
// Main Entry point
//----------------------------------------------------------------------------------
int main()
{
ovrResult result = ovr_Initialize(NULL);
if (OVR_FAILURE(result)) TraceLog(LOG_ERROR, "OVR: Could not initialize Oculus device");
result = ovr_Create(&session, &luid);
if (OVR_FAILURE(result))
{
TraceLog(LOG_WARNING, "OVR: Could not create Oculus session");
ovr_Shutdown();
}
hmdDesc = ovr_GetHmdDesc(session);
TraceLog(LOG_INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
TraceLog(LOG_INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
TraceLog(LOG_INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
TraceLog(LOG_INFO, "OVR: Product Type: %i", hmdDesc.Type);
TraceLog(LOG_INFO, "OVR: Serian Number: %s", hmdDesc.SerialNumber);
TraceLog(LOG_INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
memset(&eyeLayer, 0, sizeof(ovrLayerEyeFov));
eyeLayer.Header.Type = ovrLayerType_EyeFov;
eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft;
for (int eye = 0; eye < 2; eye++)
{
eyeRenderDescs[eye] = ovr_GetRenderDesc(session, eye, hmdDesc.DefaultEyeFov[eye]);
ovrMatrix4f ovrPerspectiveProjection = ovrMatrix4f_Projection(eyeRenderDescs[eye].Fov, 0.01f, 1000.0f, ovrProjection_ClipRangeOpenGL);
// NOTE struct ovrMatrix4f { float M[4][4] }
eyeProjections[eye] = FromOvrMatrix(ovrPerspectiveProjection);
viewScaleDesc.HmdToEyeOffset[eye] = eyeRenderDescs[eye].HmdToEyeOffset;
eyeLayer.Fov[eye] = eyeRenderDescs[eye].Fov;
ovrSizei eyeSize = ovr_GetFovTextureSize(session, eye, eyeLayer.Fov[eye], 1.0f);
eyeLayer.Viewport[eye].Size = eyeSize;
eyeLayer.Viewport[eye].Pos.x = renderTargetSize.x;
eyeLayer.Viewport[eye].Pos.y = 0;
renderTargetSize.y = eyeSize.h; //std::max(renderTargetSize.y, (uint32_t)eyeSize.h);
renderTargetSize.x += eyeSize.w;
}
// Make the on screen window 1/2 the resolution of the device
mirrorSize.x = hmdDesc.Resolution.w/2;
mirrorSize.y = hmdDesc.Resolution.h/2;
// GLFW3 Initialization + OpenGL 3.3 Context + Extensions
//--------------------------------------------------------
if (!glfwInit())
{
TraceLog(LOG_WARNING, "GLFW3: Can not initialize GLFW");
exit(EXIT_FAILURE);
}
else TraceLog(LOG_INFO, "GLFW3: GLFW initialized successfully");
glfwWindowHint(GLFW_DEPTH_BITS, 16);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GL_TRUE);
//glfwWindowHint(GLFW_DECORATED, GL_FALSE); // Mandatory on Oculus Rift to avoid program crash? --> NO
window = glfwCreateWindow(mirrorSize.x, mirrorSize.y, "raylib oculus sample", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
else TraceLog(LOG_INFO, "GLFW3: Window created successfully");
glfwSetErrorCallback(ErrorCallback);
glfwSetKeyCallback(window, KeyCallback);
glfwMakeContextCurrent(window);
glfwSwapInterval(0);
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
TraceLog(LOG_WARNING, "GLAD: Cannot load OpenGL extensions");
exit(1);
}
else TraceLog(LOG_INFO, "GLAD: OpenGL extensions loaded successfully");
// Initialize OVR OpenGL swap chain textures
ovrTextureSwapChainDesc desc = {};
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
desc.Width = renderTargetSize.x;
desc.Height = renderTargetSize.y;
desc.MipLevels = 1;
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
desc.SampleCount = 1;
desc.StaticImage = ovrFalse;
result = ovr_CreateTextureSwapChainGL(session, &desc, &eyeTexture);
eyeLayer.ColorTexture[0] = eyeTexture;
if (!OVR_SUCCESS(result)) TraceLog(LOG_WARNING, "Failed to create swap textures");
int length = 0;
result = ovr_GetTextureSwapChainLength(session, eyeTexture, &length);
if (!OVR_SUCCESS(result) || !length) TraceLog(LOG_WARNING, "Unable to count swap chain textures");
for (int i = 0; i < length; ++i)
{
GLuint chainTexId;
ovr_GetTextureSwapChainBufferGL(session, eyeTexture, i, &chainTexId);
glBindTexture(GL_TEXTURE_2D, chainTexId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
glBindTexture(GL_TEXTURE_2D, 0);
// Setup framebuffer object
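// NOTE: A single FBO with a shared depth renderbuffer is used for both eyes;
// its color attachment is re-pointed to the current swap chain texture every frame (see draw loop below)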
glGenFramebuffers(1, &fbo);
glGenRenderbuffers(1, &depthBuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
glBindRenderbuffer(GL_RENDERBUFFER, depthBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, renderTargetSize.x, renderTargetSize.y);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthBuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
// Setup mirror texture
ovrMirrorTextureDesc mirrorDesc;
memset(&mirrorDesc, 0, sizeof(mirrorDesc));
mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
mirrorDesc.Width = mirrorSize.x;
mirrorDesc.Height = mirrorSize.y;
if (!OVR_SUCCESS(ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirrorTexture))) TraceLog(LOG_WARNING, "Could not create mirror texture");
glGenFramebuffers(1, &mirrorFbo);
// Recenter OVR tracking origin
ovr_RecenterTrackingOrigin(session);
// Initialize rlgl internal buffers and OpenGL state
rlglInit();
rlglInitGraphics(0, 0, mirrorSize.x, mirrorSize.y);
rlClearColor(245, 245, 245, 255); // Define clear color
glEnable(GL_DEPTH_TEST);
Vector2 position = { mirrorSize.x/2 - 100, mirrorSize.y/2 - 100 };
Vector2 size = { 200, 200 };
Color color = { 180, 20, 20, 255 };
Vector3 cubePosition = { 0.0f, 0.0f, 0.0f };
while (!glfwWindowShouldClose(window))
{
// Update
//----------------------------------------------------------------------------------
frame++;
ovrPosef eyePoses[2];
ovr_GetEyePoses(session, frame, ovrTrue, viewScaleDesc.HmdToEyeOffset, eyePoses, &eyeLayer.SensorSampleTime);
//----------------------------------------------------------------------------------
// Draw
//----------------------------------------------------------------------------------
int curIndex;
ovr_GetTextureSwapChainCurrentIndex(session, eyeTexture, &curIndex);
GLuint curTexId;
ovr_GetTextureSwapChainBufferGL(session, eyeTexture, curIndex, &curTexId);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, curTexId, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (int eye = 0; eye < 2; eye++)
{
glViewport(eyeLayer.Viewport[eye].Pos.x, eyeLayer.Viewport[eye].Pos.y,
eyeLayer.Viewport[eye].Size.w, eyeLayer.Viewport[eye].Size.h);
eyeLayer.RenderPose[eye] = eyePoses[eye];
// Convert struct ovrPosef { ovrQuatf Orientation; ovrVector3f Position; } to Matrix
// TODO: Review maths!
Matrix eyeOrientation = QuaternionToMatrix((Quaternion){ -eyePoses[eye].Orientation.x, -eyePoses[eye].Orientation.y, -eyePoses[eye].Orientation.z, -eyePoses[eye].Orientation.w });
Matrix eyePosition = MatrixTranslate(-eyePoses[eye].Position.x, -eyePoses[eye].Position.y, -eyePoses[eye].Position.z);
Matrix mvp = MatrixMultiply(eyeProjections[eye], MatrixMultiply(eyeOrientation, eyePosition));
// NOTE: Nothing is drawn until rlglDraw()
DrawRectangleV(position, size, color);
//DrawCube(cubePosition, 2.0f, 2.0f, 2.0f, color);
//DrawGrid(10, 1.0f);
// NOTE: rlglDraw() must be modified to support an external modelview-projection matrix
// TODO: Still working on it (now uses internal mvp)
rlglDraw(mvp);
}
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
ovr_CommitTextureSwapChain(session, eyeTexture);
ovrLayerHeader *headerList = &eyeLayer.Header;
ovr_SubmitFrame(session, frame, &viewScaleDesc, &headerList, 1);
// Blit mirror texture to back buffer
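// NOTE: Destination Y coordinates are swapped in the blit to flip the image vertically,
// presumably because the OVR mirror texture row order is inverted with respect to the default framebuffer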
GLuint mirrorTextureId;
ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &mirrorTextureId);
glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFbo);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mirrorTextureId, 0);
glBlitFramebuffer(0, 0, mirrorSize.x, mirrorSize.y, 0, mirrorSize.y, mirrorSize.x, 0, GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glfwSwapBuffers(window);
glfwPollEvents();
//----------------------------------------------------------------------------------
}
// De-Initialization
//--------------------------------------------------------------------------------------
if (mirrorFbo) glDeleteFramebuffers(1, &mirrorFbo);
if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture);
if (fbo) glDeleteFramebuffers(1, &fbo);
if (depthBuffer) glDeleteTextures(1, &depthBuffer);
if (eyeTexture) ovr_DestroyTextureSwapChain(session, eyeTexture);
rlglClose();
glfwDestroyWindow(window);
glfwTerminate();
ovr_Destroy(session); // Must be called after glfwTerminate()
ovr_Shutdown();
//--------------------------------------------------------------------------------------
return 0;
}
//----------------------------------------------------------------------------------
// Module specific Functions Definitions
//----------------------------------------------------------------------------------
// Draw rectangle using rlgl OpenGL 1.1 style coding (translated to OpenGL 3.3 internally)
static void DrawRectangleV(Vector2 position, Vector2 size, Color color)
{
rlBegin(RL_TRIANGLES);
rlColor4ub(color.r, color.g, color.b, color.a);
rlVertex2i(position.x, position.y);
rlVertex2i(position.x, position.y + size.y);
rlVertex2i(position.x + size.x, position.y + size.y);
rlVertex2i(position.x, position.y);
rlVertex2i(position.x + size.x, position.y + size.y);
rlVertex2i(position.x + size.x, position.y);
rlEnd();
}
// Output a trace log message
// NOTE: Expected msgType: (0)Info, (1)Error, (2)Warning
static void TraceLog(int msgType, const char *text, ...)
{
va_list args;
va_start(args, text);
switch(msgType)
{
case LOG_INFO: fprintf(stdout, "INFO: "); break;
case LOG_ERROR: fprintf(stdout, "ERROR: "); break;
case LOG_WARNING: fprintf(stdout, "WARNING: "); break;
case LOG_DEBUG: fprintf(stdout, "DEBUG: "); break;
default: break;
}
vfprintf(stdout, text, args);
fprintf(stdout, "\n");
va_end(args);
//if (msgType == LOG_ERROR) exit(1);
}
static Matrix FromOvrMatrix(ovrMatrix4f ovrmat)
{
Matrix rmat;
rmat.m0 = ovrmat.M[0][0];
rmat.m1 = ovrmat.M[1][0];
rmat.m2 = ovrmat.M[2][0];
rmat.m3 = ovrmat.M[3][0];
rmat.m4 = ovrmat.M[0][1];
rmat.m5 = ovrmat.M[1][1];
rmat.m6 = ovrmat.M[2][1];
rmat.m7 = ovrmat.M[3][1];
rmat.m8 = ovrmat.M[0][2];
rmat.m9 = ovrmat.M[1][2];
rmat.m10 = ovrmat.M[2][2];
rmat.m11 = ovrmat.M[3][2];
rmat.m12 = ovrmat.M[0][3];
rmat.m13 = ovrmat.M[1][3];
rmat.m14 = ovrmat.M[2][3];
rmat.m15 = ovrmat.M[3][3];
//MatrixTranspose(&rmat);
return rmat;
}
// Draw cube
// NOTE: Cube position is the center position
void DrawCube(Vector3 position, float width, float height, float length, Color color)
{
float x = 0.0f;
float y = 0.0f;
float z = 0.0f;
rlPushMatrix();
// NOTE: Be careful! Function order matters (rotate -> scale -> translate)
rlTranslatef(position.x, position.y, position.z);
//rlScalef(2.0f, 2.0f, 2.0f);
//rlRotatef(45, 0, 1, 0);
rlBegin(RL_TRIANGLES);
rlColor4ub(color.r, color.g, color.b, color.a);
// Front Face -----------------------------------------------------
rlVertex3f(x-width/2, y-height/2, z+length/2); // Bottom Left
rlVertex3f(x+width/2, y-height/2, z+length/2); // Bottom Right
rlVertex3f(x-width/2, y+height/2, z+length/2); // Top Left
rlVertex3f(x+width/2, y+height/2, z+length/2); // Top Right
rlVertex3f(x-width/2, y+height/2, z+length/2); // Top Left
rlVertex3f(x+width/2, y-height/2, z+length/2); // Bottom Right
// Back Face ------------------------------------------------------
rlVertex3f(x-width/2, y-height/2, z-length/2); // Bottom Left
rlVertex3f(x-width/2, y+height/2, z-length/2); // Top Left
rlVertex3f(x+width/2, y-height/2, z-length/2); // Bottom Right
rlVertex3f(x+width/2, y+height/2, z-length/2); // Top Right
rlVertex3f(x+width/2, y-height/2, z-length/2); // Bottom Right
rlVertex3f(x-width/2, y+height/2, z-length/2); // Top Left
// Top Face -------------------------------------------------------
rlVertex3f(x-width/2, y+height/2, z-length/2); // Top Left
rlVertex3f(x-width/2, y+height/2, z+length/2); // Bottom Left
rlVertex3f(x+width/2, y+height/2, z+length/2); // Bottom Right
rlVertex3f(x+width/2, y+height/2, z-length/2); // Top Right
rlVertex3f(x-width/2, y+height/2, z-length/2); // Top Left
rlVertex3f(x+width/2, y+height/2, z+length/2); // Bottom Right
// Bottom Face ----------------------------------------------------
rlVertex3f(x-width/2, y-height/2, z-length/2); // Top Left
rlVertex3f(x+width/2, y-height/2, z+length/2); // Bottom Right
rlVertex3f(x-width/2, y-height/2, z+length/2); // Bottom Left
rlVertex3f(x+width/2, y-height/2, z-length/2); // Top Right
rlVertex3f(x+width/2, y-height/2, z+length/2); // Bottom Right
rlVertex3f(x-width/2, y-height/2, z-length/2); // Top Left
// Right face -----------------------------------------------------
rlVertex3f(x+width/2, y-height/2, z-length/2); // Bottom Right
rlVertex3f(x+width/2, y+height/2, z-length/2); // Top Right
rlVertex3f(x+width/2, y+height/2, z+length/2); // Top Left
rlVertex3f(x+width/2, y-height/2, z+length/2); // Bottom Left
rlVertex3f(x+width/2, y-height/2, z-length/2); // Bottom Right
rlVertex3f(x+width/2, y+height/2, z+length/2); // Top Left
// Left Face ------------------------------------------------------
rlVertex3f(x-width/2, y-height/2, z-length/2); // Bottom Right
rlVertex3f(x-width/2, y+height/2, z+length/2); // Top Left
rlVertex3f(x-width/2, y+height/2, z-length/2); // Top Right
rlVertex3f(x-width/2, y-height/2, z+length/2); // Bottom Left
rlVertex3f(x-width/2, y+height/2, z+length/2); // Top Left
rlVertex3f(x-width/2, y-height/2, z-length/2); // Bottom Right
rlEnd();
rlPopMatrix();
}
// Draw a grid centered at (0, 0, 0)
void DrawGrid(int slices, float spacing)
{
int halfSlices = slices / 2;
rlBegin(RL_LINES);
for(int i = -halfSlices; i <= halfSlices; i++)
{
if (i == 0)
{
rlColor3f(0.5f, 0.5f, 0.5f);
rlColor3f(0.5f, 0.5f, 0.5f);
rlColor3f(0.5f, 0.5f, 0.5f);
rlColor3f(0.5f, 0.5f, 0.5f);
}
else
{
rlColor3f(0.75f, 0.75f, 0.75f);
rlColor3f(0.75f, 0.75f, 0.75f);
rlColor3f(0.75f, 0.75f, 0.75f);
rlColor3f(0.75f, 0.75f, 0.75f);
}
rlVertex3f((float)i*spacing, 0.0f, (float)-halfSlices*spacing);
rlVertex3f((float)i*spacing, 0.0f, (float)halfSlices*spacing);
rlVertex3f((float)-halfSlices*spacing, 0.0f, (float)i*spacing);
rlVertex3f((float)halfSlices*spacing, 0.0f, (float)i*spacing);
}
rlEnd();
}


@ -0,0 +1,498 @@
/*******************************************************************************************
*
* raylib Oculus minimum sample (OpenGL 3.3 Core)
*
* NOTE: This example requires raylib module [rlgl]
*
* Compile rlgl using:
* gcc -c rlgl.c -Wall -std=c99 -DRLGL_STANDALONE -DRAYMATH_IMPLEMENTATION -DGRAPHICS_API_OPENGL_33
*
* Compile example using:
* gcc -o oculus_glfw_sample.exe oculus_glfw_sample.c rlgl.o glad.o -L. -lLibOVRRT32_1 -lglfw3 -lopengl32 -lgdi32 -std=c99
*
* This example has been created using raylib 1.5 (www.raylib.com)
* raylib is licensed under an unmodified zlib/libpng license (View raylib.h for details)
*
* Copyright (c) 2015 Ramon Santamaria (@raysan5)
*
********************************************************************************************/
#if defined(_WIN32)
#define GLFW_EXPOSE_NATIVE_WIN32
#define GLFW_EXPOSE_NATIVE_WGL
#define OVR_OS_WIN32
#elif defined(__APPLE__)
#define GLFW_EXPOSE_NATIVE_COCOA
#define GLFW_EXPOSE_NATIVE_NSGL
#define OVR_OS_MAC
#elif defined(__linux__)
#define GLFW_EXPOSE_NATIVE_X11
#define GLFW_EXPOSE_NATIVE_GLX
#define OVR_OS_LINUX
#endif
#include "glad.h" // Extensions loading library
#include <GLFW/glfw3.h>
#include <GLFW/glfw3native.h>
#include "OculusSDK/LibOVR/Include/OVR_CAPI_GL.h" // Oculus SDK for OpenGL
//#include "GL/CAPI_GLE.h" // stripped-down GLEW/GLAD library to manage extensions (really required?)
//#include "Extras/OVR_Math.h" // math utilities C++ (really required?)
#define RLGL_STANDALONE
#include "rlgl.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>     // Required for: memset()
#include <stdarg.h>     // Required for: va_list, va_start(), va_end()
//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
typedef struct OculusBuffer {
ovrTextureSwapChain textureChain;
GLuint depthId;
GLuint fboId;
int width;
int height;
} OculusBuffer;
typedef enum { LOG_INFO = 0, LOG_ERROR, LOG_WARNING, LOG_DEBUG, LOG_OTHER } TraceLogType;
//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height);
static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer);
static void SetOculusBuffer(ovrSession session, OculusBuffer buffer);
static void UnsetOculusBuffer(OculusBuffer buffer);
static void ErrorCallback(int error, const char* description)
{
fputs(description, stderr);
}
static void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
{
glfwSetWindowShouldClose(window, GL_TRUE);
}
}
static void DrawRectangleV(Vector2 position, Vector2 size, Color color);
static void TraceLog(int msgType, const char *text, ...);
//----------------------------------------------------------------------------------
// Main Entry point
//----------------------------------------------------------------------------------
int main()
{
// Initialization
//--------------------------------------------------------------------------------------
ovrSession session;
ovrGraphicsLuid luid; // Useless for OpenGL since SDK 0.7
ovrHmdDesc hmdDesc;
ovrResult result = ovr_Initialize(NULL);
if (OVR_FAILURE(result)) TraceLog(LOG_ERROR, "OVR: Could not initialize Oculus device");
result = ovr_Create(&session, &luid);
if (OVR_FAILURE(result))
{
TraceLog(LOG_WARNING, "OVR: Could not create Oculus session");
ovr_Shutdown();
}
hmdDesc = ovr_GetHmdDesc(session);
TraceLog(LOG_INFO, "OVR: Product Name: %s", hmdDesc.ProductName);
TraceLog(LOG_INFO, "OVR: Manufacturer: %s", hmdDesc.Manufacturer);
TraceLog(LOG_INFO, "OVR: Product ID: %i", hmdDesc.ProductId);
TraceLog(LOG_INFO, "OVR: Product Type: %i", hmdDesc.Type);
TraceLog(LOG_INFO, "OVR: Serial Number: %s", hmdDesc.SerialNumber);
TraceLog(LOG_INFO, "OVR: Resolution: %ix%i", hmdDesc.Resolution.w, hmdDesc.Resolution.h);
int screenWidth = hmdDesc.Resolution.w/2 + 100; // Added 100 pixels for testing
int screenHeight = hmdDesc.Resolution.h/2 + 100; // Added 100 pixels for testing
// GLFW3 Initialization + OpenGL 3.3 Context + Extensions
//--------------------------------------------------------
GLFWwindow *window;
glfwSetErrorCallback(ErrorCallback);
if (!glfwInit())
{
TraceLog(LOG_WARNING, "GLFW3: Cannot initialize GLFW");
exit(EXIT_FAILURE);
}
else TraceLog(LOG_INFO, "GLFW3: GLFW initialized successfully");
glfwWindowHint(GLFW_DEPTH_BITS, 16);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
//glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, GL_TRUE);
glfwWindowHint(GLFW_DECORATED, GL_FALSE); // Mandatory on Oculus Rift to avoid program crash!
window = glfwCreateWindow(screenWidth, screenHeight, "rlgl standalone", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
else TraceLog(LOG_INFO, "GLFW3: Window created successfully");
glfwSetKeyCallback(window, KeyCallback);
glfwMakeContextCurrent(window);
glfwSwapInterval(0);
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
TraceLog(LOG_WARNING, "GLAD: Cannot load OpenGL extensions");
exit(1);
}
else TraceLog(LOG_INFO, "GLAD: OpenGL extensions loaded successfully");
rlglInit();
rlglInitGraphics(0, 0, screenWidth, screenHeight);
rlClearColor(245, 245, 245, 255); // Define clear color
Vector2 position = { screenWidth/2 - 100, screenHeight/2 - 100 };
Vector2 size = { 200, 200 };
Color color = { 180, 20, 20, 255 };
//---------------------------------------------------------------------------
OculusBuffer eyeRenderBuffer[2];
GLuint mirrorFBO = 0;
ovrMirrorTexture mirrorTexture = NULL;
bool isVisible = true;
long long frameIndex = 0;
// Make eyes render buffers
ovrSizei recommendedTexSizeLeft = ovr_GetFovTextureSize(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0], 1.0f);
eyeRenderBuffer[0] = LoadOculusBuffer(session, recommendedTexSizeLeft.w, recommendedTexSizeLeft.h);
ovrSizei recommendedTexSizeRight = ovr_GetFovTextureSize(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1], 1.0f);
eyeRenderBuffer[1] = LoadOculusBuffer(session, recommendedTexSizeRight.w, recommendedTexSizeRight.h);
// Note: the mirror window can be any size, for this sample we use 1/2 the HMD resolution
ovrSizei windowSize = { hmdDesc.Resolution.w/2, hmdDesc.Resolution.h/2 };
// Define mirror texture descriptor
ovrMirrorTextureDesc mirrorDesc;
memset(&mirrorDesc, 0, sizeof(mirrorDesc));
mirrorDesc.Width = windowSize.w;
mirrorDesc.Height = windowSize.h;
mirrorDesc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
// Create mirror texture and an FBO used to copy mirror texture to back buffer
result = ovr_CreateMirrorTextureGL(session, &mirrorDesc, &mirrorTexture);
if (!OVR_SUCCESS(result)) TraceLog(LOG_WARNING, "OVR: Failed to create mirror texture");
// Configure the mirror read buffer
GLuint texId;
ovr_GetMirrorTextureBufferGL(session, mirrorTexture, &texId);
glGenFramebuffers(1, &mirrorFBO);
glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
glFramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texId, 0);
glFramebufferRenderbuffer(GL_READ_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
glDeleteFramebuffers(1, &mirrorFBO);
TraceLog(LOG_WARNING, "OVR: Could not initialize mirror framebuffers");
}
glClearColor(1.0f, 0.1f, 0.1f, 0.0f);
glEnable(GL_DEPTH_TEST);
ovr_RecenterTrackingOrigin(session);
// FloorLevel will give tracking poses where the floor height is 0
ovr_SetTrackingOriginType(session, ovrTrackingOrigin_FloorLevel);
//--------------------------------------------------------------------------------------
// Main loop
while (!glfwWindowShouldClose(window))
{
// Update
//----------------------------------------------------------------------------------
frameIndex++;
// TODO: Update game here!
// Call ovr_GetRenderDesc each frame to get the ovrEyeRenderDesc, as the returned values (e.g. HmdToEyeOffset) may change at runtime.
ovrEyeRenderDesc eyeRenderDesc[2];
eyeRenderDesc[0] = ovr_GetRenderDesc(session, ovrEye_Left, hmdDesc.DefaultEyeFov[0]);
eyeRenderDesc[1] = ovr_GetRenderDesc(session, ovrEye_Right, hmdDesc.DefaultEyeFov[1]);
// Get eye poses, feeding in correct IPD offset
ovrPosef eyeRenderPose[2];
ovrVector3f hmdToEyeOffset[2] = { eyeRenderDesc[0].HmdToEyeOffset, eyeRenderDesc[1].HmdToEyeOffset };
double sensorSampleTime; // sensorSampleTime is fed into the layer later
ovr_GetEyePoses(session, frameIndex, ovrTrue, hmdToEyeOffset, eyeRenderPose, &sensorSampleTime);
//----------------------------------------------------------------------------------
// Draw
//----------------------------------------------------------------------------------
// Clear screen to red color
glClearColor(1.0f, 0.1f, 0.1f, 0.0f);
//glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (isVisible)
{
for (int eye = 0; eye < 2; ++eye)
{
SetOculusBuffer(session, eyeRenderBuffer[eye]);
// TODO: Get view and projection matrices for the eye
// Sample using Oculus OVR_Math.h (C++)
/*
Matrix4f projection[eye] = Matrix4f(ovrMatrix4f_Projection(eyeRenderDesc[eye].Fov, 0.01f, 10000.0f, ovrProjection_None));
Matrix4f eyeOrientation[eye] = Matrix4f(Quatf(eyeRenderPose[eye].Orientation).Inverted());
Matrix4f eyePose[eye] = Matrix4f::Translation(-Vector3f(eyeRenderPose[eye].Position));
Matrix4f mvp = projection[eye]*eyeOrientation[eye]*eyePose[eye];
*/
// Sample using custom raymath.h (C) -INCOMPLETE-
/*
Matrix projection = MatrixPerspective(eyeRenderDesc[eye].Fov, ((double)screenWidth/(double)screenHeight), 0.01, 1000.0);
Matrix eyeOrientation = QuaternionToMatrix((Quaternion){ -eyeRenderPose[eye].Orientation.x, -eyeRenderPose[eye].Orientation.y,
-eyeRenderPose[eye].Orientation.z, -eyeRenderPose[eye].Orientation.w });
Matrix eyePose = MatrixTranslate(-eyeRenderPose[eye].Position.x, -eyeRenderPose[eye].Position.y, -eyeRenderPose[eye].Position.z);
Matrix mvp = MatrixMultiply(projection, MatrixMultiply(eyeOrientation, eyePose));
*/
// Render everything
// TODO: Pass calculated mvp matrix to default shader to consider projection and orientation!
//DrawRectangleV(position, size, color);
//rlglDraw();
UnsetOculusBuffer(eyeRenderBuffer[eye]);
// Commit changes to the textures so they get picked up this frame
ovr_CommitTextureSwapChain(session, eyeRenderBuffer[eye].textureChain);
}
}
// Set up positional data
ovrViewScaleDesc viewScaleDesc;
viewScaleDesc.HmdSpaceToWorldScaleInMeters = 1.0f;
viewScaleDesc.HmdToEyeOffset[0] = hmdToEyeOffset[0];
viewScaleDesc.HmdToEyeOffset[1] = hmdToEyeOffset[1];
// Create the main eye layer
ovrLayerEyeFov eyeLayer;
eyeLayer.Header.Type = ovrLayerType_EyeFov;
eyeLayer.Header.Flags = ovrLayerFlag_TextureOriginAtBottomLeft; // Because OpenGL
for (int eye = 0; eye < 2; eye++)
{
eyeLayer.ColorTexture[eye] = eyeRenderBuffer[eye].textureChain;
eyeLayer.Viewport[eye] = (ovrRecti){ .Pos = { 0, 0 }, .Size = { eyeRenderBuffer[eye].width, eyeRenderBuffer[eye].height } }; // Viewport covers the full per-eye render buffer
eyeLayer.Fov[eye] = hmdDesc.DefaultEyeFov[eye];
eyeLayer.RenderPose[eye] = eyeRenderPose[eye];
eyeLayer.SensorSampleTime = sensorSampleTime;
}
// Append all the layers to global list
ovrLayerHeader *layerList = &eyeLayer.Header;
ovrResult result = ovr_SubmitFrame(session, frameIndex, NULL, &layerList, 1);
// exit the rendering loop if submit returns an error, will retry on ovrError_DisplayLost
if (!OVR_SUCCESS(result)) return 1;
isVisible = (result == ovrSuccess);
// Get session status information
ovrSessionStatus sessionStatus;
ovr_GetSessionStatus(session, &sessionStatus);
if (sessionStatus.ShouldQuit) TraceLog(LOG_WARNING, "OVR: Session should quit.");
if (sessionStatus.ShouldRecenter) ovr_RecenterTrackingOrigin(session);
// Blit mirror texture to back buffer
glBindFramebuffer(GL_READ_FRAMEBUFFER, mirrorFBO);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
GLint w = mirrorDesc.Width;
GLint h = mirrorDesc.Height;
glBlitFramebuffer(0, h, w, 0, 0, 0, w, h, GL_COLOR_BUFFER_BIT, GL_NEAREST);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
glfwSwapBuffers(window);
glfwPollEvents();
//----------------------------------------------------------------------------------
}
// De-Initialization
//--------------------------------------------------------------------------------------
if (mirrorFBO) glDeleteFramebuffers(1, &mirrorFBO);
if (mirrorTexture) ovr_DestroyMirrorTexture(session, mirrorTexture);
for (int eye = 0; eye < 2; eye++) UnloadOculusBuffer(session, eyeRenderBuffer[eye]);
rlglClose();
glfwDestroyWindow(window);
glfwTerminate();
ovr_Destroy(session); // Must be called after glfwTerminate()
ovr_Shutdown();
//--------------------------------------------------------------------------------------
return 0;
}
//----------------------------------------------------------------------------------
// Module specific Functions Definitions
//----------------------------------------------------------------------------------
// Load Oculus required buffers: texture-swap-chain, fbo, texture-depth
static OculusBuffer LoadOculusBuffer(ovrSession session, int width, int height)
{
OculusBuffer buffer;
buffer.width = width;
buffer.height = height;
// Create OVR texture chain
ovrTextureSwapChainDesc desc = {};
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
desc.Width = width;
desc.Height = height;
desc.MipLevels = 1;
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
desc.SampleCount = 1;
desc.StaticImage = ovrFalse;
ovrResult result = ovr_CreateTextureSwapChainGL(session, &desc, &buffer.textureChain);
int textureCount = 0;
ovr_GetTextureSwapChainLength(session, buffer.textureChain, &textureCount);
if (OVR_SUCCESS(result))
{
for (int i = 0; i < textureCount; ++i)
{
GLuint chainTexId;
ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, i, &chainTexId);
glBindTexture(GL_TEXTURE_2D, chainTexId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
}
// Generate framebuffer
glGenFramebuffers(1, &buffer.fboId);
// Create Depth texture
glGenTextures(1, &buffer.depthId);
glBindTexture(GL_TEXTURE_2D, buffer.depthId);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT16, buffer.width, buffer.height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
return buffer;
}
// Unload texture required buffers
static void UnloadOculusBuffer(ovrSession session, OculusBuffer buffer)
{
if (buffer.textureChain)
{
ovr_DestroyTextureSwapChain(session, buffer.textureChain);
buffer.textureChain = NULL;
}
if (buffer.depthId)
{
glDeleteTextures(1, &buffer.depthId);
buffer.depthId = 0;
}
if (buffer.fboId)
{
glDeleteFramebuffers(1, &buffer.fboId);
buffer.fboId = 0;
}
}
// Set current Oculus buffer
static void SetOculusBuffer(ovrSession session, OculusBuffer buffer)
{
GLuint currentTexId;
int currentIndex;
ovr_GetTextureSwapChainCurrentIndex(session, buffer.textureChain, &currentIndex);
ovr_GetTextureSwapChainBufferGL(session, buffer.textureChain, currentIndex, &currentTexId);
glBindFramebuffer(GL_FRAMEBUFFER, buffer.fboId);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, currentTexId, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, buffer.depthId, 0);
glViewport(0, 0, buffer.width, buffer.height);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_FRAMEBUFFER_SRGB);
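// NOTE: The swap chain textures use an sRGB format (OVR_FORMAT_R8G8B8A8_UNORM_SRGB),
// so sRGB conversion on write is enabled to keep shader output in linear space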
}
// Unset Oculus buffer
static void UnsetOculusBuffer(OculusBuffer buffer)
{
glBindFramebuffer(GL_FRAMEBUFFER, buffer.fboId);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
}
// Draw rectangle using rlgl OpenGL 1.1 style coding (translated to OpenGL 3.3 internally)
static void DrawRectangleV(Vector2 position, Vector2 size, Color color)
{
rlBegin(RL_TRIANGLES);
rlColor4ub(color.r, color.g, color.b, color.a);
rlVertex2i(position.x, position.y);
rlVertex2i(position.x, position.y + size.y);
rlVertex2i(position.x + size.x, position.y + size.y);
rlVertex2i(position.x, position.y);
rlVertex2i(position.x + size.x, position.y + size.y);
rlVertex2i(position.x + size.x, position.y);
rlEnd();
}
// Output a trace log message
// NOTE: Expected msgType: (0)Info, (1)Error, (2)Warning
static void TraceLog(int msgType, const char *text, ...)
{
va_list args;
va_start(args, text);
switch(msgType)
{
case LOG_INFO: fprintf(stdout, "INFO: "); break;
case LOG_ERROR: fprintf(stdout, "ERROR: "); break;
case LOG_WARNING: fprintf(stdout, "WARNING: "); break;
case LOG_DEBUG: fprintf(stdout, "DEBUG: "); break;
default: break;
}
vfprintf(stdout, text, args);
fprintf(stdout, "\n");
va_end(args);
//if (msgType == LOG_ERROR) exit(1);
}

Binary file not shown (new image file added, 213 KiB).


@ -0,0 +1,131 @@
/*******************************************************************************************
*
* raylib [rlgl] example - Using rlgl module as standalone module
*
* NOTE: This example requires OpenGL 3.3 or ES2 versions for shaders support,
* OpenGL 1.1 does not support shaders but it can also be used.
*
* Compile rlgl module using:
* gcc -c rlgl.c -Wall -std=c99 -DRLGL_STANDALONE -DRAYMATH_IMPLEMENTATION -DGRAPHICS_API_OPENGL_33
*
* Compile example using:
* gcc -o $(NAME_PART).exe $(FILE_NAME) rlgl.o glad.o -lglfw3 -lopengl32 -lgdi32 -std=c99
*
* This example has been created using raylib 1.5 (www.raylib.com)
* raylib is licensed under an unmodified zlib/libpng license (View raylib.h for details)
*
* Copyright (c) 2015 Ramon Santamaria (@raysan5)
*
********************************************************************************************/
#include "glad.h"
#include <GLFW/glfw3.h>
#define RLGL_STANDALONE
#include "rlgl.h"
#include <stdlib.h>
#include <stdio.h>
//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
static void ErrorCallback(int error, const char* description)
{
fputs(description, stderr);
}
static void KeyCallback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
{
glfwSetWindowShouldClose(window, GL_TRUE);
}
}
void DrawRectangleV(Vector2 position, Vector2 size, Color color);
//----------------------------------------------------------------------------------
// Main Entry point
//----------------------------------------------------------------------------------
int main(void)
{
const int screenWidth = 800;
const int screenHeight = 450;
GLFWwindow *window;
glfwSetErrorCallback(ErrorCallback);
if (!glfwInit()) exit(EXIT_FAILURE);
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
window = glfwCreateWindow(screenWidth, screenHeight, "rlgl standalone", NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwSetKeyCallback(window, KeyCallback);
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
printf("Cannot load GL extensions.\n");
exit(1);
}
rlglInit();
rlglInitGraphics(0, 0, screenWidth, screenHeight);
rlClearColor(245, 245, 245, 255); // Define clear color
Vector2 position = { screenWidth/2 - 100, screenHeight/2 - 100 };
Vector2 size = { 200, 200 };
Color color = { 180, 20, 20, 255 };
while (!glfwWindowShouldClose(window))
{
rlClearScreenBuffers();
DrawRectangleV(position, size, color);
rlglDraw();
glfwSwapBuffers(window);
glfwPollEvents();
}
rlglClose();
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
//----------------------------------------------------------------------------------
// Module specific Functions Definitions
//----------------------------------------------------------------------------------
void DrawRectangleV(Vector2 position, Vector2 size, Color color)
{
rlBegin(RL_TRIANGLES);
rlColor4ub(color.r, color.g, color.b, color.a);
rlVertex2i(position.x, position.y);
rlVertex2i(position.x, position.y + size.y);
rlVertex2i(position.x + size.x, position.y + size.y);
rlVertex2i(position.x, position.y);
rlVertex2i(position.x + size.x, position.y + size.y);
rlVertex2i(position.x + size.x, position.y);
rlEnd();
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,319 @@
/**********************************************************************************************
*
* rlgl - raylib OpenGL abstraction layer
*
* raylib now uses OpenGL 1.1 style functions (rlVertex) that are mapped to selected OpenGL version:
* OpenGL 1.1 - Direct map rl* -> gl*
* OpenGL 3.3 - Vertex data is stored in VAOs, call rlglDraw() to render
* OpenGL ES 2 - Vertex data is stored in VBOs or VAOs (when available), call rlglDraw() to render
*
* Copyright (c) 2014 Ramon Santamaria (@raysan5)
*
* This software is provided "as-is", without any express or implied warranty. In no event
* will the authors be held liable for any damages arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose, including commercial
* applications, and to alter it and redistribute it freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not claim that you
* wrote the original software. If you use this software in a product, an acknowledgment
* in the product documentation would be appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be misrepresented
* as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*
**********************************************************************************************/
#ifndef RLGL_H
#define RLGL_H
//#define RLGL_STANDALONE // NOTE: To use rlgl as standalone lib, just uncomment this line
#ifndef RLGL_STANDALONE
#include "raylib.h" // Required for typedef(s): Model, Shader, Texture2D
#include "utils.h" // Required for function TraceLog()
#endif
#ifdef RLGL_STANDALONE
#define RAYMATH_STANDALONE
#endif
#include "raymath.h" // Required for types: Vector3, Matrix
// Select desired OpenGL version
// NOTE: Those preprocessor defines are only used on rlgl module,
// if OpenGL version is required by any other module, it uses rlGetVersion()
// Choose opengl version here or just define it at compile time: -DGRAPHICS_API_OPENGL_33
//#define GRAPHICS_API_OPENGL_11 // Only available on PLATFORM_DESKTOP
//#define GRAPHICS_API_OPENGL_33 // Only available on PLATFORM_DESKTOP
//#define GRAPHICS_API_OPENGL_ES2 // Only available on PLATFORM_ANDROID or PLATFORM_RPI or PLATFORM_WEB
// Security check in case no GRAPHICS_API_OPENGL_* defined
#if !defined(GRAPHICS_API_OPENGL_11) && !defined(GRAPHICS_API_OPENGL_33) && !defined(GRAPHICS_API_OPENGL_ES2)
#define GRAPHICS_API_OPENGL_11
#endif
// Security check in case multiple GRAPHICS_API_OPENGL_* defined
#if defined(GRAPHICS_API_OPENGL_11)
#if defined(GRAPHICS_API_OPENGL_33)
#undef GRAPHICS_API_OPENGL_33
#endif
#if defined(GRAPHICS_API_OPENGL_ES2)
#undef GRAPHICS_API_OPENGL_ES2
#endif
#endif
//----------------------------------------------------------------------------------
// Defines and Macros
//----------------------------------------------------------------------------------
#if defined(GRAPHICS_API_OPENGL_11) || defined(GRAPHICS_API_OPENGL_33)
// NOTE: This is the maximum amount of lines, triangles and quads per frame, be careful!
#define MAX_LINES_BATCH 8192
#define MAX_TRIANGLES_BATCH 4096
#define MAX_QUADS_BATCH 4096
#elif defined(GRAPHICS_API_OPENGL_ES2)
// NOTE: Reduce memory sizes for embedded systems (RPI and HTML5)
// NOTE: On HTML5 (emscripten) this is allocated on heap, by default it's only 16MB!...just take care...
#define MAX_LINES_BATCH 1024 // Critical for wire shapes (sphere)
#define MAX_TRIANGLES_BATCH 2048 // Critical for some shapes (sphere)
#define MAX_QUADS_BATCH 1024 // Be careful with text, every letter maps a quad
#endif
//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------
typedef enum { RL_PROJECTION, RL_MODELVIEW, RL_TEXTURE } MatrixMode;
typedef enum { RL_LINES, RL_TRIANGLES, RL_QUADS } DrawMode;
typedef enum { OPENGL_11 = 1, OPENGL_33, OPENGL_ES_20 } GlVersion;
#if defined(RLGL_STANDALONE)
#ifndef __cplusplus
// Boolean type
typedef enum { false, true } bool;
#endif
// byte type
typedef unsigned char byte;
// Color type, RGBA (32bit)
typedef struct Color {
unsigned char r;
unsigned char g;
unsigned char b;
unsigned char a;
} Color;
// Texture formats (support depends on OpenGL version)
typedef enum {
UNCOMPRESSED_GRAYSCALE = 1, // 8 bit per pixel (no alpha)
UNCOMPRESSED_GRAY_ALPHA,
UNCOMPRESSED_R5G6B5, // 16 bpp
UNCOMPRESSED_R8G8B8, // 24 bpp
UNCOMPRESSED_R5G5B5A1, // 16 bpp (1 bit alpha)
UNCOMPRESSED_R4G4B4A4, // 16 bpp (4 bit alpha)
UNCOMPRESSED_R8G8B8A8, // 32 bpp
COMPRESSED_DXT1_RGB, // 4 bpp (no alpha)
COMPRESSED_DXT1_RGBA, // 4 bpp (1 bit alpha)
COMPRESSED_DXT3_RGBA, // 8 bpp
COMPRESSED_DXT5_RGBA, // 8 bpp
COMPRESSED_ETC1_RGB, // 4 bpp
COMPRESSED_ETC2_RGB, // 4 bpp
COMPRESSED_ETC2_EAC_RGBA, // 8 bpp
COMPRESSED_PVRT_RGB, // 4 bpp
COMPRESSED_PVRT_RGBA, // 4 bpp
COMPRESSED_ASTC_4x4_RGBA, // 8 bpp
COMPRESSED_ASTC_8x8_RGBA // 2 bpp
} TextureFormat;
// Bounding box type
typedef struct BoundingBox {
Vector3 min;
Vector3 max;
} BoundingBox;
// Mesh with vertex data type
// NOTE: If using OpenGL 1.1, data loaded in CPU; if OpenGL 3.3+ data loaded in GPU (vaoId)
typedef struct Mesh {
int vertexCount; // num vertices
float *vertices; // vertex position (XYZ - 3 components per vertex)
float *texcoords; // vertex texture coordinates (UV - 2 components per vertex)
float *texcoords2; // vertex second texture coordinates (useful for lightmaps)
float *normals; // vertex normals (XYZ - 3 components per vertex)
float *tangents; // vertex tangents (XYZ - 3 components per vertex)
unsigned char *colors; // vertex colors (RGBA - 4 components per vertex)
BoundingBox bounds; // mesh limits defined by min and max points
unsigned int vaoId; // OpenGL Vertex Array Object id
unsigned int vboId[6]; // OpenGL Vertex Buffer Objects id (6 types of vertex data)
} Mesh;
// Shader type
typedef struct Shader {
unsigned int id; // Shader program id
// Variable attributes
int vertexLoc; // Vertex attribute location point (vertex shader)
int texcoordLoc; // Texcoord attribute location point (vertex shader)
int normalLoc; // Normal attribute location point (vertex shader)
int colorLoc; // Color attribute location point (vertex shader)
// Uniforms
int mvpLoc; // ModelView-Projection matrix uniform location point (vertex shader)
int tintColorLoc; // Color uniform location point (fragment shader)
int mapDiffuseLoc; // Diffuse map texture uniform location point (fragment shader)
int mapNormalLoc; // Normal map texture uniform location point (fragment shader)
int mapSpecularLoc; // Specular map texture uniform location point (fragment shader)
} Shader;
// Texture2D type
// NOTE: Data stored in GPU memory
typedef struct Texture2D {
unsigned int id; // OpenGL texture id
int width; // Texture base width
int height; // Texture base height
int mipmaps; // Mipmap levels, 1 by default
int format; // Data format (TextureFormat)
} Texture2D;
// RenderTexture2D type, for texture rendering
typedef struct RenderTexture2D {
unsigned int id; // Render texture (fbo) id
Texture2D texture; // Color buffer attachment texture
Texture2D depth; // Depth buffer attachment texture
} RenderTexture2D;
// Material type
typedef struct Material {
Shader shader;
Texture2D texDiffuse; // Diffuse texture
Texture2D texNormal; // Normal texture
Texture2D texSpecular; // Specular texture
Color colDiffuse;
Color colAmbient;
Color colSpecular;
float glossiness;
float normalDepth;
} Material;
// 3d Model type
typedef struct Model {
Mesh mesh;
Matrix transform;
Material material;
} Model;
// Color blending modes (pre-defined)
typedef enum { BLEND_ALPHA = 0, BLEND_ADDITIVE, BLEND_MULTIPLIED } BlendMode;
#endif
#ifdef __cplusplus
extern "C" { // Prevents name mangling of functions
#endif
//------------------------------------------------------------------------------------
// Functions Declaration - Matrix operations
//------------------------------------------------------------------------------------
void rlMatrixMode(int mode); // Choose the current matrix to be transformed
void rlPushMatrix(void); // Push the current matrix to stack
void rlPopMatrix(void); // Pop latest inserted matrix from stack
void rlLoadIdentity(void); // Reset current matrix to identity matrix
void rlTranslatef(float x, float y, float z); // Multiply the current matrix by a translation matrix
void rlRotatef(float angleDeg, float x, float y, float z); // Multiply the current matrix by a rotation matrix
void rlScalef(float x, float y, float z); // Multiply the current matrix by a scaling matrix
void rlMultMatrixf(float *mat); // Multiply the current matrix by another matrix
void rlFrustum(double left, double right, double bottom, double top, double near, double far);
void rlOrtho(double left, double right, double bottom, double top, double near, double far);
//------------------------------------------------------------------------------------
// Functions Declaration - Vertex level operations
//------------------------------------------------------------------------------------
void rlBegin(int mode); // Initialize drawing mode (how to organize vertex)
void rlEnd(void); // Finish vertex providing
void rlVertex2i(int x, int y); // Define one vertex (position) - 2 int
void rlVertex2f(float x, float y); // Define one vertex (position) - 2 float
void rlVertex3f(float x, float y, float z); // Define one vertex (position) - 3 float
void rlTexCoord2f(float x, float y); // Define one vertex (texture coordinate) - 2 float
void rlNormal3f(float x, float y, float z); // Define one vertex (normal) - 3 float
void rlColor4ub(byte r, byte g, byte b, byte a); // Define one vertex (color) - 4 byte
void rlColor3f(float x, float y, float z); // Define one vertex (color) - 3 float
void rlColor4f(float x, float y, float z, float w); // Define one vertex (color) - 4 float
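// Minimal usage sketch (illustration only, not part of the API declared here): batch one colored
// triangle with the vertex-level calls above and flush it with rlglDraw(); it assumes rlglInit()
// has already been called on a valid OpenGL context and that an 'mvp' matrix was computed elsewhere.
//
//      rlBegin(RL_TRIANGLES);
//          rlColor4ub(230, 41, 55, 255);   // Current color applies to the following vertices
//          rlVertex2i(400, 100);
//          rlVertex2i(200, 350);
//          rlVertex2i(600, 350);
//      rlEnd();
//
//      rlglDraw(mvp);                      // Render batched vertex data using the provided MVP matrix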
//------------------------------------------------------------------------------------
// Functions Declaration - OpenGL equivalent functions (common to 1.1, 3.3+, ES2)
// NOTE: These functions are used to completely abstract raylib code from the OpenGL layer
//------------------------------------------------------------------------------------
void rlEnableTexture(unsigned int id); // Enable texture usage
void rlDisableTexture(void); // Disable texture usage
void rlEnableRenderTexture(unsigned int id); // Enable render texture (fbo)
void rlDisableRenderTexture(void); // Disable render texture (fbo), return to default framebuffer
void rlEnableDepthTest(void); // Enable depth test
void rlDisableDepthTest(void); // Disable depth test
void rlDeleteTextures(unsigned int id); // Delete OpenGL texture from GPU
void rlDeleteRenderTextures(RenderTexture2D target); // Delete render textures (fbo) from GPU
void rlDeleteShader(unsigned int id); // Delete OpenGL shader program from GPU
void rlDeleteVertexArrays(unsigned int id); // Unload vertex data (VAO) from GPU memory
void rlDeleteBuffers(unsigned int id); // Unload vertex data (VBO) from GPU memory
void rlClearColor(byte r, byte g, byte b, byte a); // Clear color buffer with color
void rlClearScreenBuffers(void); // Clear used screen buffers (color and depth)
int rlGetVersion(void); // Returns current OpenGL version
//------------------------------------------------------------------------------------
// Functions Declaration - rlgl functionality
//------------------------------------------------------------------------------------
void rlglInit(void); // Initialize rlgl (shaders, VAO, VBO...)
void rlglClose(void); // De-init rlgl
void rlglDraw(Matrix mvp); // Draw VAO/VBO
void rlglInitGraphics(int offsetX, int offsetY, int width, int height); // Initialize Graphics (OpenGL stuff)
unsigned int rlglLoadTexture(void *data, int width, int height, int textureFormat, int mipmapCount); // Load texture in GPU
RenderTexture2D rlglLoadRenderTexture(int width, int height); // Load a texture to be used for rendering (fbo with color and depth attachments)
void rlglUpdateTexture(unsigned int id, int width, int height, int format, void *data); // Update GPU texture with new data
void rlglGenerateMipmaps(Texture2D texture); // Generate mipmap data for selected texture
// NOTE: There is a set of shader related functions that are available to end user,
// to avoid creating function wrappers through core module, they have been directly declared in raylib.h
Model rlglLoadModel(Mesh mesh); // Upload vertex data into GPU and provided VAO/VBO ids
void rlglDrawModel(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color color, bool wires);
Vector3 rlglUnproject(Vector3 source, Matrix proj, Matrix view); // Get world coordinates from screen coordinates
unsigned char *rlglReadScreenPixels(int width, int height); // Read screen pixel data (color buffer)
void *rlglReadTexturePixels(Texture2D texture); // Read texture pixel data
#if defined(RLGL_STANDALONE)
//------------------------------------------------------------------------------------
// Shaders System Functions (Module: rlgl)
// NOTE: These functions are useless when using OpenGL 1.1
//------------------------------------------------------------------------------------
Shader LoadShader(char *vsFileName, char *fsFileName); // Load a custom shader and bind default locations
unsigned int LoadShaderProgram(char *vShaderStr, char *fShaderStr); // Load custom shader strings and return program id
void UnloadShader(Shader shader); // Unload a custom shader from memory
void SetCustomShader(Shader shader); // Set custom shader to be used in batch draw
void SetDefaultShader(void); // Set default shader to be used in batch draw
void SetModelShader(Model *model, Shader shader); // Link a shader to a model
int GetShaderLocation(Shader shader, const char *uniformName); // Get shader uniform location
void SetShaderValue(Shader shader, int uniformLoc, float *value, int size); // Set shader uniform value (float)
void SetShaderValuei(Shader shader, int uniformLoc, int *value, int size); // Set shader uniform value (int)
void SetShaderValueMatrix(Shader shader, int uniformLoc, Matrix mat); // Set shader uniform value (matrix 4x4)
void SetBlendMode(int mode); // Set blending mode (alpha, additive, multiplied)
#endif
#ifdef __cplusplus
}
#endif
#endif // RLGL_H


@ -30,26 +30,26 @@ int main()
bool isDebug = false;
// Create rectangle physic object
-PhysicObject *rectangle = CreatePhysicObject((Vector2){ screenWidth*0.25f, screenHeight/2 }, 0.0f, (Vector2){ 75, 50 });
+PhysicObject rectangle = CreatePhysicObject((Vector2){ screenWidth*0.25f, screenHeight/2 }, 0.0f, (Vector2){ 75, 50 });
rectangle->rigidbody.enabled = true; // Enable physic object rigidbody behaviour
rectangle->rigidbody.applyGravity = true;
rectangle->rigidbody.friction = 0.1f;
rectangle->rigidbody.bounciness = 6.0f;
// Create square physic object
-PhysicObject *square = CreatePhysicObject((Vector2){ screenWidth*0.75f, screenHeight/2 }, 0.0f, (Vector2){ 50, 50 });
+PhysicObject square = CreatePhysicObject((Vector2){ screenWidth*0.75f, screenHeight/2 }, 0.0f, (Vector2){ 50, 50 });
square->rigidbody.enabled = true; // Enable physic object rigidbody behaviour
square->rigidbody.applyGravity = true;
square->rigidbody.friction = 0.1f;
// Create walls physic objects
-PhysicObject *floor = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight*0.95f }, 0.0f, (Vector2){ screenWidth*0.9f, 100 });
-PhysicObject *leftWall = CreatePhysicObject((Vector2){ 0.0f, screenHeight/2 }, 0.0f, (Vector2){ screenWidth*0.1f, screenHeight });
-PhysicObject *rightWall = CreatePhysicObject((Vector2){ screenWidth, screenHeight/2 }, 0.0f, (Vector2){ screenWidth*0.1f, screenHeight });
-PhysicObject *roof = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight*0.05f }, 0.0f, (Vector2){ screenWidth*0.9f, 100 });
+PhysicObject floor = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight*0.95f }, 0.0f, (Vector2){ screenWidth*0.9f, 100 });
+PhysicObject leftWall = CreatePhysicObject((Vector2){ 0.0f, screenHeight/2 }, 0.0f, (Vector2){ screenWidth*0.1f, screenHeight });
+PhysicObject rightWall = CreatePhysicObject((Vector2){ screenWidth, screenHeight/2 }, 0.0f, (Vector2){ screenWidth*0.1f, screenHeight });
+PhysicObject roof = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight*0.05f }, 0.0f, (Vector2){ screenWidth*0.9f, 100 });
// Create platform physic object
-PhysicObject *platform = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight*0.7f }, 0.0f, (Vector2){ screenWidth*0.25f, 20 });
+PhysicObject platform = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight*0.7f }, 0.0f, (Vector2){ screenWidth*0.25f, 20 });
//--------------------------------------------------------------------------------------
@ -114,7 +114,8 @@ int main()
// De-Initialization
//--------------------------------------------------------------------------------------
-ClosePhysics();       // Unitialize physics module
+ClosePhysics();       // Uninitialize physics (including all loaded objects)
CloseWindow();        // Close window and OpenGL context
//--------------------------------------------------------------------------------------


@ -17,7 +17,7 @@
#define LINE_LENGTH 75
#define TRIANGLE_LENGTH 12
-void DrawRigidbodyCircle(PhysicObject *obj, Color color);
+void DrawRigidbodyCircle(PhysicObject obj, Color color);
int main()
{
@ -36,7 +36,7 @@ int main()
bool isDebug = false;
// Create rectangle physic objects
-PhysicObject *rectangles[3];
+PhysicObject rectangles[3];
for (int i = 0; i < 3; i++)
{
rectangles[i] = CreatePhysicObject((Vector2){ screenWidth/4*(i+1), (((i % 2) == 0) ? (screenHeight/3) : (screenHeight/1.5f)) }, 0.0f, (Vector2){ 50, 50 });
@ -46,7 +46,7 @@ int main()
// Create circles physic objects
// NOTE: when creating circle physic objects, transform.scale must be { 0, 0 } and object radius must be defined in collider.radius and use this value to draw the circle.
-PhysicObject *circles[3];
+PhysicObject circles[3];
for (int i = 0; i < 3; i++)
{
circles[i] = CreatePhysicObject((Vector2){ screenWidth/4*(i+1), (((i % 2) == 0) ? (screenHeight/1.5f) : (screenHeight/4)) }, 0.0f, (Vector2){ 0, 0 });
@ -57,10 +57,10 @@ int main()
}
// Create walls physic objects
-PhysicObject *leftWall = CreatePhysicObject((Vector2){ -25, screenHeight/2 }, 0.0f, (Vector2){ 50, screenHeight });
-PhysicObject *rightWall = CreatePhysicObject((Vector2){ screenWidth + 25, screenHeight/2 }, 0.0f, (Vector2){ 50, screenHeight });
-PhysicObject *topWall = CreatePhysicObject((Vector2){ screenWidth/2, -25 }, 0.0f, (Vector2){ screenWidth, 50 });
-PhysicObject *bottomWall = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight + 25 }, 0.0f, (Vector2){ screenWidth, 50 });
+PhysicObject leftWall = CreatePhysicObject((Vector2){ -25, screenHeight/2 }, 0.0f, (Vector2){ 50, screenHeight });
+PhysicObject rightWall = CreatePhysicObject((Vector2){ screenWidth + 25, screenHeight/2 }, 0.0f, (Vector2){ 50, screenHeight });
+PhysicObject topWall = CreatePhysicObject((Vector2){ screenWidth/2, -25 }, 0.0f, (Vector2){ screenWidth, 50 });
+PhysicObject bottomWall = CreatePhysicObject((Vector2){ screenWidth/2, screenHeight + 25 }, 0.0f, (Vector2){ screenWidth, 50 });
//--------------------------------------------------------------------------------------


@ -1,42 +0,0 @@
#version 330
in vec2 fragTexCoord;
out vec4 fragColor;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
vec4 sum = vec4(0);
vec4 tc = vec4(0);
for (int i = -4; i < 4; i++)
{
for (int j = -3; j < 3; j++)
{
sum += texture2D(texture0, fragTexCoord + vec2(j, i)*0.004) * 0.25;
}
}
if (texture2D(texture0, fragTexCoord).r < 0.3)
{
tc = sum*sum*0.012 + texture2D(texture0, fragTexCoord);
}
else
{
if (texture2D(texture0, fragTexCoord).r < 0.5)
{
tc = sum*sum*0.009 + texture2D(texture0, fragTexCoord);
}
else
{
tc = sum*sum*0.0075 + texture2D(texture0, fragTexCoord);
}
}
fragColor = tc;
}


@ -1,20 +1,26 @@
#version 100
+// Input vertex attributes
attribute vec3 vertexPosition;
attribute vec2 vertexTexCoord;
attribute vec3 vertexNormal;
+attribute vec4 vertexColor;
-varying vec2 fragTexCoord;
+// Input uniform values
uniform mat4 mvpMatrix;
+// Output vertex attributes (to fragment shader)
+varying vec2 fragTexCoord;
+varying vec4 fragColor;
// NOTE: Add here your custom variables
void main()
{
-vec3 normal = vertexNormal;
+// Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord;
+fragColor = vertexColor;
+// Calculate final vertex position
gl_Position = mvpMatrix*vec4(vertexPosition, 1.0);
}


@ -2,8 +2,11 @@
precision mediump float;
+// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord;
+varying vec4 fragColor;
+// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
@ -22,21 +25,13 @@ void main()
}
}
-if (texture2D(texture0, fragTexCoord).r < 0.3)
-{
-tc = sum*sum*0.012 + texture2D(texture0, fragTexCoord);
-}
-else
-{
-if (texture2D(texture0, fragTexCoord).r < 0.5)
-{
-tc = sum*sum*0.009 + texture2D(texture0, fragTexCoord);
-}
-else
-{
-tc = sum*sum*0.0075 + texture2D(texture0, fragTexCoord);
-}
-}
+// Texel color fetching from texture sampler
+vec4 texelColor = texture2D(texture0, fragTexCoord);
+// Calculate final fragment color
+if (texelColor.r < 0.3) tc = sum*sum*0.012 + texelColor;
+else if (texelColor.r < 0.5) tc = sum*sum*0.009 + texelColor;
+else tc = sum*sum*0.0075 + texelColor;
gl_FragColor = tc;
}


@ -0,0 +1,25 @@
#version 100
precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
// Texel color fetching from texture sampler
vec4 texelColor = texture2D(texture0, fragTexCoord)*fragTintColor*fragColor;
// Convert texel color to grayscale using NTSC conversion weights
float gray = dot(texelColor.rgb, vec3(0.299, 0.587, 0.114));
// Calculate final fragment color
gl_FragColor = vec4(gray, gray, gray, texelColor.a);
}
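// Illustrative check of the NTSC weights above (not part of the shader): a pure green texel
// (0.0, 1.0, 0.0) maps to gray = 0.299*0.0 + 0.587*1.0 + 0.114*0.0 = 0.587, while pure blue
// only reaches 0.114, reflecting the eye's lower sensitivity to blue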


@ -1,9 +1,12 @@
#version 330 #version 100
in vec2 fragTexCoord; precision mediump float;
out vec4 fragColor; // Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
@ -17,11 +20,12 @@ float angle = 0.8;
uniform vec2 center = vec2(200.0, 200.0); uniform vec2 center = vec2(200.0, 200.0);
void main (void) void main()
{ {
vec2 texSize = vec2(renderWidth, renderHeight); vec2 texSize = vec2(renderWidth, renderHeight);
vec2 tc = fragTexCoord*texSize; vec2 tc = fragTexCoord*texSize;
tc -= center; tc -= center;
float dist = length(tc); float dist = length(tc);
if (dist < radius) if (dist < radius)
@ -37,5 +41,5 @@ void main (void)
tc += center; tc += center;
vec3 color = texture2D(texture0, tc/texSize).rgb; vec3 color = texture2D(texture0, tc/texSize).rgb;
fragColor = vec4(color, 1.0);; gl_FragColor = vec4(color, 1.0);
} }

View File

@ -1,18 +1,26 @@
#version 330 #version 330
// Input vertex attributes
in vec3 vertexPosition; in vec3 vertexPosition;
in vec2 vertexTexCoord; in vec2 vertexTexCoord;
in vec3 vertexNormal; in vec3 vertexNormal;
in vec4 vertexColor;
out vec2 fragTexCoord; // Input uniform values
uniform mat4 mvpMatrix; uniform mat4 mvpMatrix;
// Output vertex attributes (to fragment shader)
out vec2 fragTexCoord;
out vec4 fragColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
void main() void main()
{ {
// Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord; fragTexCoord = vertexTexCoord;
fragColor = vertexColor;
// Calculate final vertex position
gl_Position = mvpMatrix*vec4(vertexPosition, 1.0); gl_Position = mvpMatrix*vec4(vertexPosition, 1.0);
} }

View File

@ -0,0 +1,38 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
void main()
{
vec4 sum = vec4(0);
vec4 tc = vec4(0);
for (int i = -4; i < 4; i++)
{
for (int j = -3; j < 3; j++)
{
sum += texture(texture0, fragTexCoord + vec2(j, i)*0.004)*0.25;
}
}
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord);
// Calculate final fragment color
if (texelColor.r < 0.3) tc = sum*sum*0.012 + texelColor;
else if (texelColor.r < 0.5) tc = sum*sum*0.009 + texelColor;
else tc = sum*sum*0.0075 + texelColor;
finalColor = tc;
}

View File

@ -0,0 +1,27 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; // Depth texture
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
void main()
{
float zNear = 0.01; // camera z near
float zFar = 10.0; // camera z far
float z = texture(texture0, fragTexCoord).x;
// Linearize depth value
float depth = (2.0*zNear)/(zFar + zNear - z*(zFar - zNear));
// Calculate final fragment color
finalColor = vec4(depth, depth, depth, 1.0f);
}

View File

@ -0,0 +1,26 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
void main()
{
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord)*fragTintColor*fragColor;
// Convert texel color to grayscale using NTSC conversion weights
float gray = dot(texelColor.rgb, vec3(0.299, 0.587, 0.114));
// Calculate final fragment color
finalColor = vec4(gray, gray, gray, texelColor.a);
}

View File

@ -0,0 +1,85 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec3 fragNormal;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
// Light uniform values
uniform vec3 lightAmbientColor = vec3(0.6, 0.3, 0.0);
uniform vec3 lightDiffuseColor = vec3(1.0, 0.5, 0.0);
uniform vec3 lightSpecularColor = vec3(0.0, 1.0, 0.0);
uniform float lightIntensity = 1.0;
uniform float lightSpecIntensity = 1.0;
// Material uniform values
uniform vec3 matAmbientColor = vec3(1.0, 1.0, 1.0);
uniform vec3 matSpecularColor = vec3(1.0, 1.0, 1.0);
uniform float matGlossiness = 50.0;
// World uniform values
uniform vec3 lightPosition;
uniform vec3 cameraPosition;
// Calculate ambient lighting component
vec3 AmbientLighting()
{
return (matAmbientColor*lightAmbientColor);
}
// Calculate diffuse lighting component
vec3 DiffuseLighting(in vec3 N, in vec3 L)
{
// Lambertian reflection calculation
float diffuse = clamp(dot(N, L), 0, 1);
return (fragTintColor.xyz*lightDiffuseColor*lightIntensity*diffuse);
}
// Calculate specular lighting component
vec3 SpecularLighting(in vec3 N, in vec3 L, in vec3 V)
{
float specular = 0.0;
// Calculate specular reflection only if the surface is oriented to the light source
if (dot(N, L) > 0)
{
// Calculate half vector
vec3 H = normalize(L + V);
// Calculate specular intensity
specular = pow(dot(N, H), 3 + matGlossiness);
}
return (matSpecularColor*lightSpecularColor*lightSpecIntensity*specular);
}
void main()
{
// Normalize input vectors
vec3 L = normalize(lightPosition);
vec3 V = normalize(cameraPosition);
vec3 N = normalize(fragNormal);
// Calculate lighting components
vec3 ambient = AmbientLighting();
vec3 diffuse = DiffuseLighting(N, L);
vec3 specular = SpecularLighting(N, L, V);
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord);
// Calculate final fragment color
finalColor = vec4(texelColor.rgb*(ambient + diffuse + specular), texelColor.a);
}

View File

@ -1,25 +1,25 @@
#version 330 #version 330
// Vertex input data // Input vertex attributes
in vec3 vertexPosition; in vec3 vertexPosition;
in vec2 vertexTexCoord; in vec2 vertexTexCoord;
in vec3 vertexNormal; in vec3 vertexNormal;
// Projection and model data // Input uniform values
uniform mat4 mvpMatrix; uniform mat4 mvpMatrix;
uniform mat4 modelMatrix; // Output vertex attributes (to fragment shader)
//uniform mat4 viewMatrix; // Not used
// Attributes to fragment shader
out vec2 fragTexCoord; out vec2 fragTexCoord;
out vec3 fragNormal; out vec3 fragNormal;
// NOTE: Add here your custom variables
uniform mat4 modelMatrix;
void main() void main()
{ {
// Send texture coord to fragment shader // Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord; fragTexCoord = vertexTexCoord;
// Calculate view vector normal from model // Calculate view vector normal from model
mat3 normalMatrix = transpose(inverse(mat3(modelMatrix))); mat3 normalMatrix = transpose(inverse(mat3(modelMatrix)));
fragNormal = normalize(normalMatrix*vertexNormal); fragNormal = normalize(normalMatrix*vertexNormal);

View File

@ -1,27 +1,32 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
const float renderWidth = 1280.0; const float renderWidth = 800.0; // HARDCODED for example!
const float renderHeight = 720.0; const float renderHeight = 480.0; // Use uniforms instead...
float radius = 250.0; float radius = 250.0;
float angle = 0.8; float angle = 0.8;
uniform vec2 center = vec2(200.0, 200.0); uniform vec2 center = vec2(200.0, 200.0);
void main (void) void main()
{ {
vec2 texSize = vec2(renderWidth, renderHeight); vec2 texSize = vec2(renderWidth, renderHeight);
vec2 tc = fragTexCoord*texSize; vec2 tc = fragTexCoord*texSize;
tc -= center; tc -= center;
float dist = length(tc); float dist = length(tc);
if (dist < radius) if (dist < radius)
@ -37,5 +42,5 @@ void main (void)
tc += center; tc += center;
vec3 color = texture(texture0, tc/texSize).rgb; vec3 color = texture(texture0, tc/texSize).rgb;
fragColor = vec4(color, 1.0);; finalColor = vec4(color, 1.0);
} }

View File

@ -1,20 +0,0 @@
#version 330
in vec2 fragTexCoord;
out vec4 fragColor;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
vec4 base = texture2D(texture0, fragTexCoord)*fragTintColor;
// Convert to grayscale using NTSC conversion weights
float gray = dot(base.rgb, vec3(0.299, 0.587, 0.114));
fragColor = vec4(gray, gray, gray, fragTintColor.a);
}

View File

@ -1,76 +0,0 @@
#version 330
// Vertex shader input data
in vec2 fragTexCoord;
in vec3 fragNormal;
// Diffuse data
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Light attributes
uniform vec3 light_ambientColor = vec3(0.6, 0.3, 0.0);
uniform vec3 light_diffuseColor = vec3(1.0, 0.5, 0.0);
uniform vec3 light_specularColor = vec3(0.0, 1.0, 0.0);
uniform float light_intensity = 1.0;
uniform float light_specIntensity = 1.0;
// Material attributes
uniform vec3 mat_ambientColor = vec3(1.0, 1.0, 1.0);
uniform vec3 mat_specularColor = vec3(1.0, 1.0, 1.0);
uniform float mat_glossiness = 50.0;
// World attributes
uniform vec3 lightPos;
uniform vec3 cameraPos;
// Fragment shader output data
out vec4 fragColor;
vec3 AmbientLighting()
{
return (mat_ambientColor*light_ambientColor);
}
vec3 DiffuseLighting(in vec3 N, in vec3 L)
{
// Lambertian reflection calculation
float diffuse = clamp(dot(N, L), 0, 1);
return (fragTintColor.xyz*light_diffuseColor*light_intensity*diffuse);
}
vec3 SpecularLighting(in vec3 N, in vec3 L, in vec3 V)
{
float specular = 0.0;
// Calculate specular reflection only if the surface is oriented to the light source
if (dot(N, L) > 0)
{
// Calculate half vector
vec3 H = normalize(L + V);
// Calculate specular intensity
specular = pow(dot(N, H), 3 + mat_glossiness);
}
return (mat_specularColor*light_specularColor*light_specIntensity*specular);
}
void main()
{
// Normalize input vectors
vec3 L = normalize(lightPos);
vec3 V = normalize(cameraPos);
vec3 N = normalize(fragNormal);
vec3 ambient = AmbientLighting();
vec3 diffuse = DiffuseLighting(N, L);
vec3 specular = SpecularLighting(N, L, V);
// Get base color from texture
vec4 textureColor = texture(texture0, fragTexCoord);
vec3 finalColor = textureColor.rgb;
fragColor = vec4(finalColor * (ambient + diffuse + specular), textureColor.a);
}

View File

@ -1,18 +0,0 @@
#version 330
attribute vec3 vertexPosition;
attribute vec2 vertexTexCoord;
attribute vec4 vertexColor;
uniform mat4 mvpMatrix;
varying vec2 fragTexCoord;
varying vec4 fragTintColor;
void main()
{
fragTexCoord = vertexTexCoord;
fragTintColor = vertexColor;
gl_Position = mvpMatrix*vec4(vertexPosition, 1.0);
}

View File

@ -1,15 +0,0 @@
#version 330
uniform sampler2D texture0;
varying vec2 fragTexCoord;
varying vec4 fragTintColor;
void main()
{
vec4 base = texture2D(texture0, fragTexCoord)*fragTintColor;
// Convert to grayscale using NTSC conversion weights
float gray = dot(base.rgb, vec3(0.299, 0.587, 0.114));
gl_FragColor = vec4(gray, gray, gray, base.a);
}

View File

@ -41,23 +41,23 @@ int main()
// Model initialization // Model initialization
Vector3 position = { 0.0f, 0.0f, 0.0f }; Vector3 position = { 0.0f, 0.0f, 0.0f };
Model model = LoadModel("resources/model/dwarf.obj"); Model model = LoadModel("resources/model/dwarf.obj");
Shader shader = LoadShader("resources/shaders/phong.vs", "resources/shaders/phong.fs"); Shader shader = LoadShader("resources/shaders/glsl330/phong.vs", "resources/shaders/glsl330/phong.fs");
SetModelShader(&model, shader); SetModelShader(&model, shader);
// Shader locations initialization // Shader locations initialization
int lIntensityLoc = GetShaderLocation(shader, "light_intensity"); int lIntensityLoc = GetShaderLocation(shader, "lightIntensity");
int lAmbientLoc = GetShaderLocation(shader, "light_ambientColor"); int lAmbientLoc = GetShaderLocation(shader, "lightAmbientColor");
int lDiffuseLoc = GetShaderLocation(shader, "light_diffuseColor"); int lDiffuseLoc = GetShaderLocation(shader, "lightDiffuseColor");
int lSpecularLoc = GetShaderLocation(shader, "light_specularColor"); int lSpecularLoc = GetShaderLocation(shader, "lightSpecularColor");
int lSpecIntensityLoc = GetShaderLocation(shader, "light_specIntensity"); int lSpecIntensityLoc = GetShaderLocation(shader, "lightSpecIntensity");
int mAmbientLoc = GetShaderLocation(shader, "mat_ambientColor"); int mAmbientLoc = GetShaderLocation(shader, "matAmbientColor");
int mSpecularLoc = GetShaderLocation(shader, "mat_specularColor"); int mSpecularLoc = GetShaderLocation(shader, "matSpecularColor");
int mGlossLoc = GetShaderLocation(shader, "mat_glossiness"); int mGlossLoc = GetShaderLocation(shader, "matGlossiness");
// Camera and light vectors shader locations // Camera and light vectors shader locations
int cameraLoc = GetShaderLocation(shader, "cameraPos"); int cameraLoc = GetShaderLocation(shader, "cameraPosition");
int lightLoc = GetShaderLocation(shader, "lightPos"); int lightLoc = GetShaderLocation(shader, "lightPosition");
// Model and View matrix locations (required for lighting) // Model and View matrix locations (required for lighting)
int modelLoc = GetShaderLocation(shader, "modelMatrix"); int modelLoc = GetShaderLocation(shader, "modelMatrix");
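Note: a minimal sketch of how these relocated uniforms might be updated each frame, assuming raylib's SetShaderValue(shader, location, value, size) helper (the call itself is outside this hunk); the light and camera values shown are placeholder examples:

    // Hypothetical per-frame update for the renamed phong uniforms
    float lightPosition[3] = { 4.0f, 2.0f, 4.0f };          // example light position
    float cameraPosition[3] = { camera.position.x, camera.position.y, camera.position.z };
    SetShaderValue(shader, lightLoc, lightPosition, 3);     // feeds uniform vec3 lightPosition
    SetShaderValue(shader, cameraLoc, cameraPosition, 3);   // feeds uniform vec3 cameraPosition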

View File

@ -36,10 +36,10 @@ int main()
Texture2D texture = LoadTexture("resources/model/dwarf_diffuse.png"); // Load model texture Texture2D texture = LoadTexture("resources/model/dwarf_diffuse.png"); // Load model texture
SetModelTexture(&dwarf, texture); // Bind texture to model SetModelTexture(&dwarf, texture); // Bind texture to model
Vector3 position = { 0.0f, 0.0f, 0.0f }; // Set model position Vector3 position = { 0.0f, 0.0f, 0.0f }; // Set model position
Shader shader = LoadShader("resources/shaders/base.vs", Shader shader = LoadShader("resources/shaders/glsl330/base.vs",
"resources/shaders/swirl.fs"); // Load postpro shader "resources/shaders/glsl330/swirl.fs"); // Load postpro shader
// Get variable (uniform) location on the shader to connect with the program // Get variable (uniform) location on the shader to connect with the program
// NOTE: If uniform variable could not be found in the shader, function returns -1 // NOTE: If uniform variable could not be found in the shader, function returns -1
@ -47,7 +47,8 @@ int main()
float swirlCenter[2] = { (float)screenWidth/2, (float)screenHeight/2 }; float swirlCenter[2] = { (float)screenWidth/2, (float)screenHeight/2 };
SetPostproShader(shader); // Set fullscreen postprocessing shader // Create a RenderTexture2D to be used for render to texture
RenderTexture2D target = LoadRenderTexture(screenWidth, screenHeight);
// Setup orbital camera // Setup orbital camera
SetCameraMode(CAMERA_ORBITAL); // Set an orbital camera mode SetCameraMode(CAMERA_ORBITAL); // Set an orbital camera mode
@ -78,14 +79,23 @@ int main()
BeginDrawing(); BeginDrawing();
ClearBackground(RAYWHITE); ClearBackground(RAYWHITE);
BeginTextureMode(target); // Enable drawing to texture
Begin3dMode(camera); Begin3dMode(camera);
DrawModel(dwarf, position, 2.0f, WHITE); // Draw 3d model with texture DrawModel(dwarf, position, 2.0f, WHITE); // Draw 3d model with texture
DrawGrid(10, 1.0f); // Draw a grid DrawGrid(10, 1.0f); // Draw a grid
End3dMode(); End3dMode();
EndTextureMode(); // End drawing to texture (now we have a texture available for next passes)
SetCustomShader(shader);
// NOTE: Render texture must be y-flipped due to default OpenGL coordinates (left-bottom)
DrawTextureRec(target.texture, (Rectangle){ 0, 0, target.texture.width, -target.texture.height }, (Vector2){ 0, 0 }, WHITE);
SetDefaultShader();
DrawText("(c) Dwarf 3D model by David Moreno", screenWidth - 200, screenHeight - 20, 10, GRAY); DrawText("(c) Dwarf 3D model by David Moreno", screenWidth - 200, screenHeight - 20, 10, GRAY);
@ -97,11 +107,12 @@ int main()
// De-Initialization // De-Initialization
//-------------------------------------------------------------------------------------- //--------------------------------------------------------------------------------------
UnloadShader(shader); // Unload shader UnloadShader(shader); // Unload shader
UnloadTexture(texture); // Unload texture UnloadTexture(texture); // Unload texture
UnloadModel(dwarf); // Unload model UnloadModel(dwarf); // Unload model
UnloadRenderTexture(target); // Unload render texture
CloseWindow(); // Close window and OpenGL context CloseWindow(); // Close window and OpenGL context
//-------------------------------------------------------------------------------------- //--------------------------------------------------------------------------------------
return 0; return 0;
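Note: condensed from the hunks above, the new render-to-texture postprocessing flow looks roughly like this (an outline using only calls that appear in this diff, not the complete example):

    RenderTexture2D target = LoadRenderTexture(screenWidth, screenHeight);
    // ... every frame:
    BeginDrawing();
        ClearBackground(RAYWHITE);
        BeginTextureMode(target);           // Enable drawing to the render texture
            Begin3dMode(camera);
                DrawModel(dwarf, position, 2.0f, WHITE);
                DrawGrid(10, 1.0f);
            End3dMode();
        EndTextureMode();                   // Scene is now available as a texture
        SetCustomShader(shader);            // Draw that texture fullscreen through the postpro shader
        DrawTextureRec(target.texture, (Rectangle){ 0, 0, target.texture.width, -target.texture.height }, (Vector2){ 0, 0 }, WHITE);   // y-flipped
        SetDefaultShader();
    EndDrawing();
    // ... on shutdown:
    UnloadRenderTexture(target);            // Release the render texture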

View File

@ -34,11 +34,11 @@ int main()
Model dwarf = LoadModel("resources/model/dwarf.obj"); // Load OBJ model Model dwarf = LoadModel("resources/model/dwarf.obj"); // Load OBJ model
Texture2D texture = LoadTexture("resources/model/dwarf_diffuse.png"); // Load model texture Texture2D texture = LoadTexture("resources/model/dwarf_diffuse.png"); // Load model texture
Shader shader = LoadShader("resources/shaders/base.vs", Shader shader = LoadShader("resources/shaders/glsl330/base.vs",
"resources/shaders/grayscale.fs"); // Load model shader "resources/shaders/glsl330/grayscale.fs"); // Load model shader
SetModelShader(&dwarf, shader); // Set shader effect to 3d model dwarf.material.shader = shader; // Set shader effect to 3d model
SetModelTexture(&dwarf, texture); // Bind texture to model dwarf.material.texDiffuse = texture; // Bind texture to model
Vector3 position = { 0.0f, 0.0f, 0.0f }; // Set model position Vector3 position = { 0.0f, 0.0f, 0.0f }; // Set model position
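Note: the change above drops the SetModelShader()/SetModelTexture() helpers in favour of direct material assignment; condensed, the new binding is simply:

    dwarf.material.shader = shader;        // Shader now lives in the model's material
    dwarf.material.texDiffuse = texture;   // Diffuse texture is bound through the material as well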

View File

@ -38,10 +38,11 @@ int main()
Vector3 position = { 0.0f, 0.0f, 0.0f }; // Set model position Vector3 position = { 0.0f, 0.0f, 0.0f }; // Set model position
Shader shader = LoadShader("resources/shaders/base.vs", Shader shader = LoadShader("resources/shaders/glsl330/base.vs",
"resources/shaders/bloom.fs"); // Load postpro shader "resources/shaders/glsl330/bloom.fs"); // Load postpro shader
SetPostproShader(shader); // Set fullscreen postprocessing shader // Create a RenderTexture2D to be used for render to texture
RenderTexture2D target = LoadRenderTexture(screenWidth, screenHeight);
// Setup orbital camera // Setup orbital camera
SetCameraMode(CAMERA_ORBITAL); // Set an orbital camera mode SetCameraMode(CAMERA_ORBITAL); // Set an orbital camera mode
@ -65,15 +66,26 @@ int main()
ClearBackground(RAYWHITE); ClearBackground(RAYWHITE);
Begin3dMode(camera); BeginTextureMode(target); // Enable drawing to texture
DrawModel(dwarf, position, 2.0f, WHITE); // Draw 3d model with texture Begin3dMode(camera);
DrawGrid(10, 1.0f); // Draw a grid DrawModel(dwarf, position, 2.0f, WHITE); // Draw 3d model with texture
End3dMode(); DrawGrid(10, 1.0f); // Draw a grid
End3dMode();
DrawText("HELLO POSTPROCESSING!", 70, 190, 50, RED);
EndTextureMode(); // End drawing to texture (now we have a texture available for next passes)
DrawText("(c) Dwarf 3D model by David Moreno", screenWidth - 200, screenHeight - 20, 10, BLACK); SetCustomShader(shader);
// NOTE: Render texture must be y-flipped due to default OpenGL coordinates (left-bottom)
DrawTextureRec(target.texture, (Rectangle){ 0, 0, target.texture.width, -target.texture.height }, (Vector2){ 0, 0 }, WHITE);
SetDefaultShader();
DrawText("(c) Dwarf 3D model by David Moreno", screenWidth - 200, screenHeight - 20, 10, DARKGRAY);
DrawFPS(10, 10); DrawFPS(10, 10);
@ -83,11 +95,12 @@ int main()
// De-Initialization // De-Initialization
//-------------------------------------------------------------------------------------- //--------------------------------------------------------------------------------------
UnloadShader(shader); // Unload shader UnloadShader(shader); // Unload shader
UnloadTexture(texture); // Unload texture UnloadTexture(texture); // Unload texture
UnloadModel(dwarf); // Unload model UnloadModel(dwarf); // Unload model
UnloadRenderTexture(target); // Unload render texture
CloseWindow(); // Close window and OpenGL context CloseWindow(); // Close window and OpenGL context
//-------------------------------------------------------------------------------------- //--------------------------------------------------------------------------------------
return 0; return 0;

View File

@ -32,10 +32,9 @@ int main()
Texture2D sonic = LoadTexture("resources/texture_formats/sonic.png"); Texture2D sonic = LoadTexture("resources/texture_formats/sonic.png");
// NOTE: This shader is a bit different than model/postprocessing shaders, // NOTE: Using GLSL 330 shader version, on OpenGL ES 2.0 use GLSL 100 shader version
// it requires the color data for every vertice to use it in every shape or texture independently Shader shader = LoadShader("resources/shaders/glsl330/base.vs",
Shader shader = LoadShader("resources/shaders/shapes_base.vs", "resources/shaders/glsl330/grayscale.fs");
"resources/shaders/shapes_grayscale.fs");
// Shader usage is also different than models/postprocessing, shader is just activated when required // Shader usage is also different than models/postprocessing, shader is just activated when required

View File

@ -1,42 +0,0 @@
#version 330
in vec2 fragTexCoord;
out vec4 fragColor;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
vec4 sum = vec4(0);
vec4 tc = vec4(0);
for (int i = -4; i < 4; i++)
{
for (int j = -3; j < 3; j++)
{
sum += texture(texture0, fragTexCoord + vec2(j, i)*0.004) * 0.25;
}
}
if (texture(texture0, fragTexCoord).r < 0.3)
{
tc = sum*sum*0.012 + texture(texture0, fragTexCoord);
}
else
{
if (texture(texture0, fragTexCoord).r < 0.5)
{
tc = sum*sum*0.009 + texture(texture0, fragTexCoord);
}
else
{
tc = sum*sum*0.0075 + texture(texture0, fragTexCoord);
}
}
fragColor = tc;
}

View File

@ -1,29 +0,0 @@
#version 330
in vec2 fragTexCoord;
out vec4 fragColor;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
const float renderWidth = 1280.0;
const float renderHeight = 720.0;
float offset[3] = float[](0.0, 1.3846153846, 3.2307692308);
float weight[3] = float[](0.2270270270, 0.3162162162, 0.0702702703);
void main()
{
vec3 tc = texture(texture0, fragTexCoord).rgb*weight[0];
for (int i = 1; i < 3; i++)
{
tc += texture(texture0, fragTexCoord + vec2(offset[i])/renderWidth, 0.0).rgb*weight[i];
tc += texture(texture0, fragTexCoord - vec2(offset[i])/renderWidth, 0.0).rgb*weight[i];
}
fragColor = vec4(tc, 1.0);
}

View File

@ -1,20 +0,0 @@
#version 330
in vec2 fragTexCoord;
out vec4 fragColor;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
vec4 base = texture(texture0, fragTexCoord)*fragTintColor;
// Convert to grayscale using NTSC conversion weights
float gray = dot(base.rgb, vec3(0.299, 0.587, 0.114));
fragColor = vec4(gray, gray, gray, fragTintColor.a);
}

View File

@ -1,76 +0,0 @@
#version 330
// Vertex shader input data
in vec2 fragTexCoord;
in vec3 fragNormal;
// Diffuse data
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Light attributes
uniform vec3 light_ambientColor = vec3(0.6, 0.3, 0);
uniform vec3 light_diffuseColor = vec3(1, 0.5, 0);
uniform vec3 light_specularColor = vec3(0, 1, 0);
uniform float light_intensity = 1;
uniform float light_specIntensity = 1;
// Material attributes
uniform vec3 mat_ambientColor = vec3(1, 1, 1);
uniform vec3 mat_specularColor = vec3(1, 1, 1);
uniform float mat_glossiness = 50;
// World attributes
uniform vec3 lightPos;
uniform vec3 cameraPos;
// Fragment shader output data
out vec4 fragColor;
vec3 AmbientLighting()
{
return mat_ambientColor * light_ambientColor;
}
vec3 DiffuseLighting(in vec3 N, in vec3 L)
{
// Lambertian reflection calculation
float diffuse = clamp(dot(N, L), 0, 1);
return tintColor.xyz * light_diffuseColor * light_intensity * diffuse;
}
vec3 SpecularLighting(in vec3 N, in vec3 L, in vec3 V)
{
float specular = 0;
// Calculate specular reflection only if the surface is oriented to the light source
if(dot(N, L) > 0)
{
// Calculate half vector
vec3 H = normalize(L + V);
// Calculate specular intensity
specular = pow(dot(N, H), 3 + mat_glossiness);
}
return mat_specularColor * light_specularColor * light_specIntensity * specular;
}
void main()
{
// Normalize input vectors
vec3 L = normalize(lightPos);
vec3 V = normalize(cameraPos);
vec3 N = normalize(fragNormal);
vec3 ambient = AmbientLighting();
vec3 diffuse = DiffuseLighting(N, L);
vec3 specular = SpecularLighting(N, L, V);
// Get base color from texture
vec4 textureColor = texture(texture0, fragTexCoord);
vec3 finalColor = textureColor.rgb;
fragColor = vec4(finalColor * (ambient + diffuse + specular), textureColor.a);
}

View File

@ -1,26 +0,0 @@
#version 330
in vec2 fragTexCoord;
out vec4 fragColor;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
float gamma = 0.6;
float numColors = 8.0;
void main()
{
vec3 color = texture(texture0, fragTexCoord.xy).rgb;
color = pow(color, vec3(gamma, gamma, gamma));
color = color*numColors;
color = floor(color);
color = color/numColors;
color = pow(color, vec3(1.0/gamma));
fragColor = vec4(color, 1.0);
}

View File

@ -1,20 +0,0 @@
#version 100
precision mediump float;
varying vec2 fragTexCoord;
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
vec4 base = texture2D(texture0, fragTexCoord)*fragTintColor;
// Convert to grayscale using NTSC conversion weights
float gray = dot(base.rgb, vec3(0.299, 0.587, 0.114));
gl_FragColor = vec4(gray, gray, gray, fragTintColor.a);
}

26
shaders/glsl100/base.vs Normal file
View File

@ -0,0 +1,26 @@
#version 100
// Input vertex attributes
attribute vec3 vertexPosition;
attribute vec2 vertexTexCoord;
attribute vec3 vertexNormal;
attribute vec4 vertexColor;
// Input uniform values
uniform mat4 mvpMatrix;
// Output vertex attributes (to fragment shader)
varying vec2 fragTexCoord;
varying vec4 fragColor;
// NOTE: Add here your custom variables
void main()
{
// Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord;
fragColor = vertexColor;
// Calculate final vertex position
gl_Position = mvpMatrix*vec4(vertexPosition, 1.0);
}

37
shaders/glsl100/bloom.fs Normal file
View File

@ -0,0 +1,37 @@
#version 100
precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
vec4 sum = vec4(0);
vec4 tc = vec4(0);
for (int i = -4; i < 4; i++)
{
for (int j = -3; j < 3; j++)
{
sum += texture2D(texture0, fragTexCoord + vec2(j, i)*0.004) * 0.25;
}
}
// Texel color fetching from texture sampler
vec4 texelColor = texture2D(texture0, fragTexCoord);
// Calculate final fragment color
if (texelColor.r < 0.3) tc = sum*sum*0.012 + texelColor;
else if (texelColor.r < 0.5) tc = sum*sum*0.009 + texelColor;
else tc = sum*sum*0.0075 + texelColor;
gl_FragColor = tc;
}

View File

@ -2,7 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
@ -16,6 +20,7 @@ float weight[3] = float[]( 0.2270270270, 0.3162162162, 0.0702702703 );
void main() void main()
{ {
// Texel color fetching from texture sampler
vec3 tc = texture2D(texture0, fragTexCoord).rgb*weight[0]; vec3 tc = texture2D(texture0, fragTexCoord).rgb*weight[0];
for (int i = 1; i < 3; i++) for (int i = 1; i < 3; i++)

View File

@ -2,12 +2,15 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
float hatchOffsetY = 5.0f; float hatchOffsetY = 5.0f;
float lumThreshold01 = 0.9f; float lumThreshold01 = 0.9f;

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
@ -46,7 +49,7 @@ vec4 PostFX(sampler2D tex, vec2 uv)
return c; return c;
} }
void main(void) void main()
{ {
vec3 tc = PostFX(texture0, fragTexCoord).rgb; vec3 tc = PostFX(texture0, fragTexCoord).rgb;

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;

View File

@ -2,12 +2,15 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
const float PI = 3.1415926535; const float PI = 3.1415926535;

View File

@ -0,0 +1,25 @@
#version 100
precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// NOTE: Add here your custom variables
void main()
{
// Texel color fetching from texture sampler
vec4 texelColor = texture2D(texture0, fragTexCoord)*fragTintColor*fragColor;
// Convert texel color to grayscale using NTSC conversion weights
float gray = dot(texelColor.rgb, vec3(0.299, 0.587, 0.114));
// Calculate final fragment color
gl_FragColor = vec4(gray, gray, gray, texelColor.a);
}

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
@ -14,7 +17,7 @@ float frequency = 720/3.0;
uniform float time; uniform float time;
void main (void) void main()
{ {
/* /*
// Scanlines method 1 // Scanlines method 1

View File

@ -2,28 +2,32 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
const float renderWidth = 1280; const float renderWidth = 800.0; // HARDCODED for example!
const float renderHeight = 720; const float renderHeight = 480.0; // Use uniforms instead...
float radius = 250.0; float radius = 250.0;
float angle = 0.8; float angle = 0.8;
uniform vec2 center = vec2(200, 200); uniform vec2 center = vec2(200.0, 200.0);
void main (void) void main()
{ {
vec2 texSize = vec2(renderWidth, renderHeight); vec2 texSize = vec2(renderWidth, renderHeight);
vec2 tc = fragTexCoord*texSize; vec2 tc = fragTexCoord*texSize;
tc -= center; tc -= center;
float dist = length(tc);
float dist = length(tc);
if (dist < radius) if (dist < radius)
{ {
float percent = (radius - dist)/radius; float percent = (radius - dist)/radius;
@ -33,7 +37,7 @@ void main (void)
tc = vec2(dot(tc, vec2(c, -s)), dot(tc, vec2(s, c))); tc = vec2(dot(tc, vec2(c, -s)), dot(tc, vec2(s, c)));
} }
tc += center; tc += center;
vec3 color = texture2D(texture0, tc/texSize).rgb; vec3 color = texture2D(texture0, tc/texSize).rgb;

View File

@ -2,8 +2,11 @@
precision mediump float; precision mediump float;
// Input vertex attributes (from vertex shader)
varying vec2 fragTexCoord; varying vec2 fragTexCoord;
varying vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
@ -11,6 +14,7 @@ uniform vec4 fragTintColor;
void main() void main()
{ {
// Texel color fetching from texture sampler
vec4 texelColor = texture2D(texture0, fragTexCoord); vec4 texelColor = texture2D(texture0, fragTexCoord);
// NOTE: Implement here your fragment shader code // NOTE: Implement here your fragment shader code

View File

@ -1,18 +1,26 @@
#version 330 #version 330
// Input vertex attributes
in vec3 vertexPosition; in vec3 vertexPosition;
in vec2 vertexTexCoord; in vec2 vertexTexCoord;
in vec3 vertexNormal; in vec3 vertexNormal;
in vec4 vertexColor;
out vec2 fragTexCoord; // Input uniform values
uniform mat4 mvpMatrix; uniform mat4 mvpMatrix;
// Output vertex attributes (to fragment shader)
out vec2 fragTexCoord;
out vec4 fragColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
void main() void main()
{ {
// Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord; fragTexCoord = vertexTexCoord;
fragColor = vertexColor;
// Calculate final vertex position
gl_Position = mvpMatrix*vec4(vertexPosition, 1.0); gl_Position = mvpMatrix*vec4(vertexPosition, 1.0);
} }

38
shaders/glsl330/bloom.fs Normal file
View File

@ -0,0 +1,38 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
void main()
{
vec4 sum = vec4(0);
vec4 tc = vec4(0);
for (int i = -4; i < 4; i++)
{
for (int j = -3; j < 3; j++)
{
sum += texture(texture0, fragTexCoord + vec2(j, i)*0.004)*0.25;
}
}
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord);
// Calculate final fragment color
if (texelColor.r < 0.3) tc = sum*sum*0.012 + texelColor;
else if (texelColor.r < 0.5) tc = sum*sum*0.009 + texelColor;
else tc = sum*sum*0.0075 + texelColor;
finalColor = tc;
}

34
shaders/glsl330/blur.fs Normal file
View File

@ -0,0 +1,34 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
const float renderWidth = 1280.0;
const float renderHeight = 720.0;
float offset[3] = float[](0.0, 1.3846153846, 3.2307692308);
float weight[3] = float[](0.2270270270, 0.3162162162, 0.0702702703);
void main()
{
// Texel color fetching from texture sampler
vec3 texelColor = texture(texture0, fragTexCoord).rgb*weight[0];
for (int i = 1; i < 3; i++)
{
texelColor += texture(texture0, fragTexCoord + vec2(offset[i])/renderWidth, 0.0).rgb*weight[i];
texelColor += texture(texture0, fragTexCoord - vec2(offset[i])/renderWidth, 0.0).rgb*weight[i];
}
finalColor = vec4(texelColor, 1.0);
}

View File

@ -1,13 +1,17 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// NOTE: Add here your custom variables // Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
float hatchOffsetY = 5.0; float hatchOffsetY = 5.0;
float lumThreshold01 = 0.9; float lumThreshold01 = 0.9;
@ -27,18 +31,18 @@ void main()
if (lum < lumThreshold02) if (lum < lumThreshold02)
{ {
if (mod(gl_FragCoord .x - gl_FragCoord .y, 10.0) == 0.0) tc = vec3(0.0, 0.0, 0.0); if (mod(gl_FragCoord.x - gl_FragCoord.y, 10.0) == 0.0) tc = vec3(0.0, 0.0, 0.0);
} }
if (lum < lumThreshold03) if (lum < lumThreshold03)
{ {
if (mod(gl_FragCoord .x + gl_FragCoord .y - hatchOffsetY, 10.0) == 0.0) tc = vec3(0.0, 0.0, 0.0); if (mod(gl_FragCoord.x + gl_FragCoord.y - hatchOffsetY, 10.0) == 0.0) tc = vec3(0.0, 0.0, 0.0);
} }
if (lum < lumThreshold04) if (lum < lumThreshold04)
{ {
if (mod(gl_FragCoord .x - gl_FragCoord .y - hatchOffsetY, 10.0) == 0.0) tc = vec3(0.0, 0.0, 0.0); if (mod(gl_FragCoord.x - gl_FragCoord.y - hatchOffsetY, 10.0) == 0.0) tc = vec3(0.0, 0.0, 0.0);
} }
fragColor = vec4(tc, 1.0); finalColor = vec4(tc, 1.0);
} }

View File

@ -1,12 +1,16 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
const float renderWidth = 1280.0; const float renderWidth = 1280.0;
@ -46,9 +50,9 @@ vec4 PostFX(sampler2D tex, vec2 uv)
return c; return c;
} }
void main(void) void main()
{ {
vec3 tc = PostFX(texture0, fragTexCoord).rgb; vec3 tc = PostFX(texture0, fragTexCoord).rgb;
fragColor = vec4(tc, 1.0); finalColor = vec4(tc, 1.0);
} }

27
shaders/glsl330/depth.fs Normal file
View File

@ -0,0 +1,27 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0; // Depth texture
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
void main()
{
float zNear = 0.01; // camera z near
float zFar = 10.0; // camera z far
float z = texture(texture0, fragTexCoord).x;
// Linearize depth value
float depth = (2.0*zNear)/(zFar + zNear - z*(zFar - zNear));
// Calculate final fragment color
finalColor = vec4(depth, depth, depth, 1.0f);
}

View File

@ -0,0 +1,26 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
void main()
{
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord)*fragTintColor*fragColor;
// Convert texel color to grayscale using NTSC conversion weights
float gray = dot(texelColor.rgb, vec3(0.299, 0.587, 0.114));
// Calculate final fragment color
finalColor = vec4(gray, gray, gray, texelColor.a);
}

85
shaders/glsl330/phong.fs Normal file
View File

@ -0,0 +1,85 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec3 fragNormal;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
// Light uniform values
uniform vec3 lightAmbientColor = vec3(0.6, 0.3, 0.0);
uniform vec3 lightDiffuseColor = vec3(1.0, 0.5, 0.0);
uniform vec3 lightSpecularColor = vec3(0.0, 1.0, 0.0);
uniform float lightIntensity = 1.0;
uniform float lightSpecIntensity = 1.0;
// Material uniform values
uniform vec3 matAmbientColor = vec3(1.0, 1.0, 1.0);
uniform vec3 matSpecularColor = vec3(1.0, 1.0, 1.0);
uniform float matGlossiness = 50.0;
// World uniform values
uniform vec3 lightPosition;
uniform vec3 cameraPosition;
// Calculate ambient lighting component
vec3 AmbientLighting()
{
return (matAmbientColor*lightAmbientColor);
}
// Calculate diffuse lighting component
vec3 DiffuseLighting(in vec3 N, in vec3 L)
{
// Lambertian reflection calculation
float diffuse = clamp(dot(N, L), 0, 1);
return (fragTintColor.xyz*lightDiffuseColor*lightIntensity*diffuse);
}
// Calculate specular lighting component
vec3 SpecularLighting(in vec3 N, in vec3 L, in vec3 V)
{
float specular = 0.0;
// Calculate specular reflection only if the surface is oriented to the light source
if (dot(N, L) > 0)
{
// Calculate half vector
vec3 H = normalize(L + V);
// Calculate specular intensity
specular = pow(dot(N, H), 3 + matGlossiness);
}
return (matSpecularColor*lightSpecularColor*lightSpecIntensity*specular);
}
void main()
{
// Normalize input vectors
vec3 L = normalize(lightPosition);
vec3 V = normalize(cameraPosition);
vec3 N = normalize(fragNormal);
// Calculate lighting components
vec3 ambient = AmbientLighting();
vec3 diffuse = DiffuseLighting(N, L);
vec3 specular = SpecularLighting(N, L, V);
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord);
// Calculate final fragment color
finalColor = vec4(texelColor.rgb*(ambient + diffuse + specular), texelColor.a);
}

View File

@ -1,23 +1,25 @@
#version 330 #version 330
// Vertex input data // Input vertex attributes
in vec3 vertexPosition; in vec3 vertexPosition;
in vec2 vertexTexCoord; in vec2 vertexTexCoord;
in vec3 vertexNormal; in vec3 vertexNormal;
// Projection and model data // Input uniform values
uniform mat4 mvpMatrix; uniform mat4 mvpMatrix;
uniform mat4 modelMatrix;
// Attributes to fragment shader // Output vertex attributes (to fragment shader)
out vec2 fragTexCoord; out vec2 fragTexCoord;
out vec3 fragNormal; out vec3 fragNormal;
// NOTE: Add here your custom variables
uniform mat4 modelMatrix;
void main() void main()
{ {
// Send texture coord to fragment shader // Send vertex attributes to fragment shader
fragTexCoord = vertexTexCoord; fragTexCoord = vertexTexCoord;
// Calculate view vector normal from model // Calculate view vector normal from model
mat3 normalMatrix = transpose(inverse(mat3(modelMatrix))); mat3 normalMatrix = transpose(inverse(mat3(modelMatrix)));
fragNormal = normalize(normalMatrix*vertexNormal); fragNormal = normalize(normalMatrix*vertexNormal);

View File

@ -1,13 +1,17 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// NOTE: Add here your custom variables // Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
const float renderWidth = 1280.0; const float renderWidth = 1280.0;
const float renderHeight = 720.0; const float renderHeight = 720.0;
@ -24,5 +28,5 @@ void main()
vec3 tc = texture(texture0, coord).rgb; vec3 tc = texture(texture0, coord).rgb;
fragColor = vec4(tc, 1.0); finalColor = vec4(tc, 1.0);
} }

View File

@ -0,0 +1,31 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
float gamma = 0.6;
float numColors = 8.0;
void main()
{
// Texel color fetching from texture sampler
vec3 texelColor = texture(texture0, fragTexCoord.xy).rgb;
texelColor = pow(texelColor, vec3(gamma, gamma, gamma));
texelColor = texelColor*numColors;
texelColor = floor(texelColor);
texelColor = texelColor/numColors;
texelColor = pow(texelColor, vec3(1.0/gamma));
finalColor = vec4(texelColor, 1.0);
}

View File

@ -1,27 +1,32 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
void main() void main()
{ {
vec3 color = texture(texture0, fragTexCoord).rgb; // Texel color fetching from texture sampler
vec3 texelColor = texture(texture0, fragTexCoord).rgb;
vec3 colors[3]; vec3 colors[3];
colors[0] = vec3(0.0, 0.0, 1.0); colors[0] = vec3(0.0, 0.0, 1.0);
colors[1] = vec3(1.0, 1.0, 0.0); colors[1] = vec3(1.0, 1.0, 0.0);
colors[2] = vec3(1.0, 0.0, 0.0); colors[2] = vec3(1.0, 0.0, 0.0);
float lum = (color.r + color.g + color.b)/3.0; float lum = (texelColor.r + texelColor.g + texelColor.b)/3.0;
int ix = (lum < 0.5)? 0:1; int ix = (lum < 0.5)? 0:1;
vec3 tc = mix(colors[ix], colors[ix + 1], (lum - float(ix)*0.5)/0.5); vec3 tc = mix(colors[ix], colors[ix + 1], (lum - float(ix)*0.5)/0.5);
fragColor = vec4(tc, 1.0); finalColor = vec4(tc, 1.0);
} }

View File

@ -1,12 +1,16 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
float offset = 0.0; float offset = 0.0;
@ -14,7 +18,7 @@ float frequency = 720.0/3.0;
uniform float time; uniform float time;
void main (void) void main()
{ {
/* /*
// Scanlines method 1 // Scanlines method 1
@ -35,7 +39,8 @@ void main (void)
float globalPos = (fragTexCoord.y + offset) * frequency; float globalPos = (fragTexCoord.y + offset) * frequency;
float wavePos = cos((fract(globalPos) - 0.5)*3.14); float wavePos = cos((fract(globalPos) - 0.5)*3.14);
vec4 color = texture(texture0, fragTexCoord); // Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord);
fragColor = mix(vec4(0.0, 0.3, 0.0, 0.0), color, wavePos); finalColor = mix(vec4(0.0, 0.3, 0.0, 0.0), texelColor, wavePos);
} }

46
shaders/glsl330/swirl.fs Normal file
View File

@ -0,0 +1,46 @@
#version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord;
in vec4 fragColor;
// Input uniform values
uniform sampler2D texture0;
uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables
const float renderWidth = 800.0; // HARDCODED for example!
const float renderHeight = 480.0; // Use uniforms instead...
float radius = 250.0;
float angle = 0.8;
uniform vec2 center = vec2(200.0, 200.0);
void main()
{
vec2 texSize = vec2(renderWidth, renderHeight);
vec2 tc = fragTexCoord*texSize;
tc -= center;
float dist = length(tc);
if (dist < radius)
{
float percent = (radius - dist)/radius;
float theta = percent*percent*angle*8.0;
float s = sin(theta);
float c = cos(theta);
tc = vec2(dot(tc, vec2(c, -s)), dot(tc, vec2(s, c)));
}
tc += center;
vec3 color = texture(texture0, tc/texSize).rgb;
finalColor = vec4(color, 1.0);
}
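Note: the shader above hard-codes renderWidth/renderHeight and its comment suggests using uniforms instead; a sketch of that wiring from the C side, assuming SetShaderValue(shader, location, value, size) and a hypothetical uniform vec2 renderSize added to the shader (center already exists as a uniform):

    int renderSizeLoc = GetShaderLocation(shader, "renderSize");   // hypothetical uniform
    int centerLoc = GetShaderLocation(shader, "center");
    float renderSize[2] = { (float)screenWidth, (float)screenHeight };
    float swirlCenter[2] = { (float)screenWidth/2, (float)screenHeight/2 };
    SetShaderValue(shader, renderSizeLoc, renderSize, 2);
    SetShaderValue(shader, centerLoc, swirlCenter, 2);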

View File

@ -1,19 +1,24 @@
#version 330 #version 330
// Input vertex attributes (from vertex shader)
in vec2 fragTexCoord; in vec2 fragTexCoord;
in vec4 fragColor;
out vec4 fragColor; // Input uniform values
uniform sampler2D texture0; uniform sampler2D texture0;
uniform vec4 fragTintColor; uniform vec4 fragTintColor;
// Output fragment color
out vec4 finalColor;
// NOTE: Add here your custom variables // NOTE: Add here your custom variables
void main() void main()
{ {
// Texel color fetching from texture sampler
vec4 texelColor = texture(texture0, fragTexCoord); vec4 texelColor = texture(texture0, fragTexCoord);
// NOTE: Implement here your fragment shader code // NOTE: Implement here your fragment shader code
fragColor = texelColor*fragTintColor; finalColor = texelColor*fragTintColor;
} }

View File

@ -21,6 +21,8 @@
# #
#************************************************************************************************** #**************************************************************************************************
.PHONY: all clean
# define raylib platform to compile for # define raylib platform to compile for
# possible platforms: PLATFORM_DESKTOP PLATFORM_RPI PLATFORM_WEB # possible platforms: PLATFORM_DESKTOP PLATFORM_RPI PLATFORM_WEB
PLATFORM ?= PLATFORM_DESKTOP PLATFORM ?= PLATFORM_DESKTOP
@ -84,8 +86,6 @@ else
# external libraries headers # external libraries headers
# GLFW3 # GLFW3
INCLUDES += -I../external/glfw3/include INCLUDES += -I../external/glfw3/include
# GLEW
INCLUDES += -I../external/glew/include
# OpenAL Soft # OpenAL Soft
INCLUDES += -I../external/openal_soft/include INCLUDES += -I../external/openal_soft/include
endif endif
@ -99,9 +99,9 @@ else
endif endif
# typing 'make' will invoke the first target entry in the file, # typing 'make' will invoke the default target entry called 'all',
# in this case, the 'default' target entry is raylib # in this case, the 'default' target entry is raylib
default: raylib all: raylib
# compile raylib library # compile raylib library
raylib: $(OBJS) raylib: $(OBJS)
@ -143,7 +143,7 @@ models.o: models.c
# compile audio module # compile audio module
audio.o: audio.c audio.o: audio.c
$(CC) -c audio.c $(CFLAGS) $(INCLUDES) -D$(PLATFORM) $(CC) -c audio.c $(CFLAGS) $(INCLUDES) -D$(PLATFORM)
# compile stb_vorbis library # compile stb_vorbis library
stb_vorbis.o: stb_vorbis.c stb_vorbis.o: stb_vorbis.c
$(CC) -c stb_vorbis.c -O1 $(INCLUDES) -D$(PLATFORM) $(CC) -c stb_vorbis.c -O1 $(INCLUDES) -D$(PLATFORM)
@ -163,21 +163,21 @@ gestures.o: gestures.c
# clean everything # clean everything
clean: clean:
ifeq ($(PLATFORM),PLATFORM_DESKTOP) ifeq ($(PLATFORM),PLATFORM_DESKTOP)
ifeq ($(PLATFORM_OS),OSX) ifeq ($(PLATFORM_OS),WINDOWS)
rm -f *.o libraylib.a
else
ifeq ($(PLATFORM_OS),LINUX)
find -type f -executable | xargs file -i | grep -E 'x-object|x-archive|x-sharedlib|x-executable' | rev | cut -d ':' -f 2- | rev | xargs rm -f
else
del *.o libraylib.a del *.o libraylib.a
else
rm -f *.o libraylib.a
endif endif
endif
ifeq ($(PLATFORM),PLATFORM_WEB)
ifeq ($(PLATFORM_OS),WINDOWS)
del *.o libraylib.bc
else
rm -f *.o libraylib.bc
endif endif
endif endif
ifeq ($(PLATFORM),PLATFORM_RPI) ifeq ($(PLATFORM),PLATFORM_RPI)
rm -f *.o libraylib.a rm -f *.o libraylib.a
endif
ifeq ($(PLATFORM),PLATFORM_WEB)
del *.o libraylib.bc
endif endif
@echo Cleaning done @echo Cleaning done

View File

@ -37,6 +37,7 @@
#include "AL/al.h" // OpenAL basic header #include "AL/al.h" // OpenAL basic header
#include "AL/alc.h" // OpenAL context header (like OpenGL, OpenAL requires a context to work) #include "AL/alc.h" // OpenAL context header (like OpenGL, OpenAL requires a context to work)
#include "AL/alext.h" // extensions for other format types
#include <stdlib.h> // Declares malloc() and free() for memory management #include <stdlib.h> // Declares malloc() and free() for memory management
#include <string.h> // Required for strcmp() #include <string.h> // Required for strcmp()
@ -50,39 +51,57 @@
#endif #endif
//#define STB_VORBIS_HEADER_ONLY //#define STB_VORBIS_HEADER_ONLY
#include "stb_vorbis.h" // OGG loading functions #include "stb_vorbis.h" // OGG loading functions
#define JAR_XM_IMPLEMENTATION
#include "jar_xm.h" // For playing .xm files
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Defines and Macros // Defines and Macros
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
#define MUSIC_STREAM_BUFFERS 2 #define MAX_STREAM_BUFFERS 2 // Number of buffers for each alSource
#define MAX_MIX_CHANNELS 4 // Number of open AL sources
#define MAX_MUSIC_STREAMS 2 // Number of simultaneous music sources
#if defined(PLATFORM_RPI) #if defined(PLATFORM_RPI) || defined(PLATFORM_ANDROID)
// NOTE: On RPI should be lower to avoid frame-stalls // NOTE: On RPI and Android should be lower to avoid frame-stalls
#define MUSIC_BUFFER_SIZE 4096*2 // PCM data buffer (short) - 16Kb (RPI) #define MUSIC_BUFFER_SIZE_SHORT 4096*2 // PCM data buffer (short) - 16Kb (RPI)
#define MUSIC_BUFFER_SIZE_FLOAT 4096 // PCM data buffer (float) - 16Kb (RPI)
#else #else
// NOTE: On HTML5 (emscripten) this is allocated on heap, by default it's only 16MB!...just take care... // NOTE: On HTML5 (emscripten) this is allocated on heap, by default it's only 16MB!...just take care...
#define MUSIC_BUFFER_SIZE 4096*8 // PCM data buffer (short) - 64Kb #define MUSIC_BUFFER_SIZE_SHORT 4096*8 // PCM data buffer (short) - 64Kb
#define MUSIC_BUFFER_SIZE_FLOAT 4096*4 // PCM data buffer (float) - 64Kb
#endif #endif
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Types and Structures Definition // Types and Structures Definition
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Used to create custom audio streams that are not bound to a specific file.
// No more than 4 (MAX_MIX_CHANNELS) can be active at once, since each active stream
// is tied to a dedicated mix channel.
typedef struct MixChannel_t {
unsigned short sampleRate; // default is 48000
unsigned char channels; // 1=mono,2=stereo
unsigned char mixChannel; // 0-3 or mixA-mixD, each mix channel can receive up to one dedicated audio stream
bool floatingPoint; // if false then the short datatype is used instead
bool playing; // false if paused
ALenum alFormat; // openAL format specifier
ALuint alSource; // openAL source
ALuint alBuffer[MAX_STREAM_BUFFERS]; // openAL sample buffer
} MixChannel_t;
// Music type (file streaming from memory) // Music type (file streaming from memory)
// NOTE: Anything longer than ~10 seconds should be streamed... // NOTE: Anything longer than ~10 seconds should be streamed into a mix channel...
typedef struct Music { typedef struct Music {
stb_vorbis *stream; stb_vorbis *stream;
jar_xm_context_t *chipctx; // Stores jar_xm mixc
ALuint buffers[MUSIC_STREAM_BUFFERS]; MixChannel_t *mixc; // mix channel
ALuint source;
ALenum format;
int channels;
int sampleRate;
int totalSamplesLeft; int totalSamplesLeft;
float totalLengthSeconds;
bool loop; bool loop;
bool chipTune; // True if chiptune is loaded
} Music; } Music;
#if defined(AUDIO_STANDALONE) #if defined(AUDIO_STANDALONE)
@ -92,19 +111,28 @@ typedef enum { INFO = 0, ERROR, WARNING, DEBUG, OTHER } TraceLogType;
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Global Variables Definition // Global Variables Definition
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
static bool musicEnabled = false; static MixChannel_t* mixChannelsActive_g[MAX_MIX_CHANNELS]; // What mix channels are currently active
static Music currentMusic; // Current music loaded static bool musicEnabled_g = false;
// NOTE: Only one music file playing at a time static Music currentMusic[MAX_MUSIC_STREAMS]; // Current music loaded, up to two can play at the same time
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Module specific Functions Declaration // Module specific Functions Declaration
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
static Wave LoadWAV(const char *fileName); // Load WAV file static Wave LoadWAV(const char *fileName); // Load WAV file
static Wave LoadOGG(char *fileName); // Load OGG file static Wave LoadOGG(char *fileName); // Load OGG file
static void UnloadWave(Wave wave); // Unload wave data static void UnloadWave(Wave wave); // Unload wave data
static bool BufferMusicStream(ALuint buffer); // Fill music buffers with data static bool BufferMusicStream(int index, int numBuffers); // Fill music buffers with data
static void EmptyMusicStream(void); // Empty music buffers static void EmptyMusicStream(int index); // Empty music buffers
static MixChannel_t* InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint); // For streaming into mix channels.
static void CloseMixChannel(MixChannel_t* mixc); // Frees mix channel
static int BufferMixChannel(MixChannel_t* mixc, void *data, int numberElements); // Pushes more audio data into mixc mix channel, if NULL is passed it pauses
static int FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer); // Fill buffer with zeros, returns number processed
static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len); // Pass two arrays of the same length in
static void ResampleByteToFloat(char *chars, float *floats, unsigned short len); // Pass two arrays of the same length in
static int IsMusicStreamReadyForBuffering(int index); // Checks if music buffer is ready to be refilled
#if defined(AUDIO_STANDALONE) #if defined(AUDIO_STANDALONE)
const char *GetExtension(const char *fileName); // Get the extension for a filename const char *GetExtension(const char *fileName); // Get the extension for a filename
@ -115,7 +143,7 @@ void TraceLog(int msgType, const char *text, ...); // Outputs a trace log messa
// Module Functions Definition - Audio Device initialization and Closing // Module Functions Definition - Audio Device initialization and Closing
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Initialize audio device and context // Initialize audio device and mixc
void InitAudioDevice(void) void InitAudioDevice(void)
{ {
// Open and initialize a device with default settings // Open and initialize a device with default settings
@ -131,7 +159,7 @@ void InitAudioDevice(void)
alcCloseDevice(device); alcCloseDevice(device);
TraceLog(ERROR, "Could not setup audio context"); TraceLog(ERROR, "Could not setup mix channel");
} }
TraceLog(INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER)); TraceLog(INFO, "Audio device and context initialized successfully: %s", alcGetString(device, ALC_DEVICE_SPECIFIER));
@ -142,15 +170,19 @@ void InitAudioDevice(void)
alListener3f(AL_ORIENTATION, 0, 0, -1); alListener3f(AL_ORIENTATION, 0, 0, -1);
} }
// Close the audio device for the current context, and destroys the context // Close the audio device for all contexts
void CloseAudioDevice(void) void CloseAudioDevice(void)
{ {
StopMusicStream(); // Stop music streaming and close current stream for(int index=0; index<MAX_MUSIC_STREAMS; index++)
{
if(currentMusic[index].mixc) StopMusicStream(index); // Stop music streaming and close current stream
}
ALCdevice *device; ALCdevice *device;
ALCcontext *context = alcGetCurrentContext(); ALCcontext *context = alcGetCurrentContext();
if (context == NULL) TraceLog(WARNING, "Could not get current audio context for closing"); if (context == NULL) TraceLog(WARNING, "Could not get current mix channel for closing");
device = alcGetContextsDevice(context); device = alcGetContextsDevice(context);
@ -159,6 +191,229 @@ void CloseAudioDevice(void)
alcCloseDevice(device); alcCloseDevice(device);
} }
// True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet
bool IsAudioDeviceReady(void)
{
ALCcontext *context = alcGetCurrentContext();
if (context == NULL) return false;
else{
ALCdevice *device = alcGetContextsDevice(context);
if (device == NULL) return false;
else return true;
}
}
//----------------------------------------------------------------------------------
// Module Functions Definition - Custom audio output
//----------------------------------------------------------------------------------
// For streaming into mix channels.
// The mixChannel argument selects which audio muxing channel to operate on (0-3); each mix channel can only be used by one stream at a time.
// example usage: InitMixChannel(48000, 0, 2, true); // mix channel 0, 48kHz, stereo, floating point
static MixChannel_t* InitMixChannel(unsigned short sampleRate, unsigned char mixChannel, unsigned char channels, bool floatingPoint)
{
if(mixChannel >= MAX_MIX_CHANNELS) return NULL;
if(!IsAudioDeviceReady()) InitAudioDevice();
if(!mixChannelsActive_g[mixChannel]){
MixChannel_t *mixc = (MixChannel_t*)malloc(sizeof(MixChannel_t));
mixc->sampleRate = sampleRate;
mixc->channels = channels;
mixc->mixChannel = mixChannel;
mixc->floatingPoint = floatingPoint;
mixChannelsActive_g[mixChannel] = mixc;
// setup openAL format
if(channels == 1)
{
if(floatingPoint)
mixc->alFormat = AL_FORMAT_MONO_FLOAT32;
else
mixc->alFormat = AL_FORMAT_MONO16;
}
else if(channels == 2)
{
if(floatingPoint)
mixc->alFormat = AL_FORMAT_STEREO_FLOAT32;
else
mixc->alFormat = AL_FORMAT_STEREO16;
}
// Create an audio source
alGenSources(1, &mixc->alSource);
alSourcef(mixc->alSource, AL_PITCH, 1);
alSourcef(mixc->alSource, AL_GAIN, 1);
alSource3f(mixc->alSource, AL_POSITION, 0, 0, 0);
alSource3f(mixc->alSource, AL_VELOCITY, 0, 0, 0);
// Create Buffer
alGenBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer);
//fill buffers
int x;
for(x=0;x<MAX_STREAM_BUFFERS;x++)
FillAlBufferWithSilence(mixc, mixc->alBuffer[x]);
alSourceQueueBuffers(mixc->alSource, MAX_STREAM_BUFFERS, mixc->alBuffer);
mixc->playing = true;
alSourcePlay(mixc->alSource);
return mixc;
}
return NULL;
}
// Frees buffer in mix channel
static void CloseMixChannel(MixChannel_t* mixc)
{
if(mixc){
alSourceStop(mixc->alSource);
mixc->playing = false;
//flush out all queued buffers
ALuint buffer = 0;
int queued = 0;
alGetSourcei(mixc->alSource, AL_BUFFERS_QUEUED, &queued);
while (queued > 0)
{
alSourceUnqueueBuffers(mixc->alSource, 1, &buffer);
queued--;
}
//delete source and buffers
alDeleteSources(1, &mixc->alSource);
alDeleteBuffers(MAX_STREAM_BUFFERS, mixc->alBuffer);
mixChannelsActive_g[mixc->mixChannel] = NULL;
free(mixc);
mixc = NULL;
}
}
// Pushes more audio data into mixc mix channel, only one buffer per call
// Call "BufferMixChannel(mixc, NULL, 0)" if you want to pause the audio.
// @Returns number of samples that were processed.
static int BufferMixChannel(MixChannel_t* mixc, void *data, int numberElements)
{
if(!mixc || mixChannelsActive_g[mixc->mixChannel] != mixc) return 0; // when there are two channels there must be an even number of samples
if (!data || !numberElements)
{ // pauses audio until data is given
if(mixc->playing){
alSourcePause(mixc->alSource);
mixc->playing = false;
}
return 0;
}
else if(!mixc->playing)
{ // restart audio otherwise
alSourcePlay(mixc->alSource);
mixc->playing = true;
}
ALuint buffer = 0;
alSourceUnqueueBuffers(mixc->alSource, 1, &buffer);
if(!buffer) return 0;
if(mixc->floatingPoint) // process float buffers
{
float *ptr = (float*)data;
alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(float), mixc->sampleRate);
}
else // process short buffers
{
short *ptr = (short*)data;
alBufferData(buffer, mixc->alFormat, ptr, numberElements*sizeof(short), mixc->sampleRate);
}
alSourceQueueBuffers(mixc->alSource, 1, &buffer);
return numberElements;
}
// fill buffer with zeros, returns number processed
static int FillAlBufferWithSilence(MixChannel_t *mixc, ALuint buffer)
{
if(mixc->floatingPoint){
float pcm[MUSIC_BUFFER_SIZE_FLOAT] = {0.f};
alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_FLOAT*sizeof(float), mixc->sampleRate);
return MUSIC_BUFFER_SIZE_FLOAT;
}
else
{
short pcm[MUSIC_BUFFER_SIZE_SHORT] = {0};
alBufferData(buffer, mixc->alFormat, pcm, MUSIC_BUFFER_SIZE_SHORT*sizeof(short), mixc->sampleRate);
return MUSIC_BUFFER_SIZE_SHORT;
}
}
// example usage:
// short sh[3] = {1,2,3};float fl[3];
// ResampleShortToFloat(sh,fl,3);
static void ResampleShortToFloat(short *shorts, float *floats, unsigned short len)
{
int x;
for(x=0;x<len;x++)
{
if(shorts[x] < 0)
floats[x] = (float)shorts[x] / 32768.f; // negative range is -32768..-1
else
floats[x] = (float)shorts[x] / 32767.f;
}
}
// example usage:
// char ch[3] = {1,2,3};float fl[3];
// ResampleByteToFloat(ch,fl,3);
static void ResampleByteToFloat(char *chars, float *floats, unsigned short len)
{
int x;
for(x=0;x<len;x++)
{
if(chars[x] < 0)
floats[x] = (float)chars[x] / 128.f; // negative range is -128..-1
else
floats[x] = (float)chars[x] / 127.f; // positive range is 0..127
}
}
// used to output raw audio streams, returns negative numbers on error
// if floating point is false the data size is 16bit short, otherwise it is float 32bit
RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint)
{
int mixIndex;
for(mixIndex = 0; mixIndex < MAX_MIX_CHANNELS; mixIndex++) // find empty mix channel slot
{
if(mixChannelsActive_g[mixIndex] == NULL) break;
else if(mixIndex == MAX_MIX_CHANNELS - 1) return -1; // error: no free mix channel
}
if(InitMixChannel(sampleRate, mixIndex, channels, floatingPoint))
return mixIndex;
else
return -2; // error
}
void CloseRawAudioContext(RawAudioContext ctx)
{
if(ctx >= 0 && ctx < MAX_MIX_CHANNELS && mixChannelsActive_g[ctx])
CloseMixChannel(mixChannelsActive_g[ctx]);
}
int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements)
{
int numBuffered = 0;
if(ctx >= 0)
{
MixChannel_t* mixc = mixChannelsActive_g[ctx];
numBuffered = BufferMixChannel(mixc, data, numberElements);
}
return numBuffered;
}
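// Illustrative usage sketch for the raw audio API above (not part of this commit); it assumes
// <math.h> is available for sinf() and that the caller drives BufferRawAudioContext() once per
// frame from its main loop.
void StreamTestToneExample(void)
{
    RawAudioContext ctx = InitRawAudioContext(48000, 1, true);     // 48 kHz, mono, 32-bit float samples
    if (ctx < 0) return;                                           // negative return values signal an error

    float samples[MUSIC_BUFFER_SIZE_FLOAT];
    for (int i = 0; i < MUSIC_BUFFER_SIZE_FLOAT; i++)
    {
        samples[i] = 0.5f*sinf(2.0f*3.14159265f*440.0f*(float)i/48000.0f);   // 440 Hz test tone
    }

    // Call once per frame: data is queued only when a previously submitted buffer has been processed
    BufferRawAudioContext(ctx, samples, MUSIC_BUFFER_SIZE_FLOAT);

    CloseRawAudioContext(ctx);                                     // frees the underlying mix channel
}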
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Module Functions Definition - Sounds loading and playing (.WAV) // Module Functions Definition - Sounds loading and playing (.WAV)
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
@ -479,7 +734,7 @@ void StopSound(Sound sound)
} }
// Check if a sound is playing // Check if a sound is playing
bool SoundIsPlaying(Sound sound) bool IsSoundPlaying(Sound sound)
{ {
bool playing = false; bool playing = false;
ALint state; ALint state;
@ -507,145 +762,217 @@ void SetSoundPitch(Sound sound, float pitch)
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Start music playing (open stream) // Start music playing (open stream)
void PlayMusicStream(char *fileName) // returns 0 on success
int PlayMusicStream(int musicIndex, char *fileName)
{ {
int mixIndex;
if(currentMusic[musicIndex].stream || currentMusic[musicIndex].chipctx) return 1; // error
for(mixIndex = 0; mixIndex < MAX_MIX_CHANNELS; mixIndex++) // find empty mix channel slot
{
if(mixChannelsActive_g[mixIndex] == NULL) break;
else if(mixIndex == MAX_MIX_CHANNELS - 1) return 2; // error: no free mix channel
}
if (strcmp(GetExtension(fileName),"ogg") == 0) if (strcmp(GetExtension(fileName),"ogg") == 0)
{ {
// Stop current music, clean buffers, unload current stream
StopMusicStream();
// Open audio stream // Open audio stream
currentMusic.stream = stb_vorbis_open_filename(fileName, NULL, NULL); currentMusic[musicIndex].stream = stb_vorbis_open_filename(fileName, NULL, NULL);
if (currentMusic.stream == NULL) if (currentMusic[musicIndex].stream == NULL)
{ {
TraceLog(WARNING, "[%s] OGG audio file could not be opened", fileName); TraceLog(WARNING, "[%s] OGG audio file could not be opened", fileName);
return 3; // error
} }
else else
{ {
// Get file info // Get file info
stb_vorbis_info info = stb_vorbis_get_info(currentMusic.stream); stb_vorbis_info info = stb_vorbis_get_info(currentMusic[musicIndex].stream);
currentMusic.channels = info.channels;
currentMusic.sampleRate = info.sample_rate;
TraceLog(INFO, "[%s] Ogg sample rate: %i", fileName, info.sample_rate); TraceLog(INFO, "[%s] Ogg sample rate: %i", fileName, info.sample_rate);
TraceLog(INFO, "[%s] Ogg channels: %i", fileName, info.channels); TraceLog(INFO, "[%s] Ogg channels: %i", fileName, info.channels);
TraceLog(DEBUG, "[%s] Temp memory required: %i", fileName, info.temp_memory_required); TraceLog(DEBUG, "[%s] Temp memory required: %i", fileName, info.temp_memory_required);
if (info.channels == 2) currentMusic.format = AL_FORMAT_STEREO16; currentMusic[musicIndex].loop = true; // We loop by default
else currentMusic.format = AL_FORMAT_MONO16; musicEnabled_g = true;
currentMusic.loop = true; // We loop by default currentMusic[musicIndex].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[musicIndex].stream) * info.channels;
musicEnabled = true; currentMusic[musicIndex].totalLengthSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[musicIndex].stream);
// Create an audio source if (info.channels == 2){
alGenSources(1, &currentMusic.source); // Generate pointer to audio source currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 2, false);
currentMusic[musicIndex].mixc->playing = true;
alSourcef(currentMusic.source, AL_PITCH, 1); }
alSourcef(currentMusic.source, AL_GAIN, 1); else{
alSource3f(currentMusic.source, AL_POSITION, 0, 0, 0); currentMusic[musicIndex].mixc = InitMixChannel(info.sample_rate, mixIndex, 1, false);
alSource3f(currentMusic.source, AL_VELOCITY, 0, 0, 0); currentMusic[musicIndex].mixc->playing = true;
//alSourcei(currentMusic.source, AL_LOOPING, AL_TRUE); // ERROR: Buffers do not queue! }
if(!currentMusic[musicIndex].mixc) return 4; // error
// Generate two OpenAL buffers
alGenBuffers(2, currentMusic.buffers);
// Fill buffers with music...
BufferMusicStream(currentMusic.buffers[0]);
BufferMusicStream(currentMusic.buffers[1]);
// Queue buffers and start playing
alSourceQueueBuffers(currentMusic.source, 2, currentMusic.buffers);
alSourcePlay(currentMusic.source);
// NOTE: Regularly, we must check if a buffer has been processed and refill it: UpdateMusicStream()
currentMusic.totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic.stream) * currentMusic.channels;
} }
} }
else TraceLog(WARNING, "[%s] Music extension not recognized, it can't be loaded", fileName); else if (strcmp(GetExtension(fileName),"xm") == 0)
{
// only stereo is supported for xm
if(!jar_xm_create_context_from_file(&currentMusic[musicIndex].chipctx, 48000, fileName))
{
currentMusic[musicIndex].chipTune = true;
currentMusic[musicIndex].loop = true;
jar_xm_set_max_loop_count(currentMusic[musicIndex].chipctx, 0); // infinite number of loops
currentMusic[musicIndex].totalSamplesLeft = jar_xm_get_remaining_samples(currentMusic[musicIndex].chipctx);
currentMusic[musicIndex].totalLengthSeconds = ((float)currentMusic[musicIndex].totalSamplesLeft) / 48000.f;
musicEnabled_g = true;
TraceLog(INFO, "[%s] XM number of samples: %i", fileName, currentMusic[musicIndex].totalSamplesLeft);
TraceLog(INFO, "[%s] XM track length: %11.6f sec", fileName, currentMusic[musicIndex].totalLengthSeconds);
currentMusic[musicIndex].mixc = InitMixChannel(48000, mixIndex, 2, false);
if(!currentMusic[musicIndex].mixc) return 5; // error
currentMusic[musicIndex].mixc->playing = true;
}
else
{
TraceLog(WARNING, "[%s] XM file could not be opened", fileName);
return 6; // error
}
}
else
{
TraceLog(WARNING, "[%s] Music extension not recognized, it can't be loaded", fileName);
return 7; // error
}
return 0; // normal return
} }
// Stop music playing (close stream) // Stop music playing for individual music index of currentMusic array (close stream)
void StopMusicStream(void) void StopMusicStream(int index)
{ {
if (musicEnabled) if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
{ {
alSourceStop(currentMusic.source); CloseMixChannel(currentMusic[index].mixc);
EmptyMusicStream(); // Empty music buffers if (currentMusic[index].chipTune)
{
alDeleteSources(1, &currentMusic.source); jar_xm_free_context(currentMusic[index].chipctx);
alDeleteBuffers(2, currentMusic.buffers); }
else
stb_vorbis_close(currentMusic.stream); {
stb_vorbis_close(currentMusic[index].stream);
}
if(!getMusicStreamCount()) musicEnabled_g = false;
if(currentMusic[index].stream || currentMusic[index].chipctx)
{
currentMusic[index].stream = NULL;
currentMusic[index].chipctx = NULL;
}
} }
}
musicEnabled = false; // Get the number of music streams currently active; this does not mean they are playing
int getMusicStreamCount(void)
{
int musicCount = 0;
for(int musicIndex = 0; musicIndex < MAX_MUSIC_STREAMS; musicIndex++) // find empty music slot
if(currentMusic[musicIndex].stream != NULL || currentMusic[musicIndex].chipTune) musicCount++;
return musicCount;
} }
// Pause music playing // Pause music playing
void PauseMusicStream(void) void PauseMusicStream(int index)
{ {
// Pause music stream if music available! // Pause music stream if music available!
if (musicEnabled) if (index < MAX_MUSIC_STREAMS && currentMusic[index].mixc && musicEnabled_g)
{ {
TraceLog(INFO, "Pausing music stream"); TraceLog(INFO, "Pausing music stream");
alSourcePause(currentMusic.source); alSourcePause(currentMusic[index].mixc->alSource);
musicEnabled = false; currentMusic[index].mixc->playing = false;
} }
} }
// Resume music playing // Resume music playing
void ResumeMusicStream(void) void ResumeMusicStream(int index)
{ {
// Resume music playing... if music available! // Resume music playing... if music available!
ALenum state; ALenum state;
alGetSourcei(currentMusic.source, AL_SOURCE_STATE, &state); if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);
if (state == AL_PAUSED) if (state == AL_PAUSED)
{ {
TraceLog(INFO, "Resuming music stream"); TraceLog(INFO, "Resuming music stream");
alSourcePlay(currentMusic.source); alSourcePlay(currentMusic[index].mixc->alSource);
musicEnabled = true; currentMusic[index].mixc->playing = true;
}
} }
} }
// Check if music is playing // Check if any music is playing
bool MusicIsPlaying(void) bool IsMusicPlaying(int index)
{ {
bool playing = false; bool playing = false;
ALint state; ALint state;
alGetSourcei(currentMusic.source, AL_SOURCE_STATE, &state); if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
if (state == AL_PLAYING) playing = true; alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);
if (state == AL_PLAYING) playing = true;
}
return playing; return playing;
} }
// Set volume for music // Set volume for music
void SetMusicVolume(float volume) void SetMusicVolume(int index, float volume)
{ {
alSourcef(currentMusic.source, AL_GAIN, volume); if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
alSourcef(currentMusic[index].mixc->alSource, AL_GAIN, volume);
}
}
void SetMusicPitch(int index, float pitch)
{
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc){
alSourcef(currentMusic[index].mixc->alSource, AL_PITCH, pitch);
}
} }
// Get current music time length (in seconds) // Get current music time length (in seconds)
float GetMusicTimeLength(void) float GetMusicTimeLength(int index)
{ {
float totalSeconds = stb_vorbis_stream_length_in_seconds(currentMusic.stream); float totalSeconds;
if (currentMusic[index].chipTune)
{
totalSeconds = currentMusic[index].totalLengthSeconds;
}
else
{
totalSeconds = stb_vorbis_stream_length_in_seconds(currentMusic[index].stream);
}
return totalSeconds; return totalSeconds;
} }
// Get current music time played (in seconds) // Get current music time played (in seconds)
float GetMusicTimePlayed(void) float GetMusicTimePlayed(int index)
{ {
int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic.stream) * currentMusic.channels; float secondsPlayed = 0.0f;
if(index < MAX_MUSIC_STREAMS && currentMusic[index].mixc)
int samplesPlayed = totalSamples - currentMusic.totalSamplesLeft; {
if (currentMusic[index].chipTune)
float secondsPlayed = (float)samplesPlayed / (currentMusic.sampleRate * currentMusic.channels); {
uint64_t samples;
jar_xm_get_position(currentMusic[index].chipctx, NULL, NULL, NULL, &samples);
secondsPlayed = (float)samples / (48000 * currentMusic[index].mixc->channels); // Not sure if this is the correct value
}
else
{
int totalSamples = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].mixc->channels;
int samplesPlayed = totalSamples - currentMusic[index].totalSamplesLeft;
secondsPlayed = (float)samplesPlayed / (currentMusic[index].mixc->sampleRate * currentMusic[index].mixc->channels);
}
}
return secondsPlayed; return secondsPlayed;
} }
@ -655,103 +982,118 @@ float GetMusicTimePlayed(void)
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Fill music buffers with new data from music stream // Fill music buffers with new data from music stream
static bool BufferMusicStream(ALuint buffer) static bool BufferMusicStream(int index, int numBuffers)
{ {
short pcm[MUSIC_BUFFER_SIZE]; short pcm[MUSIC_BUFFER_SIZE_SHORT];
float pcmf[MUSIC_BUFFER_SIZE_FLOAT];
int size = 0; // Total size of data steamed (in bytes)
int streamedBytes = 0; // Bytes of data obtained in one samples get int size = 0; // Total size of data steamed in L+R samples for xm floats, individual L or R for ogg shorts
bool active = true; // We can get more data from stream (not finished) bool active = true; // We can get more data from stream (not finished)
if (musicEnabled) if (currentMusic[index].chipTune) // There is no end of stream for xm files; once the end is reached, zeros are generated for non-looped chiptunes.
{ {
while (size < MUSIC_BUFFER_SIZE) if(currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT)
size = MUSIC_BUFFER_SIZE_SHORT / 2;
else
size = currentMusic[index].totalSamplesLeft / 2;
for(int x=0; x<numBuffers; x++)
{ {
streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic.stream, currentMusic.channels, pcm + size, MUSIC_BUFFER_SIZE - size); jar_xm_generate_samples_16bit(currentMusic[index].chipctx, pcm, size); // generates 'size' stereo sample frames (2*size shorts) into pcm
BufferMixChannel(currentMusic[index].mixc, pcm, size * 2);
if (streamedBytes > 0) size += (streamedBytes*currentMusic.channels); currentMusic[index].totalSamplesLeft -= size * 2;
else break; if(currentMusic[index].totalSamplesLeft <= 0)
{
active = false;
break;
}
} }
//TraceLog(DEBUG, "Streaming music data to buffer. Bytes streamed: %i", size);
}
if (size > 0)
{
alBufferData(buffer, currentMusic.format, pcm, size*sizeof(short), currentMusic.sampleRate);
currentMusic.totalSamplesLeft -= size;
} }
else else
{ {
active = false; if(currentMusic[index].totalSamplesLeft >= MUSIC_BUFFER_SIZE_SHORT)
TraceLog(WARNING, "No more data obtained from stream"); size = MUSIC_BUFFER_SIZE_SHORT;
else
size = currentMusic[index].totalSamplesLeft;
for(int x=0; x<numBuffers; x++)
{
int streamedBytes = stb_vorbis_get_samples_short_interleaved(currentMusic[index].stream, currentMusic[index].mixc->channels, pcm, size);
BufferMixChannel(currentMusic[index].mixc, pcm, streamedBytes * currentMusic[index].mixc->channels);
currentMusic[index].totalSamplesLeft -= streamedBytes * currentMusic[index].mixc->channels;
if(currentMusic[index].totalSamplesLeft <= 0)
{
active = false;
break;
}
}
} }
return active; return active;
} }
// Empty music buffers // Empty music buffers
static void EmptyMusicStream(void) static void EmptyMusicStream(int index)
{ {
ALuint buffer = 0; ALuint buffer = 0;
int queued = 0; int queued = 0;
alGetSourcei(currentMusic.source, AL_BUFFERS_QUEUED, &queued); alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_QUEUED, &queued);
while (queued > 0) while (queued > 0)
{ {
alSourceUnqueueBuffers(currentMusic.source, 1, &buffer); alSourceUnqueueBuffers(currentMusic[index].mixc->alSource, 1, &buffer);
queued--; queued--;
} }
} }
// Update (re-fill) music buffers if data already processed // Determine if a music stream is ready to be written to (returns the number of processed buffers)
void UpdateMusicStream(void) static int IsMusicStreamReadyForBuffering(int index)
{ {
ALuint buffer = 0;
ALint processed = 0; ALint processed = 0;
alGetSourcei(currentMusic[index].mixc->alSource, AL_BUFFERS_PROCESSED, &processed);
return processed;
}
// Update (re-fill) music buffers if data already processed
void UpdateMusicStream(int index)
{
ALenum state;
bool active = true; bool active = true;
int numBuffers = IsMusicStreamReadyForBuffering(index);
if (musicEnabled)
if (currentMusic[index].mixc->playing && index < MAX_MUSIC_STREAMS && musicEnabled_g && currentMusic[index].mixc && numBuffers)
{ {
// Get the number of already processed buffers (if any) active = BufferMusicStream(index, numBuffers);
alGetSourcei(currentMusic.source, AL_BUFFERS_PROCESSED, &processed);
if (!active && currentMusic[index].loop)
while (processed > 0)
{ {
// Recover processed buffer for refill if (currentMusic[index].chipTune)
alSourceUnqueueBuffers(currentMusic.source, 1, &buffer);
// Refill buffer
active = BufferMusicStream(buffer);
// If no more data to stream, restart music (if loop)
if ((!active) && (currentMusic.loop))
{ {
stb_vorbis_seek_start(currentMusic.stream); currentMusic[index].totalSamplesLeft = currentMusic[index].totalLengthSeconds * 48000;
currentMusic.totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic.stream)*currentMusic.channels;
active = BufferMusicStream(buffer);
} }
else
// Add refilled buffer to queue again... don't let the music stop! {
alSourceQueueBuffers(currentMusic.source, 1, &buffer); stb_vorbis_seek_start(currentMusic[index].stream);
currentMusic[index].totalSamplesLeft = stb_vorbis_stream_length_in_samples(currentMusic[index].stream) * currentMusic[index].mixc->channels;
if (alGetError() != AL_NO_ERROR) TraceLog(WARNING, "Ogg playing, error buffering data..."); }
active = true;
processed--;
} }
ALenum state; if (alGetError() != AL_NO_ERROR) TraceLog(WARNING, "Error buffering data...");
alGetSourcei(currentMusic.source, AL_SOURCE_STATE, &state);
alGetSourcei(currentMusic[index].mixc->alSource, AL_SOURCE_STATE, &state);
if ((state != AL_PLAYING) && active) alSourcePlay(currentMusic.source); if (state != AL_PLAYING && active) alSourcePlay(currentMusic[index].mixc->alSource);
if (!active) StopMusicStream(); if (!active) StopMusicStream(index);
} }
else
return;
} }
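// Illustrative usage sketch for the indexed music API above (not part of this commit); the file
// name is hypothetical and WindowShouldClose() assumes the raylib core module is available.
void PlayAmbientMusicExample(void)
{
    if (PlayMusicStream(0, "resources/ambient.ogg") != 0) return;  // slot 0; non-zero return means error

    while (!WindowShouldClose())
    {
        UpdateMusicStream(0);                                      // refill processed buffers once per frame

        float progress = GetMusicTimePlayed(0)/GetMusicTimeLength(0);   // 0.0 to 1.0, e.g. for a progress bar
        (void)progress;

        // ... update and draw the rest of the frame ...
    }

    StopMusicStream(0);                                            // close the stream and free its mix channel
}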
// Load WAV file into Wave structure // Load WAV file into Wave structure

View File

@ -41,7 +41,9 @@
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
#ifndef __cplusplus #ifndef __cplusplus
// Boolean type // Boolean type
typedef enum { false, true } bool; #ifndef true
typedef enum { false, true } bool;
#endif
#endif #endif
// Sound source type // Sound source type
@ -59,6 +61,8 @@ typedef struct Wave {
short channels; short channels;
} Wave; } Wave;
typedef int RawAudioContext;
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { // Prevents name mangling of functions extern "C" { // Prevents name mangling of functions
#endif #endif
@ -73,6 +77,7 @@ extern "C" { // Prevents name mangling of functions
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
void InitAudioDevice(void); // Initialize audio device and context void InitAudioDevice(void); // Initialize audio device and context
void CloseAudioDevice(void); // Close the audio device and context (and music stream) void CloseAudioDevice(void); // Close the audio device and context (and music stream)
bool IsAudioDeviceReady(void); // True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet
Sound LoadSound(char *fileName); // Load sound to memory Sound LoadSound(char *fileName); // Load sound to memory
Sound LoadSoundFromWave(Wave wave); // Load sound to memory from wave data Sound LoadSoundFromWave(Wave wave); // Load sound to memory from wave data
@ -81,19 +86,28 @@ void UnloadSound(Sound sound); // Unload sound
void PlaySound(Sound sound); // Play a sound void PlaySound(Sound sound); // Play a sound
void PauseSound(Sound sound); // Pause a sound void PauseSound(Sound sound); // Pause a sound
void StopSound(Sound sound); // Stop playing a sound void StopSound(Sound sound); // Stop playing a sound
bool SoundIsPlaying(Sound sound); // Check if a sound is currently playing bool IsSoundPlaying(Sound sound); // Check if a sound is currently playing
void SetSoundVolume(Sound sound, float volume); // Set volume for a sound (1.0 is max level) void SetSoundVolume(Sound sound, float volume); // Set volume for a sound (1.0 is max level)
void SetSoundPitch(Sound sound, float pitch); // Set pitch for a sound (1.0 is base level) void SetSoundPitch(Sound sound, float pitch); // Set pitch for a sound (1.0 is base level)
void PlayMusicStream(char *fileName); // Start music playing (open stream) int PlayMusicStream(int musicIndex, char *fileName); // Start music playing (open stream)
void UpdateMusicStream(void); // Updates buffers for music streaming void UpdateMusicStream(int index); // Updates buffers for music streaming
void StopMusicStream(void); // Stop music playing (close stream) void StopMusicStream(int index); // Stop music playing (close stream)
void PauseMusicStream(void); // Pause music playing void PauseMusicStream(int index); // Pause music playing
void ResumeMusicStream(void); // Resume playing paused music void ResumeMusicStream(int index); // Resume playing paused music
bool MusicIsPlaying(void); // Check if music is playing bool IsMusicPlaying(int index); // Check if music is playing
void SetMusicVolume(float volume); // Set volume for music (1.0 is max level) void SetMusicVolume(int index, float volume); // Set volume for music (1.0 is max level)
float GetMusicTimeLength(void); // Get current music time length (in seconds) float GetMusicTimeLength(int index); // Get music time length (in seconds)
float GetMusicTimePlayed(void); // Get current music time played (in seconds) float GetMusicTimePlayed(int index); // Get current music time played (in seconds)
int getMusicStreamCount(void);
void SetMusicPitch(int index, float pitch);
// used to output raw audio streams, returns negative numbers on error
// if floating point is false the data size is 16bit short, otherwise it is float 32bit
RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint);
void CloseRawAudioContext(RawAudioContext ctx);
int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements); // returns number of elements buffered
#ifdef __cplusplus #ifdef __cplusplus
} }

File diff suppressed because it is too large

View File

@ -7,6 +7,23 @@
* This header uses: * This header uses:
* #define EASINGS_STATIC_INLINE // Inlines all functions code, so it runs faster. * #define EASINGS_STATIC_INLINE // Inlines all functions code, so it runs faster.
* // This requires lots of memory on system. * // This requires lots of memory on system.
* How to use:
* The four inputs t,b,c,d are defined as follows:
* t = current time in milliseconds
* b = starting position in only one dimension [X || Y || Z], your choice
* c = the total change in value of b that needs to occur
* d = total time it should take to complete
*
* Example:
* float speed = 1.f;
* float currentTime = 0.f;
* float currentPos[2] = {0,0};
* float finalPos[2] = {1,1};
* float startPosition[2] = { currentPos[0], currentPos[1] };   // x,y starting positions
* while (currentPos[0] < finalPos[0])
* {
*     currentPos[0] = EaseSineIn(currentTime, startPosition[0], finalPos[0] - startPosition[0], speed);
*     currentPos[1] = EaseSineIn(currentTime, startPosition[1], finalPos[1] - startPosition[1], speed);
*     currentTime += diffTime();
* }
* *
* A port of Robert Penner's easing equations to C (http://robertpenner.com/easing/) * A port of Robert Penner's easing equations to C (http://robertpenner.com/easing/)
* *

View File

@ -292,14 +292,14 @@ void UpdateGestures(void)
} }
// Check if a gesture has been detected // Check if a gesture has been detected
bool IsGestureDetected(void) bool IsGestureDetected(int gesture)
{ {
if ((enabledGestures & currentGesture) != GESTURE_NONE) return true; if ((enabledGestures & currentGesture) == gesture) return true;
else return false; else return false;
} }
// Check gesture type // Check gesture type
int GetGestureType(void) int GetGestureDetected(void)
{ {
// Get current gesture only if enabled // Get current gesture only if enabled
return (enabledGestures & currentGesture); return (enabledGestures & currentGesture);

View File

@ -92,8 +92,8 @@ extern "C" { // Prevents name mangling of functions
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
void ProcessGestureEvent(GestureEvent event); // Process gesture event and translate it into gestures void ProcessGestureEvent(GestureEvent event); // Process gesture event and translate it into gestures
void UpdateGestures(void); // Update gestures detected (must be called every frame) void UpdateGestures(void); // Update gestures detected (must be called every frame)
bool IsGestureDetected(void); // Check if a gesture has been detected bool IsGestureDetected(int gesture); // Check if a gesture has been detected
int GetGestureType(void); // Get latest detected gesture int GetGestureDetected(void); // Get latest detected gesture
void SetGesturesEnabled(unsigned int gestureFlags); // Enable a set of gestures using flags void SetGesturesEnabled(unsigned int gestureFlags); // Enable a set of gestures using flags
int GetTouchPointsCount(void); // Get touch points count int GetTouchPointsCount(void); // Get touch points count
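// Illustrative usage sketch for the reworked gestures API above (not part of this commit);
// GESTURE_TAP and GESTURE_DRAG are assumed to be the standard raylib gesture flags.
void CheckGesturesExample(void)
{
    UpdateGestures();                                   // must be called every frame before querying

    bool tapped = IsGestureDetected(GESTURE_TAP);       // true only while a tap is the current gesture
    int current = GetGestureDetected();                 // latest detected gesture among the enabled ones

    if (tapped || (current == GESTURE_DRAG))
    {
        // ... react to the input here ...
    }
}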

src/jar_xm.h (new file, 2666 lines)

File diff suppressed because it is too large

View File

@ -55,7 +55,9 @@ extern unsigned int whiteTexture;
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Module specific Functions Declaration // Module specific Functions Declaration
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
static Mesh LoadOBJ(const char *fileName); static Mesh LoadOBJ(const char *fileName); // Load OBJ mesh data
static Material LoadMTL(const char *fileName); // Load MTL material data
static Mesh GenMeshHeightmap(Image image, Vector3 size); static Mesh GenMeshHeightmap(Image image, Vector3 size);
static Mesh GenMeshCubicmap(Image cubicmap, Vector3 cubeSize); static Mesh GenMeshCubicmap(Image cubicmap, Vector3 cubeSize);
@ -542,39 +544,119 @@ void DrawGizmo(Vector3 position)
Model LoadModel(const char *fileName) Model LoadModel(const char *fileName)
{ {
Model model = { 0 }; Model model = { 0 };
Mesh mesh = { 0 };
// NOTE: Initialize default data for model in case loading fails, maybe a cube? // TODO: Initialize default data for model in case loading fails, maybe a cube?
if (strcmp(GetExtension(fileName),"obj") == 0) mesh = LoadOBJ(fileName); if (strcmp(GetExtension(fileName),"obj") == 0) model.mesh = LoadOBJ(fileName);
else TraceLog(WARNING, "[%s] Model extension not recognized, it can't be loaded", fileName); else TraceLog(WARNING, "[%s] Model extension not recognized, it can't be loaded", fileName);
// NOTE: At this point we have all vertex, texcoord, normal data for the model in mesh struct if (model.mesh.vertexCount == 0) TraceLog(WARNING, "Model could not be loaded");
if (mesh.vertexCount == 0) TraceLog(WARNING, "Model could not be loaded");
else else
{ {
// NOTE: model properties (transform, texture, shader) are initialized inside rlglLoadModel() rlglLoadMesh(&model.mesh, false); // Upload vertex data to GPU (static model)
model = rlglLoadModel(mesh); // Upload vertex data to GPU
model.transform = MatrixIdentity();
// NOTE: Now that vertex data is uploaded to GPU VRAM, we can free arrays from CPU RAM model.material = LoadDefaultMaterial();
// We don't need CPU vertex data on OpenGL 3.3 or ES2... for static meshes...
// ...but we could keep CPU vertex data in case we need to update the mesh
} }
return model; return model;
} }
// Load a 3d model (from vertex data) // Load a 3d model (from vertex data)
Model LoadModelEx(Mesh data) Model LoadModelEx(Mesh data, bool dynamic)
{ {
Model model; Model model = { 0 };
// NOTE: model properties (transform, texture, shader) are initialized inside rlglLoadModel() model.mesh = data;
model = rlglLoadModel(data); // Upload vertex data to GPU
// NOTE: Vertex data is managed externally, must be deallocated manually rlglLoadMesh(&model.mesh, dynamic); // Upload vertex data to GPU
model.transform = MatrixIdentity();
model.material = LoadDefaultMaterial();
return model;
}
// Load a 3d model from rRES file (raylib Resource)
Model LoadModelFromRES(const char *rresName, int resId)
{
Model model = { 0 };
bool found = false;
char id[4]; // rRES file identifier
unsigned char version; // rRES file version and subversion
char useless; // rRES header reserved data
short numRes;
ResInfoHeader infoHeader;
FILE *rresFile = fopen(rresName, "rb");
if (rresFile == NULL)
{
TraceLog(WARNING, "[%s] rRES raylib resource file could not be opened", rresName);
}
else
{
// Read rres file (basic file check - id)
fread(&id[0], sizeof(char), 1, rresFile);
fread(&id[1], sizeof(char), 1, rresFile);
fread(&id[2], sizeof(char), 1, rresFile);
fread(&id[3], sizeof(char), 1, rresFile);
fread(&version, sizeof(char), 1, rresFile);
fread(&useless, sizeof(char), 1, rresFile);
if ((id[0] != 'r') || (id[1] != 'R') || (id[2] != 'E') || (id[3] != 'S')) // reject the file unless all four id characters match
{
TraceLog(WARNING, "[%s] This is not a valid raylib resource file", rresName);
}
else
{
// Read number of resources embedded
fread(&numRes, sizeof(short), 1, rresFile);
for (int i = 0; i < numRes; i++)
{
fread(&infoHeader, sizeof(ResInfoHeader), 1, rresFile);
if (infoHeader.id == resId)
{
found = true;
// Check data is of valid MODEL type
if (infoHeader.type == 8)
{
// TODO: Load model data
}
else
{
TraceLog(WARNING, "[%s] Required resource do not seem to be a valid MODEL resource", rresName);
}
}
else
{
// Depending on type, skip the right amount of parameters
switch (infoHeader.type)
{
case 0: fseek(rresFile, 6, SEEK_CUR); break; // IMAGE: Jump 6 bytes of parameters
case 1: fseek(rresFile, 6, SEEK_CUR); break; // SOUND: Jump 6 bytes of parameters
case 2: fseek(rresFile, 5, SEEK_CUR); break; // MODEL: Jump 5 bytes of parameters (TODO: Review)
case 3: break; // TEXT: No parameters
case 4: break; // RAW: No parameters
default: break;
}
// Jump DATA to read next infoHeader
fseek(rresFile, infoHeader.size, SEEK_CUR);
}
}
}
fclose(rresFile);
}
if (!found) TraceLog(WARNING, "[%s] Required resource id [%i] could not be found in the raylib resource file", rresName, resId);
return model; return model;
} }
@ -582,8 +664,14 @@ Model LoadModelEx(Mesh data)
// NOTE: model map size is defined in generic units // NOTE: model map size is defined in generic units
Model LoadHeightmap(Image heightmap, Vector3 size) Model LoadHeightmap(Image heightmap, Vector3 size)
{ {
Mesh mesh = GenMeshHeightmap(heightmap, size); Model model = { 0 };
Model model = rlglLoadModel(mesh);
model.mesh = GenMeshHeightmap(heightmap, size);
rlglLoadMesh(&model.mesh, false); // Upload vertex data to GPU (static model)
model.transform = MatrixIdentity();
model.material = LoadDefaultMaterial();
return model; return model;
} }
@ -591,34 +679,76 @@ Model LoadHeightmap(Image heightmap, Vector3 size)
// Load a map image as a 3d model (cubes based) // Load a map image as a 3d model (cubes based)
Model LoadCubicmap(Image cubicmap) Model LoadCubicmap(Image cubicmap)
{ {
Mesh mesh = GenMeshCubicmap(cubicmap, (Vector3){ 1.0, 1.0, 1.5f }); Model model = { 0 };
Model model = rlglLoadModel(mesh);
model.mesh = GenMeshCubicmap(cubicmap, (Vector3){ 1.0, 1.0, 1.5f });
rlglLoadMesh(&model.mesh, false); // Upload vertex data to GPU (static model)
model.transform = MatrixIdentity();
model.material = LoadDefaultMaterial();
return model; return model;
} }
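// Illustrative usage sketch for the heightmap loader above (not part of this commit); the image
// path and map size are hypothetical, LoadImage()/UnloadImage() come from the raylib textures module.
void LoadTerrainExample(void)
{
    Image heightImg = LoadImage("resources/heightmap.png");                      // grayscale height data
    Model terrain = LoadHeightmap(heightImg, (Vector3){ 16.0f, 8.0f, 16.0f });   // map size in world units
    UnloadImage(heightImg);                                                      // CPU copy no longer needed

    // ... DrawModel(terrain, position, 1.0f, WHITE) every frame ...

    UnloadModel(terrain);                                                        // frees mesh (GPU) and material
}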
// Unload 3d model from memory // Unload 3d model from memory (mesh and material)
void UnloadModel(Model model) void UnloadModel(Model model)
{ {
// Unload mesh data rlglUnloadMesh(&model.mesh);
free(model.mesh.vertices);
free(model.mesh.texcoords);
free(model.mesh.normals);
free(model.mesh.colors);
//if (model.mesh.texcoords2 != NULL) free(model.mesh.texcoords2); // Not used
//if (model.mesh.tangents != NULL) free(model.mesh.tangents); // Not used
rlDeleteBuffers(model.mesh.vboId[0]); // vertex
rlDeleteBuffers(model.mesh.vboId[1]); // texcoords
rlDeleteBuffers(model.mesh.vboId[2]); // normals
//rlDeleteBuffers(model.mesh.vboId[3]); // texcoords2 (NOT USED)
//rlDeleteBuffers(model.mesh.vboId[4]); // tangents (NOT USED)
//rlDeleteBuffers(model.mesh.vboId[5]); // colors (NOT USED)
rlDeleteVertexArrays(model.mesh.vaoId); UnloadMaterial(model.material);
if (model.mesh.vaoId > 0) TraceLog(INFO, "[VAO ID %i] Unloaded model data from VRAM (GPU)", model.mesh.vaoId); TraceLog(INFO, "Unloaded model data from RAM and VRAM");
else TraceLog(INFO, "[VBO ID %i][VBO ID %i][VBO ID %i] Unloaded model data from VRAM (GPU)", model.mesh.vboId[0], model.mesh.vboId[1], model.mesh.vboId[2]); }
// Load material data (from file)
Material LoadMaterial(const char *fileName)
{
Material material = { 0 };
if (strcmp(GetExtension(fileName),"mtl") == 0) material = LoadMTL(fileName);
else TraceLog(WARNING, "[%s] Material extension not recognized, it can't be loaded", fileName);
return material;
}
// Load default material (uses default models shader)
Material LoadDefaultMaterial(void)
{
Material material = { 0 };
material.shader = GetDefaultShader();
material.texDiffuse = GetDefaultTexture(); // White texture (1x1 pixel)
//material.texNormal; // NOTE: By default, not set
//material.texSpecular; // NOTE: By default, not set
material.colDiffuse = WHITE; // Diffuse color
material.colAmbient = WHITE; // Ambient color
material.colSpecular = WHITE; // Specular color
material.glossiness = 100.0f; // Glossiness level
material.normalDepth = 1.0f; // Normal map depth
return material;
}
// Load standard material (uses standard models shader)
// NOTE: Standard shader supports multiple maps and lights
Material LoadStandardMaterial(void)
{
Material material = LoadDefaultMaterial();
//material.shader = GetStandardShader();
return material;
}
// Unload material from memory
void UnloadMaterial(Material material)
{
rlDeleteTextures(material.texDiffuse.id);
rlDeleteTextures(material.texNormal.id);
rlDeleteTextures(material.texSpecular.id);
} }
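// Illustrative usage sketch for the new material workflow above (not part of this commit);
// the .obj/.mtl file names are hypothetical.
void LoadModelWithMaterialExample(void)
{
    Model model = LoadModel("resources/barrel.obj");            // mesh uploaded to GPU, default material assigned
    model.material = LoadMaterial("resources/barrel.mtl");      // replace it with the .mtl definition

    // ... DrawModel(model, position, 1.0f, WHITE) every frame ...

    UnloadModel(model);                                         // unloads mesh and material (textures included)
}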
// Link a texture to a model // Link a texture to a model
@ -628,11 +758,12 @@ void SetModelTexture(Model *model, Texture2D texture)
else model->material.texDiffuse = texture; else model->material.texDiffuse = texture;
} }
// Generate a mesh from heightmap
static Mesh GenMeshHeightmap(Image heightmap, Vector3 size) static Mesh GenMeshHeightmap(Image heightmap, Vector3 size)
{ {
#define GRAY_VALUE(c) ((c.r+c.g+c.b)/3) #define GRAY_VALUE(c) ((c.r+c.g+c.b)/3)
Mesh mesh; Mesh mesh = { 0 };
int mapX = heightmap.width; int mapX = heightmap.width;
int mapZ = heightmap.height; int mapZ = heightmap.height;
@ -647,7 +778,7 @@ static Mesh GenMeshHeightmap(Image heightmap, Vector3 size)
mesh.vertices = (float *)malloc(mesh.vertexCount*3*sizeof(float)); mesh.vertices = (float *)malloc(mesh.vertexCount*3*sizeof(float));
mesh.normals = (float *)malloc(mesh.vertexCount*3*sizeof(float)); mesh.normals = (float *)malloc(mesh.vertexCount*3*sizeof(float));
mesh.texcoords = (float *)malloc(mesh.vertexCount*2*sizeof(float)); mesh.texcoords = (float *)malloc(mesh.vertexCount*2*sizeof(float));
mesh.colors = (unsigned char *)malloc(mesh.vertexCount*4*sizeof(unsigned char)); // Not used... mesh.colors = NULL;
int vCounter = 0; // Used to count vertices float by float int vCounter = 0; // Used to count vertices float by float
int tcCounter = 0; // Used to count texcoords float by float int tcCounter = 0; // Used to count texcoords float by float
@ -730,16 +861,12 @@ static Mesh GenMeshHeightmap(Image heightmap, Vector3 size)
free(pixels); free(pixels);
// Fill color data
// NOTE: Not used any more... just one plain color defined at DrawModel()
for (int i = 0; i < (4*mesh.vertexCount); i++) mesh.colors[i] = 255;
return mesh; return mesh;
} }
static Mesh GenMeshCubicmap(Image cubicmap, Vector3 cubeSize) static Mesh GenMeshCubicmap(Image cubicmap, Vector3 cubeSize)
{ {
Mesh mesh; Mesh mesh = { 0 };
Color *cubicmapPixels = GetImageData(cubicmap); Color *cubicmapPixels = GetImageData(cubicmap);
@ -1048,11 +1175,7 @@ static Mesh GenMeshCubicmap(Image cubicmap, Vector3 cubeSize)
mesh.vertices = (float *)malloc(mesh.vertexCount*3*sizeof(float)); mesh.vertices = (float *)malloc(mesh.vertexCount*3*sizeof(float));
mesh.normals = (float *)malloc(mesh.vertexCount*3*sizeof(float)); mesh.normals = (float *)malloc(mesh.vertexCount*3*sizeof(float));
mesh.texcoords = (float *)malloc(mesh.vertexCount*2*sizeof(float)); mesh.texcoords = (float *)malloc(mesh.vertexCount*2*sizeof(float));
mesh.colors = (unsigned char *)malloc(mesh.vertexCount*4*sizeof(unsigned char)); // Not used... mesh.colors = NULL;
// Fill color data
// NOTE: Not used any more... just one plain color defined at DrawModel()
for (int i = 0; i < (4*mesh.vertexCount); i++) mesh.colors[i] = 255;
int fCounter = 0; int fCounter = 0;
@ -1100,31 +1223,46 @@ void DrawModel(Model model, Vector3 position, float scale, Color tint)
{ {
Vector3 vScale = { scale, scale, scale }; Vector3 vScale = { scale, scale, scale };
Vector3 rotationAxis = { 0.0f, 0.0f, 0.0f }; Vector3 rotationAxis = { 0.0f, 0.0f, 0.0f };
DrawModelEx(model, position, rotationAxis, 0.0f, vScale, tint); DrawModelEx(model, position, rotationAxis, 0.0f, vScale, tint);
} }
// Draw a model with extended parameters // Draw a model with extended parameters
void DrawModelEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint) void DrawModelEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint)
{ {
// NOTE: Rotation must be provided in degrees, it's converted to radians inside rlglDrawModel() // Calculate transformation matrix from function parameters
rlglDrawModel(model, position, rotationAxis, rotationAngle, scale, tint, false); // Get transform matrix (rotation -> scale -> translation)
Matrix matRotation = MatrixRotate(rotationAxis, rotationAngle*DEG2RAD);
Matrix matScale = MatrixScale(scale.x, scale.y, scale.z);
Matrix matTranslation = MatrixTranslate(position.x, position.y, position.z);
// Combine model transformation matrix (model.transform) with matrix generated by function parameters (matTransform)
//Matrix matModel = MatrixMultiply(model.transform, matTransform); // Transform to world-space coordinates
model.transform = MatrixMultiply(MatrixMultiply(matScale, matRotation), matTranslation);
model.material.colDiffuse = tint;
rlglDrawMesh(model.mesh, model.material, model.transform);
} }
// Draw a model wires (with texture if set) // Draw a model wires (with texture if set)
void DrawModelWires(Model model, Vector3 position, float scale, Color color) void DrawModelWires(Model model, Vector3 position, float scale, Color tint)
{ {
Vector3 vScale = { scale, scale, scale }; rlEnableWireMode();
Vector3 rotationAxis = { 0.0f, 0.0f, 0.0f };
DrawModel(model, position, scale, tint);
rlglDrawModel(model, position, rotationAxis, 0.0f, vScale, color, true);
rlDisableWireMode();
} }
// Draw a model wires (with texture if set) with extended parameters // Draw a model wires (with texture if set) with extended parameters
void DrawModelWiresEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint) void DrawModelWiresEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint)
{ {
// NOTE: Rotation must be provided in degrees, it's converted to radians inside rlglDrawModel() rlEnableWireMode();
rlglDrawModel(model, position, rotationAxis, rotationAngle, scale, tint, true);
DrawModelEx(model, position, rotationAxis, rotationAngle, scale, tint);
rlDisableWireMode();
} }
// Draw a billboard // Draw a billboard
@ -1329,13 +1467,19 @@ bool CheckCollisionRayBox(Ray ray, BoundingBox box)
BoundingBox CalculateBoundingBox(Mesh mesh) BoundingBox CalculateBoundingBox(Mesh mesh)
{ {
// Get min and max vertex to construct bounds (AABB) // Get min and max vertex to construct bounds (AABB)
Vector3 minVertex = (Vector3){ mesh.vertices[0], mesh.vertices[1], mesh.vertices[2] }; Vector3 minVertex = { 0 };
Vector3 maxVertex = (Vector3){ mesh.vertices[0], mesh.vertices[1], mesh.vertices[2] }; Vector3 maxVertex = { 0 };
for (int i = 1; i < mesh.vertexCount; i++) if (mesh.vertices != NULL)
{ {
minVertex = VectorMin(minVertex, (Vector3){ mesh.vertices[i*3], mesh.vertices[i*3 + 1], mesh.vertices[i*3 + 2] }); minVertex = (Vector3){ mesh.vertices[0], mesh.vertices[1], mesh.vertices[2] };
maxVertex = VectorMax(maxVertex, (Vector3){ mesh.vertices[i*3], mesh.vertices[i*3 + 1], mesh.vertices[i*3 + 2] }); maxVertex = (Vector3){ mesh.vertices[0], mesh.vertices[1], mesh.vertices[2] };
for (int i = 1; i < mesh.vertexCount; i++)
{
minVertex = VectorMin(minVertex, (Vector3){ mesh.vertices[i*3], mesh.vertices[i*3 + 1], mesh.vertices[i*3 + 2] });
maxVertex = VectorMax(maxVertex, (Vector3){ mesh.vertices[i*3], mesh.vertices[i*3 + 1], mesh.vertices[i*3 + 2] });
}
} }
// Create the bounding box // Create the bounding box
@ -1736,7 +1880,7 @@ static Mesh LoadOBJ(const char *fileName)
mesh.vertices = (float *)malloc(mesh.vertexCount*3*sizeof(float)); mesh.vertices = (float *)malloc(mesh.vertexCount*3*sizeof(float));
mesh.texcoords = (float *)malloc(mesh.vertexCount*2*sizeof(float)); mesh.texcoords = (float *)malloc(mesh.vertexCount*2*sizeof(float));
mesh.normals = (float *)malloc(mesh.vertexCount*3*sizeof(float)); mesh.normals = (float *)malloc(mesh.vertexCount*3*sizeof(float));
mesh.colors = (unsigned char *)malloc(mesh.vertexCount*4*sizeof(unsigned char)); mesh.colors = NULL;
int vCounter = 0; // Used to count vertices float by float int vCounter = 0; // Used to count vertices float by float
int tcCounter = 0; // Used to count texcoords float by float int tcCounter = 0; // Used to count texcoords float by float
@ -1835,10 +1979,6 @@ static Mesh LoadOBJ(const char *fileName)
// Security check, just in case no normals or no texcoords defined in OBJ // Security check, just in case no normals or no texcoords defined in OBJ
if (numTexCoords == 0) for (int i = 0; i < (2*mesh.vertexCount); i++) mesh.texcoords[i] = 0.0f; if (numTexCoords == 0) for (int i = 0; i < (2*mesh.vertexCount); i++) mesh.texcoords[i] = 0.0f;
// NOTE: We set all vertex colors to white
// NOTE: Not used any more... just one plain color defined at DrawModel()
for (int i = 0; i < (4*mesh.vertexCount); i++) mesh.colors[i] = 255;
// Now we can free temp mid* arrays // Now we can free temp mid* arrays
free(midVertices); free(midVertices);
@ -1850,3 +1990,163 @@ static Mesh LoadOBJ(const char *fileName)
return mesh; return mesh;
} }
// Load MTL material data (specs: http://paulbourke.net/dataformats/mtl/)
// NOTE: Texture map parameters are not supported
static Material LoadMTL(const char *fileName)
{
#define MAX_BUFFER_SIZE 128
Material material = { 0 }; // LoadDefaultMaterial();
char buffer[MAX_BUFFER_SIZE];
Vector3 color = { 1.0f, 1.0f, 1.0f };
char mapFileName[MAX_BUFFER_SIZE] = { 0 };   // NOTE: sscanf() below needs writable storage, not a NULL pointer
FILE *mtlFile;
mtlFile = fopen(fileName, "rt");
if (mtlFile == NULL)
{
TraceLog(WARNING, "[%s] MTL file could not be opened", fileName);
return material;
}
while(!feof(mtlFile))
{
fgets(buffer, MAX_BUFFER_SIZE, mtlFile);
switch (buffer[0])
{
case 'n': // newmtl string Material name. Begins a new material description.
{
// TODO: Support multiple materials in a single .mtl
sscanf(buffer, "newmtl %s", mapFileName);
TraceLog(INFO, "[%s] Loading material...", mapFileName);
} break;
case 'i': // illum int Illumination model
{
// illum = 1 if specular disabled
// illum = 2 if specular enabled (lambertian model)
// ...
} break;
case 'K': // Ka, Kd, Ks, Ke
{
switch (buffer[1])
{
case 'a': // Ka float float float Ambient color (RGB)
{
sscanf(buffer, "Ka %f %f %f", &color.x, &color.y, &color.z);
material.colAmbient.r = (unsigned char)(color.x*255);
material.colAmbient.g = (unsigned char)(color.y*255);
material.colAmbient.b = (unsigned char)(color.z*255);
} break;
case 'd': // Kd float float float Diffuse color (RGB)
{
sscanf(buffer, "Kd %f %f %f", &color.x, &color.y, &color.z);
material.colDiffuse.r = (unsigned char)(color.x*255);
material.colDiffuse.g = (unsigned char)(color.y*255);
material.colDiffuse.b = (unsigned char)(color.z*255);
} break;
case 's': // Ks float float float Specular color (RGB)
{
sscanf(buffer, "Ks %f %f %f", &color.x, &color.y, &color.z);
material.colSpecular.r = (unsigned char)(color.x*255);
material.colSpecular.g = (unsigned char)(color.y*255);
material.colSpecular.b = (unsigned char)(color.z*255);
} break;
case 'e': // Ke float float float Emissive color (RGB)
{
// TODO: Support Ke ?
} break;
default: break;
}
} break;
case 'N': // Ns, Ni
{
if (buffer[1] == 's') // Ns int Shininess (specular exponent). Ranges from 0 to 1000.
{
sscanf(buffer, "Ns %i", &material.glossiness);
}
else if (buffer[1] == 'i') // Ni int Refraction index.
{
// Not supported...
}
} break;
case 'm': // map_Kd, map_Ks, map_Ka, map_Bump, map_d
{
switch (buffer[4])
{
case 'K': // Color texture maps
{
if (buffer[5] == 'd') // map_Kd string Diffuse color texture map.
{
sscanf(buffer, "map_Kd %s", mapFileName);
if (mapFileName != NULL) material.texDiffuse = LoadTexture(mapFileName);
}
else if (buffer[5] == 's') // map_Ks string Specular color texture map.
{
sscanf(buffer, "map_Ks %s", mapFileName);
if (mapFileName != NULL) material.texSpecular = LoadTexture(mapFileName);
}
else if (buffer[5] == 'a') // map_Ka string Ambient color texture map.
{
// Not supported...
}
} break;
case 'B': // map_Bump string Bump texture map.
{
sscanf(buffer, "map_Bump %s", mapFileName);
if (mapFileName != NULL) material.texNormal = LoadTexture(mapFileName);
} break;
case 'b': // map_bump string Bump texture map.
{
sscanf(buffer, "map_bump %s", mapFileName);
if (mapFileName != NULL) material.texNormal = LoadTexture(mapFileName);
} break;
case 'd': // map_d string Opacity texture map.
{
// Not supported...
} break;
default: break;
}
} break;
case 'd': // d, disp
{
if (buffer[1] == ' ') // d float Dissolve factor. d is inverse of Tr
{
float alpha = 1.0f;
sscanf(buffer, "d %f", &alpha);
material.colDiffuse.a = (unsigned char)(alpha*255);
}
else if (buffer[1] == 'i') // disp string Displacement map
{
// Not supported...
}
} break;
case 'b': // bump string Bump texture map
{
sscanf(buffer, "bump %s", mapFileName);
if (mapFileName != NULL) material.texNormal = LoadTexture(mapFileName);
} break;
case 'T': // Tr float Transparency Tr (alpha). Tr is inverse of d
{
float ialpha = 0.0f;
sscanf(buffer, "Tr %f", &ialpha);
material.colDiffuse.a = (unsigned char)((1.0f - ialpha)*255);
} break;
case 'r': // refl string Reflection texture map
default: break;
}
}
fclose(mtlFile);
// NOTE: At this point we have all material data
TraceLog(INFO, "[%s] Material loaded successfully", fileName);
return material;
}

View File

@ -49,7 +49,7 @@
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
// Global Variables Definition // Global Variables Definition
//---------------------------------------------------------------------------------- //----------------------------------------------------------------------------------
static PhysicObject *physicObjects[MAX_PHYSIC_OBJECTS]; // Physic objects pool static PhysicObject physicObjects[MAX_PHYSIC_OBJECTS]; // Physic objects pool
static int physicObjectsCount; // Counts current enabled physic objects static int physicObjectsCount; // Counts current enabled physic objects
static Vector2 gravityForce; // Gravity force static Vector2 gravityForce; // Gravity force
@ -463,10 +463,10 @@ void ClosePhysics()
} }
// Create a new physic object dynamically, initialize it and add to pool // Create a new physic object dynamically, initialize it and add to pool
PhysicObject *CreatePhysicObject(Vector2 position, float rotation, Vector2 scale) PhysicObject CreatePhysicObject(Vector2 position, float rotation, Vector2 scale)
{ {
// Allocate dynamic memory // Allocate dynamic memory
PhysicObject *obj = (PhysicObject *)malloc(sizeof(PhysicObject)); PhysicObject obj = (PhysicObject)malloc(sizeof(PhysicObjectData));
// Initialize physic object values with generic values // Initialize physic object values with generic values
obj->id = physicObjectsCount; obj->id = physicObjectsCount;
@ -498,7 +498,7 @@ PhysicObject *CreatePhysicObject(Vector2 position, float rotation, Vector2 scale
} }
// Destroy a specific physic object and take it out of the list // Destroy a specific physic object and take it out of the list
void DestroyPhysicObject(PhysicObject *pObj) void DestroyPhysicObject(PhysicObject pObj)
{ {
// Free dynamic memory allocation // Free dynamic memory allocation
free(physicObjects[pObj->id]); free(physicObjects[pObj->id]);
@ -520,7 +520,7 @@ void DestroyPhysicObject(PhysicObject *pObj)
} }
// Apply directional force to a physic object // Apply directional force to a physic object
void ApplyForce(PhysicObject *pObj, Vector2 force) void ApplyForce(PhysicObject pObj, Vector2 force)
{ {
if (pObj->rigidbody.enabled) if (pObj->rigidbody.enabled)
{ {
@ -571,7 +571,7 @@ Rectangle TransformToRectangle(Transform transform)
} }
// Draw physic object information at screen position // Draw physic object information at screen position
void DrawPhysicObjectInfo(PhysicObject *pObj, Vector2 position, int fontSize) void DrawPhysicObjectInfo(PhysicObject pObj, Vector2 position, int fontSize)
{ {
// Draw physic object ID // Draw physic object ID
DrawText(FormatText("PhysicObject ID: %i - Enabled: %i", pObj->id, pObj->enabled), position.x, position.y, fontSize, BLACK); DrawText(FormatText("PhysicObject ID: %i - Enabled: %i", pObj->id, pObj->enabled), position.x, position.y, fontSize, BLACK);
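
A minimal sketch of the call sequence implied by the new opaque-handle API (PhysicObject is now a pointer to PhysicObjectData). In a real program these calls run inside a normal raylib window/game loop, and the gravity value below is just an assumption:

#include "raylib.h"

int main(void)
{
    InitPhysics((Vector2){ 0.0f, -9.81f });         // gravity vector (sign convention assumed)

    PhysicObject box = CreatePhysicObject((Vector2){ 100, 100 }, 0.0f, (Vector2){ 32, 32 });
    box->rigidbody.enabled = true;                  // let forces and gravity act on the object

    ApplyForce(box, (Vector2){ 50.0f, 0.0f });      // push it to the right

    for (int i = 0; i < 60; i++) UpdatePhysics();   // normally called once per frame

    DestroyPhysicObject(box);
    ClosePhysics();
    return 0;
}
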

View File

@ -66,13 +66,13 @@ typedef struct Collider {
     int radius;             // Used for COLLIDER_CIRCLE
 } Collider;
-typedef struct PhysicObject {
+typedef struct PhysicObjectData {
     unsigned int id;
     Transform transform;
     Rigidbody rigidbody;
     Collider collider;
     bool enabled;
-} PhysicObject;
+} PhysicObjectData, *PhysicObject;
 #ifdef __cplusplus
 extern "C" {            // Prevents name mangling of functions
@ -85,14 +85,14 @@ void InitPhysics(Vector2 gravity);
 void UpdatePhysics();                                                                // Update physic objects, calculating physic behaviours and collisions detection
 void ClosePhysics();                                                                 // Uninitialize all physic objects and empty the objects pool
-PhysicObject *CreatePhysicObject(Vector2 position, float rotation, Vector2 scale);   // Create a new physic object dynamically, initialize it and add to pool
-void DestroyPhysicObject(PhysicObject *pObj);                                        // Destroy a specific physic object and take it out of the list
-void ApplyForce(PhysicObject *pObj, Vector2 force);                                  // Apply directional force to a physic object
+PhysicObject CreatePhysicObject(Vector2 position, float rotation, Vector2 scale);    // Create a new physic object dynamically, initialize it and add to pool
+void DestroyPhysicObject(PhysicObject pObj);                                         // Destroy a specific physic object and take it out of the list
+void ApplyForce(PhysicObject pObj, Vector2 force);                                   // Apply directional force to a physic object
 void ApplyForceAtPosition(Vector2 position, float force, float radius);              // Apply radial force to all physic objects in range
 Rectangle TransformToRectangle(Transform transform);                                 // Convert Transform data type to Rectangle (position and scale)
-void DrawPhysicObjectInfo(PhysicObject *pObj, Vector2 position, int fontSize);       // Draw physic object information at screen position
+void DrawPhysicObjectInfo(PhysicObject pObj, Vector2 position, int fontSize);        // Draw physic object information at screen position
 #ifdef __cplusplus
 }

View File

@ -1,6 +1,6 @@
 /**********************************************************************************************
 *
-*   raylib 1.4.0 (www.raylib.com)
+*   raylib 1.5.0 (www.raylib.com)
 *
 *   A simple and easy-to-use library to learn videogames programming
 *
@ -261,7 +261,9 @@
 //----------------------------------------------------------------------------------
 #ifndef __cplusplus
     // Boolean type
-    typedef enum { false, true } bool;
+    #ifndef true
+        typedef enum { false, true } bool;
+    #endif
 #endif
 // byte type
@ -324,6 +326,13 @@ typedef struct Texture2D {
     int format;             // Data format (TextureFormat)
 } Texture2D;
+// RenderTexture2D type, for texture rendering
+typedef struct RenderTexture2D {
+    unsigned int id;        // Render texture (fbo) id
+    Texture2D texture;      // Color buffer attachment texture
+    Texture2D depth;        // Depth buffer attachment texture
+} RenderTexture2D;
 // SpriteFont type, includes texture and charSet array data
 typedef struct SpriteFont {
     Texture2D texture;      // Font texture
@ -345,8 +354,8 @@ typedef struct Camera {
 // Camera2D type, defines a 2d camera
 typedef struct Camera2D {
-    Vector2 position;       // Camera position
-    Vector2 origin;         // Camera origin (for rotation and zoom)
+    Vector2 offset;         // Camera offset (displacement from target)
+    Vector2 target;         // Camera target (rotation and zoom origin)
     float rotation;         // Camera rotation in degrees
     float zoom;             // Camera zoom (scaling), should be 1.0f by default
 } Camera2D;
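
With the renamed fields, a 2D camera is now described by a screen-space offset and a world-space target. A minimal sketch using Begin2dMode()/End2dMode(), which are introduced further down in this header:

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "camera2d sketch");

    Camera2D camera;
    camera.target = (Vector2){ 400.0f, 225.0f };    // world point the camera follows
    camera.offset = (Vector2){ 0.0f, 0.0f };        // screen-space displacement from target
    camera.rotation = 0.0f;
    camera.zoom = 1.0f;

    SetTargetFPS(60);
    while (!WindowShouldClose())
    {
        BeginDrawing();
            ClearBackground(RAYWHITE);
            Begin2dMode(camera);
                DrawRectangle(380, 205, 40, 40, RED);   // drawn in world space, transformed by the camera
            End2dMode();
        EndDrawing();
    }

    CloseWindow();
    return 0;
}
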
@ -359,34 +368,39 @@ typedef struct BoundingBox {
 // Vertex data defining a mesh
 typedef struct Mesh {
-    int vertexCount;            // num vertices
-    float *vertices;            // vertex position (XYZ - 3 components per vertex)
-    float *texcoords;           // vertex texture coordinates (UV - 2 components per vertex)
-    float *texcoords2;          // vertex second texture coordinates (useful for lightmaps)
-    float *normals;             // vertex normals (XYZ - 3 components per vertex)
-    float *tangents;            // vertex tangents (XYZ - 3 components per vertex)
-    unsigned char *colors;      // vertex colors (RGBA - 4 components per vertex)
+    int vertexCount;            // number of vertices stored in arrays
+    float *vertices;            // vertex position (XYZ - 3 components per vertex) (shader-location = 0)
+    float *texcoords;           // vertex texture coordinates (UV - 2 components per vertex) (shader-location = 1)
+    float *texcoords2;          // vertex second texture coordinates (useful for lightmaps) (shader-location = 5)
+    float *normals;             // vertex normals (XYZ - 3 components per vertex) (shader-location = 2)
+    float *tangents;            // vertex tangents (XYZ - 3 components per vertex) (shader-location = 4)
+    unsigned char *colors;      // vertex colors (RGBA - 4 components per vertex) (shader-location = 3)
+    unsigned short *indices;    // vertex indices (in case vertex data comes indexed)
+    int triangleCount;          // number of triangles stored (indexed or not)
     BoundingBox bounds;         // mesh limits defined by min and max points
     unsigned int vaoId;         // OpenGL Vertex Array Object id
-    unsigned int vboId[6];      // OpenGL Vertex Buffer Objects id (6 types of vertex data)
+    unsigned int vboId[7];      // OpenGL Vertex Buffer Objects id (7 types of vertex data)
 } Mesh;
 // Shader type (generic shader)
 typedef struct Shader {
     unsigned int id;            // Shader program id
-    // Variable attributes
-    int vertexLoc;              // Vertex attribute location point (vertex shader)
-    int texcoordLoc;            // Texcoord attribute location point (vertex shader)
-    int normalLoc;              // Normal attribute location point (vertex shader)
-    int colorLoc;               // Color attribute location point (vertex shader)
-    // Uniforms
+    // Vertex attributes locations (default locations)
+    int vertexLoc;              // Vertex attribute location point (default-location = 0)
+    int texcoordLoc;            // Texcoord attribute location point (default-location = 1)
+    int normalLoc;              // Normal attribute location point (default-location = 2)
+    int colorLoc;               // Color attribute location point (default-location = 3)
+    int tangentLoc;             // Tangent attribute location point (default-location = 4)
+    int texcoord2Loc;           // Texcoord2 attribute location point (default-location = 5)
+    // Uniform locations
     int mvpLoc;                 // ModelView-Projection matrix uniform location point (vertex shader)
     int tintColorLoc;           // Color uniform location point (fragment shader)
+    // Texture map locations
     int mapDiffuseLoc;          // Diffuse map texture uniform location point (fragment shader)
     int mapNormalLoc;           // Normal map texture uniform location point (fragment shader)
     int mapSpecularLoc;         // Specular map texture uniform location point (fragment shader)
@ -408,14 +422,38 @@ typedef struct Material {
     float normalDepth;          // Normal map depth
 } Material;
-// 3d Model type
-// TODO: Replace shader/texture by material
+// Model type
 typedef struct Model {
-    Mesh mesh;
-    Matrix transform;
-    Material material;
+    Mesh mesh;                  // Vertex data buffers (RAM and VRAM)
+    Matrix transform;           // Local transform matrix
+    Material material;          // Shader and textures data
 } Model;
+// Light type
+// TODO: Review contained data to support different light types and features
+typedef struct LightData {
+    int id;
+    int type;                   // LIGHT_POINT, LIGHT_DIRECTIONAL, LIGHT_SPOT
+    bool enabled;
+    Vector3 position;
+    Vector3 direction;          // Used on LIGHT_DIRECTIONAL and LIGHT_SPOT (cone direction)
+    float attenuation;          // Loss of light intensity with distance (use radius?)
+    Color diffuse;              // Use Vector3 diffuse (including intensities)?
+    float intensity;
+    Color specular;
+    //float specFactor;         // Specular intensity ?
+    //Color ambient;            // Required?
+    float coneAngle;            // SpotLight
+} LightData, *Light;
+// Light types
+typedef enum { LIGHT_POINT, LIGHT_DIRECTIONAL, LIGHT_SPOT } LightType;
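
Light is likewise an opaque pointer to LightData, managed through the CreateLight()/DestroyLight() pool functions declared further down. A minimal sketch; the lighting shader that actually consumes the pool is assumed to be set up elsewhere:

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "light pool sketch");

    Light bulb = CreateLight(LIGHT_POINT, (Vector3){ 0.0f, 4.0f, 0.0f }, WHITE);
    bulb->intensity = 1.5f;     // fields of LightData are reachable through the handle
    bulb->specular = WHITE;

    // ... draw models with a lighting-aware shader here ...

    DestroyLight(bulb);
    CloseWindow();
    return 0;
}
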
 // Ray type (useful for raycast)
 typedef struct Ray {
     Vector3 position;
@ -432,11 +470,13 @@ typedef struct Sound {
 typedef struct Wave {
     void *data;                 // Buffer data pointer
     unsigned int dataSize;      // Data size in bytes
-    unsigned int sampleRate;
-    short bitsPerSample;
+    unsigned int sampleRate;    // Samples per second to be played
+    short bitsPerSample;        // Sample size in bits
     short channels;
 } Wave;
+typedef int RawAudioContext;
 // Texture formats
 // NOTE: Support depends on OpenGL version and platform
 typedef enum {
@ -484,7 +524,7 @@ typedef enum { TOUCH_UP, TOUCH_DOWN, TOUCH_MOVE } TouchAction;
 // Gesture events
 // NOTE: MAX_TOUCH_POINTS fixed to 2
-typedef struct {
+typedef struct GestureEvent {
     int touchAction;
     int pointCount;
     int pointerId[MAX_TOUCH_POINTS];
@ -520,13 +560,13 @@ typedef struct Collider {
     int radius;                 // Used for COLLIDER_CIRCLE
 } Collider;
-typedef struct PhysicObject {
+typedef struct PhysicObjectData {
     unsigned int id;
     Transform transform;
     Rigidbody rigidbody;
     Collider collider;
     bool enabled;
-} PhysicObject;
+} PhysicObjectData, *PhysicObject;
 #ifdef __cplusplus
 extern "C" {            // Prevents name mangling of functions
@ -550,24 +590,28 @@ void CloseWindow(void);  // Close Window and
 bool WindowShouldClose(void);                       // Detect if KEY_ESCAPE pressed or Close icon pressed
 bool IsWindowMinimized(void);                       // Detect if window has been minimized (or lost focus)
 void ToggleFullscreen(void);                        // Fullscreen toggle (only PLATFORM_DESKTOP)
-#if defined(PLATFORM_DESKTOP) || defined(PLATFORM_RPI)
-void SetCustomCursor(const char *cursorImage);      // Set a custom cursor icon/image
-void SetExitKey(int key);                           // Set a custom key to exit program (default is ESC)
-#endif
 int GetScreenWidth(void);                           // Get current screen width
 int GetScreenHeight(void);                          // Get current screen height
+void ShowCursor(void);                              // Shows cursor
+void HideCursor(void);                              // Hides cursor
+bool IsCursorHidden(void);                          // Returns true if cursor is not visible
+void EnableCursor(void);                            // Enables cursor
+void DisableCursor(void);                           // Disables cursor
 void ClearBackground(Color color);                  // Sets Background Color
 void BeginDrawing(void);                            // Setup drawing canvas to start drawing
-void BeginDrawingEx(Camera2D camera);               // Setup drawing canvas with 2d camera
-void BeginDrawingPro(int blendMode, Shader shader, Matrix transform);   // Setup drawing canvas with pro parameters
 void EndDrawing(void);                              // End canvas drawing and Swap Buffers (Double Buffering)
+void Begin2dMode(Camera2D camera);                  // Initialize 2D mode with custom camera
+void End2dMode(void);                               // Ends 2D mode custom camera usage
 void Begin3dMode(Camera camera);                    // Initializes 3D mode for drawing (Camera setup)
 void End3dMode(void);                               // Ends 3D mode and returns to default 2D orthographic mode
+void BeginTextureMode(RenderTexture2D target);      // Initializes render texture for drawing
+void EndTextureMode(void);                          // Ends drawing to render texture
 Ray GetMouseRay(Vector2 mousePosition, Camera camera);       // Returns a ray trace from mouse position
-Vector2 WorldToScreen(Vector3 position, Camera camera);      // Returns the screen space position from a 3d world space position
+Vector2 GetWorldToScreen(Vector3 position, Camera camera);   // Returns the screen space position from a 3d world space position
 Matrix GetCameraMatrix(Camera camera);              // Returns camera transform matrix (view matrix)
 void SetTargetFPS(int fps);                         // Set target FPS (maximum)
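
BeginTextureMode()/EndTextureMode() redirect all drawing into a RenderTexture2D, which can then be drawn like any other texture. A minimal offscreen-rendering sketch:

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "render texture sketch");

    RenderTexture2D target = LoadRenderTexture(800, 450);  // offscreen color + depth buffers

    SetTargetFPS(60);
    while (!WindowShouldClose())
    {
        BeginTextureMode(target);       // draw into the offscreen texture
            ClearBackground(BLACK);
            DrawCircle(400, 225, 50, GREEN);
        EndTextureMode();

        BeginDrawing();
            ClearBackground(RAYWHITE);
            DrawTexture(target.texture, 0, 0, WHITE);   // present the result (may appear y-flipped)
        EndDrawing();
    }

    UnloadRenderTexture(target);
    CloseWindow();
    return 0;
}
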
@ -602,6 +646,15 @@ bool IsKeyDown(int key);  // Detect if a key is be
 bool IsKeyReleased(int key);                        // Detect if a key has been released once
 bool IsKeyUp(int key);                              // Detect if a key is NOT being pressed
 int GetKeyPressed(void);                            // Get latest key pressed
+void SetExitKey(int key);                           // Set a custom key to exit program (default is ESC)
+bool IsGamepadAvailable(int gamepad);               // Detect if a gamepad is available
+float GetGamepadAxisMovement(int gamepad, int axis);        // Return axis movement value for a gamepad axis
+bool IsGamepadButtonPressed(int gamepad, int button);       // Detect if a gamepad button has been pressed once
+bool IsGamepadButtonDown(int gamepad, int button);          // Detect if a gamepad button is being pressed
+bool IsGamepadButtonReleased(int gamepad, int button);      // Detect if a gamepad button has been released once
+bool IsGamepadButtonUp(int gamepad, int button);            // Detect if a gamepad button is NOT being pressed
+#endif
 bool IsMouseButtonPressed(int button);              // Detect if a mouse button has been pressed once
 bool IsMouseButtonDown(int button);                 // Detect if a mouse button is being pressed
@ -613,20 +666,6 @@ Vector2 GetMousePosition(void);  // Returns mouse positio
 void SetMousePosition(Vector2 position);            // Set mouse position XY
 int GetMouseWheelMove(void);                        // Returns mouse wheel movement Y
-void ShowCursor(void);                              // Shows cursor
-void HideCursor(void);                              // Hides cursor
-void EnableCursor(void);                            // Enables cursor
-void DisableCursor(void);                           // Disables cursor
-bool IsCursorHidden(void);                          // Returns true if cursor is not visible
-bool IsGamepadAvailable(int gamepad);               // Detect if a gamepad is available
-float GetGamepadAxisMovement(int gamepad, int axis);        // Return axis movement value for a gamepad axis
-bool IsGamepadButtonPressed(int gamepad, int button);       // Detect if a gamepad button has been pressed once
-bool IsGamepadButtonDown(int gamepad, int button);          // Detect if a gamepad button is being pressed
-bool IsGamepadButtonReleased(int gamepad, int button);      // Detect if a gamepad button has been released once
-bool IsGamepadButtonUp(int gamepad, int button);            // Detect if a gamepad button is NOT being pressed
-#endif
 int GetTouchX(void);                                // Returns touch position X for touch point 0 (relative to screen size)
 int GetTouchY(void);                                // Returns touch position Y for touch point 0 (relative to screen size)
 Vector2 GetTouchPosition(int index);                // Returns touch position XY for a touch point index (relative to screen size)
@ -641,9 +680,9 @@ bool IsButtonReleased(int button);  // Detect if an android
 // Gestures and Touch Handling Functions (Module: gestures)
 //------------------------------------------------------------------------------------
 void ProcessGestureEvent(GestureEvent event);       // Process gesture event and translate it into gestures
-void UpdateGestures(void);                          // Update gestures detected (must be called every frame)
-bool IsGestureDetected(void);                       // Check if a gesture has been detected
-int GetGestureType(void);                           // Get latest detected gesture
+void UpdateGestures(void);                          // Update gestures detected (called automatically in PollInputEvents())
+bool IsGestureDetected(int gesture);                // Check if a gesture has been detected
+int GetGestureDetected(void);                       // Get latest detected gesture
 void SetGesturesEnabled(unsigned int gestureFlags); // Enable a set of gestures using flags
 int GetTouchPointsCount(void);                      // Get touch points count
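
Since UpdateGestures() now runs automatically from PollInputEvents(), user code only queries results. A minimal sketch, assuming GESTURE_TAP is one of the gesture flags defined by the gestures module:

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "gesture sketch");
    SetTargetFPS(60);

    while (!WindowShouldClose())
    {
        BeginDrawing();
            ClearBackground(RAYWHITE);
            if (IsGestureDetected(GESTURE_TAP)) DrawText("TAP!", 10, 10, 20, RED);
        EndDrawing();
    }

    CloseWindow();
    return 0;
}
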
@ -714,8 +753,10 @@ Texture2D LoadTexture(const char *fileName);
 Texture2D LoadTextureEx(void *data, int width, int height, int textureFormat);     // Load a texture from raw data into GPU memory
 Texture2D LoadTextureFromRES(const char *rresName, int resId);                     // Load an image as texture from rRES file (raylib Resource)
 Texture2D LoadTextureFromImage(Image image);                                       // Load a texture from image data
+RenderTexture2D LoadRenderTexture(int width, int height);                          // Load a texture to be used for rendering
 void UnloadImage(Image image);                                                     // Unload image from CPU memory (RAM)
 void UnloadTexture(Texture2D texture);                                             // Unload texture from GPU memory
+void UnloadRenderTexture(RenderTexture2D target);                                  // Unload render texture from GPU memory
 Color *GetImageData(Image image);                                                  // Get pixel data from image as a Color struct array
 Image GetTextureData(Texture2D texture);                                           // Get pixel data from GPU texture and return an Image
 void ImageToPOT(Image *image, Color fillColor);                                    // Convert image to POT (power-of-two)
@ -785,17 +826,21 @@ void DrawGizmo(Vector3 position);
 //------------------------------------------------------------------------------------
 // Model 3d Loading and Drawing Functions (Module: models)
 //------------------------------------------------------------------------------------
 Model LoadModel(const char *fileName);                                             // Load a 3d model (.OBJ)
-Model LoadModelEx(Mesh data);                                                      // Load a 3d model (from mesh data)
-//Model LoadModelFromRES(const char *rresName, int resId);                         // TODO: Load a 3d model from rRES file (raylib Resource)
+Model LoadModelEx(Mesh data, bool dynamic);                                        // Load a 3d model (from mesh data)
+Model LoadModelFromRES(const char *rresName, int resId);                           // Load a 3d model from rRES file (raylib Resource)
 Model LoadHeightmap(Image heightmap, Vector3 size);                                // Load a heightmap image as a 3d model
 Model LoadCubicmap(Image cubicmap);                                                // Load a map image as a 3d model (cubes based)
 void UnloadModel(Model model);                                                     // Unload 3d model from memory
 void SetModelTexture(Model *model, Texture2D texture);                             // Link a texture to a model
+Material LoadMaterial(const char *fileName);                                       // Load material data (from file)
+Material LoadDefaultMaterial(void);                                                // Load default material (uses default models shader)
+void UnloadMaterial(Material material);                                            // Unload material textures from VRAM
 void DrawModel(Model model, Vector3 position, float scale, Color tint);            // Draw a model (with texture if set)
 void DrawModelEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint);   // Draw a model with extended parameters
-void DrawModelWires(Model model, Vector3 position, float scale, Color color);      // Draw a model wires (with texture if set)
+void DrawModelWires(Model model, Vector3 position, float scale, Color tint);       // Draw a model wires (with texture if set)
 void DrawModelWiresEx(Model model, Vector3 position, Vector3 rotationAxis, float rotationAngle, Vector3 scale, Color tint);   // Draw a model wires (with texture if set) with extended parameters
 void DrawBoundingBox(BoundingBox box, Color color);                                // Draw bounding box (wires)
@ -816,25 +861,22 @@ Vector3 ResolveCollisionCubicmap(Image cubicmap, Vector3 mapPosition, Vector3 *p
 // NOTE: These functions are useless when using OpenGL 1.1
 //------------------------------------------------------------------------------------
 Shader LoadShader(char *vsFileName, char *fsFileName);                             // Load a custom shader and bind default locations
-unsigned int LoadShaderProgram(char *vShaderStr, char *fShaderStr);                // Load custom shaders strings and return program id
 void UnloadShader(Shader shader);                                                  // Unload a custom shader from memory
-void SetPostproShader(Shader shader);                                              // Set fullscreen postproduction shader
-void SetCustomShader(Shader shader);                                               // Set custom shader to be used in batch draw
 void SetDefaultShader(void);                                                       // Set default shader to be used in batch draw
-void SetModelShader(Model *model, Shader shader);                                  // Link a shader to a model
-bool IsPosproShaderEnabled(void);                                                  // Check if postprocessing shader is enabled
+void SetCustomShader(Shader shader);                                               // Set custom shader to be used in batch draw
+Shader GetDefaultShader(void);                                                     // Get default shader
+Texture2D GetDefaultTexture(void);                                                 // Get default texture
 int GetShaderLocation(Shader shader, const char *uniformName);                     // Get shader uniform location
 void SetShaderValue(Shader shader, int uniformLoc, float *value, int size);        // Set shader uniform value (float)
 void SetShaderValuei(Shader shader, int uniformLoc, int *value, int size);         // Set shader uniform value (int)
 void SetShaderValueMatrix(Shader shader, int uniformLoc, Matrix mat);              // Set shader uniform value (matrix 4x4)
-//void SetShaderMapDiffuse(Shader *shader, Texture2D texture);                     // Default diffuse shader map texture assignment
-//void SetShaderMapNormal(Shader *shader, const char *uniformName, Texture2D texture);     // Normal map texture shader assignment
-//void SetShaderMapSpecular(Shader *shader, const char *uniformName, Texture2D texture);   // Specular map texture shader assignment
-//void SetShaderMap(Shader *shader, int mapLocation, Texture2D texture, int textureUnit);  // TODO: Generic shader map assignment
 void SetBlendMode(int mode);                                                       // Set blending mode (alpha, additive, multiplied)
+Light CreateLight(int type, Vector3 position, Color diffuse);                      // Create a new light, initialize it and add to pool
+void DestroyLight(Light light);                                                    // Destroy a light and take it out of the list
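
A minimal sketch of the uniform-setting flow with the trimmed-down shader API; the shader file names and the uniform name are hypothetical:

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "shader uniform sketch");

    Shader shader = LoadShader("base.vs", "swirl.fs");      // hypothetical shader files
    int centerLoc = GetShaderLocation(shader, "center");    // hypothetical vec2 uniform

    float center[2] = { 400.0f, 225.0f };
    SetShaderValue(shader, centerLoc, center, 2);           // upload two floats

    // ... SetCustomShader(shader) and the draw calls would go here ...

    UnloadShader(shader);
    CloseWindow();
    return 0;
}
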
 //----------------------------------------------------------------------------------
 // Physics System Functions (Module: physac)
 //----------------------------------------------------------------------------------
@ -842,20 +884,21 @@ void InitPhysics(Vector2 gravity);
 void UpdatePhysics();                                                              // Update physic objects, calculating physic behaviours and collisions detection
 void ClosePhysics();                                                               // Uninitialize all physic objects and empty the objects pool
-PhysicObject *CreatePhysicObject(Vector2 position, float rotation, Vector2 scale); // Create a new physic object dynamically, initialize it and add to pool
-void DestroyPhysicObject(PhysicObject *pObj);                                      // Destroy a specific physic object and take it out of the list
-void ApplyForce(PhysicObject *pObj, Vector2 force);                                // Apply directional force to a physic object
+PhysicObject CreatePhysicObject(Vector2 position, float rotation, Vector2 scale);  // Create a new physic object dynamically, initialize it and add to pool
+void DestroyPhysicObject(PhysicObject pObj);                                       // Destroy a specific physic object and take it out of the list
+void ApplyForce(PhysicObject pObj, Vector2 force);                                 // Apply directional force to a physic object
 void ApplyForceAtPosition(Vector2 position, float force, float radius);            // Apply radial force to all physic objects in range
 Rectangle TransformToRectangle(Transform transform);                               // Convert Transform data type to Rectangle (position and scale)
-void DrawPhysicObjectInfo(PhysicObject *pObj, Vector2 position, int fontSize);     // Draw physic object information at screen position
+void DrawPhysicObjectInfo(PhysicObject pObj, Vector2 position, int fontSize);      // Draw physic object information at screen position
 //------------------------------------------------------------------------------------
 // Audio Loading and Playing Functions (Module: audio)
 //------------------------------------------------------------------------------------
 void InitAudioDevice(void);                                                        // Initialize audio device and context
 void CloseAudioDevice(void);                                                       // Close the audio device and context (and music stream)
+bool IsAudioDeviceReady(void);                                                     // True if call to InitAudioDevice() was successful and CloseAudioDevice() has not been called yet
 Sound LoadSound(char *fileName);                                                   // Load sound to memory
 Sound LoadSoundFromWave(Wave wave);                                                // Load sound to memory from wave data
@ -864,19 +907,28 @@ void UnloadSound(Sound sound);  // Unload sound
 void PlaySound(Sound sound);                                                       // Play a sound
 void PauseSound(Sound sound);                                                      // Pause a sound
 void StopSound(Sound sound);                                                       // Stop playing a sound
-bool SoundIsPlaying(Sound sound);                                                  // Check if a sound is currently playing
+bool IsSoundPlaying(Sound sound);                                                  // Check if a sound is currently playing
 void SetSoundVolume(Sound sound, float volume);                                    // Set volume for a sound (1.0 is max level)
 void SetSoundPitch(Sound sound, float pitch);                                      // Set pitch for a sound (1.0 is base level)
-void PlayMusicStream(char *fileName);                                              // Start music playing (open stream)
-void UpdateMusicStream(void);                                                      // Updates buffers for music streaming
-void StopMusicStream(void);                                                        // Stop music playing (close stream)
-void PauseMusicStream(void);                                                       // Pause music playing
-void ResumeMusicStream(void);                                                      // Resume playing paused music
-bool MusicIsPlaying(void);                                                         // Check if music is playing
-void SetMusicVolume(float volume);                                                 // Set volume for music (1.0 is max level)
-float GetMusicTimeLength(void);                                                    // Get current music time length (in seconds)
-float GetMusicTimePlayed(void);                                                    // Get current music time played (in seconds)
+int PlayMusicStream(int musicIndex, char *fileName);                               // Start music playing (open stream)
+void UpdateMusicStream(int index);                                                 // Updates buffers for music streaming
+void StopMusicStream(int index);                                                   // Stop music playing (close stream)
+void PauseMusicStream(int index);                                                  // Pause music playing
+void ResumeMusicStream(int index);                                                 // Resume playing paused music
+bool IsMusicPlaying(int index);                                                    // Check if music is playing
+void SetMusicVolume(int index, float volume);                                      // Set volume for music (1.0 is max level)
+float GetMusicTimeLength(int index);                                               // Get current music time length (in seconds)
+float GetMusicTimePlayed(int index);                                               // Get current music time played (in seconds)
+int getMusicStreamCount(void);
+void SetMusicPitch(int index, float pitch);
+// used to output raw audio streams, returns negative numbers on error
+// if floating point is false the data size is 16bit short, otherwise it is float 32bit
+RawAudioContext InitRawAudioContext(int sampleRate, int channels, bool floatingPoint);
+void CloseRawAudioContext(RawAudioContext ctx);
+int BufferRawAudioContext(RawAudioContext ctx, void *data, int numberElements);    // returns number of elements buffered
 #ifdef __cplusplus
 }
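
The music stream API is now index-based, so several streams can be driven at once. A minimal sketch; stream index 0 and the .ogg path are assumptions:

#include "raylib.h"

int main(void)
{
    InitWindow(800, 450, "music stream sketch");
    InitAudioDevice();

    if (IsAudioDeviceReady())
    {
        PlayMusicStream(0, "resources/ambient.ogg");    // open a stream at index 0
        SetMusicVolume(0, 0.8f);

        while (!WindowShouldClose())
        {
            UpdateMusicStream(0);       // keep the stream buffers filled, once per frame

            BeginDrawing();
                ClearBackground(RAYWHITE);
            EndDrawing();
        }

        StopMusicStream(0);
    }

    CloseAudioDevice();
    CloseWindow();
    return 0;
}
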

View File

@ -158,6 +158,7 @@ RMDEF void PrintMatrix(Matrix m);  // Print matrix ut
 //------------------------------------------------------------------------------------
 RMDEF float QuaternionLength(Quaternion quat);                      // Compute the length of a quaternion
 RMDEF void QuaternionNormalize(Quaternion *q);                      // Normalize provided quaternion
+RMDEF void QuaternionInvert(Quaternion *quat);                      // Invert provided quaternion
 RMDEF Quaternion QuaternionMultiply(Quaternion q1, Quaternion q2);  // Calculate two quaternion multiplication
 RMDEF Quaternion QuaternionSlerp(Quaternion q1, Quaternion q2, float slerp);   // Calculates spherical linear interpolation between two quaternions
 RMDEF Quaternion QuaternionFromMatrix(Matrix matrix);               // Returns a quaternion for a given rotation matrix
@ -908,6 +909,23 @@ RMDEF void QuaternionNormalize(Quaternion *q)
     q->w *= ilength;
 }
+// Invert provided quaternion
+RMDEF void QuaternionInvert(Quaternion *quat)
+{
+    float length = QuaternionLength(*quat);
+    float lengthSq = length*length;
+
+    if (lengthSq != 0.0)
+    {
+        float i = 1.0f/lengthSq;
+
+        quat->x *= -i;
+        quat->y *= -i;
+        quat->z *= -i;
+        quat->w *= i;
+    }
+}
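
The function computes the conjugate divided by the squared length (q^-1 = conjugate(q)/|q|^2). A quick sanity-check sketch, leaving the raymath.h build configuration (standalone vs. bundled with raylib) aside:

#include "raymath.h"
#include <stdio.h>

int main(void)
{
    Quaternion q = { 0.5f, 0.5f, 0.5f, 0.5f };      // unit quaternion: 120-degree rotation about (1,1,1)/sqrt(3)
    Quaternion inv = q;

    QuaternionInvert(&inv);                         // conjugate divided by squared length (see above)
    Quaternion r = QuaternionMultiply(q, inv);      // expected: close to the identity (0, 0, 0, 1)

    printf("%f %f %f %f\n", r.x, r.y, r.z, r.w);
    return 0;
}
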
 // Calculate two quaternion multiplication
 RMDEF Quaternion QuaternionMultiply(Quaternion q1, Quaternion q2)
 {

src/rlgl.c (2337 changes)

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff