FreeRDP/winpr/libwinpr/synch/critical.c

/**
* WinPR: Windows Portable Runtime
* Synchronization Functions
*
 * Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
 * Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
 *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <winpr/tchar.h>

#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifndef _WIN32

#include <sched.h> /* sched_yield() is used by EnterCriticalSection below */

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")

VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
}

BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD Flags)
{
	/**
	 * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
	 * - The LockCount field indicates the number of times that any thread has
	 *   called the EnterCriticalSection routine for this critical section,
	 *   minus one. This field starts at -1 for an unlocked critical section.
	 *   Each call of EnterCriticalSection increments this value; each call of
	 *   LeaveCriticalSection decrements it.
	 * - The RecursionCount field indicates the number of times that the owning
	 *   thread has called EnterCriticalSection for this critical section.
	 */
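	/*
	 * Illustrative sequence of the counters described above (not part of the
	 * implementation):
	 *
	 *   after InitializeCriticalSectionEx       : LockCount = -1, RecursionCount = 0
	 *   EnterCriticalSection by thread A        : LockCount =  0, RecursionCount = 1
	 *   nested EnterCriticalSection by thread A : LockCount =  1, RecursionCount = 2
	 *   LeaveCriticalSection by thread A        : LockCount =  0, RecursionCount = 1
	 *   final LeaveCriticalSection by thread A  : LockCount = -1, RecursionCount = 0
	 */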

	if (Flags != 0)
	{
		WLog_WARN(TAG, "Flags unimplemented");
	}

	lpCriticalSection->DebugInfo = NULL;
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	lpCriticalSection->LockSemaphore = (winpr_sem_t*) malloc(sizeof(winpr_sem_t));

	if (!lpCriticalSection->LockSemaphore)
		return FALSE;

#if defined(__APPLE__)
	if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) != KERN_SUCCESS)
		goto out_fail;
#else
	if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
		goto out_fail;
#endif

	SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
	return TRUE;

out_fail:
	free(lpCriticalSection->LockSemaphore);
	return FALSE;
}

BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, 0);
}
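
/*
 * Typical caller-side lifecycle (an illustrative sketch; `cs` is a hypothetical
 * caller variable, not something defined in this file):
 *
 *   CRITICAL_SECTION cs;
 *
 *   if (!InitializeCriticalSectionAndSpinCount(&cs, 4000))
 *       return FALSE;
 *
 *   EnterCriticalSection(&cs);
 *   // ... access the state protected by cs ...
 *   LeaveCriticalSection(&cs);
 *
 *   DeleteCriticalSection(&cs);
 */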
DWORD SetCriticalSectionSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;

	if (dwSpinCount)
	{
		/* Don't spin on uniprocessor systems! */
		GetNativeSystemInfo(&sysinfo);

		if (sysinfo.dwNumberOfProcessors < 2)
			dwSpinCount = 0;
	}

	lpCriticalSection->SpinCount = dwSpinCount;
	return dwPreviousSpinCount;
#else
	return 0;
#endif
}
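
/*
 * Illustrative tuning example (hypothetical caller code): a larger spin count
 * can help short, highly contended sections on multi-processor machines, since
 * a spinning thread may acquire the lock without sleeping on the semaphore.
 *
 *   DWORD previous = SetCriticalSectionSpinCount(&cs, 8000);
 *   // previous holds the spin count in effect before the call
 *   // (0 if spinning is compiled out)
 */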

/* Block the calling thread on the section's semaphore until the owner releases it. */
static VOID _WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if defined(__APPLE__)
	semaphore_wait(*((winpr_sem_t*) lpCriticalSection->LockSemaphore));
#else
	sem_wait((winpr_sem_t*) lpCriticalSection->LockSemaphore);
#endif
}

/* Signal the section's semaphore to wake up the next waiting thread. */
static VOID _UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if defined(__APPLE__)
	semaphore_signal(*((winpr_sem_t*) lpCriticalSection->LockSemaphore));
#else
	sem_post((winpr_sem_t*) lpCriticalSection->LockSemaphore);
#endif
}

VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already the owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin the requested number of times but don't compete with another waiting thread */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically try to acquire the lock if the section is free */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR) GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR) GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		_WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR) GetCurrentThreadId();
}

BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR) GetCurrentThreadId();

	/* Atomically acquire the lock if the section is free. */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
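
/*
 * Illustrative non-blocking usage (hypothetical caller code, not part of this
 * file):
 *
 *   if (TryEnterCriticalSection(&cs))
 *   {
 *       // lock acquired without blocking; it still has to be released
 *       LeaveCriticalSection(&cs);
 *   }
 *   else
 *   {
 *       // the section is held by another thread; do other work instead
 *   }
 */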
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ... */
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ... signal the semaphore to unblock the next waiting thread */
			_UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
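
/*
 * Illustrative hand-off between two threads (descriptive only): while thread A
 * owns the section (LockCount 0, RecursionCount 1), a competing
 * EnterCriticalSection by thread B raises LockCount to 1 and blocks in
 * _WaitForCriticalSection. When A calls LeaveCriticalSection, the decrement
 * leaves LockCount at 0 (>= 0), so A signals the semaphore via
 * _UnWaitCriticalSection and B wakes up owning the section.
 */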

VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	if (lpCriticalSection->LockSemaphore != NULL)
	{
#if defined(__APPLE__)
		semaphore_destroy(mach_task_self(), *((winpr_sem_t*) lpCriticalSection->LockSemaphore));
#else
		sem_destroy((winpr_sem_t*) lpCriticalSection->LockSemaphore);
#endif
		free(lpCriticalSection->LockSemaphore);
		lpCriticalSection->LockSemaphore = NULL;
	}
}
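
/*
 * Note for callers (descriptive only): after DeleteCriticalSection the object
 * may no longer be used until it has been initialized again with
 * InitializeCriticalSection or InitializeCriticalSectionEx.
 */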
#endif

#ifdef WINPR_INITIALIZE_CRITICAL_SECTION_EX

typedef BOOL (WINAPI* PINITIALIZE_CRITICAL_SECTION_EX_FN)(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD Flags);

static HMODULE g_KERNEL32_Library = NULL;
static BOOL g_InitializeCriticalSectionEx_Detected = FALSE;
static BOOL g_InitializeCriticalSectionEx_Available = FALSE;
static PINITIALIZE_CRITICAL_SECTION_EX_FN g_pInitializeCriticalSectionEx = NULL;

BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount, DWORD Flags)
{
	if (!g_InitializeCriticalSectionEx_Detected)
	{
		g_KERNEL32_Library = LoadLibrary(_T("kernel32.dll"));

		if (g_KERNEL32_Library)
		{
			g_pInitializeCriticalSectionEx = (PINITIALIZE_CRITICAL_SECTION_EX_FN)
				GetProcAddress(g_KERNEL32_Library, "InitializeCriticalSectionEx");
			g_InitializeCriticalSectionEx_Available = (g_pInitializeCriticalSectionEx) ? TRUE : FALSE;
		}
		else
		{
			g_InitializeCriticalSectionEx_Available = FALSE;
		}

		g_InitializeCriticalSectionEx_Detected = TRUE;
	}

	if (g_InitializeCriticalSectionEx_Available)
	{
		/* Vista and later */
		return (*g_pInitializeCriticalSectionEx)(lpCriticalSection, dwSpinCount, Flags);
	}
	else
	{
		/* Windows XP */
		InitializeCriticalSection(lpCriticalSection);
	}

	return TRUE;
}

#endif