2012-05-29 22:14:26 +04:00
|
|
|
/**
|
|
|
|
* WinPR: Windows Portable Runtime
|
|
|
|
* Synchronization Functions
|
|
|
|
*
|
|
|
|
* Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
|
2013-12-04 14:37:57 +04:00
|
|
|
* Copyright 2013 Norbert Federa <norbert.federa@thincast.com>
|
2012-05-29 22:14:26 +04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2012-08-15 01:20:53 +04:00
|
|
|
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <winpr/tchar.h>
#include <winpr/synch.h>
#include <winpr/sysinfo.h>
#include <winpr/interlocked.h>
#include <winpr/thread.h>

#include "synch.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#if defined(__APPLE__)
#include <mach/task.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#endif

#ifndef _WIN32

#include <errno.h>

#include "../log.h"
#define TAG WINPR_TAG("synch.critical")
|
2014-08-18 19:22:22 +04:00
|
|
|
|
2012-09-18 20:57:19 +04:00
|
|
|
/**
 * Initializes a critical section object with default spin count and flags.
 *
 * The Windows API version of this function returns no value, so the BOOL
 * result of the extended initializer is intentionally discarded here.
 */
VOID InitializeCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	(void)InitializeCriticalSectionEx(lpCriticalSection, 0, 0);
}
|
|
|
|
|
2018-03-08 15:46:46 +03:00
|
|
|
/**
 * Initializes a critical section object with the given spin count.
 *
 * See http://msdn.microsoft.com/en-us/library/ff541979(v=vs.85).aspx
 * - The LockCount field indicates the number of times that any thread has
 *   called the EnterCriticalSection routine for this critical section,
 *   minus one. This field starts at -1 for an unlocked critical section.
 *   Each call of EnterCriticalSection increments this value; each call of
 *   LeaveCriticalSection decrements it.
 * - The RecursionCount field indicates the number of times that the owning
 *   thread has called EnterCriticalSection for this critical section.
 *
 * Flags is currently unimplemented and only triggers a warning when nonzero.
 * Returns TRUE on success, FALSE if the internal semaphore could not be
 * allocated or initialized.
 */
BOOL InitializeCriticalSectionEx(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount,
                                 DWORD Flags)
{
	if (Flags != 0)
	{
		WLog_WARN(TAG, "Flags unimplemented");
	}

	lpCriticalSection->DebugInfo = NULL;
	lpCriticalSection->LockCount = -1; /* -1 == unlocked */
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;
	lpCriticalSection->LockSemaphore = (winpr_sem_t*)malloc(sizeof(winpr_sem_t));

	if (!lpCriticalSection->LockSemaphore)
		return FALSE;

#if defined(__APPLE__)

	if (semaphore_create(mach_task_self(), lpCriticalSection->LockSemaphore, SYNC_POLICY_FIFO, 0) !=
	    KERN_SUCCESS)
		goto out_fail;

#else

	if (sem_init(lpCriticalSection->LockSemaphore, 0, 0) != 0)
		goto out_fail;

#endif
	SetCriticalSectionSpinCount(lpCriticalSection, dwSpinCount);
	return TRUE;
out_fail:
	free(lpCriticalSection->LockSemaphore);
	/* Clear the freed pointer: a later DeleteCriticalSection on this
	 * partially initialized object must not double-free or destroy it. */
	lpCriticalSection->LockSemaphore = NULL;
	return FALSE;
}
|
|
|
|
|
2012-09-18 20:57:19 +04:00
|
|
|
/**
 * Initializes a critical section and sets its spin count in a single call.
 * Forwards to InitializeCriticalSectionEx with no flags.
 */
BOOL InitializeCriticalSectionAndSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
	const DWORD flags = 0;
	return InitializeCriticalSectionEx(lpCriticalSection, dwSpinCount, flags);
}
|
|
|
|
|
|
|
|
/**
 * Sets the spin count of a critical section and returns the previous value.
 *
 * On uniprocessor systems the requested count is forced to zero, since
 * spinning cannot help when the lock owner shares the single CPU.
 * When WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT is defined, spinning is
 * compiled out entirely and 0 is always returned.
 */
DWORD SetCriticalSectionSpinCount(LPCRITICAL_SECTION lpCriticalSection, DWORD dwSpinCount)
{
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	SYSTEM_INFO sysinfo;
	DWORD dwPreviousSpinCount = lpCriticalSection->SpinCount;

	if (dwSpinCount)
	{
		/* Don't spin on uniprocessor systems! */
		GetNativeSystemInfo(&sysinfo);

		if (sysinfo.dwNumberOfProcessors < 2)
			dwSpinCount = 0;
	}

	lpCriticalSection->SpinCount = dwSpinCount;
	return dwPreviousSpinCount;
#else
	/* Spinning support compiled out: silence unused-parameter warnings. */
	(void)lpCriticalSection;
	(void)dwSpinCount;
	return 0;
#endif
}
|
|
|
|
|
2014-07-23 19:26:49 +04:00
|
|
|
/**
 * Blocks on the section's semaphore until it is signaled by
 * _UnWaitCriticalSection (i.e. until the current owner leaves).
 */
static VOID _WaitForCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if defined(__APPLE__)
	semaphore_wait(*((winpr_sem_t*)lpCriticalSection->LockSemaphore));
#else

	/* sem_wait may be interrupted by a signal and fail with EINTR; retry so
	 * we never return to the caller without having acquired the semaphore. */
	while (sem_wait((winpr_sem_t*)lpCriticalSection->LockSemaphore) == -1 && errno == EINTR)
		;

#endif
}
|
|
|
|
|
2014-07-23 19:26:49 +04:00
|
|
|
/**
 * Wakes one thread blocked in _WaitForCriticalSection by signaling the
 * section's semaphore.
 */
static VOID _UnWaitCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	winpr_sem_t* sem = (winpr_sem_t*)lpCriticalSection->LockSemaphore;
#if defined(__APPLE__)
	semaphore_signal(*sem);
#else
	sem_post(sem);
#endif
}
|
|
|
|
|
|
|
|
/**
 * Acquires the critical section, blocking until it is available.
 *
 * Recursive acquisition by the owning thread is supported: it only bumps
 * RecursionCount (and LockCount) without waiting. If spinning is enabled,
 * the lock is first attempted dwSpinCount times before falling back to a
 * blocking semaphore wait.
 */
VOID EnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
#if !defined(WINPR_CRITICAL_SECTION_DISABLE_SPINCOUNT)
	ULONG SpinCount = lpCriticalSection->SpinCount;

	/* If we're lucky or if the current thread is already owner we can return early */
	if (SpinCount && TryEnterCriticalSection(lpCriticalSection))
		return;

	/* Spin requested times but don't compete with another waiting thread */
	while (SpinCount-- && lpCriticalSection->LockCount < 1)
	{
		/* Atomically try to acquire and check the if the section is free.
		 * LockCount is -1 exactly when unlocked (see InitializeCriticalSectionEx),
		 * so a successful -1 -> 0 exchange means we now own the lock. */
		if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
		{
			lpCriticalSection->RecursionCount = 1;
			lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
			return;
		}

		/* Failed to get the lock. Let the scheduler know that we're spinning. */
		if (sched_yield() != 0)
		{
			/**
			 * On some operating systems sched_yield is a stub.
			 * usleep should at least trigger a context switch if any thread is waiting.
			 * A ThreadYield() would be nice in winpr ...
			 */
			usleep(1);
		}
	}

#endif

	/* First try the fastest possible path to get the lock.
	 * LockCount starts at -1, so the increment yields 0 (false) exactly
	 * when the section was free — in that case we own it immediately. */
	if (InterlockedIncrement(&lpCriticalSection->LockCount))
	{
		/* Section is already locked. Check if it is owned by the current thread. */
		if (lpCriticalSection->OwningThread == (HANDLE)(ULONG_PTR)GetCurrentThreadId())
		{
			/* Recursion. No need to wait. */
			lpCriticalSection->RecursionCount++;
			return;
		}

		/* Section is locked by another thread. We have to wait. */
		_WaitForCriticalSection(lpCriticalSection);
	}

	/* We got the lock. Own it ... */
	lpCriticalSection->RecursionCount = 1;
	lpCriticalSection->OwningThread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();
}
|
|
|
|
|
|
|
|
/**
 * Attempts to acquire the critical section without blocking.
 *
 * Returns TRUE if the section was free (and is now owned by the calling
 * thread) or already owned by the calling thread (recursion); FALSE if it
 * is held by another thread.
 */
BOOL TryEnterCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	HANDLE current_thread = (HANDLE)(ULONG_PTR)GetCurrentThreadId();

	/* Atomically acquire the lock if the section is free (LockCount == -1). */
	if (InterlockedCompareExchange(&lpCriticalSection->LockCount, 0, -1) == -1)
	{
		lpCriticalSection->RecursionCount = 1;
		lpCriticalSection->OwningThread = current_thread;
		return TRUE;
	}

	/* Section is already locked. Check if it is owned by the current thread. */
	if (lpCriticalSection->OwningThread == current_thread)
	{
		/* Recursion, return success */
		lpCriticalSection->RecursionCount++;
		InterlockedIncrement(&lpCriticalSection->LockCount);
		return TRUE;
	}

	return FALSE;
}
|
|
|
|
|
|
|
|
/**
 * Releases the critical section once. Only when the outermost recursive
 * acquisition is released does ownership actually end; if other threads
 * are blocked waiting, the next one is woken via the semaphore.
 */
VOID LeaveCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	/* Decrement RecursionCount and check if this is the last LeaveCriticalSection call ...*/
	if (--lpCriticalSection->RecursionCount < 1)
	{
		/* Last recursion, clear owner, unlock and if there are other waiting threads ... */
		lpCriticalSection->OwningThread = NULL;

		/* A post-decrement LockCount >= 0 means at least one other
		 * EnterCriticalSection call is still pending on this section. */
		if (InterlockedDecrement(&lpCriticalSection->LockCount) >= 0)
		{
			/* ...signal the semaphore to unblock the next waiting thread */
			_UnWaitCriticalSection(lpCriticalSection);
		}
	}
	else
	{
		/* Inner recursive release: just undo one EnterCriticalSection increment. */
		InterlockedDecrement(&lpCriticalSection->LockCount);
	}
}
|
2012-05-29 22:14:26 +04:00
|
|
|
|
2012-09-18 20:57:19 +04:00
|
|
|
/**
 * Releases all resources owned by a critical section and resets its fields
 * to the unlocked state. The semaphore pointer is cleared after being
 * freed, so a repeated call is harmless.
 */
VOID DeleteCriticalSection(LPCRITICAL_SECTION lpCriticalSection)
{
	winpr_sem_t* sem = (winpr_sem_t*)lpCriticalSection->LockSemaphore;

	lpCriticalSection->LockCount = -1;
	lpCriticalSection->SpinCount = 0;
	lpCriticalSection->RecursionCount = 0;
	lpCriticalSection->OwningThread = NULL;

	if (sem != NULL)
	{
#if defined(__APPLE__)
		semaphore_destroy(mach_task_self(), *sem);
#else
		sem_destroy(sem);
#endif
		free(sem);
		lpCriticalSection->LockSemaphore = NULL;
	}
}
|
2012-09-19 02:36:13 +04:00
|
|
|
|
|
|
|
#endif
|