Skip to content

Commit

Permalink
Add support for vanilla spinlocks
Browse files Browse the repository at this point in the history
Signed-off-by: Alan Jowett <[email protected]>
  • Loading branch information
Alan Jowett committed Nov 14, 2024
1 parent 8295e9b commit b466a80
Show file tree
Hide file tree
Showing 4 changed files with 153 additions and 10 deletions.
28 changes: 26 additions & 2 deletions cxplat/cxplat_test/cxplat_processor_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,36 @@ TEST_CASE("processor", "[processor]")
cxplat_cleanup();
}

// Verifies that an in-stack queued spin lock can be acquired and then
// released after platform initialization.
TEST_CASE("queued_spin_lock", "[processor]")
{
    REQUIRE(cxplat_initialize() == CXPLAT_STATUS_SUCCESS);
    cxplat_queue_spin_lock_t lock;
    cxplat_lock_queue_handle_t handle;
    cxplat_acquire_in_stack_queued_spin_lock(&lock, &handle);
    cxplat_release_in_stack_queued_spin_lock(&handle);
    cxplat_cleanup();
}

// Exercises the vanilla spin lock API: acquire/release must raise and
// restore the IRQL, and the at-DPC-level variants must work while the IRQL
// is raised explicitly via cxplat_raise_irql/cxplat_lower_irql.
TEST_CASE("spin_lock", "[processor]")
{
    REQUIRE(cxplat_initialize() == CXPLAT_STATUS_SUCCESS);

    cxplat_spin_lock_t spin_lock;
    REQUIRE(cxplat_get_current_irql() == PASSIVE_LEVEL);

    // Acquire raises to DISPATCH_LEVEL; release restores the saved IRQL.
    cxplat_irql_t saved_irql = cxplat_acquire_spin_lock(&spin_lock);
    REQUIRE(cxplat_get_current_irql() == DISPATCH_LEVEL);
    cxplat_release_spin_lock(&spin_lock, saved_irql);
    REQUIRE(cxplat_get_current_irql() == PASSIVE_LEVEL);

    // Raise the IRQL manually, then use the at-DPC-level variants, which do
    // not change the IRQL themselves.
    saved_irql = cxplat_raise_irql(DISPATCH_LEVEL);
    REQUIRE(cxplat_get_current_irql() == DISPATCH_LEVEL);
    REQUIRE(saved_irql == PASSIVE_LEVEL);
    cxplat_acquire_spin_lock_at_dpc_level(&spin_lock);
    cxplat_release_spin_lock_from_dpc_level(&spin_lock);

    cxplat_lower_irql(saved_irql);
    REQUIRE(cxplat_get_current_irql() == PASSIVE_LEVEL);

    cxplat_cleanup();
}
32 changes: 29 additions & 3 deletions cxplat/inc/cxplat_processor.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,18 +30,22 @@ cxplat_get_maximum_processor_count();
// Opaque storage for an in-stack queued spin lock. Sized to overlay the
// platform lock type (checked by static_assert in the kernel
// implementation); callers must treat the contents as opaque.
typedef struct cxplat_spin_lock
{
    uint64_t Reserved[2];
} cxplat_queue_spin_lock_t;

// Opaque per-acquisition handle for a queued spin lock. Sized to overlay the
// platform's lock-queue handle type (checked by static_assert in the kernel
// implementation); callers must treat the contents as opaque.
typedef struct cxplat_lock_queue_handle
{
    uint64_t Reserved_1[2];
    uint8_t Reserved_2;
} cxplat_lock_queue_handle_t;

// A vanilla (non-queued) spin lock. Opaque; sized to overlay the platform's
// spin lock type (checked by static_assert in the kernel implementation).
typedef uintptr_t cxplat_spin_lock_t;

// Interrupt request level value (e.g. PASSIVE_LEVEL, DISPATCH_LEVEL).
typedef uint8_t cxplat_irql_t;

// Acquire an in-stack queued spin lock, raising IRQL to DISPATCH_LEVEL and
// recording acquisition state in |lock_handle|.
_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
    _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
        _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
            _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock(
Expand All @@ -50,10 +54,32 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
// Acquire an in-stack queued spin lock when the caller is already at
// DISPATCH_LEVEL, recording acquisition state in |lock_handle|.
_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
    _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
        _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
            _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);

// Release a queued spin lock acquired at DPC level via |lock_handle|;
// does not change the IRQL.
_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
    _IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock_from_dpc(
        _In_ cxplat_lock_queue_handle_t* lock_handle);

// Acquire a vanilla spin lock, raising IRQL to DISPATCH_LEVEL. Returns the
// previous IRQL, which must later be passed to cxplat_release_spin_lock.
_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_
    _IRQL_raises_(DISPATCH_LEVEL)
cxplat_irql_t
cxplat_acquire_spin_lock(_Inout_ cxplat_spin_lock_t* spin_lock);

// Release a spin lock acquired via cxplat_acquire_spin_lock, restoring the
// IRQL that was saved at acquisition time.
_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock)
    _IRQL_requires_(DISPATCH_LEVEL) void cxplat_release_spin_lock(
        _Inout_ cxplat_spin_lock_t* spin_lock, _In_ _IRQL_restores_ cxplat_irql_t old_irql);

// Acquire a spin lock when the caller is already at or above DISPATCH_LEVEL;
// does not change the IRQL.
_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_min_(
    DISPATCH_LEVEL) void cxplat_acquire_spin_lock_at_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock);

// Release a spin lock acquired at DPC level; does not change the IRQL.
_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock) _IRQL_requires_min_(
    DISPATCH_LEVEL) void cxplat_release_spin_lock_from_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock);

// Raise the current IRQL to |irql| and return the previous IRQL.
_IRQL_requires_max_(HIGH_LEVEL) _IRQL_raises_(irql) _IRQL_saves_ cxplat_irql_t
cxplat_raise_irql(_In_ cxplat_irql_t irql);

// Restore the IRQL previously returned by cxplat_raise_irql or
// cxplat_acquire_spin_lock.
_IRQL_requires_max_(HIGH_LEVEL) void cxplat_lower_irql(_In_ _Notliteral_ _IRQL_restores_ cxplat_irql_t irql);

// Return the current IRQL without changing it.
_IRQL_requires_max_(HIGH_LEVEL) _IRQL_saves_ cxplat_irql_t cxplat_get_current_irql();

CXPLAT_EXTERN_C_END
49 changes: 46 additions & 3 deletions cxplat/src/cxplat_winkernel/processor_winkernel.c
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,14 @@ cxplat_get_active_processor_count()
return KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
}

// Ensure the opaque cxplat lock types exactly overlay the kernel types they
// are cast to in the wrappers below; a mismatch would corrupt adjacent
// memory when the kernel writes through the cast pointer.
static_assert(sizeof(cxplat_queue_spin_lock_t) == sizeof(KSPIN_LOCK_QUEUE), "Size mismatch");
static_assert(sizeof(cxplat_lock_queue_handle_t) == sizeof(KLOCK_QUEUE_HANDLE), "Size mismatch");
static_assert(sizeof(cxplat_spin_lock_t) == sizeof(KSPIN_LOCK), "Size mismatch");

// Acquire an in-stack queued spin lock; thin wrapper over the kernel
// primitive, which raises IRQL to DISPATCH_LEVEL.
_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
    _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
        _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
            _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
    // Casts rely on the size static_asserts above.
    KeAcquireInStackQueuedSpinLock((PKSPIN_LOCK)spin_lock, (PKLOCK_QUEUE_HANDLE)lock_handle);
}
Expand All @@ -43,7 +44,7 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
// Acquire an in-stack queued spin lock when already at DISPATCH_LEVEL; thin
// wrapper over the kernel primitive, which does not change the IRQL.
_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
    _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
        _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
            _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
    // Casts rely on the size static_asserts above.
    KeAcquireInStackQueuedSpinLockAtDpcLevel((PKSPIN_LOCK)spin_lock, (PKLOCK_QUEUE_HANDLE)lock_handle);
}
Expand All @@ -54,3 +55,45 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
{
KeReleaseInStackQueuedSpinLockFromDpcLevel((PKLOCK_QUEUE_HANDLE)lock_handle);
}

// Acquire a vanilla spin lock, raising IRQL to DISPATCH_LEVEL. Returns the
// previous IRQL, which must later be passed to cxplat_release_spin_lock.
_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_
    _IRQL_raises_(DISPATCH_LEVEL)
cxplat_irql_t
cxplat_acquire_spin_lock(_Inout_ cxplat_spin_lock_t* spin_lock)
{
    // Cast relies on the sizeof(KSPIN_LOCK) static_assert above.
    return KeAcquireSpinLockRaiseToDpc((PKSPIN_LOCK)spin_lock);
}

// Release a spin lock acquired via cxplat_acquire_spin_lock and restore the
// IRQL saved at acquisition time.
_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock)
    _IRQL_requires_(DISPATCH_LEVEL) void cxplat_release_spin_lock(
        _Inout_ cxplat_spin_lock_t* spin_lock, _In_ _IRQL_restores_ cxplat_irql_t old_irql)
{
    KeReleaseSpinLock((PKSPIN_LOCK)spin_lock, old_irql);
}

// Acquire a spin lock when the caller is already at or above DISPATCH_LEVEL;
// does not change the IRQL.
_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_min_(
    DISPATCH_LEVEL) void cxplat_acquire_spin_lock_at_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
{
    // Cast the caller's lock pointer directly. The previous code passed
    // &spin_lock (the address of the parameter, a stack temporary), so each
    // caller spun on its own local copy and no mutual exclusion occurred.
    KeAcquireSpinLockAtDpcLevel((PKSPIN_LOCK)spin_lock);
}

// Release a spin lock acquired at DPC level; does not change the IRQL.
_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock) _IRQL_requires_min_(
    DISPATCH_LEVEL) void cxplat_release_spin_lock_from_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
{
    // Cast the caller's lock pointer directly. The previous code passed
    // &spin_lock (the address of the parameter), releasing a stack temporary
    // rather than the lock actually held.
    KeReleaseSpinLockFromDpcLevel((PKSPIN_LOCK)spin_lock);
}

// Raise the current IRQL to |irql| and return the previous IRQL so the
// caller can later restore it via cxplat_lower_irql.
_IRQL_requires_max_(HIGH_LEVEL) _IRQL_raises_(irql) _IRQL_saves_ cxplat_irql_t
cxplat_raise_irql(_In_ cxplat_irql_t irql)
{
    cxplat_irql_t old_irql;
    KeRaiseIrql(irql, &old_irql);
    return old_irql;
}

// Restore an IRQL previously returned by cxplat_raise_irql or
// cxplat_acquire_spin_lock.
_IRQL_requires_max_(HIGH_LEVEL) void cxplat_lower_irql(_In_ _Notliteral_ _IRQL_restores_ cxplat_irql_t irql)
{
    KeLowerIrql(irql);
}

// Return the current processor IRQL without changing it.
_IRQL_requires_max_(HIGH_LEVEL) _IRQL_saves_ cxplat_irql_t cxplat_get_current_irql() { return KeGetCurrentIrql(); }
54 changes: 52 additions & 2 deletions cxplat/src/cxplat_winuser/processor_winuser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ cxplat_get_active_processor_count()
_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
_Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
_Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
AcquireSRWLockExclusive(lock);
Expand All @@ -118,7 +118,7 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
_Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
_Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
AcquireSRWLockExclusive(lock);
Expand All @@ -133,3 +133,53 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
auto lock = reinterpret_cast<SRWLOCK*>(lock_handle->Reserved_1[0]);
ReleaseSRWLockExclusive(lock);
}

// User-mode emulation of acquiring a vanilla spin lock: raise the emulated
// (thread-local) IRQL to DISPATCH_LEVEL, then take the SRWLOCK exclusively.
// Returns the previous IRQL to pass to cxplat_release_spin_lock.
// NOTE(review): relies on cxplat_spin_lock_t being zero-initialized by the
// caller so it overlays SRWLOCK_INIT — confirm against the header contract.
_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_
    _IRQL_raises_(DISPATCH_LEVEL)
cxplat_irql_t
cxplat_acquire_spin_lock(_Inout_ cxplat_spin_lock_t* spin_lock)
{
    cxplat_irql_t old_irql = cxplat_raise_irql(DISPATCH_LEVEL);
    auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
    AcquireSRWLockExclusive(lock);
    return old_irql;
}

// User-mode emulation of releasing a vanilla spin lock: release the SRWLOCK
// and then restore the emulated IRQL saved at acquisition time.
_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock)
    _IRQL_requires_(DISPATCH_LEVEL) void cxplat_release_spin_lock(
        _Inout_ cxplat_spin_lock_t* spin_lock, _In_ _IRQL_restores_ cxplat_irql_t old_irql)
{
    auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
    ReleaseSRWLockExclusive(lock);
    cxplat_lower_irql(old_irql);
}

// User-mode emulation of acquiring a spin lock at DPC level: take the
// SRWLOCK exclusively without touching the emulated IRQL.
_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_min_(
    DISPATCH_LEVEL) void cxplat_acquire_spin_lock_at_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
{
    auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
    AcquireSRWLockExclusive(lock);
}

// User-mode emulation of releasing a spin lock at DPC level: release the
// SRWLOCK without touching the emulated IRQL.
_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock) _IRQL_requires_min_(
    DISPATCH_LEVEL) void cxplat_release_spin_lock_from_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
{
    // The previous body computed the SRWLOCK pointer but never released it,
    // leaving the lock held forever (and the local unused). Release it to
    // mirror cxplat_acquire_spin_lock_at_dpc_level.
    auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
    ReleaseSRWLockExclusive(lock);
}

// Per-thread emulated IRQL. User mode has no real IRQL, so each thread
// tracks its own level, starting at PASSIVE_LEVEL.
thread_local cxplat_irql_t _cxplat_current_irql = PASSIVE_LEVEL;

// Raise the emulated (thread-local) IRQL to |irql| and return the level that
// was in effect beforehand, for later restoration via cxplat_lower_irql.
_IRQL_requires_max_(HIGH_LEVEL) _IRQL_raises_(irql) _IRQL_saves_ cxplat_irql_t
cxplat_raise_irql(_In_ cxplat_irql_t irql)
{
    const cxplat_irql_t previous_irql = _cxplat_current_irql;
    _cxplat_current_irql = irql;
    return previous_irql;
}

// Restore the emulated (thread-local) IRQL to a level previously returned by
// cxplat_raise_irql or cxplat_acquire_spin_lock.
_IRQL_requires_max_(HIGH_LEVEL) void cxplat_lower_irql(_In_ _Notliteral_ _IRQL_restores_ cxplat_irql_t irql)
{
    _cxplat_current_irql = irql;
}

// Return the calling thread's emulated IRQL without changing it.
_IRQL_requires_max_(HIGH_LEVEL) _IRQL_saves_ cxplat_irql_t cxplat_get_current_irql() { return _cxplat_current_irql; }

0 comments on commit b466a80

Please sign in to comment.