diff --git a/cxplat/cxplat_test/cxplat_processor_test.cpp b/cxplat/cxplat_test/cxplat_processor_test.cpp
index 37d9f6d..18be68e 100644
--- a/cxplat/cxplat_test/cxplat_processor_test.cpp
+++ b/cxplat/cxplat_test/cxplat_processor_test.cpp
@@ -22,12 +22,44 @@ TEST_CASE("processor", "[processor]")
     cxplat_cleanup();
 }
 
-TEST_CASE("lock", "[processor]")
+TEST_CASE("queued_spin_lock", "[processor]")
 {
     REQUIRE(cxplat_initialize() == CXPLAT_STATUS_SUCCESS);
-    cxplat_spin_lock_t lock;
+    cxplat_queue_spin_lock_t lock = {0};
     cxplat_lock_queue_handle_t handle;
     cxplat_acquire_in_stack_queued_spin_lock(&lock, &handle);
     cxplat_release_in_stack_queued_spin_lock(&handle);
+    cxplat_cleanup();
+}
+
+#if !defined(PASSIVE_LEVEL)
+#define PASSIVE_LEVEL 0
+#endif
+
+#if !defined(DISPATCH_LEVEL)
+#define DISPATCH_LEVEL 2
+#endif
+
+TEST_CASE("spin_lock", "[processor]")
+{
+    REQUIRE(cxplat_initialize() == CXPLAT_STATUS_SUCCESS);
+    cxplat_irql_t irql;
+    cxplat_spin_lock_t lock = 0;
+    REQUIRE(cxplat_get_current_irql() == PASSIVE_LEVEL);
+    irql = cxplat_acquire_spin_lock(&lock);
+    REQUIRE(cxplat_get_current_irql() == DISPATCH_LEVEL);
+    cxplat_release_spin_lock(&lock, irql);
+    REQUIRE(cxplat_get_current_irql() == PASSIVE_LEVEL);
+
+    irql = cxplat_raise_irql(DISPATCH_LEVEL);
+    REQUIRE(cxplat_get_current_irql() == DISPATCH_LEVEL);
+    REQUIRE(irql == PASSIVE_LEVEL);
+    cxplat_acquire_spin_lock_at_dpc_level(&lock);
+
+    cxplat_release_spin_lock_from_dpc_level(&lock);
+
+    cxplat_lower_irql(irql);
+    REQUIRE(cxplat_get_current_irql() == PASSIVE_LEVEL);
+
     cxplat_cleanup();
 }
\ No newline at end of file
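Note: the rewritten tests exercise both halves of the new API. The spin_lock test checks that acquire/release round-trips the IRQL from PASSIVE_LEVEL through DISPATCH_LEVEL and back, and that the at-DPC-level variants work under an explicit raise/lower pair. For C++ callers, the manual save/restore pairing could be wrapped in a small RAII guard; the sketch below is a hypothetical convenience wrapper, not part of this change, and assumes cxplat_processor.h is on the include path:

    #include "cxplat_processor.h"

    // Hypothetical RAII guard: acquires on construction, releases (and
    // restores the saved IRQL) on destruction, so an early return cannot
    // leak a held lock or a raised IRQL.
    class scoped_spin_lock
    {
      public:
        explicit scoped_spin_lock(cxplat_spin_lock_t& lock) : _lock(lock)
        {
            _old_irql = cxplat_acquire_spin_lock(&_lock);
        }
        ~scoped_spin_lock() { cxplat_release_spin_lock(&_lock, _old_irql); }
        scoped_spin_lock(const scoped_spin_lock&) = delete;
        scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;

      private:
        cxplat_spin_lock_t& _lock;
        cxplat_irql_t _old_irql;
    };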
diff --git a/cxplat/inc/cxplat_processor.h b/cxplat/inc/cxplat_processor.h
index e71d80a..cf3b0b4 100644
--- a/cxplat/inc/cxplat_processor.h
+++ b/cxplat/inc/cxplat_processor.h
@@ -30,7 +30,7 @@ cxplat_get_maximum_processor_count();
 typedef struct cxplat_spin_lock
 {
     uint64_t Reserved[2];
-} cxplat_spin_lock_t;
+} cxplat_queue_spin_lock_t;
 
 typedef struct cxplat_lock_queue_handle
 {
@@ -38,10 +38,14 @@ typedef struct cxplat_lock_queue_handle
     uint8_t Reserved_2;
 } cxplat_lock_queue_handle_t;
 
+typedef uintptr_t cxplat_spin_lock_t;
+
+typedef uint8_t cxplat_irql_t;
+
 _Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
     _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
         _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
-            _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);
+            _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);
 
 _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
     _IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock(
@@ -50,10 +54,32 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
 _Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
     _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
         _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
-            _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);
+            _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);
 
 _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
     _IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock_from_dpc(
         _In_ cxplat_lock_queue_handle_t* lock_handle);
 
+_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_
+    _IRQL_raises_(DISPATCH_LEVEL)
+cxplat_irql_t
+cxplat_acquire_spin_lock(_Inout_ cxplat_spin_lock_t* spin_lock);
+
+_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock)
+    _IRQL_requires_(DISPATCH_LEVEL) void cxplat_release_spin_lock(
+        _Inout_ cxplat_spin_lock_t* spin_lock, _In_ _IRQL_restores_ cxplat_irql_t old_irql);
+
+_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_min_(
+    DISPATCH_LEVEL) void cxplat_acquire_spin_lock_at_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock);
+
+_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock) _IRQL_requires_min_(
+    DISPATCH_LEVEL) void cxplat_release_spin_lock_from_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock);
+
+_IRQL_requires_max_(HIGH_LEVEL) _IRQL_raises_(irql) _IRQL_saves_ cxplat_irql_t
+    cxplat_raise_irql(_In_ cxplat_irql_t irql);
+
+_IRQL_requires_max_(HIGH_LEVEL) void cxplat_lower_irql(_In_ _Notliteral_ _IRQL_restores_ cxplat_irql_t irql);
+
+_IRQL_requires_max_(HIGH_LEVEL) _IRQL_saves_ cxplat_irql_t cxplat_get_current_irql();
+
 CXPLAT_EXTERN_C_END
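Note: the header now distinguishes two acquisition modes. cxplat_acquire_spin_lock saves the caller's IRQL, raises to DISPATCH_LEVEL, and returns the saved value, which cxplat_release_spin_lock later restores; the *_at_dpc_level variants require the caller to already be at DISPATCH_LEVEL and skip the transition entirely. The split lets a single explicit raise be amortized over several lock operations, as in this sketch (the function and lock names are hypothetical; assumes the same include as above):

    #include "cxplat_processor.h"

    // One IRQL transition covers two lock acquisitions. A fixed acquisition
    // order (source before destination) avoids lock-order deadlocks.
    void
    move_item(cxplat_spin_lock_t* source_lock, cxplat_spin_lock_t* destination_lock)
    {
        cxplat_irql_t old_irql = cxplat_raise_irql(DISPATCH_LEVEL);
        cxplat_acquire_spin_lock_at_dpc_level(source_lock);
        cxplat_acquire_spin_lock_at_dpc_level(destination_lock);
        // ... move state between the two protected structures ...
        cxplat_release_spin_lock_from_dpc_level(destination_lock);
        cxplat_release_spin_lock_from_dpc_level(source_lock);
        cxplat_lower_irql(old_irql);
    }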
diff --git a/cxplat/src/cxplat_winkernel/processor_winkernel.c b/cxplat/src/cxplat_winkernel/processor_winkernel.c
index 565bbde..97e9ecb 100644
--- a/cxplat/src/cxplat_winkernel/processor_winkernel.c
+++ b/cxplat/src/cxplat_winkernel/processor_winkernel.c
@@ -22,13 +22,14 @@ cxplat_get_active_processor_count()
     return KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
 }
 
-static_assert(sizeof(cxplat_spin_lock_t) == sizeof(KSPIN_LOCK_QUEUE), "Size mismatch");
+static_assert(sizeof(cxplat_queue_spin_lock_t) == sizeof(KSPIN_LOCK_QUEUE), "Size mismatch");
 static_assert(sizeof(cxplat_lock_queue_handle_t) == sizeof(KLOCK_QUEUE_HANDLE), "Size mismatch");
+static_assert(sizeof(cxplat_spin_lock_t) == sizeof(KSPIN_LOCK), "Size mismatch");
 
 _Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
     _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
         _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
-    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
+    _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
 {
     KeAcquireInStackQueuedSpinLock((PKSPIN_LOCK)spin_lock, (PKLOCK_QUEUE_HANDLE)lock_handle);
 }
@@ -43,7 +44,7 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
 _Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
     _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
         _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
-    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
+    _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
 {
     KeAcquireInStackQueuedSpinLockAtDpcLevel((PKSPIN_LOCK)spin_lock, (PKLOCK_QUEUE_HANDLE)lock_handle);
 }
@@ -54,3 +55,45 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
 {
     KeReleaseInStackQueuedSpinLockFromDpcLevel((PKLOCK_QUEUE_HANDLE)lock_handle);
 }
+
+_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_
+    _IRQL_raises_(DISPATCH_LEVEL)
+cxplat_irql_t
+cxplat_acquire_spin_lock(_Inout_ cxplat_spin_lock_t* spin_lock)
+{
+    return KeAcquireSpinLockRaiseToDpc((PKSPIN_LOCK)spin_lock);
+}
+
+_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock)
+    _IRQL_requires_(DISPATCH_LEVEL) void cxplat_release_spin_lock(
+        _Inout_ cxplat_spin_lock_t* spin_lock, _In_ _IRQL_restores_ cxplat_irql_t old_irql)
+{
+    KeReleaseSpinLock((PKSPIN_LOCK)spin_lock, old_irql);
+}
+
+_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_min_(
+    DISPATCH_LEVEL) void cxplat_acquire_spin_lock_at_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
+{
+    KeAcquireSpinLockAtDpcLevel((PKSPIN_LOCK)spin_lock);
+}
+
+_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock) _IRQL_requires_min_(
+    DISPATCH_LEVEL) void cxplat_release_spin_lock_from_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
+{
+    KeReleaseSpinLockFromDpcLevel((PKSPIN_LOCK)spin_lock);
+}
+
+_IRQL_requires_max_(HIGH_LEVEL) _IRQL_raises_(irql) _IRQL_saves_ cxplat_irql_t
+    cxplat_raise_irql(_In_ cxplat_irql_t irql)
+{
+    cxplat_irql_t old_irql;
+    KeRaiseIrql(irql, &old_irql);
+    return old_irql;
+}
+
+_IRQL_requires_max_(HIGH_LEVEL) void cxplat_lower_irql(_In_ _Notliteral_ _IRQL_restores_ cxplat_irql_t irql)
+{
+    KeLowerIrql(irql);
+}
+
+_IRQL_requires_max_(HIGH_LEVEL) _IRQL_saves_ cxplat_irql_t cxplat_get_current_irql() { return KeGetCurrentIrql(); }
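Note: in kernel builds each new function is a thin shim over the matching Ke* routine, and the added static_assert pins sizeof(cxplat_spin_lock_t) to sizeof(KSPIN_LOCK), so the portable lock can sit anywhere a KSPIN_LOCK could, including inside caller-defined context structures. A hedged sketch of that embedding; the struct and function names are hypothetical:

    #include <cstdint>
    #include "cxplat_processor.h"

    // Hypothetical context structure embedding the portable lock.
    typedef struct _device_context
    {
        cxplat_spin_lock_t state_lock; // overlays a KSPIN_LOCK in kernel builds
        uint64_t state;
    } device_context_t;

    void
    set_state(device_context_t* context, uint64_t value)
    {
        cxplat_irql_t old_irql = cxplat_acquire_spin_lock(&context->state_lock);
        context->state = value;
        cxplat_release_spin_lock(&context->state_lock, old_irql);
    }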
diff --git a/cxplat/src/cxplat_winuser/processor_winuser.cpp b/cxplat/src/cxplat_winuser/processor_winuser.cpp
index 989644c..2e64bb9 100644
--- a/cxplat/src/cxplat_winuser/processor_winuser.cpp
+++ b/cxplat/src/cxplat_winuser/processor_winuser.cpp
@@ -99,7 +99,7 @@ cxplat_get_active_processor_count()
 _Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
     _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
         _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
-    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
+    _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
 {
     auto lock = reinterpret_cast<PSRWLOCK>(spin_lock);
     AcquireSRWLockExclusive(lock);
@@ -118,7 +118,7 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
 _Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
     _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
         _IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
-    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
+    _Inout_ cxplat_queue_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
 {
     auto lock = reinterpret_cast<PSRWLOCK>(spin_lock);
     AcquireSRWLockExclusive(lock);
@@ -133,3 +133,54 @@ _Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_
     auto lock = reinterpret_cast<PSRWLOCK>(lock_handle->Reserved_1[0]);
     ReleaseSRWLockExclusive(lock);
 }
+
+_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_
+    _IRQL_raises_(DISPATCH_LEVEL)
+cxplat_irql_t
+cxplat_acquire_spin_lock(_Inout_ cxplat_spin_lock_t* spin_lock)
+{
+    cxplat_irql_t old_irql = cxplat_raise_irql(DISPATCH_LEVEL);
+    auto lock = reinterpret_cast<PSRWLOCK>(spin_lock);
+    AcquireSRWLockExclusive(lock);
+    return old_irql;
+}
+
+_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock)
+    _IRQL_requires_(DISPATCH_LEVEL) void cxplat_release_spin_lock(
+        _Inout_ cxplat_spin_lock_t* spin_lock, _In_ _IRQL_restores_ cxplat_irql_t old_irql)
+{
+    auto lock = reinterpret_cast<PSRWLOCK>(spin_lock);
+    ReleaseSRWLockExclusive(lock);
+    cxplat_lower_irql(old_irql);
+}
+
+_Requires_lock_not_held_(*spin_lock) _Acquires_lock_(*spin_lock) _IRQL_requires_min_(
+    DISPATCH_LEVEL) void cxplat_acquire_spin_lock_at_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
+{
+    auto lock = reinterpret_cast<PSRWLOCK>(spin_lock);
+    AcquireSRWLockExclusive(lock);
+}
+
+_Requires_lock_held_(*spin_lock) _Releases_lock_(*spin_lock) _IRQL_requires_min_(
+    DISPATCH_LEVEL) void cxplat_release_spin_lock_from_dpc_level(_Inout_ cxplat_spin_lock_t* spin_lock)
+{
+    auto lock = reinterpret_cast<PSRWLOCK>(spin_lock);
+    ReleaseSRWLockExclusive(lock);
+}
+
+thread_local cxplat_irql_t _cxplat_current_irql = PASSIVE_LEVEL;
+
+_IRQL_requires_max_(HIGH_LEVEL) _IRQL_raises_(irql) _IRQL_saves_ cxplat_irql_t
+    cxplat_raise_irql(_In_ cxplat_irql_t irql)
+{
+    auto old_irql = _cxplat_current_irql;
+    _cxplat_current_irql = irql;
+    return old_irql;
+}
+
+_IRQL_requires_max_(HIGH_LEVEL) void cxplat_lower_irql(_In_ _Notliteral_ _IRQL_restores_ cxplat_irql_t irql)
+{
+    _cxplat_current_irql = irql;
+}
+
+_IRQL_requires_max_(HIGH_LEVEL) _IRQL_saves_ cxplat_irql_t cxplat_get_current_irql() { return _cxplat_current_irql; }
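Note: user mode has no real IRQLs, so the winuser shim emulates them; the lock itself maps to an SRWLOCK, and raise/lower/get operate on a thread_local pseudo-IRQL. That is what lets the unit test assert PASSIVE_LEVEL/DISPATCH_LEVEL transitions without a kernel. A consequence of the thread_local choice is that a raised pseudo-IRQL is invisible to other threads, as this sketch illustrates (assumes PASSIVE_LEVEL and DISPATCH_LEVEL are defined, which winnt.h provides in user-mode builds):

    #include <cassert>
    #include <thread>
    #include "cxplat_processor.h"

    void
    demo_per_thread_irql()
    {
        cxplat_irql_t old_irql = cxplat_raise_irql(DISPATCH_LEVEL);
        assert(cxplat_get_current_irql() == DISPATCH_LEVEL);

        std::thread observer([] {
            // Each thread gets its own pseudo-IRQL, initialized to PASSIVE_LEVEL.
            assert(cxplat_get_current_irql() == PASSIVE_LEVEL);
        });
        observer.join();

        cxplat_lower_irql(old_irql);
    }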