Add cxplat queued spinlocks #227

Merged 1 commit on Nov 14, 2024
10 changes: 10 additions & 0 deletions cxplat/cxplat_test/cxplat_processor_test.cpp
@@ -21,3 +21,13 @@ TEST_CASE("processor", "[processor]")
    REQUIRE(current < maximum);
    cxplat_cleanup();
}

TEST_CASE("lock", "[processor]")
{
    REQUIRE(cxplat_initialize() == CXPLAT_STATUS_SUCCESS);
    cxplat_spin_lock_t lock;
    cxplat_lock_queue_handle_t handle;
    cxplat_acquire_in_stack_queued_spin_lock(&lock, &handle);
    cxplat_release_in_stack_queued_spin_lock(&handle);
    cxplat_cleanup();
}
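
The new test exercises only the passive-level acquire/release pair. A hypothetical companion test for the _at_dpc/_from_dpc variants (not part of this PR) could follow the same pattern, assuming the user-mode build where both pairs map onto the same SRWLOCK implementation (see processor_winuser.cpp below):

TEST_CASE("lock_at_dpc", "[processor]")
{
    // Hypothetical test, not in this change: same pattern as above, using the
    // DPC-level variants declared in cxplat_processor.h.
    REQUIRE(cxplat_initialize() == CXPLAT_STATUS_SUCCESS);
    cxplat_spin_lock_t lock;
    cxplat_lock_queue_handle_t handle;
    cxplat_acquire_in_stack_queued_spin_lock_at_dpc(&lock, &handle);
    cxplat_release_in_stack_queued_spin_lock_from_dpc(&handle);
    cxplat_cleanup();
}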
29 changes: 29 additions & 0 deletions cxplat/inc/cxplat_processor.h
@@ -27,4 +27,33 @@ cxplat_get_active_processor_count();
_Must_inspect_result_ uint32_t
cxplat_get_maximum_processor_count();

typedef struct cxplat_spin_lock
{
    uint64_t Reserved[2];
} cxplat_spin_lock_t;

typedef struct cxplat_lock_queue_handle
{
    uint64_t Reserved_1[2];
    uint8_t Reserved_2;
} cxplat_lock_queue_handle_t;

_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock(
    _In_ cxplat_lock_queue_handle_t* lock_handle);

_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle);

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock_from_dpc(
    _In_ cxplat_lock_queue_handle_t* lock_handle);

CXPLAT_EXTERN_C_END
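
For kernel callers, the intended usage follows the familiar KeAcquireInStackQueuedSpinLock pattern: the caller owns the cxplat_spin_lock_t, and the queue handle lives on the caller's stack for the duration of the critical section. A minimal sketch (not part of this PR; the function and g_example_lock are hypothetical, and the exact include may differ in a real consumer):

#include "cxplat_processor.h" // assumes the cxplat headers are on the include path

static cxplat_spin_lock_t g_example_lock = {0};

void
example_update_shared_state(void)
{
    // The queue handle is stack-allocated, as the "in_stack" name implies.
    cxplat_lock_queue_handle_t handle;
    cxplat_acquire_in_stack_queued_spin_lock(&g_example_lock, &handle);
    // ... update state protected by g_example_lock; IRQL is DISPATCH_LEVEL here ...
    cxplat_release_in_stack_queued_spin_lock(&handle);
}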
33 changes: 33 additions & 0 deletions cxplat/src/cxplat_winkernel/processor_winkernel.c
@@ -21,3 +21,36 @@ cxplat_get_active_processor_count()
{
    return KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
}

static_assert(sizeof(cxplat_spin_lock_t) == sizeof(KSPIN_LOCK_QUEUE), "Size mismatch");
static_assert(sizeof(cxplat_lock_queue_handle_t) == sizeof(KLOCK_QUEUE_HANDLE), "Size mismatch");

_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
    KeAcquireInStackQueuedSpinLock((PKSPIN_LOCK)spin_lock, (PKLOCK_QUEUE_HANDLE)lock_handle);
}

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock(
    _In_ cxplat_lock_queue_handle_t* lock_handle)
{
    KeReleaseInStackQueuedSpinLock((PKLOCK_QUEUE_HANDLE)lock_handle);
}

_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
    KeAcquireInStackQueuedSpinLockAtDpcLevel((PKSPIN_LOCK)spin_lock, (PKLOCK_QUEUE_HANDLE)lock_handle);
}

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock_from_dpc(
    _In_ cxplat_lock_queue_handle_t* lock_handle)
{
    KeReleaseInStackQueuedSpinLockFromDpcLevel((PKLOCK_QUEUE_HANDLE)lock_handle);
}
38 changes: 38 additions & 0 deletions cxplat/src/cxplat_winuser/processor_winuser.cpp
@@ -95,3 +95,41 @@ cxplat_get_active_processor_count()
{
    return GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
}

_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock(
    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
    auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
    AcquireSRWLockExclusive(lock);

    lock_handle->Reserved_1[0] = reinterpret_cast<uint64_t>(lock);
}

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock(
    _In_ cxplat_lock_queue_handle_t* lock_handle)
{
    auto lock = reinterpret_cast<SRWLOCK*>(lock_handle->Reserved_1[0]);
    ReleaseSRWLockExclusive(lock);
}

_Requires_lock_not_held_(*lock_handle) _Acquires_lock_(*lock_handle) _Post_same_lock_(*spin_lock, *lock_handle)
_IRQL_requires_max_(DISPATCH_LEVEL) _IRQL_saves_global_(QueuedSpinLock, lock_handle)
_IRQL_raises_(DISPATCH_LEVEL) void cxplat_acquire_in_stack_queued_spin_lock_at_dpc(
    _Inout_ cxplat_spin_lock_t* spin_lock, _Out_ cxplat_lock_queue_handle_t* lock_handle)
{
    auto lock = reinterpret_cast<SRWLOCK*>(spin_lock);
    AcquireSRWLockExclusive(lock);

    lock_handle->Reserved_1[0] = reinterpret_cast<uint64_t>(lock);
}

_Requires_lock_held_(*lock_handle) _Releases_lock_(*lock_handle) _IRQL_requires_(DISPATCH_LEVEL)
_IRQL_restores_global_(QueuedSpinLock, lock_handle) void cxplat_release_in_stack_queued_spin_lock_from_dpc(
    _In_ cxplat_lock_queue_handle_t* lock_handle)
{
    auto lock = reinterpret_cast<SRWLOCK*>(lock_handle->Reserved_1[0]);
    ReleaseSRWLockExclusive(lock);
}
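
The user-mode implementation maps the queued spin lock onto an exclusively held SRWLOCK, stashing the lock's address in Reserved_1[0] of the handle. The kernel file above guards its casts with static_asserts; equivalent compile-time checks for the user-mode mapping are not part of this PR, but could be sketched as:

// Hypothetical checks, not in this change: an SRWLOCK is pointer-sized, so it
// fits inside cxplat_spin_lock_t, and its address fits in a single uint64_t slot.
static_assert(sizeof(SRWLOCK) <= sizeof(cxplat_spin_lock_t), "SRWLOCK must fit in cxplat_spin_lock_t");
static_assert(sizeof(SRWLOCK*) <= sizeof(uint64_t), "SRWLOCK* must fit in Reserved_1[0]");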