Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove unused xAPIC mitigation #6780

Merged
merged 3 commits into from
Jan 27, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -472,11 +472,6 @@ option(BUILD_TESTS "Build tests" ON)
option(BUILD_UNIT_TESTS "Build unit tests" ON)
option(CLIENT_PROTOCOLS_TEST "Test client protocols (TLS, HTTP/2)" OFF)
option(BUILD_TPCC "Build TPCC sample app and clients" OFF)
option(
FORCE_ENABLE_XAPIC_MITIGATION
"Always enable aligned reads from host-memory to mitigate xAPIC stale data read vulnerability. When this setting is off, the mitigation is enabled at run-time when vulnerable hardware is detected"
OFF
)

# Allow framework code to use LOG_*_FMT macros. These will be removed from
# public headers in future
Expand Down
60 changes: 0 additions & 60 deletions include/ccf/pal/mem.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,15 +64,6 @@ namespace ccf::pal
return true;
}

static bool require_alignment_for_untrusted_reads()
{
# ifdef FORCE_ENABLE_XAPIC_MITIGATION
return true;
# else
return false;
# endif
}

#else

static inline void* safe_memcpy(void* dest, const void* src, size_t count)
Expand All @@ -94,56 +85,5 @@ namespace ccf::pal
return true;
}

static bool is_vulnerable_to_stale_xapic_read()
{
CpuidInfo info;

cpuid(&info, 1, 0);

// Ignores stepping, looks only at model and family: potentially
// includes safe instances which differ only by stepping from a vulnerable
// instance.
constexpr uint64_t proc_id_mask = 0x000F'0FF0;
const uint64_t proc_id = info.eax & proc_id_mask;

// https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
// 2022 tab, column "Stale Data Read from Legacy xAPIC, CVE-2022-21233,
// INTEL-SA-00657"
const std::set<uint64_t> vulnerable_proc_ids{
0x506C0, // Apollo Lake
0x506F0, // Denverton (Goldmont)
0x606A0, // Ice Lake Xeon-SP
0x606C0, // Ice Lake D
0x706A0, // Gemini Lake
0x706E0, // Ice Lake U, Y
0x80660, // Snow Ridge BTS (Tremont)
0x806A0, // Lakefield B-step (Tremont)
0x806C0, // Tiger Lake U
0x806D0, // Tiger Lake H
0x90660, // Elkhart Lake (Tremont)
0x90670, // Alder Lake S (Golden Cove, Gracemont)
0x906A0, // Alder Lake H (Golden Cove, Gracemont)
0x906C0, // Jasper Lake (Tremont)
0xA0670 // Rocket Lake
};

const auto it = vulnerable_proc_ids.find(proc_id);
return it != vulnerable_proc_ids.end();
}

static bool require_alignment_for_untrusted_reads()
{
# ifdef FORCE_ENABLE_XAPIC_MITIGATION
return true;
# else
static std::optional<bool> required = std::nullopt;
if (!required.has_value())
{
required = is_intel_cpu() && is_vulnerable_to_stale_xapic_read();
}
return required.value();
# endif
}

#endif
}
22 changes: 1 addition & 21 deletions src/ds/ring_buffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -182,8 +182,6 @@ namespace ringbuffer

BufferDef bd;

std::vector<uint8_t> local_copy;

virtual uint64_t read64(size_t index)
{
bd.check_access(index, sizeof(uint64_t));
Expand Down Expand Up @@ -253,25 +251,7 @@ namespace ringbuffer
// Call the handler function for this message.
bd.check_access(hd_index, advance);

if (ccf::pal::require_alignment_for_untrusted_reads() && size > 0)
{
// To prevent unaligned reads during message processing, copy aligned
// chunk into enclave memory
const auto copy_size = Const::align_size(size);
if (local_copy.size() < copy_size)
{
local_copy.resize(copy_size);
}
ccf::pal::safe_memcpy(
local_copy.data(),
bd.data + msg_index + Const::header_size(),
copy_size);
f(m, local_copy.data(), (size_t)size);
}
else
{
f(m, bd.data + msg_index + Const::header_size(), (size_t)size);
}
f(m, bd.data + msg_index + Const::header_size(), (size_t)size);
}

if (advance > 0)
Expand Down
14 changes: 2 additions & 12 deletions src/ds/test/ring_buffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -674,12 +674,7 @@ class SparseWriter : public ringbuffer::Writer

// This test checks that the ringbuffer functions correctly when the offsets
// overflow and wrap around from their maximum representable size to 0
TEST_CASE(
"Offset overflow" *
doctest::test_suite("ringbuffer")
// Skip when xAPIC mitigations are enabled, which are not correctly handled by
// SparseReader
* doctest::skip(ccf::pal::require_alignment_for_untrusted_reads()))
TEST_CASE("Offset overflow" * doctest::test_suite("ringbuffer"))
{
const auto seed = time(NULL);
INFO("Using seed: ", seed);
Expand Down Expand Up @@ -774,12 +769,7 @@ TEST_CASE(
}
}

TEST_CASE(
"Malicious writer" *
doctest::test_suite("ringbuffer")
// Skip when xAPIC mitigations are enabled, since the core assertion that
// reads are within the original buffer is deliberately broken by the Reader
* doctest::skip(ccf::pal::require_alignment_for_untrusted_reads()))
TEST_CASE("Malicious writer" * doctest::test_suite("ringbuffer"))
{
constexpr auto buffer_size = 256ull;

Expand Down
32 changes: 0 additions & 32 deletions src/enclave/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,6 @@ std::unique_ptr<threading::ThreadMessaging>
std::chrono::microseconds ccf::Channel::min_gap_between_initiation_attempts(
2'000'000);

static bool is_aligned(void* p, size_t align, size_t count = 0)
{
const auto start = reinterpret_cast<std::uintptr_t>(p);
const auto end = start + count;
return (start % align == 0) && (end % align == 0);
}

extern "C"
{
// Confirming in-enclave behaviour in separate unit tests is tricky, so we
Expand Down Expand Up @@ -89,12 +82,6 @@ extern "C"
return CreateNodeStatus::MemoryNotOutsideEnclave;
}

if (!is_aligned(enclave_config, 8, sizeof(EnclaveConfig)))
{
LOG_FAIL_FMT("Read source memory not aligned: enclave_config");
return CreateNodeStatus::UnalignedArguments;
}

EnclaveConfig ec = *static_cast<EnclaveConfig*>(enclave_config);

// Setup logger to allow enclave logs to reach the host before node is
Expand Down Expand Up @@ -175,13 +162,6 @@ extern "C"
return CreateNodeStatus::MemoryNotOutsideEnclave;
}

if (!is_aligned(
time_location, 8, sizeof(*ccf::enclavetime::host_time_us)))
{
LOG_FAIL_FMT("Read source memory not aligned: time_location");
return CreateNodeStatus::UnalignedArguments;
}

ccf::enclavetime::host_time_us =
static_cast<decltype(ccf::enclavetime::host_time_us)>(time_location);

Expand All @@ -194,25 +174,13 @@ extern "C"
return CreateNodeStatus::MemoryNotOutsideEnclave;
}

if (!is_aligned(ccf_config, 8, ccf_config_size))
{
LOG_FAIL_FMT("Read source memory not aligned: ccf_config");
return CreateNodeStatus::UnalignedArguments;
}

if (!ccf::pal::is_outside_enclave(
startup_snapshot_data, startup_snapshot_size))
{
LOG_FAIL_FMT("Memory outside enclave: startup snapshot");
return CreateNodeStatus::MemoryNotOutsideEnclave;
}

if (!is_aligned(startup_snapshot_data, 8, startup_snapshot_size))
{
LOG_FAIL_FMT("Read source memory not aligned: startup snapshot");
return CreateNodeStatus::UnalignedArguments;
}

ccf::pal::speculation_barrier();

ccf::StartupConfig cc =
Expand Down
33 changes: 6 additions & 27 deletions src/host/enclave.h
Original file line number Diff line number Diff line change
Expand Up @@ -231,32 +231,14 @@ namespace host
// Pad config and startup snapshot with NULLs to a multiple of 8, in an
// 8-byte aligned allocation
auto config_s = nlohmann::json(ccf_config).dump();
auto [config, config_aligned_size] = allocate_8_aligned(config_s.size());
LOG_DEBUG_FMT(
"Padding config of size {} to {} bytes",
config_s.size(),
config_aligned_size);
auto copy_end = std::copy(config_s.begin(), config_s.end(), config);
std::fill(copy_end, config + config_aligned_size, 0);

auto [snapshot, snapshot_aligned_size] =
allocate_8_aligned(startup_snapshot.size());
LOG_DEBUG_FMT(
"Padding startup snapshot of size {} to {} bytes",
startup_snapshot.size(),
snapshot_aligned_size);

auto snapshot_copy_end =
std::copy(startup_snapshot.begin(), startup_snapshot.end(), snapshot);
std::fill(snapshot_copy_end, snapshot + snapshot_aligned_size, 0);

#define CREATE_NODE_ARGS \
&status, (void*)&enclave_config, config, config_aligned_size, snapshot, \
snapshot_aligned_size, node_cert.data(), node_cert.size(), &node_cert_len, \
service_cert.data(), service_cert.size(), &service_cert_len, \
enclave_version_buf.data(), enclave_version_buf.size(), \
&enclave_version_len, start_type, enclave_log_level, num_worker_thread, \
time_location
&status, (void*)&enclave_config, (uint8_t*)config_s.data(), config_s.size(), \
startup_snapshot.data(), startup_snapshot.size(), node_cert.data(), \
node_cert.size(), &node_cert_len, service_cert.data(), \
service_cert.size(), &service_cert_len, enclave_version_buf.data(), \
enclave_version_buf.size(), &enclave_version_len, start_type, \
enclave_log_level, num_worker_thread, time_location

oe_result_t err = OE_FAILURE;

Expand All @@ -275,9 +257,6 @@ namespace host
}
#endif

std::free(config);
std::free(snapshot);

if (err != OE_OK || status != CreateNodeStatus::OK)
{
// Logs have described the errors already, we just need to allow the
Expand Down
Loading