@@ -725,7 +725,7 @@ struct PrivatePool {
   // Instead of maintaining private BlockPools here, I could stuff all blocks
   // (private or no) into the top-level large_blocks and small_blocks, and
   // distinguish private blocks by adding a "pool id" check above the stream
-  // check in BlockComparator. BlockComparator is performance- critial though,
+  // check in BlockComparator. BlockComparator is performance- critical though,
   // I'd rather not add more logic to it.
   BlockPool large_blocks;
   BlockPool small_blocks;
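For context, the comment above explains why private pools keep their own BlockPools rather than folding everything into the top-level pools. A minimal sketch of the rejected alternative, assuming simplified Block fields and a hypothetical pool_id member (illustrative stand-ins, not the actual PyTorch definitions):

```cpp
#include <cstddef>
#include <cstdint>

// Simplified stand-in for the allocator's Block; the real struct has many
// more fields (allocated flag, prev/next links, event counts, etc.).
struct Block {
  uint64_t pool_id;  // hypothetical: which private pool owns this block
  uintptr_t stream;  // stream the block is associated with
  size_t size;       // block size in bytes
  void* ptr;         // device pointer, used as the final tie-breaker
};

// Ordering with the "pool id" check the comment describes, placed above the
// stream check. Every added branch runs on each std::set insert/lookup along
// the alloc/free hot path, which is why the comment calls the comparator
// performance-critical and keeps separate per-pool BlockPools instead.
struct BlockComparator {
  bool operator()(const Block* a, const Block* b) const {
    if (a->pool_id != b->pool_id) {  // the extra check being avoided
      return a->pool_id < b->pool_id;
    }
    if (a->stream != b->stream) {
      return a->stream < b->stream;
    }
    if (a->size != b->size) {
      return a->size < b->size;
    }
    return a->ptr < b->ptr;
  }
};
```

Keeping the comparator to the stream/size/ptr checks keeps each pool lookup as cheap as possible; the pool-id branch would be paid on every comparison even when no private pools exist.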
@@ -1118,7 +1118,7 @@ class DeviceCachingAllocator {
         format_size(alloc_size),
         ". GPU ",
         device,
-        " has a total capacty of ",
+        " has a total capacity of ",
         format_size(device_total),
         " of which ",
         format_size(device_free),
@@ -1649,7 +1649,7 @@ class DeviceCachingAllocator {
     const auto all_blocks = get_all_blocks();
 
     for (const Block* const head_block : all_blocks) {
-      // For expandable segments, we report one segment for each continguous
+      // For expandable segments, we report one segment for each contiguous
       // mapped range of memory
       if (head_block->prev && head_block->prev->mapped) {
        continue;
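The skip in this hunk is what collapses an expandable segment into one reported entry per contiguous mapped range: a block only starts a new range when its predecessor is absent or unmapped. A hedged sketch of that head-block test, with the Block fields reduced to the two the check needs (assumed names, not the real struct):

```cpp
#include <cstddef>
#include <vector>

struct Block {
  Block* prev = nullptr;  // previous block in the same segment, if any
  bool mapped = false;    // whether this block's pages are currently mapped
};

// Count one "segment" per contiguous mapped range, mirroring the diff's
// head-block test: a block whose predecessor is mapped sits inside a range
// that was already counted at an earlier block, so it is skipped.
size_t count_segments(const std::vector<Block*>& all_blocks) {
  size_t segments = 0;
  for (const Block* const head_block : all_blocks) {
    if (head_block->prev && head_block->prev->mapped) {
      continue;  // interior block, not the head of a range
    }
    ++segments;
  }
  return segments;
}
```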
@@ -2078,7 +2078,7 @@ class DeviceCachingAllocator {
       // cannot be freed when requested, but fully free pages
       // of expandable blocks can always be freed.
       // The logic to track this as statistic is pretty involved,
-      // so we simply just exclude expandable segements from
+      // so we simply just exclude expandable segments from
       // inactive_split
       if (!block->expandable_segment_) {
         update_stat(
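The guard at the end of this hunk is what keeps expandable segments out of the inactive_split statistic, since their fully free pages can always be returned. A minimal sketch of that pattern, assuming a simplified Stat type and update_stat helper (stand-ins, not the allocator's real signatures):

```cpp
struct Stat {
  long current = 0;
  long peak = 0;
};

// Simplified stand-in for the allocator's stat helper: apply a signed delta
// and track the high-water mark.
void update_stat(Stat& stat, long delta) {
  stat.current += delta;
  if (stat.current > stat.peak) {
    stat.peak = stat.current;
  }
}

struct Block {
  bool expandable_segment_ = false;  // set for expandable-segment blocks
  long size = 0;
};

// Only ordinary blocks feed inactive_split, matching the diff: an inactive
// split of a normal segment may be unreleasable, but fully free pages of an
// expandable segment can always be freed, so counting them would mislead.
void on_block_inactive(const Block* block, Stat& inactive_split) {
  if (!block->expandable_segment_) {
    update_stat(inactive_split, block->size);
  }
}
```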