
Commit ce63193

cyyever authored and pytorchmergebot committed on Oct 25, 2024

[Distributed] [18/N] Fix clang-tidy warnings in torch/csrc/distributed/ (pytorch#138692)

Fixes #ISSUE_NUMBER
Pull Request resolved: pytorch#138692
Approved by: https://github.com/ezyang
1 parent b999daf commit ce63193

16 files changed: +66 −63 lines

‎torch/csrc/distributed/c10d/Backoff.cpp

+1 −1

@@ -46,7 +46,7 @@ std::chrono::milliseconds ExponentialBackoffWithJitter::nextBackoff() {
   std::chrono::milliseconds maxSampleInterval =
       currentInterval_ + randomization;
 
-  std::uniform_int_distribution<> dist(
+  std::uniform_int_distribution<int64_t> dist(
       minSampleInterval.count(), maxSampleInterval.count());
   std::chrono::milliseconds backoffInterval{dist(gen_)};

‎torch/csrc/distributed/c10d/CudaDMAConnectivity.cpp

+4 −4

@@ -12,6 +12,7 @@ namespace {
 constexpr int max_nvlinks = 64;
 
 std::string get_bus_id(int device_idx) {
+  // NOLINTNEXTLINE(*array*)
   char bus_id[80];
   cudaDeviceProp prop{};
   C10_CUDA_CHECK(cudaGetDeviceProperties(&prop, device_idx));
@@ -27,7 +28,7 @@ std::string get_bus_id(int device_idx) {
 
 struct C10_EXPORT NVLinkDetector : public c10d::DMAConnectivityDetector {
   c10::intrusive_ptr<c10d::DMAConnectivity> detect() override {
-    int num_devices;
+    int num_devices = 0;
     C10_CUDA_CHECK(cudaGetDeviceCount(&num_devices));
 
     std::vector<std::vector<int>> matrix;
@@ -74,9 +75,8 @@ struct C10_EXPORT NVLinkDetector : public c10d::DMAConnectivityDetector {
     std::vector<int> switch_link_count(num_devices, 0);
     for (int i = 0; i < num_devices; ++i) {
       for (int link = 0; link < max_nvlinks; ++link) {
-        nvmlReturn_t ret;
-        nvmlIntNvLinkDeviceType_t deviceType;
-        ret = driver_api->nvmlDeviceGetNvLinkRemoteDeviceType_(
+        nvmlIntNvLinkDeviceType_t deviceType{};
+        auto ret = driver_api->nvmlDeviceGetNvLinkRemoteDeviceType_(
             nvml_devices[i], link, &deviceType);
         if (ret != NVML_SUCCESS) {
           // We've exhausted the NVLinks connected to this device. This error

‎torch/csrc/distributed/c10d/GroupRegistry.cpp

+6 −6

@@ -10,10 +10,11 @@ namespace {
 class GroupRegistry {
  public:
   void register_group(
-      const std::string& group_name,
+      std::string group_name,
       c10::intrusive_ptr<c10d::ProcessGroup> group) {
     std::unique_lock write_lock(lock_);
-    auto [_, inserted] = registry_.try_emplace(group_name, std::move(group));
+    auto [_, inserted] =
+        registry_.try_emplace(std::move(group_name), std::move(group));
     TORCH_CHECK(
         inserted,
         "A process group is already registered under the name",
@@ -70,12 +71,11 @@ bool get_thread_isolation_mode() {
 
 void register_process_group(
     const std::string& group_name,
-    c10::intrusive_ptr<c10d::ProcessGroup> group) {
+    const c10::intrusive_ptr<c10d::ProcessGroup>& group) {
   if (thread_isolation_mode) {
-    RankLocal<::GroupRegistry>::get().register_group(
-        group_name, std::move(group));
+    RankLocal<::GroupRegistry>::get().register_group(group_name, group);
   } else {
-    process_registry.register_group(group_name, group);
+    process_registry.register_group(group_name, group);
   }
 }
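The register_group change is the usual sink-parameter idiom: take the key by value and std::move it into the map, while the public register_process_group wrapper keeps a const& because it only forwards. A small standalone sketch of the idiom (the class and field names below are hypothetical):

#include <string>
#include <unordered_map>
#include <utility>

class Registry {
 public:
  // Sink parameter: the key is taken by value and moved into the map, so an
  // rvalue argument is moved rather than copied and then moved.
  bool register_entry(std::string name, int value) {
    auto [it, inserted] = table_.try_emplace(std::move(name), value);
    (void)it;
    return inserted;
  }

 private:
  std::unordered_map<std::string, int> table_;
};

int main() {
  Registry r;
  return r.register_entry("default_pg", 0) ? 0 : 1;
}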

‎torch/csrc/distributed/c10d/GroupRegistry.hpp

+1 −1

@@ -10,7 +10,7 @@ bool get_thread_isolation_mode();
 
 C10_EXPORT void register_process_group(
     const std::string& group_name,
-    c10::intrusive_ptr<c10d::ProcessGroup> group);
+    const c10::intrusive_ptr<c10d::ProcessGroup>& group);
 
 C10_EXPORT c10::intrusive_ptr<c10d::ProcessGroup> resolve_process_group(
     const std::string& group_name);

‎torch/csrc/distributed/c10d/NCCLUtils.cpp

+5 −5

@@ -176,9 +176,9 @@ bool nccl_use_nonblocking() {
 int nccl_nonblocking_timeout() {
   static int timeout = -2; // -2 means not initialized
   if (timeout == -2) {
-    const char* val = getenv("TORCH_NCCL_NONBLOCKING_TIMEOUT");
-    if (val && strlen(val) > 0) {
-      timeout = strtol(val, nullptr, 0);
+    const auto val = c10::utils::get_env("TORCH_NCCL_NONBLOCKING_TIMEOUT");
+    if (val.has_value() && !val.value().empty()) {
+      timeout = stoi(val.value());
     } else {
       // Default value consistent with kBackendDefaultTimeout
       timeout = 30 * 60;
@@ -353,7 +353,7 @@ void DebugInfoWriter::write(const std::string& ncclTrace) {
     return;
   }
 
-  file.write(ncclTrace.data(), ncclTrace.size());
+  file.write(ncclTrace.data(), static_cast<std::streamsize>(ncclTrace.size()));
   if (!file) {
     LOG(ERROR) << "Error opening file for writing NCCLPG debug info: "
                << filename_;
@@ -547,7 +547,7 @@ void NCCLTraceBuffer::retire_id(
       return;
     }
     if (duration.has_value()) {
-      entry->duration_ = duration.value();
+      entry->duration_ = duration;
     }
   }
 }
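The timeout parsing now goes through c10::utils::get_env, which returns an std::optional<std::string> instead of a raw char*. A rough standalone equivalent of that pattern using only the standard library (the helper below is a stand-in, not PyTorch's implementation); note that std::stoi assumes base 10 and throws on non-numeric input, whereas the old strtol(val, nullptr, 0) auto-detected the base and silently returned 0:

#include <cstdlib>
#include <optional>
#include <string>

// Rough stand-in for c10::utils::get_env: wrap std::getenv in an optional
// so callers check presence explicitly instead of juggling raw char*.
static std::optional<std::string> get_env(const char* name) {
  const char* raw = std::getenv(name);
  if (raw == nullptr) {
    return std::nullopt;
  }
  return std::string(raw);
}

int nonblocking_timeout() {
  static int timeout = -2; // -2 means not initialized
  if (timeout == -2) {
    const auto val = get_env("TORCH_NCCL_NONBLOCKING_TIMEOUT");
    if (val.has_value() && !val.value().empty()) {
      timeout = std::stoi(val.value()); // may throw on non-numeric input
    } else {
      timeout = 30 * 60; // fall back to the 30-minute default
    }
  }
  return timeout;
}

int main() {
  return nonblocking_timeout() > 0 ? 0 : 1;
}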

‎torch/csrc/distributed/c10d/NCCLUtils.hpp

+15 −20

@@ -3,12 +3,11 @@
 #ifdef USE_C10D_NCCL
 
 #include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
+#include <cstdio>
+#include <cstdlib>
 
 #include <memory>
 #include <mutex>
-#include <thread>
 
 #include <ATen/ATen.h>
 #include <ATen/cuda/CUDAEvent.h>
@@ -265,7 +264,7 @@ class TORCH_API DebugInfoWriter {
   }
 
  protected:
-  DebugInfoWriter(std::string namePrefix, int rank) {
+  DebugInfoWriter(const std::string& namePrefix, int rank) {
     filename_ = c10::str(namePrefix, rank);
   }
   std::string filename_;
@@ -278,14 +277,9 @@ class TORCH_API DebugInfoWriter {
 // RAII wrapper for NCCL communicator
 class NCCLComm {
  public:
-  explicit NCCLComm(ncclComm_t ncclComm)
-      : aborted_(false),
-        ncclAsyncErr_(ncclSuccess),
-        commFailureReason_(std::nullopt),
-        initialized_(false),
-        ncclComm_(ncclComm) {}
+  explicit NCCLComm(ncclComm_t ncclComm) : ncclComm_(ncclComm) {}
 
-  NCCLComm() : NCCLComm(nullptr) {}
+  NCCLComm() = default;
 
   ~NCCLComm() noexcept {
     // Add lock in this destructor, as aborted_ needs to be read after memory
@@ -379,6 +373,7 @@ class NCCLComm {
   NCCLComm& operator=(NCCLComm&& other) = delete;
 
   // Move constructable
+  // NOLINTNEXTLINE(.*-noexcept-move-.*)
   NCCLComm(NCCLComm&& other) {
     // Using other's lock, as it reads other's states
     // Can not use this.mutex_, as this object is being constructed.
@@ -488,7 +483,7 @@ class NCCLComm {
         " has already been registered on ncclComm_ ",
         ncclComm_);
 
-    void* handle;
+    void* handle = nullptr;
     // Use getNcclComm to make sure comm is ready before calling nccl APIs
     auto comm = getNcclComm();
     C10D_NCCL_CHECK(
@@ -544,16 +539,16 @@ class NCCLComm {
 
  protected:
   // Unique nccl_id for this communicator.
-  ncclUniqueId ncclId_;
-  bool aborted_;
+  ncclUniqueId ncclId_{};
+  bool aborted_{false};
   uint64_t ncclCommSplitCounter_{0};
-  ncclResult_t ncclAsyncErr_;
+  ncclResult_t ncclAsyncErr_{ncclSuccess};
   mutable std::mutex mutex_;
   // Rank that this communicator corresponds to.
-  int rank_;
+  int rank_{};
   // Optional reason for communicator failure, provided by ProcessGroupNCCL for
   // better error messaging.
-  std::optional<std::string> commFailureReason_;
+  std::optional<std::string> commFailureReason_{};
   bool initialized_{false};
 #ifdef NCCL_HAS_COMM_REGISTER
   // Stores handlers for tensors registered by NCCL
@@ -572,7 +567,7 @@ struct ncclRedOpRAII {
       : op_(op), comm_(comm), premul_sum_(true) {}
   ncclRedOpRAII(const ncclRedOpRAII&) = delete;
   ncclRedOpRAII& operator=(const ncclRedOpRAII&) = delete;
-  ncclRedOpRAII(ncclRedOpRAII&& tmp) : ncclRedOpRAII() {
+  ncclRedOpRAII(ncclRedOpRAII&& tmp) noexcept : ncclRedOpRAII() {
    std::swap(tmp.op_, this->op_);
    std::swap(tmp.comm_, this->comm_);
    std::swap(tmp.premul_sum_, this->premul_sum_);
@@ -587,8 +582,8 @@ struct ncclRedOpRAII {
   operator ncclRedOp_t() const {
     return op_;
   }
-  ncclRedOp_t op_;
-  ncclComm_t comm_;
+  ncclRedOp_t op_{};
+  ncclComm_t comm_{};
   bool premul_sum_ = false;
 };
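Most of the header changes replace constructor initializer lists with default member initializers and mark move constructors noexcept (or suppress the check where the move genuinely can throw). A condensed sketch of both patterns on a made-up class, assuming nothing beyond the standard library:

#include <optional>
#include <string>
#include <utility>

// Default member initializers replace the long constructor init-list: every
// constructor, including the defaulted one, starts from a known state.
class Comm {
 public:
  Comm() = default;
  explicit Comm(void* handle) : handle_(handle) {}

  // Move constructor marked noexcept so containers move instead of copy.
  Comm(Comm&& other) noexcept
      : handle_(std::exchange(other.handle_, nullptr)),
        aborted_(std::exchange(other.aborted_, false)) {}

 private:
  void* handle_{nullptr};
  bool aborted_{false};
  std::optional<std::string> failureReason_{};
};

int main() {
  Comm a;
  Comm b(std::move(a));
  (void)b;
  return 0;
}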

‎torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp

+11 −9

@@ -305,7 +305,7 @@ static bool allocatorHooksAttached = false;
 
 std::atomic<bool> ProcessGroupNCCL::shouldDump_(false);
 
-void cacheAllocatorRegisterHook(
+static void cacheAllocatorRegisterHook(
     const c10::cuda::CUDACachingAllocator::TraceEntry& te) {
   // Register after SEGMENT_ALLOC
   if (te.action_ !=
@@ -323,7 +323,7 @@ void cacheAllocatorRegisterHook(
   }
 }
 
-void cacheAllocatorDeregisterHook(
+static void cacheAllocatorDeregisterHook(
     const c10::cuda::CUDACachingAllocator::TraceEntry& te) {
   // deregister before SEGMENT_FREE
   if (te.action_ !=
@@ -341,8 +341,9 @@ void cacheAllocatorDeregisterHook(
   }
 }
 
-std::unordered_map<std::string, std::unordered_map<std::string, std::string>>
-getNCCLCommDumpMap() {
+static std::
+    unordered_map<std::string, std::unordered_map<std::string, std::string>>
+    getNCCLCommDumpMap() {
 #if defined(IS_NCCLX) && defined(NCCL_COMM_DUMP)
   std::unordered_map<
       std::string /* ncclUniqueID */,
@@ -464,7 +465,7 @@ gil_checker_t& get_gil_checker() {
   return gil_checker;
 }
 
-std::future<bool> launchAsyncGilCheck() {
+static std::future<bool> launchAsyncGilCheck() {
   std::promise<bool> resultPromise;
   std::future<bool> resultFuture = resultPromise.get_future();
   TORCH_CHECK(get_gil_checker(), "Can't check GIL with null GIL checker");
@@ -861,12 +862,12 @@ constexpr const char* MULTI_DEVICE_ERROR_MSG =
     "ProcessGroupNCCL continues supporting multi-process and multi-thread modes.";
 
 ProcessGroupNCCL::ProcessGroupNCCL(
-    const c10::intrusive_ptr<Store>& store,
+    c10::intrusive_ptr<Store> store,
     int rank,
     int size,
     c10::intrusive_ptr<Options> options)
     : Backend(rank, size),
-      store_(store),
+      store_(std::move(store)),
       options_(std::move(options)),
 
       traceKeyStart_(getTraceStartKey("NCCL", rank)),
@@ -1286,7 +1287,8 @@ void ProcessGroupNCCL::abortCommsFromMap(
 // Note: original name of this method is `abort`. It was renamed to
 // `abortComms` to distinguish from the `abort` method below. The `abort`
 // method calls `abortComms` but does more destruction than the latter.
-bool ProcessGroupNCCL::abortComms(std::optional<std::string> abortReason) {
+bool ProcessGroupNCCL::abortComms(
+    const std::optional<std::string>& abortReason) {
   // Remove record from global ncclCommDevIdxMapMutex before aboarting,
   // so that a new cache segment would not register to already aborded
   // communicators. Note that ncclCommDevIdxMap is a global container which may
@@ -1407,7 +1409,7 @@ void ProcessGroupNCCL::terminateProcess(const std::string& errMsg) {
   LOG(FATAL) << logPrefix() << errMsg;
 }
 
-long computeDeltaMS(
+static long computeDeltaMS(
     std::chrono::time_point<std::chrono::steady_clock> start,
    std::chrono::time_point<std::chrono::steady_clock> end) {
   return std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
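Marking these file-local helpers static gives them internal linkage: the symbols are not exported from the translation unit and cannot clash with identically named functions elsewhere, which is what clang-tidy's internal-linkage / missing-prototype style checks ask for. A minimal self-contained sketch shaped like the computeDeltaMS helper in the diff:

#include <chrono>
#include <cstdint>

// static => internal linkage: visible only within this translation unit.
static int64_t computeDeltaMS(
    std::chrono::time_point<std::chrono::steady_clock> start,
    std::chrono::time_point<std::chrono::steady_clock> end) {
  return std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
      .count();
}

int main() {
  const auto t0 = std::chrono::steady_clock::now();
  const auto t1 = std::chrono::steady_clock::now();
  return computeDeltaMS(t0, t1) >= 0 ? 0 : 1;
}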

‎torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp

+8 −8

@@ -525,7 +525,7 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   // communicator. These NCCL communicators are cached and reused if possible.
   //
   ProcessGroupNCCL(
-      const c10::intrusive_ptr<Store>& store,
+      c10::intrusive_ptr<Store> store,
       int rank,
       int size,
       c10::intrusive_ptr<Options> options = Options::create());
@@ -776,7 +776,7 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   bool dumpDebuggingInfo();
 
   // Abort all communicators on this rank.
-  bool abortComms(std::optional<std::string> abortReason = std::nullopt);
+  bool abortComms(const std::optional<std::string>& abortReason = std::nullopt);
 
  private:
  int globalRankStart;
@@ -1029,7 +1029,7 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   std::mutex mutex_;
 
   // Heartbeat of watchdog thread.
-  std::atomic_uint64_t heartbeat_;
+  std::atomic_uint64_t heartbeat_{};
 
   // The time interval used for deciding whether there is no watchdog heartbeat.
   int heartbeatTimeoutInSec_;
@@ -1048,10 +1048,10 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   int ncclTraceBufferSize_;
 
   // We gate the heartbeat monitor thread so that we can roll it out gradually.
-  std::atomic<bool> monitorThreadEnabled_;
+  std::atomic<bool> monitorThreadEnabled_{};
 
   // We gate the cudaEventCache so that we can roll it out gradually.
-  std::atomic<bool> cudaEventCacheEnabled_;
+  std::atomic<bool> cudaEventCacheEnabled_{};
 
   // Monitor thread which checks the heartbeat of Watchdog thread.
   // If the monitor thread finds there is no heartbeat, it will dump debug info
@@ -1074,7 +1074,7 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   std::atomic<bool> collectiveDebugInfoMode_;
 
   // Whether there are hooks pending to be fired
-  std::atomic<bool> hasPendingHooks_;
+  std::atomic<bool> hasPendingHooks_{};
 
   // This is the signal from watchdog threads to indicate whether the monitor
   // thread should dump. Making it static so that it is accessiable from all the
@@ -1188,11 +1188,11 @@ class TORCH_API ProcessGroupNCCL : public Backend {
   // Whether or not to create start CUDAEvent and enable timing for start
   // and end events. Note that enableTiming_ is always true if desyncDebug_
   // is set to true.
-  std::atomic<bool> enableTiming_;
+  std::atomic<bool> enableTiming_{};
 
   // Flag to enable the print of hash value of input/output of collectives for
   // verification.
-  std::atomic<bool> enableCollecticeHashDebug_;
+  std::atomic<bool> enableCollecticeHashDebug_{};
 
   // Whether or not TORCH_NCCL_AVOID_RECORD_STREAMS was set
   bool avoidRecordStreams_ = false;
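The {} initializers on the std::atomic members matter: before C++20 a default-constructed std::atomic is left uninitialized, so reading it is undefined behavior until someone stores a value. A tiny sketch of the difference (member names below are hypothetical):

#include <atomic>
#include <cstdint>

struct Flags {
  // Value-initialized: guaranteed to start as false / 0, even pre-C++20.
  std::atomic<bool> enableTiming{};
  std::atomic<std::uint64_t> heartbeat{};
  // Without the braces, pre-C++20 these would hold indeterminate values
  // until explicitly stored to, which clang-tidy's init checks flag.
};

int main() {
  Flags f;
  return f.enableTiming.load() ? 1 : 0;
}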

‎torch/csrc/distributed/c10d/init.cpp

+3 −1

@@ -10,6 +10,7 @@
 #include <torch/csrc/distributed/c10d/control_collectives/ControlCollectives.hpp>
 #include <torch/csrc/distributed/c10d/control_collectives/StoreCollectives.hpp>
 #include <torch/csrc/distributed/c10d/control_plane/WorkerServer.hpp>
+#include <utility>
 #include <vector>
 #ifndef _WIN32
 #include <torch/csrc/distributed/c10d/HashStore.hpp>
@@ -106,6 +107,7 @@ class IntrusivePtrNoGilDestructor {
   // This ctor is very important; see
   // https://github.com/pybind/pybind11/issues/2957
   explicit IntrusivePtrNoGilDestructor(T* impl)
+      // NOLINTNEXTLINE(bugprone-exception-escape)
       : impl_(c10::intrusive_ptr<T>::unsafe_steal_from_new(impl)) {}
   ~IntrusivePtrNoGilDestructor() {
     if (impl_) {
@@ -908,7 +910,7 @@ This class does not support ``__members__`` property.)");
   module.def(
       "_register_process_group",
       [](const std::string& group_name,
-         c10::intrusive_ptr<::c10d::ProcessGroup> group) {
+         const c10::intrusive_ptr<::c10d::ProcessGroup>& group) {
         ::c10d::register_process_group(group_name, group);
       },
      py::arg("group_name"),

‎torch/csrc/distributed/c10d/intra_node_comm.hpp

+1 −1

@@ -72,7 +72,7 @@ class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target {
    * Members initialized after rendezvous
    */
   bool isInitialized_ = false;
-  int deviceIdx_;
+  int deviceIdx_{0};
   Topology topology_ = Topology::UNKNOWN;
   void* symmetricMemoryPtr_ = nullptr;
   c10::intrusive_ptr<SymmetricMemory> symmetricMemory_ = nullptr;

‎torch/csrc/distributed/c10d/logger.cpp

+2 −2

@@ -61,7 +61,7 @@ Logger::Logger(std::shared_ptr<c10d::Reducer> reducer)
   ddp_logging_data_ = std::make_unique<at::DDPLoggingData>();
 }
 
-c10::once_flag log_graph_static_flag;
+static c10::once_flag log_graph_static_flag;
 
 void Logger::log_if_graph_static(bool is_static) {
   c10::call_once(log_graph_static_flag, [this, is_static]() {
@@ -116,7 +116,7 @@ void Logger::set_env_variables() {
 void Logger::set_parameter_stats() {
   // The number of parameter tensors
   ddp_logging_data_->ints_map["num_parameter_tensors"] =
-      reducer_->params_.size();
+      static_cast<int64_t>(reducer_->params_.size());
   // Total parameters size (Bytes)
   ddp_logging_data_->ints_map["total_parameter_size_bytes"] = 0;
   // Parameters' data types, there may be multiple data
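The static_cast<int64_t> here makes the store of a size_t (unsigned) count into a signed 64-bit map value explicit, which is what clang-tidy's implicit-conversion checks want. A small sketch with a made-up stats map:

#include <cstdint>
#include <map>
#include <string>
#include <vector>

int main() {
  std::vector<float> params(1024);
  std::map<std::string, int64_t> ints_map;

  // .size() returns size_t; the explicit cast documents the signed store
  // and keeps clang-tidy's conversion checks quiet.
  ints_map["num_parameter_tensors"] = static_cast<int64_t>(params.size());
  return ints_map["num_parameter_tensors"] == 1024 ? 0 : 1;
}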

‎torch/csrc/distributed/c10d/python_comm_hook.cpp

+1 −0

@@ -7,6 +7,7 @@
 
 namespace c10d {
 
+// NOLINTNEXTLINE(bugprone-exception-escape)
 PythonCommHook::~PythonCommHook() {
   py::gil_scoped_acquire ag;
   state_.dec_ref();

‎torch/csrc/distributed/c10d/reducer.cpp

+2 −2

@@ -1044,11 +1044,11 @@ void Reducer::mark_bucket_ready(size_t bucket_index) {
 }
 
 void Reducer::install_futures(
-    c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs) {
+    const c10::List<c10::intrusive_ptr<c10::ivalue::Future>>& futs) {
   // Append instead of overwrite so that this method can be called multiple
   // times in one iteration.
   if (!installed_futures_) {
-    installed_futures_ = std::move(futs);
+    installed_futures_ = futs;
   } else {
     installed_futures_->append(futs);
   }

‎torch/csrc/distributed/c10d/reducer.hpp

+2 −1

@@ -137,7 +137,8 @@ class TORCH_API Reducer {
   // Install futures that should be awaited at end of backwards. Currently these
   // are only used by user-defined custom buffer reduction hooks, but can be
   // generalized to any user-originating futures that need to be awaited.
-  void install_futures(c10::List<c10::intrusive_ptr<c10::ivalue::Future>> futs);
+  void install_futures(
+      const c10::List<c10::intrusive_ptr<c10::ivalue::Future>>& futs);
 
   // Returns true if we should rebuild buckets, else false. We only rebuild
   // buckets once after the first iteration and never rebuild them if

‎torch/csrc/distributed/c10d/socket.cpp

+3 −1

@@ -206,13 +206,15 @@ std::string formatSockAddr(const struct ::sockaddr* addr, socklen_t len) {
   // if we can't resolve the hostname, display the IP address
   if (addr->sa_family == AF_INET) {
     struct sockaddr_in* psai = (struct sockaddr_in*)&addr;
+    // NOLINTNEXTLINE(*array*)
     char ip[INET_ADDRSTRLEN];
     if (inet_ntop(addr->sa_family, &(psai->sin_addr), ip, INET_ADDRSTRLEN) !=
         nullptr) {
       return fmt::format("{}:{}", ip, psai->sin_port);
     }
   } else if (addr->sa_family == AF_INET6) {
     struct sockaddr_in6* psai = (struct sockaddr_in6*)&addr;
+    // NOLINTNEXTLINE(*array*)
     char ip[INET6_ADDRSTRLEN];
     if (inet_ntop(
             addr->sa_family, &(psai->sin6_addr), ip, INET6_ADDRSTRLEN) !=
@@ -275,7 +277,7 @@ struct formatter<c10d::detail::SocketImpl> {
     addr.ai_addr = addr_ptr;
     addr.ai_addrlen = addr_len;
 
-    auto remote = socket.remote();
+    auto const& remote = socket.remote();
     std::string remoteStr = remote ? *remote : "none";
 
     return fmt::format_to(

‎torch/csrc/distributed/rpc/agent_utils.cpp

+1 −1

@@ -16,7 +16,7 @@ std::unordered_map<std::string, worker_id_t> collectNames(
   std::unordered_map<std::string, worker_id_t> nameToId;
   nameToId.reserve(worldSize);
   nameToId.emplace(selfName, selfId);
-  // NOLINTNEXTLINE(bugprone-too-small-loop-variable)
+  // NOLINTNEXTLINE(*loop*)
   for (worker_id_t workerId = 0; workerId < worldSize; ++workerId) {
     if (workerId == selfId) {
       continue;
