From 2570b8339aee184d8c8621030054f8b789087bea Mon Sep 17 00:00:00 2001 From: frankvicky Date: Thu, 8 Aug 2024 09:28:49 +0800 Subject: [PATCH 1/7] KAFKA-16907: Resolve the ClassDataAbstractionCoupling and ClassFanOutComplexity issue caused by RaftUtils --- .../kafka/raft/KafkaNetworkChannel.java | 3 +- .../apache/kafka/raft/KafkaRaftClient.java | 61 +++-- .../org/apache/kafka/raft/LeaderState.java | 6 +- .../kafka/raft/internals/AddVoterHandler.java | 14 +- .../raft/internals/DefaultRequestSender.java | 4 +- .../raft/internals/RemoveVoterHandler.java | 12 +- .../raft/internals/UpdateVoterHandler.java | 16 +- .../internals/UpdateVoterHandlerState.java | 4 +- .../kafka/raft/utils/ApiMessageUtils.java | 45 +++ .../kafka/raft/utils/DescribeQuorumRpc.java | 135 +++++++++ .../org/apache/kafka/raft/utils/FetchRpc.java | 111 ++++++++ .../kafka/raft/utils/FetchSnapshotRpc.java | 118 ++++++++ .../kafka/raft/utils/QuorumEpochRpc.java | 232 ++++++++++++++++ .../org/apache/kafka/raft/utils/VoteRpc.java | 259 ++++++++++++++++++ .../kafka/raft/KafkaNetworkChannelTest.java | 6 +- .../raft/KafkaRaftClientSnapshotTest.java | 5 +- .../kafka/raft/RaftClientTestContext.java | 40 +-- 17 files changed, 994 insertions(+), 77 deletions(-) create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java b/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java index 68224a8c2410d..903ed7f882ad3 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java +++ b/raft/src/main/java/org/apache/kafka/raft/KafkaNetworkChannel.java @@ -37,6 +37,7 @@ import org.apache.kafka.common.requests.FetchSnapshotRequest; import org.apache.kafka.common.requests.VoteRequest; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.raft.utils.ApiMessageUtils; import org.apache.kafka.server.util.InterBrokerSendThread; import org.apache.kafka.server.util.RequestAndCompletionHandler; @@ -154,7 +155,7 @@ private void sendOnComplete(RaftRequest.Outbound request, ClientResponse clientR private ApiMessage errorResponse(ApiMessage request, Errors error) { ApiKeys apiKey = ApiKeys.forId(request.apiKey()); - return RaftUtil.errorResponse(apiKey, error); + return ApiMessageUtils.parseErrorResponse(apiKey, error); } @Override diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java index e8ab7b5c6dabc..c99351030f7cf 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java +++ b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java @@ -80,6 +80,13 @@ import org.apache.kafka.raft.internals.RemoveVoterHandler; import org.apache.kafka.raft.internals.ThresholdPurgatory; import org.apache.kafka.raft.internals.UpdateVoterHandler; +import org.apache.kafka.raft.internals.VoterSet; +import org.apache.kafka.raft.utils.ApiMessageUtils; +import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.FetchRpc; +import org.apache.kafka.raft.utils.FetchSnapshotRpc; +import 
org.apache.kafka.raft.utils.QuorumEpochRpc; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.server.common.serialization.RecordSerde; import org.apache.kafka.snapshot.NotifyingRawSnapshotWriter; @@ -735,7 +742,7 @@ private VoteResponseData buildVoteResponse( Errors partitionLevelError, boolean voteGranted ) { - return RaftUtil.singletonVoteResponse( + return VoteRpc.singletonVoteResponse( listenerName, apiVersion, Errors.NONE, @@ -803,7 +810,7 @@ private VoteResponseData handleVoteRequest( } // Check that the request was intended for this replica - Optional voterKey = RaftUtil.voteRequestVoterKey(request, partitionRequest); + Optional voterKey = VoteRpc.voteRequestVoterKey(request, partitionRequest); if (!isValidVoterKey(voterKey)) { logger.info( "Candidate sent a voter key ({}) in the VOTE request that doesn't match the " + @@ -951,7 +958,7 @@ private BeginQuorumEpochResponseData buildBeginQuorumEpochResponse( short apiVersion, Errors partitionLevelError ) { - return RaftUtil.singletonBeginQuorumEpochResponse( + return QuorumEpochRpc.singletonBeginQuorumEpochResponse( listenerName, apiVersion, Errors.NONE, @@ -1017,7 +1024,7 @@ private BeginQuorumEpochResponseData handleBeginQuorumEpochRequest( ); // Check that the request was intended for this replica - Optional voterKey = RaftUtil.beginQuorumEpochRequestVoterKey(request, partitionRequest); + Optional voterKey = QuorumEpochRpc.beginQuorumEpochRequestVoterKey(request, partitionRequest); if (!isValidVoterKey(voterKey)) { logger.info( "Leader sent a voter key ({}) in the BEGIN_QUORUM_EPOCH request that doesn't " + @@ -1107,7 +1114,7 @@ private EndQuorumEpochResponseData buildEndQuorumEpochResponse( short apiVersion, Errors partitionLevelError ) { - return RaftUtil.singletonEndQuorumEpochResponse( + return QuorumEpochRpc.singletonEndQuorumEpochResponse( listenerName, apiVersion, Errors.NONE, @@ -1281,7 +1288,7 @@ private FetchResponseData buildFetchResponse( ValidOffsetAndEpoch validOffsetAndEpoch, Optional highWatermark ) { - return RaftUtil.singletonFetchResponse( + return FetchRpc.singletonFetchResponse( listenerName, apiVersion, log.topicPartition(), @@ -1740,7 +1747,7 @@ private DescribeQuorumResponseData handleDescribeQuorumRequest( } LeaderState leaderState = quorum.leaderStateOrThrow(); - return RaftUtil.singletonDescribeQuorumResponse( + return DescribeQuorumRpc.singletonDescribeQuorumResponse( requestMetadata.apiVersion(), log.topicPartition(), quorum.localIdOrThrow(), @@ -1790,7 +1797,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( data.topics().get(0).partitions().get(0).partition() ); - return RaftUtil.singletonFetchSnapshotResponse( + return FetchSnapshotRpc.singletonFetchSnapshotResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), unknownTopicPartition, @@ -1806,7 +1813,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( partitionSnapshot.currentLeaderEpoch() ); if (leaderValidation.isPresent()) { - return RaftUtil.singletonFetchSnapshotResponse( + return FetchSnapshotRpc.singletonFetchSnapshotResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), log.topicPartition(), @@ -1827,7 +1834,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( // The bootstrap checkpoint should not be replicated. 
The first leader will // make sure that the content of the bootstrap checkpoint is included in the // partition log - return RaftUtil.singletonFetchSnapshotResponse( + return FetchSnapshotRpc.singletonFetchSnapshotResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), log.topicPartition(), @@ -1841,7 +1848,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( RawSnapshotReader snapshot = snapshotOpt.get(); long snapshotSize = snapshot.sizeInBytes(); if (partitionSnapshot.position() < 0 || partitionSnapshot.position() >= snapshotSize) { - return RaftUtil.singletonFetchSnapshotResponse( + return FetchSnapshotRpc.singletonFetchSnapshotResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), log.topicPartition(), @@ -1881,7 +1888,7 @@ private FetchSnapshotResponseData handleFetchSnapshotRequest( currentTimeMs ); - return RaftUtil.singletonFetchSnapshotResponse( + return FetchSnapshotRpc.singletonFetchSnapshotResponse( requestMetadata.listenerName(), requestMetadata.apiVersion(), log.topicPartition(), @@ -2092,7 +2099,7 @@ private CompletableFuture handleAddVoterRequest( ); } - Optional newVoter = RaftUtil.addVoterRequestVoterKey(data); + Optional newVoter = VoteRpc.addVoterRequestVoterKey(data); if (!newVoter.isPresent() || !newVoter.get().directoryId().isPresent()) { return completedFuture( new AddRaftVoterResponseData() @@ -2175,7 +2182,7 @@ private CompletableFuture handleRemoveVoterRequest( ); } - Optional oldVoter = RaftUtil.removeVoterRequestVoterKey(data); + Optional oldVoter = VoteRpc.removeVoterRequestVoterKey(data); if (!oldVoter.isPresent() || !oldVoter.get().directoryId().isPresent()) { return completedFuture( new RemoveRaftVoterResponseData() @@ -2199,7 +2206,7 @@ private CompletableFuture handleUpdateVoterRequest( if (!hasValidClusterId(data.clusterId())) { return completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.INCONSISTENT_CLUSTER_ID, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2211,7 +2218,7 @@ private CompletableFuture handleUpdateVoterRequest( Optional leaderValidationError = validateLeaderOnlyRequest(data.currentLeaderEpoch()); if (leaderValidationError.isPresent()) { return completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( leaderValidationError.get(), requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2220,10 +2227,10 @@ private CompletableFuture handleUpdateVoterRequest( ); } - Optional voter = RaftUtil.updateVoterRequestVoterKey(data); + Optional voter = VoteRpc.updateVoterRequestVoterKey(data); if (!voter.isPresent() || !voter.get().directoryId().isPresent()) { return completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2235,7 +2242,7 @@ private CompletableFuture handleUpdateVoterRequest( Endpoints voterEndpoints = Endpoints.fromUpdateVoterRequest(data.listeners()); if (!voterEndpoints.address(channel.listenerName()).isPresent()) { return completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2250,7 +2257,7 @@ private CompletableFuture handleUpdateVoterRequest( supportedKraftVersions.maxSupportedVersion() < supportedKraftVersions.minSupportedVersion() ) { return completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestMetadata.listenerName(), 
quorum.leaderAndEpoch(), @@ -2587,7 +2594,7 @@ private void handleRequest(RaftRequest.Inbound request, long currentTimeMs) { responseFuture.whenComplete((response, exception) -> { ApiMessage message = response; if (message == null) { - message = RaftUtil.errorResponse(apiKey, Errors.forException(exception)); + message = ApiMessageUtils.parseErrorResponse(apiKey, Errors.forException(exception)); } RaftResponse.Outbound responseMessage = new RaftResponse.Outbound(request.correlationId(), message); @@ -2643,7 +2650,7 @@ private long maybeSendRequest( if (exception != null) { ApiKeys api = ApiKeys.forId(request.apiKey()); Errors error = Errors.forException(exception); - ApiMessage errorResponse = RaftUtil.errorResponse(api, error); + ApiMessage errorResponse = ApiMessageUtils.parseErrorResponse(api, error); response = new RaftResponse.Inbound( correlationId, @@ -2666,7 +2673,7 @@ private long maybeSendRequest( private EndQuorumEpochRequestData buildEndQuorumEpochRequest( ResignedState state ) { - return RaftUtil.singletonEndQuorumEpochRequest( + return QuorumEpochRpc.singletonEndQuorumEpochRequest( log.topicPartition(), clusterId, quorum.epoch(), @@ -2709,7 +2716,7 @@ private long maybeSendRequest( } private BeginQuorumEpochRequestData buildBeginQuorumEpochRequest(ReplicaKey remoteVoter) { - return RaftUtil.singletonBeginQuorumEpochRequest( + return QuorumEpochRpc.singletonBeginQuorumEpochRequest( log.topicPartition(), clusterId, quorum.epoch(), @@ -2721,7 +2728,7 @@ private BeginQuorumEpochRequestData buildBeginQuorumEpochRequest(ReplicaKey remo private VoteRequestData buildVoteRequest(ReplicaKey remoteVoter) { OffsetAndEpoch endOffset = endOffset(); - return RaftUtil.singletonVoteRequest( + return VoteRpc.singletonVoteRequest( log.topicPartition(), clusterId, quorum.epoch(), @@ -2733,7 +2740,7 @@ private VoteRequestData buildVoteRequest(ReplicaKey remoteVoter) { } private FetchRequestData buildFetchRequest() { - FetchRequestData request = RaftUtil.singletonFetchRequest( + FetchRequestData request = FetchRpc.singletonFetchRequest( log.topicPartition(), log.topicId(), fetchPartition -> fetchPartition @@ -2764,7 +2771,7 @@ private long maybeSendAnyVoterFetch(long currentTimeMs) { } private FetchSnapshotRequestData buildFetchSnapshotRequest(OffsetAndEpoch snapshotId, long snapshotSize) { - return RaftUtil.singletonFetchSnapshotRequest( + return FetchSnapshotRpc.singletonFetchSnapshotRequest( clusterId, ReplicaKey.of(quorum().localIdOrSentinel(), quorum.localDirectoryId()), log.topicPartition(), diff --git a/raft/src/main/java/org/apache/kafka/raft/LeaderState.java b/raft/src/main/java/org/apache/kafka/raft/LeaderState.java index 803804858ab2d..f0bb948a9dcc5 100644 --- a/raft/src/main/java/org/apache/kafka/raft/LeaderState.java +++ b/raft/src/main/java/org/apache/kafka/raft/LeaderState.java @@ -30,6 +30,7 @@ import org.apache.kafka.raft.internals.AddVoterHandlerState; import org.apache.kafka.raft.internals.BatchAccumulator; import org.apache.kafka.raft.internals.RemoveVoterHandlerState; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.KRaftVersion; import org.slf4j.Logger; @@ -215,7 +216,7 @@ public void resetAddVoterHandlerState( addVoterHandlerState.ifPresent( handlerState -> handlerState .future() - .complete(RaftUtil.addVoterResponse(error, message)) + .complete(VoteRpc.addVoterResponse(error, message)) ); addVoterHandlerState = state; } @@ -232,7 +233,7 @@ public void resetRemoveVoterHandlerState( removeVoterHandlerState.ifPresent( handlerState -> 
handlerState .future() - .complete(RaftUtil.removeVoterResponse(error, message)) + .complete(VoteRpc.removeVoterResponse(error, message)) ); removeVoterHandlerState = state; } @@ -370,6 +371,7 @@ public void appendStartOfEpochControlRecords(VoterSet.VoterNode localVoterNode, return builder.build(); } }); + accumulator.forceDrain(); } public long appendVotersRecord(VoterSet voters, long currentTimeMs) { diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java index 44b9eb2a39dc0..14ea670c73697 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java @@ -28,7 +28,7 @@ import org.apache.kafka.raft.Endpoints; import org.apache.kafka.raft.LeaderState; import org.apache.kafka.raft.LogOffsetMetadata; -import org.apache.kafka.raft.RaftUtil; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; import org.apache.kafka.server.common.KRaftVersion; @@ -92,7 +92,7 @@ public CompletableFuture handleAddVoterRequest( // Check if there are any pending voter change requests if (leaderState.isOperationPending(currentTimeMs)) { return CompletableFuture.completedFuture( - RaftUtil.addVoterResponse( + VoteRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to handle previous voter change request" ) @@ -103,7 +103,7 @@ public CompletableFuture handleAddVoterRequest( Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); if (!highWatermark.isPresent()) { return CompletableFuture.completedFuture( - RaftUtil.addVoterResponse( + VoteRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to establish HWM and fence previous voter changes" ) @@ -114,7 +114,7 @@ public CompletableFuture handleAddVoterRequest( KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!kraftVersion.isReconfigSupported()) { return CompletableFuture.completedFuture( - RaftUtil.addVoterResponse( + VoteRpc.addVoterResponse( Errors.UNSUPPORTED_VERSION, String.format( "Cluster doesn't support adding voter because the %s feature is %s", @@ -129,7 +129,7 @@ public CompletableFuture handleAddVoterRequest( Optional> votersEntry = partitionState.lastVoterSetEntry(); if (!votersEntry.isPresent() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( - RaftUtil.addVoterResponse( + VoteRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, String.format( "Request timed out waiting for voters to commit the latest voter change at %s with HWM %d", @@ -144,7 +144,7 @@ public CompletableFuture handleAddVoterRequest( VoterSet voters = votersEntry.get().value(); if (voters.voterIds().contains(voterKey.id())) { return CompletableFuture.completedFuture( - RaftUtil.addVoterResponse( + VoteRpc.addVoterResponse( Errors.DUPLICATE_VOTER, String.format( "The voter id for %s is already part of the set of voters %s.", @@ -174,7 +174,7 @@ public CompletableFuture handleAddVoterRequest( ); if (!timeout.isPresent()) { return CompletableFuture.completedFuture( - RaftUtil.addVoterResponse( + VoteRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, String.format("New voter %s is not ready to receive requests", voterKey) ) diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/DefaultRequestSender.java 
b/raft/src/main/java/org/apache/kafka/raft/internals/DefaultRequestSender.java index 0cee3c255d1b0..55b197255bff8 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/DefaultRequestSender.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/DefaultRequestSender.java @@ -26,8 +26,8 @@ import org.apache.kafka.raft.RaftMessageQueue; import org.apache.kafka.raft.RaftRequest; import org.apache.kafka.raft.RaftResponse; -import org.apache.kafka.raft.RaftUtil; import org.apache.kafka.raft.RequestManager; +import org.apache.kafka.raft.utils.ApiMessageUtils; import org.slf4j.Logger; @@ -89,7 +89,7 @@ public OptionalLong send( if (exception != null) { ApiKeys api = ApiKeys.forId(request.apiKey()); Errors error = Errors.forException(exception); - ApiMessage errorResponse = RaftUtil.errorResponse(api, error); + ApiMessage errorResponse = ApiMessageUtils.parseErrorResponse(api, error); response = new RaftResponse.Inbound( correlationId, diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java index 29093cc30b6e9..316bb2265af63 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java @@ -23,9 +23,9 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.raft.LeaderState; import org.apache.kafka.raft.LogOffsetMetadata; -import org.apache.kafka.raft.RaftUtil; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.KRaftVersion; import org.slf4j.Logger; @@ -82,7 +82,7 @@ public CompletableFuture handleRemoveVoterRequest( // Check if there are any pending voter change requests if (leaderState.isOperationPending(currentTimeMs)) { return CompletableFuture.completedFuture( - RaftUtil.removeVoterResponse( + VoteRpc.removeVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to handle previous voter change request" ) @@ -93,7 +93,7 @@ public CompletableFuture handleRemoveVoterRequest( Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); if (!highWatermark.isPresent()) { return CompletableFuture.completedFuture( - RaftUtil.removeVoterResponse( + VoteRpc.removeVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to establish HWM and fence previous voter changes" ) @@ -104,7 +104,7 @@ public CompletableFuture handleRemoveVoterRequest( KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!kraftVersion.isReconfigSupported()) { return CompletableFuture.completedFuture( - RaftUtil.removeVoterResponse( + VoteRpc.removeVoterResponse( Errors.UNSUPPORTED_VERSION, String.format( "Cluster doesn't support removing voter because the %s feature is %s", @@ -119,7 +119,7 @@ public CompletableFuture handleRemoveVoterRequest( Optional> votersEntry = partitionState.lastVoterSetEntry(); if (!votersEntry.isPresent() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( - RaftUtil.removeVoterResponse( + VoteRpc.removeVoterResponse( Errors.REQUEST_TIMED_OUT, String.format( "Request timed out waiting for voters to commit the latest voter change at %s with HWM %d", @@ -134,7 +134,7 @@ public CompletableFuture handleRemoveVoterRequest( Optional newVoters = votersEntry.get().value().removeVoter(voterKey); if (!newVoters.isPresent()) { return 
CompletableFuture.completedFuture( - RaftUtil.removeVoterResponse( + VoteRpc.removeVoterResponse( Errors.VOTER_NOT_FOUND, String.format( "Cannot remove voter %s from the set of voters %s", diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java index 417c1decad758..5d940cc562ebc 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java @@ -26,9 +26,9 @@ import org.apache.kafka.raft.LeaderAndEpoch; import org.apache.kafka.raft.LeaderState; import org.apache.kafka.raft.LogOffsetMetadata; -import org.apache.kafka.raft.RaftUtil; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.KRaftVersion; import java.util.Optional; @@ -83,7 +83,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check if there are any pending voter change requests if (leaderState.isOperationPending(currentTimeMs)) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.REQUEST_TIMED_OUT, requestListenerName, new LeaderAndEpoch( @@ -99,7 +99,7 @@ public CompletableFuture handleUpdateVoterRequest( Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); if (!highWatermark.isPresent()) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.REQUEST_TIMED_OUT, requestListenerName, new LeaderAndEpoch( @@ -116,7 +116,7 @@ public CompletableFuture handleUpdateVoterRequest( KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!kraftVersion.isReconfigSupported()) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.UNSUPPORTED_VERSION, requestListenerName, new LeaderAndEpoch( @@ -132,7 +132,7 @@ public CompletableFuture handleUpdateVoterRequest( Optional> votersEntry = partitionState.lastVoterSetEntry(); if (!votersEntry.isPresent() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.REQUEST_TIMED_OUT, requestListenerName, new LeaderAndEpoch( @@ -147,7 +147,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check that the supported version range is valid if (!validVersionRange(kraftVersion, supportedKraftVersions)) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestListenerName, new LeaderAndEpoch( @@ -162,7 +162,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check that endpoinds includes the default listener if (!voterEndpoints.address(defaultListenerName).isPresent()) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestListenerName, new LeaderAndEpoch( @@ -190,7 +190,7 @@ public CompletableFuture handleUpdateVoterRequest( ); if (!updatedVoters.isPresent()) { return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.VOTER_NOT_FOUND, requestListenerName, new LeaderAndEpoch( diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java 
b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java index c0ac6c5189983..f4db6831375a4 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java @@ -22,7 +22,7 @@ import org.apache.kafka.common.utils.Timer; import org.apache.kafka.raft.Endpoints; import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.raft.RaftUtil; +import org.apache.kafka.raft.utils.VoteRpc; import java.util.concurrent.CompletableFuture; @@ -57,7 +57,7 @@ public void completeFuture( Endpoints leaderEndpoints ) { future.complete( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( error, requestListenerName, leaderAndEpoch, diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java b/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java new file mode 100644 index 0000000000000..99ca74ea132ae --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.message.BeginQuorumEpochResponseData; +import org.apache.kafka.common.message.EndQuorumEpochResponseData; +import org.apache.kafka.common.message.FetchResponseData; +import org.apache.kafka.common.message.FetchSnapshotResponseData; +import org.apache.kafka.common.message.VoteResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ApiMessage; +import org.apache.kafka.common.protocol.Errors; + +public class ApiMessageUtils { + public static ApiMessage parseErrorResponse(ApiKeys apiKey, Errors error) { + switch (apiKey) { + case VOTE: + return new VoteResponseData().setErrorCode(error.code()); + case BEGIN_QUORUM_EPOCH: + return new BeginQuorumEpochResponseData().setErrorCode(error.code()); + case END_QUORUM_EPOCH: + return new EndQuorumEpochResponseData().setErrorCode(error.code()); + case FETCH: + return new FetchResponseData().setErrorCode(error.code()); + case FETCH_SNAPSHOT: + return new FetchSnapshotResponseData().setErrorCode(error.code()); + default: + throw new IllegalArgumentException("Received response for unexpected request type: " + apiKey); + } + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java new file mode 100644 index 0000000000000..acb97ddcd6431 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.DescribeQuorumRequestData; +import org.apache.kafka.common.message.DescribeQuorumResponseData; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.raft.LeaderState; +import org.apache.kafka.raft.LogOffsetMetadata; +import org.apache.kafka.raft.internals.ReplicaKey; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +public class DescribeQuorumRpc { + public static DescribeQuorumRequestData singletonDescribeQuorumRequest( + TopicPartition topicPartition + ) { + return new DescribeQuorumRequestData() + .setTopics( + Collections.singletonList( + new DescribeQuorumRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new DescribeQuorumRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + ) + ) + ) + ); + } + + public static DescribeQuorumResponseData singletonDescribeQuorumResponse( + short apiVersion, + TopicPartition topicPartition, + int leaderId, + int leaderEpoch, + long highWatermark, + Collection voters, + Collection observers, + long currentTimeMs + ) { + DescribeQuorumResponseData response = new DescribeQuorumResponseData() + .setTopics( + Collections.singletonList( + new DescribeQuorumResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new DescribeQuorumResponseData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setErrorCode(Errors.NONE.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + .setHighWatermark(highWatermark) + .setCurrentVoters(toReplicaStates(apiVersion, leaderId, voters, currentTimeMs)) + .setObservers(toReplicaStates(apiVersion, leaderId, observers, currentTimeMs)))))); + if (apiVersion >= 2) { + DescribeQuorumResponseData.NodeCollection nodes = new DescribeQuorumResponseData.NodeCollection(voters.size()); + for (LeaderState.ReplicaState voter : voters) { + nodes.add( + new DescribeQuorumResponseData.Node() + .setNodeId(voter.replicaKey().id()) + .setListeners(voter.listeners().toDescribeQuorumResponseListeners()) + ); + } + response.setNodes(nodes); + } + return response; + } + + private static List toReplicaStates( + short apiVersion, + int leaderId, + Collection states, + long currentTimeMs + ) { + return states + .stream() + .map(replicaState -> toReplicaState(apiVersion, leaderId, replicaState, currentTimeMs)) + .collect(Collectors.toList()); + } + + private static DescribeQuorumResponseData.ReplicaState toReplicaState( + short apiVersion, + int leaderId, + LeaderState.ReplicaState replicaState, + long currentTimeMs + ) { + final long lastCaughtUpTimestamp; + final long 
lastFetchTimestamp; + if (replicaState.replicaKey().id() == leaderId) { + lastCaughtUpTimestamp = currentTimeMs; + lastFetchTimestamp = currentTimeMs; + } else { + lastCaughtUpTimestamp = replicaState.lastCaughtUpTimestamp(); + lastFetchTimestamp = replicaState.lastFetchTimestamp(); + } + DescribeQuorumResponseData.ReplicaState replicaStateData = new DescribeQuorumResponseData.ReplicaState() + .setReplicaId(replicaState.replicaKey().id()) + .setLogEndOffset(replicaState.endOffset().map(LogOffsetMetadata::offset).orElse(-1L)) + .setLastCaughtUpTimestamp(lastCaughtUpTimestamp) + .setLastFetchTimestamp(lastFetchTimestamp); + + if (apiVersion >= 2) { + replicaStateData.setReplicaDirectoryId(replicaState.replicaKey().directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); + } + return replicaStateData; + } + + public static boolean hasValidTopicPartition(DescribeQuorumRequestData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java new file mode 100644 index 0000000000000..8e5ed5b685fa0 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.message.FetchRequestData; +import org.apache.kafka.common.message.FetchResponseData; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.raft.Endpoints; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.Optional; +import java.util.function.Consumer; + +public class FetchRpc { + public static FetchRequestData singletonFetchRequest( + TopicPartition topicPartition, + Uuid topicId, + Consumer partitionConsumer + ) { + FetchRequestData.FetchPartition fetchPartition = + new FetchRequestData.FetchPartition() + .setPartition(topicPartition.partition()); + partitionConsumer.accept(fetchPartition); + + FetchRequestData.FetchTopic fetchTopic = + new FetchRequestData.FetchTopic() + .setTopic(topicPartition.topic()) + .setTopicId(topicId) + .setPartitions(Collections.singletonList(fetchPartition)); + + return new FetchRequestData() + .setTopics(Collections.singletonList(fetchTopic)); + } + + public static FetchResponseData singletonFetchResponse( + ListenerName listenerName, + short apiVersion, + TopicPartition topicPartition, + Uuid topicId, + Errors topLevelError, + int leaderId, + Endpoints endpoints, + Consumer partitionConsumer + ) { + FetchResponseData.PartitionData fetchablePartition = + new FetchResponseData.PartitionData(); + + fetchablePartition.setPartitionIndex(topicPartition.partition()); + + partitionConsumer.accept(fetchablePartition); + + FetchResponseData.FetchableTopicResponse fetchableTopic = + new FetchResponseData.FetchableTopicResponse() + .setTopic(topicPartition.topic()) + .setTopicId(topicId) + .setPartitions(Collections.singletonList(fetchablePartition)); + + FetchResponseData response = new FetchResponseData(); + + if (apiVersion >= 17) { + Optional address = endpoints.address(listenerName); + if (address.isPresent() && leaderId >= 0) { + // Populate the node endpoints + FetchResponseData.NodeEndpointCollection nodeEndpoints = new FetchResponseData.NodeEndpointCollection(1); + nodeEndpoints.add( + new FetchResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) + ); + response.setNodeEndpoints(nodeEndpoints); + } + } + + return response + .setErrorCode(topLevelError.code()) + .setResponses(Collections.singletonList(fetchableTopic)); + } + + public static boolean hasValidTopicPartition(FetchRequestData data, TopicPartition topicPartition, Uuid topicId) { + return data.topics().size() == 1 && + data.topics().get(0).topicId().equals(topicId) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partition() == topicPartition.partition(); + } + + public static boolean hasValidTopicPartition(FetchResponseData data, TopicPartition topicPartition, Uuid topicId) { + return data.responses().size() == 1 && + data.responses().get(0).topicId().equals(topicId) && + data.responses().get(0).partitions().size() == 1 && + data.responses().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java new file mode 100644 index 0000000000000..1e369426c2874 --- /dev/null +++ 
b/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.FetchSnapshotRequestData; +import org.apache.kafka.common.message.FetchSnapshotResponseData; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.raft.Endpoints; +import org.apache.kafka.raft.OffsetAndEpoch; +import org.apache.kafka.raft.internals.ReplicaKey; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.Optional; +import java.util.function.UnaryOperator; + +public class FetchSnapshotRpc { + public static FetchSnapshotRequestData singletonFetchSnapshotRequest( + String clusterId, + ReplicaKey replicaKey, + TopicPartition topicPartition, + int epoch, + OffsetAndEpoch offsetAndEpoch, + int maxBytes, + long position + ) { + FetchSnapshotRequestData.SnapshotId snapshotId = new FetchSnapshotRequestData.SnapshotId() + .setEndOffset(offsetAndEpoch.offset()) + .setEpoch(offsetAndEpoch.epoch()); + + FetchSnapshotRequestData.PartitionSnapshot partitionSnapshot = new FetchSnapshotRequestData.PartitionSnapshot() + .setPartition(topicPartition.partition()) + .setCurrentLeaderEpoch(epoch) + .setSnapshotId(snapshotId) + .setPosition(position) + .setReplicaDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); + + return new FetchSnapshotRequestData() + .setClusterId(clusterId) + .setReplicaId(replicaKey.id()) + .setMaxBytes(maxBytes) + .setTopics( + Collections.singletonList( + new FetchSnapshotRequestData.TopicSnapshot() + .setName(topicPartition.topic()) + .setPartitions(Collections.singletonList(partitionSnapshot)) + ) + ); + } + + /** + * Creates a FetchSnapshotResponseData with a single PartitionSnapshot for the topic partition. + * + * The partition index will already be populated when calling operator. 
+ * + * @param listenerName the listener used to accept the request + * @param apiVersion the api version of the request + * @param topicPartition the topic partition to include + * @param leaderId the id of the leader + * @param endpoints the endpoints of the leader + * @param operator unary operator responsible for populating all of the appropriate fields + * @return the created fetch snapshot response data + */ + public static FetchSnapshotResponseData singletonFetchSnapshotResponse( + ListenerName listenerName, + short apiVersion, + TopicPartition topicPartition, + int leaderId, + Endpoints endpoints, + UnaryOperator operator + ) { + FetchSnapshotResponseData.PartitionSnapshot partitionSnapshot = operator.apply( + new FetchSnapshotResponseData.PartitionSnapshot().setIndex(topicPartition.partition()) + ); + + FetchSnapshotResponseData response = new FetchSnapshotResponseData() + .setTopics( + Collections.singletonList( + new FetchSnapshotResponseData.TopicSnapshot() + .setName(topicPartition.topic()) + .setPartitions(Collections.singletonList(partitionSnapshot)) + ) + ); + + if (apiVersion >= 1) { + Optional address = endpoints.address(listenerName); + if (address.isPresent() && leaderId >= 0) { + // Populate the node endpoints + FetchSnapshotResponseData.NodeEndpointCollection nodeEndpoints = + new FetchSnapshotResponseData.NodeEndpointCollection(1); + nodeEndpoints.add( + new FetchSnapshotResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) + ); + response.setNodeEndpoints(nodeEndpoints); + } + } + + return response; + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java new file mode 100644 index 0000000000000..61345ad01e6b3 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.BeginQuorumEpochRequestData; +import org.apache.kafka.common.message.BeginQuorumEpochResponseData; +import org.apache.kafka.common.message.EndQuorumEpochRequestData; +import org.apache.kafka.common.message.EndQuorumEpochResponseData; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.raft.Endpoints; +import org.apache.kafka.raft.internals.ReplicaKey; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + + +public class QuorumEpochRpc { + public static BeginQuorumEpochRequestData singletonBeginQuorumEpochRequest( + TopicPartition topicPartition, + String clusterId, + int leaderEpoch, + int leaderId, + Endpoints leaderEndpoints, + ReplicaKey voterKey + ) { + return new BeginQuorumEpochRequestData() + .setClusterId(clusterId) + .setVoterId(voterKey.id()) + .setTopics( + Collections.singletonList( + new BeginQuorumEpochRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new BeginQuorumEpochRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setLeaderEpoch(leaderEpoch) + .setLeaderId(leaderId) + .setVoterDirectoryId(voterKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + ) + ) + ) + ) + .setLeaderEndpoints(leaderEndpoints.toBeginQuorumEpochRequest()); + } + + public static BeginQuorumEpochResponseData singletonBeginQuorumEpochResponse( + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + Endpoints endpoints + ) { + BeginQuorumEpochResponseData response = new BeginQuorumEpochResponseData() + .setErrorCode(topLevelError.code()) + .setTopics( + Collections.singletonList( + new BeginQuorumEpochResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new BeginQuorumEpochResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + ) + ) + ) + ); + + if (apiVersion >= 1) { + Optional address = endpoints.address(listenerName); + if (address.isPresent() && leaderId >= 0) { + // Populate the node endpoints + BeginQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = + new BeginQuorumEpochResponseData.NodeEndpointCollection(1); + nodeEndpoints.add( + new BeginQuorumEpochResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) + ); + response.setNodeEndpoints(nodeEndpoints); + } + } + + return response; + } + + public static EndQuorumEpochRequestData singletonEndQuorumEpochRequest( + TopicPartition topicPartition, + String clusterId, + int leaderEpoch, + int leaderId, + List preferredReplicaKeys + ) { + List preferredSuccessors = preferredReplicaKeys + .stream() + .map(ReplicaKey::id) + .collect(Collectors.toList()); + + List preferredCandidates = preferredReplicaKeys + .stream() + .map(replicaKey -> new EndQuorumEpochRequestData.ReplicaInfo() + .setCandidateId(replicaKey.id()) + .setCandidateDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + ) + .collect(Collectors.toList()); + + return new EndQuorumEpochRequestData() + .setClusterId(clusterId) + .setTopics( 
+ Collections.singletonList( + new EndQuorumEpochRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new EndQuorumEpochRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setLeaderEpoch(leaderEpoch) + .setLeaderId(leaderId) + .setPreferredSuccessors(preferredSuccessors) + .setPreferredCandidates(preferredCandidates) + ) + ) + ) + ); + } + + public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + Endpoints endpoints + ) { + EndQuorumEpochResponseData response = new EndQuorumEpochResponseData() + .setErrorCode(topLevelError.code()) + .setTopics(Collections.singletonList( + new EndQuorumEpochResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions(Collections.singletonList( + new EndQuorumEpochResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + ))) + ); + + if (apiVersion >= 1) { + Optional address = endpoints.address(listenerName); + if (address.isPresent() && leaderId >= 0) { + // Populate the node endpoints + EndQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = + new EndQuorumEpochResponseData.NodeEndpointCollection(1); + nodeEndpoints.add( + new EndQuorumEpochResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) + ); + response.setNodeEndpoints(nodeEndpoints); + } + } + + return response; + } + + public static boolean hasValidTopicPartition(BeginQuorumEpochRequestData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } + + public static boolean hasValidTopicPartition(BeginQuorumEpochResponseData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } + + public static boolean hasValidTopicPartition(EndQuorumEpochRequestData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } + + public static boolean hasValidTopicPartition(EndQuorumEpochResponseData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } + + public static Optional beginQuorumEpochRequestVoterKey( + BeginQuorumEpochRequestData request, + BeginQuorumEpochRequestData.PartitionData partition + ) { + if (request.voterId() < 0) { + return Optional.empty(); + } else { + return Optional.of(ReplicaKey.of(request.voterId(), partition.voterDirectoryId())); + } + } +} diff --git 
a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java new file mode 100644 index 0000000000000..ccf925a927824 --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.feature.SupportedVersionRange; +import org.apache.kafka.common.message.AddRaftVoterRequestData; +import org.apache.kafka.common.message.AddRaftVoterResponseData; +import org.apache.kafka.common.message.RemoveRaftVoterRequestData; +import org.apache.kafka.common.message.RemoveRaftVoterResponseData; +import org.apache.kafka.common.message.UpdateRaftVoterRequestData; +import org.apache.kafka.common.message.UpdateRaftVoterResponseData; +import org.apache.kafka.common.message.VoteRequestData; +import org.apache.kafka.common.message.VoteResponseData; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.raft.Endpoints; +import org.apache.kafka.raft.LeaderAndEpoch; +import org.apache.kafka.raft.internals.ReplicaKey; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.Optional; + +public class VoteRpc { + public static VoteRequestData singletonVoteRequest( + TopicPartition topicPartition, + String clusterId, + int candidateEpoch, + ReplicaKey candidateKey, + ReplicaKey voterKey, + int lastEpoch, + long lastEpochEndOffset + ) { + return new VoteRequestData() + .setClusterId(clusterId) + .setVoterId(voterKey.id()) + .setTopics( + Collections.singletonList( + new VoteRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new VoteRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setCandidateEpoch(candidateEpoch) + .setCandidateId(candidateKey.id()) + .setCandidateDirectoryId( + candidateKey + .directoryId() + .orElse(ReplicaKey.NO_DIRECTORY_ID) + ) + .setVoterDirectoryId( + voterKey + .directoryId() + .orElse(ReplicaKey.NO_DIRECTORY_ID) + ) + .setLastOffsetEpoch(lastEpoch) + .setLastOffset(lastEpochEndOffset) + ) + ) + ) + ); + } + + public static VoteResponseData singletonVoteResponse( + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + boolean voteGranted, + Endpoints endpoints + ) { + VoteResponseData response = new VoteResponseData() + .setErrorCode(topLevelError.code()) + .setTopics(Collections.singletonList( + new VoteResponseData.TopicData() + .setTopicName(topicPartition.topic()) + 
.setPartitions(Collections.singletonList( + new VoteResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + .setVoteGranted(voteGranted))))); + + if (apiVersion >= 1) { + Optional address = endpoints.address(listenerName); + if (address.isPresent() && leaderId >= 0) { + // Populate the node endpoints + VoteResponseData.NodeEndpointCollection nodeEndpoints = new VoteResponseData.NodeEndpointCollection(1); + nodeEndpoints.add( + new VoteResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) + ); + response.setNodeEndpoints(nodeEndpoints); + } + } + + return response; + } + + public static Optional voteRequestVoterKey( + VoteRequestData request, + VoteRequestData.PartitionData partition + ) { + if (request.voterId() < 0) { + return Optional.empty(); + } else { + return Optional.of(ReplicaKey.of(request.voterId(), partition.voterDirectoryId())); + } + } + + public static AddRaftVoterRequestData addVoterRequest( + String clusterId, + int timeoutMs, + ReplicaKey voter, + Endpoints listeners + ) { + return new AddRaftVoterRequestData() + .setClusterId(clusterId) + .setTimeoutMs(timeoutMs) + .setVoterId(voter.id()) + .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + .setListeners(listeners.toAddVoterRequest()); + } + + public static AddRaftVoterResponseData addVoterResponse( + Errors error, + String errorMessage + ) { + errorMessage = errorMessage == null ? error.message() : errorMessage; + + return new AddRaftVoterResponseData() + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); + } + + public static RemoveRaftVoterRequestData removeVoterRequest( + String clusterId, + ReplicaKey voter + ) { + return new RemoveRaftVoterRequestData() + .setClusterId(clusterId) + .setVoterId(voter.id()) + .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); + } + + public static RemoveRaftVoterResponseData removeVoterResponse( + Errors error, + String errorMessage + ) { + errorMessage = errorMessage == null ? 
error.message() : errorMessage; + + return new RemoveRaftVoterResponseData() + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); + } + + public static UpdateRaftVoterRequestData updateVoterRequest( + String clusterId, + ReplicaKey voter, + int epoch, + SupportedVersionRange supportedVersions, + Endpoints endpoints + ) { + UpdateRaftVoterRequestData request = new UpdateRaftVoterRequestData() + .setClusterId(clusterId) + .setCurrentLeaderEpoch(epoch) + .setVoterId(voter.id()) + .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + .setListeners(endpoints.toUpdateVoterRequest()); + + request.kRaftVersionFeature() + .setMinSupportedVersion(supportedVersions.min()) + .setMaxSupportedVersion(supportedVersions.max()); + + return request; + } + + public static UpdateRaftVoterResponseData updateVoterResponse( + Errors error, + ListenerName listenerName, + LeaderAndEpoch leaderAndEpoch, + Endpoints endpoints + ) { + UpdateRaftVoterResponseData response = new UpdateRaftVoterResponseData() + .setErrorCode(error.code()); + + response.currentLeader() + .setLeaderId(leaderAndEpoch.leaderId().orElse(-1)) + .setLeaderEpoch(leaderAndEpoch.epoch()); + + Optional address = endpoints.address(listenerName); + if (address.isPresent()) { + response.currentLeader() + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()); + } + + return response; + } + + public static Optional addVoterRequestVoterKey(AddRaftVoterRequestData request) { + if (request.voterId() < 0) { + return Optional.empty(); + } else { + return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); + } + } + + public static Optional removeVoterRequestVoterKey(RemoveRaftVoterRequestData request) { + if (request.voterId() < 0) { + return Optional.empty(); + } else { + return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); + } + } + + public static Optional updateVoterRequestVoterKey(UpdateRaftVoterRequestData request) { + if (request.voterId() < 0) { + return Optional.empty(); + } else { + return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); + } + } + + public static boolean hasValidTopicPartition(VoteResponseData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } + + public static boolean hasValidTopicPartition(VoteRequestData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } +} diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java index 96a5df1845fcb..6ad6924458fdf 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java @@ -47,6 +47,8 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; +import org.apache.kafka.raft.utils.FetchRpc; +import org.apache.kafka.raft.utils.FetchSnapshotRpc; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; @@ -284,7 +286,7 @@ private ApiMessage buildTestRequest(ApiKeys key) { return VoteRequest.singletonRequest(topicPartition, clusterId, leaderEpoch, leaderId, lastEpoch, 329); case FETCH: - FetchRequestData request = RaftUtil.singletonFetchRequest(topicPartition, topicId, fetchPartition -> + FetchRequestData request = FetchRpc.singletonFetchRequest(topicPartition, topicId, fetchPartition -> fetchPartition .setCurrentLeaderEpoch(5) .setFetchOffset(333) @@ -294,7 +296,7 @@ private ApiMessage buildTestRequest(ApiKeys key) { return request; case FETCH_SNAPSHOT: - return RaftUtil.singletonFetchSnapshotRequest( + return FetchSnapshotRpc.singletonFetchSnapshotRequest( clusterId, ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), topicPartition, diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java index 3298bd8dca9da..15c6381d35dd8 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientSnapshotTest.java @@ -27,6 +27,7 @@ import org.apache.kafka.common.requests.FetchSnapshotRequest; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.raft.internals.StringSerde; +import org.apache.kafka.raft.utils.FetchSnapshotRpc; import org.apache.kafka.snapshot.RawSnapshotReader; import org.apache.kafka.snapshot.RawSnapshotWriter; import org.apache.kafka.snapshot.RecordsSnapshotWriter; @@ -2027,7 +2028,7 @@ public static FetchSnapshotRequestData fetchSnapshotRequest( int maxBytes, long position ) { - return RaftUtil.singletonFetchSnapshotRequest( + return FetchSnapshotRpc.singletonFetchSnapshotRequest( null, ReplicaKey.of(-1, ReplicaKey.NO_DIRECTORY_ID), topicPartition, @@ -2047,7 +2048,7 @@ private static FetchSnapshotRequestData fetchSnapshotRequest( int maxBytes, long position ) { - return RaftUtil.singletonFetchSnapshotRequest( + return FetchSnapshotRpc.singletonFetchSnapshotRequest( clusterId, replicaKey, topicPartition, diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index 0f1cfd6f3c43e..4b650d200e5db 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -64,6 +64,11 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.raft.internals.BatchBuilder; import org.apache.kafka.raft.internals.StringSerde; +import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.FetchRpc; +import org.apache.kafka.raft.utils.FetchSnapshotRpc; +import org.apache.kafka.raft.utils.QuorumEpochRpc; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.Features; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.server.common.serialization.RecordSerde; @@ -98,7 +103,6 @@ import java.util.stream.Stream; import static org.apache.kafka.raft.LeaderState.CHECK_QUORUM_TIMEOUT_FACTOR; -import static org.apache.kafka.raft.RaftUtil.hasValidTopicPartition; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -763,7 +767,7 @@ void assertSentVoteResponse( RaftMessage raftMessage = sentMessages.get(0); assertInstanceOf(VoteResponseData.class, raftMessage.data()); 
VoteResponseData response = (VoteResponseData) raftMessage.data(); - assertTrue(hasValidTopicPartition(response, metadataPartition)); + assertTrue(VoteRpc.hasValidTopicPartition(response, metadataPartition)); VoteResponseData.PartitionData partitionResponse = response.topics().get(0).partitions().get(0); @@ -1323,7 +1327,7 @@ EndQuorumEpochResponseData endEpochResponse( int epoch, OptionalInt leaderId ) { - return RaftUtil.singletonEndQuorumEpochResponse( + return QuorumEpochRpc.singletonEndQuorumEpochResponse( channel.listenerName(), endQuorumEpochRpcVersion(), Errors.NONE, @@ -1354,7 +1358,7 @@ EndQuorumEpochRequestData endEpochRequest( int leaderId, List preferredCandidates ) { - return RaftUtil.singletonEndQuorumEpochRequest( + return QuorumEpochRpc.singletonEndQuorumEpochRequest( metadataPartition, clusterId, epoch, @@ -1381,7 +1385,7 @@ BeginQuorumEpochRequestData beginEpochRequest( int leaderId, ReplicaKey voterKey ) { - return RaftUtil.singletonBeginQuorumEpochRequest( + return QuorumEpochRpc.singletonBeginQuorumEpochRequest( metadataPartition, clusterId, epoch, @@ -1392,7 +1396,7 @@ BeginQuorumEpochRequestData beginEpochRequest( } BeginQuorumEpochResponseData beginEpochResponse(int epoch, int leaderId) { - return RaftUtil.singletonBeginQuorumEpochResponse( + return QuorumEpochRpc.singletonBeginQuorumEpochResponse( channel.listenerName(), beginQuorumEpochRpcVersion(), Errors.NONE, @@ -1448,7 +1452,7 @@ VoteRequestData voteRequest( int lastEpoch, long lastEpochOffset ) { - return RaftUtil.singletonVoteRequest( + return VoteRpc.singletonVoteRequest( metadataPartition, clusterId, epoch, @@ -1460,7 +1464,7 @@ VoteRequestData voteRequest( } VoteResponseData voteResponse(boolean voteGranted, OptionalInt leaderId, int epoch) { - return RaftUtil.singletonVoteResponse( + return VoteRpc.singletonVoteResponse( channel.listenerName(), voteRpcVersion(), Errors.NONE, @@ -1474,7 +1478,7 @@ VoteResponseData voteResponse(boolean voteGranted, OptionalInt leaderId, int epo } private VoteRequestData.PartitionData unwrap(VoteRequestData voteRequest) { - assertTrue(RaftUtil.hasValidTopicPartition(voteRequest, metadataPartition)); + assertTrue(VoteRpc.hasValidTopicPartition(voteRequest, metadataPartition)); return voteRequest.topics().get(0).partitions().get(0); } @@ -1585,7 +1589,7 @@ FetchRequestData fetchRequest( int lastFetchedEpoch, int maxWaitTimeMs ) { - FetchRequestData request = RaftUtil.singletonFetchRequest( + FetchRequestData request = FetchRpc.singletonFetchRequest( metadataPartition, metadataTopicId, fetchPartition -> { @@ -1614,7 +1618,7 @@ FetchResponseData fetchResponse( long highWatermark, Errors error ) { - return RaftUtil.singletonFetchResponse( + return FetchRpc.singletonFetchResponse( channel.listenerName(), fetchRpcVersion(), metadataPartition, @@ -1642,7 +1646,7 @@ FetchResponseData divergingFetchResponse( int divergingEpoch, long highWatermark ) { - return RaftUtil.singletonFetchResponse( + return FetchRpc.singletonFetchResponse( channel.listenerName(), fetchRpcVersion(), metadataPartition, @@ -1670,7 +1674,7 @@ FetchResponseData snapshotFetchResponse( OffsetAndEpoch snapshotId, long highWatermark ) { - return RaftUtil.singletonFetchResponse( + return FetchRpc.singletonFetchResponse( channel.listenerName(), fetchRpcVersion(), metadataPartition, @@ -1696,7 +1700,7 @@ FetchSnapshotResponseData fetchSnapshotResponse( int leaderId, UnaryOperator operator ) { - return RaftUtil.singletonFetchSnapshotResponse( + return FetchSnapshotRpc.singletonFetchSnapshotResponse( 
channel.listenerName(), fetchSnapshotRpcVersion(), metadataPartition, @@ -1707,7 +1711,7 @@ FetchSnapshotResponseData fetchSnapshotResponse( } DescribeQuorumRequestData describeQuorumRequest() { - return RaftUtil.singletonDescribeQuorumRequest(metadataPartition); + return DescribeQuorumRpc.singletonDescribeQuorumRequest(metadataPartition); } AddRaftVoterRequestData addVoterRequest( @@ -1729,7 +1733,7 @@ AddRaftVoterRequestData addVoterRequest( ReplicaKey voter, Endpoints endpoints ) { - return RaftUtil.addVoterRequest( + return VoteRpc.addVoterRequest( clusterId, timeoutMs, voter, @@ -1742,7 +1746,7 @@ RemoveRaftVoterRequestData removeVoterRequest(ReplicaKey voter) { } RemoveRaftVoterRequestData removeVoterRequest(String cluster, ReplicaKey voter) { - return RaftUtil.removeVoterRequest(cluster, voter); + return VoteRpc.removeVoterRequest(cluster, voter); } UpdateRaftVoterRequestData updateVoterRequest( @@ -1760,7 +1764,7 @@ UpdateRaftVoterRequestData updateVoterRequest( SupportedVersionRange supportedVersions, Endpoints endpoints ) { - return RaftUtil.updateVoterRequest(clusterId, voter, epoch, supportedVersions, endpoints); + return VoteRpc.updateVoterRequest(clusterId, voter, epoch, supportedVersions, endpoints); } UpdateRaftVoterResponseData updateVoterResponse( From 40bca25b927fb9f3bccf5b5e2198bd84c9bf81e3 Mon Sep 17 00:00:00 2001 From: frankvicky Date: Fri, 9 Aug 2024 16:06:59 +0800 Subject: [PATCH 2/7] KAFKA-16907: Rebase and fix conflicts --- .../main/java/org/apache/kafka/raft/KafkaRaftClient.java | 2 +- .../org/apache/kafka/raft/internals/UpdateVoterHandler.java | 2 +- .../java/org/apache/kafka/raft/utils/ApiMessageUtils.java | 6 ++++++ .../java/org/apache/kafka/raft/RaftClientTestContext.java | 2 +- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java index c99351030f7cf..42148991f08a8 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java +++ b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java @@ -3103,7 +3103,7 @@ private long maybeSendFetchOrFetchSnapshot(FollowerState state, long currentTime } private UpdateRaftVoterRequestData buildUpdateVoterRequest() { - return RaftUtil.updateVoterRequest( + return VoteRpc.updateVoterRequest( clusterId, quorum.localReplicaKeyOrThrow(), quorum.epoch(), diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java index 5d940cc562ebc..377faf1fd4a6f 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java @@ -206,7 +206,7 @@ public CompletableFuture handleUpdateVoterRequest( // Reply immediately and don't wait for the change to commit return CompletableFuture.completedFuture( - RaftUtil.updateVoterResponse( + VoteRpc.updateVoterResponse( Errors.NONE, requestListenerName, new LeaderAndEpoch( diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java b/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java index 99ca74ea132ae..e4bac676ca80b 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java @@ -16,10 +16,12 @@ */ package org.apache.kafka.raft.utils; +import org.apache.kafka.common.message.ApiVersionsResponseData; import 
org.apache.kafka.common.message.BeginQuorumEpochResponseData; import org.apache.kafka.common.message.EndQuorumEpochResponseData; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.message.FetchSnapshotResponseData; +import org.apache.kafka.common.message.UpdateRaftVoterResponseData; import org.apache.kafka.common.message.VoteResponseData; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.ApiMessage; @@ -38,6 +40,10 @@ public static ApiMessage parseErrorResponse(ApiKeys apiKey, Errors error) { return new FetchResponseData().setErrorCode(error.code()); case FETCH_SNAPSHOT: return new FetchSnapshotResponseData().setErrorCode(error.code()); + case API_VERSIONS: + return new ApiVersionsResponseData().setErrorCode(error.code()); + case UPDATE_RAFT_VOTER: + return new UpdateRaftVoterResponseData().setErrorCode(error.code()); default: throw new IllegalArgumentException("Received response for unexpected request type: " + apiKey); } diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index 4b650d200e5db..20025e6b3933a 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -1771,7 +1771,7 @@ UpdateRaftVoterResponseData updateVoterResponse( Errors error, LeaderAndEpoch leaderAndEpoch ) { - return RaftUtil.updateVoterResponse( + return VoteRpc.updateVoterResponse( error, channel.listenerName(), leaderAndEpoch, From b31643bbe22241183b9fb9737bbfccb2ae490a5e Mon Sep 17 00:00:00 2001 From: frankvicky Date: Thu, 22 Aug 2024 17:46:17 +0800 Subject: [PATCH 3/7] KAFKA-16907: Rebase and refactor --- .../apache/kafka/raft/KafkaRaftClient.java | 33 +++-- .../kafka/raft/internals/AddVoterHandler.java | 2 +- ...EpochRpc.java => BeginQuorumEpochRpc.java} | 104 +------------- .../kafka/raft/utils/DescribeQuorumRpc.java | 2 +- .../kafka/raft/utils/EndQuorumEpochRpc.java | 129 ++++++++++++++++++ .../kafka/raft/utils/FetchSnapshotRpc.java | 2 +- .../org/apache/kafka/raft/utils/VoteRpc.java | 2 +- .../kafka/raft/RaftClientTestContext.java | 11 +- 8 files changed, 157 insertions(+), 128 deletions(-) rename raft/src/main/java/org/apache/kafka/raft/utils/{QuorumEpochRpc.java => BeginQuorumEpochRpc.java} (54%) create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java index 42148991f08a8..2c278c2bd70bd 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java +++ b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java @@ -80,12 +80,12 @@ import org.apache.kafka.raft.internals.RemoveVoterHandler; import org.apache.kafka.raft.internals.ThresholdPurgatory; import org.apache.kafka.raft.internals.UpdateVoterHandler; -import org.apache.kafka.raft.internals.VoterSet; import org.apache.kafka.raft.utils.ApiMessageUtils; +import org.apache.kafka.raft.utils.BeginQuorumEpochRpc; import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.EndQuorumEpochRpc; import org.apache.kafka.raft.utils.FetchRpc; import org.apache.kafka.raft.utils.FetchSnapshotRpc; -import org.apache.kafka.raft.utils.QuorumEpochRpc; import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.KRaftVersion; import 
org.apache.kafka.server.common.serialization.RecordSerde; @@ -123,7 +123,6 @@ import java.util.stream.Collectors; import static java.util.concurrent.CompletableFuture.completedFuture; -import static org.apache.kafka.raft.RaftUtil.hasValidTopicPartition; import static org.apache.kafka.snapshot.Snapshots.BOOTSTRAP_SNAPSHOT_ID; /** @@ -773,7 +772,7 @@ private VoteResponseData handleVoteRequest( return new VoteResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code()); } - if (!hasValidTopicPartition(request, log.topicPartition())) { + if (!VoteRpc.hasValidTopicPartition(request, log.topicPartition())) { // Until we support multi-raft, we treat individual topic partition mismatches as invalid requests return new VoteResponseData().setErrorCode(Errors.INVALID_REQUEST.code()); } @@ -862,7 +861,7 @@ private boolean handleVoteResponse( return handleTopLevelError(topLevelError, responseMetadata); } - if (!hasValidTopicPartition(response, log.topicPartition())) { + if (!VoteRpc.hasValidTopicPartition(response, log.topicPartition())) { return false; } @@ -958,7 +957,7 @@ private BeginQuorumEpochResponseData buildBeginQuorumEpochResponse( short apiVersion, Errors partitionLevelError ) { - return QuorumEpochRpc.singletonBeginQuorumEpochResponse( + return BeginQuorumEpochRpc.singletonBeginQuorumEpochResponse( listenerName, apiVersion, Errors.NONE, @@ -988,7 +987,7 @@ private BeginQuorumEpochResponseData handleBeginQuorumEpochRequest( return new BeginQuorumEpochResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code()); } - if (!hasValidTopicPartition(request, log.topicPartition())) { + if (!BeginQuorumEpochRpc.hasValidTopicPartition(request, log.topicPartition())) { // Until we support multi-raft, we treat topic partition mismatches as invalid requests return new BeginQuorumEpochResponseData().setErrorCode(Errors.INVALID_REQUEST.code()); } @@ -1024,7 +1023,7 @@ private BeginQuorumEpochResponseData handleBeginQuorumEpochRequest( ); // Check that the request was intended for this replica - Optional voterKey = QuorumEpochRpc.beginQuorumEpochRequestVoterKey(request, partitionRequest); + Optional voterKey = BeginQuorumEpochRpc.beginQuorumEpochRequestVoterKey(request, partitionRequest); if (!isValidVoterKey(voterKey)) { logger.info( "Leader sent a voter key ({}) in the BEGIN_QUORUM_EPOCH request that doesn't " + @@ -1059,7 +1058,7 @@ private boolean handleBeginQuorumEpochResponse( return handleTopLevelError(topLevelError, responseMetadata); } - if (!hasValidTopicPartition(response, log.topicPartition())) { + if (!BeginQuorumEpochRpc.hasValidTopicPartition(response, log.topicPartition())) { return false; } @@ -1114,7 +1113,7 @@ private EndQuorumEpochResponseData buildEndQuorumEpochResponse( short apiVersion, Errors partitionLevelError ) { - return QuorumEpochRpc.singletonEndQuorumEpochResponse( + return EndQuorumEpochRpc.singletonEndQuorumEpochResponse( listenerName, apiVersion, Errors.NONE, @@ -1144,7 +1143,7 @@ private EndQuorumEpochResponseData handleEndQuorumEpochRequest( return new EndQuorumEpochResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code()); } - if (!hasValidTopicPartition(request, log.topicPartition())) { + if (!EndQuorumEpochRpc.hasValidTopicPartition(request, log.topicPartition())) { // Until we support multi-raft, we treat topic partition mismatches as invalid requests return new EndQuorumEpochResponseData().setErrorCode(Errors.INVALID_REQUEST.code()); } @@ -1235,7 +1234,7 @@ private boolean handleEndQuorumEpochResponse( return handleTopLevelError(topLevelError, 
responseMetadata); } - if (!hasValidTopicPartition(response, log.topicPartition())) { + if (!EndQuorumEpochRpc.hasValidTopicPartition(response, log.topicPartition())) { return false; } @@ -1375,7 +1374,7 @@ private CompletableFuture handleFetchRequest( return completedFuture(new FetchResponseData().setErrorCode(Errors.INCONSISTENT_CLUSTER_ID.code())); } - if (!hasValidTopicPartition(request, log.topicPartition(), log.topicId())) { + if (!FetchRpc.hasValidTopicPartition(request, log.topicPartition(), log.topicId())) { // Until we support multi-raft, we treat topic partition mismatches as invalid requests return completedFuture(new FetchResponseData().setErrorCode(Errors.INVALID_REQUEST.code())); } @@ -1566,7 +1565,7 @@ private boolean handleFetchResponse( return handleTopLevelError(topLevelError, responseMetadata); } - if (!hasValidTopicPartition(response, log.topicPartition(), log.topicId())) { + if (!FetchRpc.hasValidTopicPartition(response, log.topicPartition(), log.topicId())) { return false; } // If the ID is valid, we can set the topic name. @@ -1732,7 +1731,7 @@ private DescribeQuorumResponseData handleDescribeQuorumRequest( long currentTimeMs ) { DescribeQuorumRequestData describeQuorumRequestData = (DescribeQuorumRequestData) requestMetadata.data(); - if (!hasValidTopicPartition(describeQuorumRequestData, log.topicPartition())) { + if (!DescribeQuorumRpc.hasValidTopicPartition(describeQuorumRequestData, log.topicPartition())) { return DescribeQuorumRequest.getPartitionLevelErrorResponse( describeQuorumRequestData, Errors.UNKNOWN_TOPIC_OR_PARTITION @@ -2673,7 +2672,7 @@ private long maybeSendRequest( private EndQuorumEpochRequestData buildEndQuorumEpochRequest( ResignedState state ) { - return QuorumEpochRpc.singletonEndQuorumEpochRequest( + return EndQuorumEpochRpc.singletonEndQuorumEpochRequest( log.topicPartition(), clusterId, quorum.epoch(), @@ -2716,7 +2715,7 @@ private long maybeSendRequest( } private BeginQuorumEpochRequestData buildBeginQuorumEpochRequest(ReplicaKey remoteVoter) { - return QuorumEpochRpc.singletonBeginQuorumEpochRequest( + return BeginQuorumEpochRpc.singletonBeginQuorumEpochRequest( log.topicPartition(), clusterId, quorum.epoch(), diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java index 14ea670c73697..c6e2f5f756486 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java @@ -28,9 +28,9 @@ import org.apache.kafka.raft.Endpoints; import org.apache.kafka.raft.LeaderState; import org.apache.kafka.raft.LogOffsetMetadata; -import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; +import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.KRaftVersion; import org.slf4j.Logger; diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java similarity index 54% rename from raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java rename to raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java index 61345ad01e6b3..ad836fc751f5e 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/QuorumEpochRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java @@ -19,21 +19,17 @@ import org.apache.kafka.common.TopicPartition; 
import org.apache.kafka.common.message.BeginQuorumEpochRequestData; import org.apache.kafka.common.message.BeginQuorumEpochResponseData; -import org.apache.kafka.common.message.EndQuorumEpochRequestData; -import org.apache.kafka.common.message.EndQuorumEpochResponseData; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.raft.Endpoints; -import org.apache.kafka.raft.internals.ReplicaKey; +import org.apache.kafka.raft.ReplicaKey; import java.net.InetSocketAddress; import java.util.Collections; -import java.util.List; import java.util.Optional; -import java.util.stream.Collectors; -public class QuorumEpochRpc { +public class BeginQuorumEpochRpc { public static BeginQuorumEpochRequestData singletonBeginQuorumEpochRequest( TopicPartition topicPartition, String clusterId, @@ -109,88 +105,6 @@ public static BeginQuorumEpochResponseData singletonBeginQuorumEpochResponse( return response; } - public static EndQuorumEpochRequestData singletonEndQuorumEpochRequest( - TopicPartition topicPartition, - String clusterId, - int leaderEpoch, - int leaderId, - List preferredReplicaKeys - ) { - List preferredSuccessors = preferredReplicaKeys - .stream() - .map(ReplicaKey::id) - .collect(Collectors.toList()); - - List preferredCandidates = preferredReplicaKeys - .stream() - .map(replicaKey -> new EndQuorumEpochRequestData.ReplicaInfo() - .setCandidateId(replicaKey.id()) - .setCandidateDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - ) - .collect(Collectors.toList()); - - return new EndQuorumEpochRequestData() - .setClusterId(clusterId) - .setTopics( - Collections.singletonList( - new EndQuorumEpochRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new EndQuorumEpochRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setLeaderEpoch(leaderEpoch) - .setLeaderId(leaderId) - .setPreferredSuccessors(preferredSuccessors) - .setPreferredCandidates(preferredCandidates) - ) - ) - ) - ); - } - - public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - Endpoints endpoints - ) { - EndQuorumEpochResponseData response = new EndQuorumEpochResponseData() - .setErrorCode(topLevelError.code()) - .setTopics(Collections.singletonList( - new EndQuorumEpochResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions(Collections.singletonList( - new EndQuorumEpochResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - ))) - ); - - if (apiVersion >= 1) { - Optional address = endpoints.address(listenerName); - if (address.isPresent() && leaderId >= 0) { - // Populate the node endpoints - EndQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = - new EndQuorumEpochResponseData.NodeEndpointCollection(1); - nodeEndpoints.add( - new EndQuorumEpochResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) - ); - response.setNodeEndpoints(nodeEndpoints); - } - } - - return response; - } - public static boolean hasValidTopicPartition(BeginQuorumEpochRequestData data, TopicPartition topicPartition) { return data.topics().size() == 1 && 
data.topics().get(0).topicName().equals(topicPartition.topic()) && @@ -205,20 +119,6 @@ public static boolean hasValidTopicPartition(BeginQuorumEpochResponseData data, data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } - public static boolean hasValidTopicPartition(EndQuorumEpochRequestData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - public static boolean hasValidTopicPartition(EndQuorumEpochResponseData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - public static Optional beginQuorumEpochRequestVoterKey( BeginQuorumEpochRequestData request, BeginQuorumEpochRequestData.PartitionData partition diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java index acb97ddcd6431..f0dc34a391a04 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java @@ -22,7 +22,7 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.raft.LeaderState; import org.apache.kafka.raft.LogOffsetMetadata; -import org.apache.kafka.raft.internals.ReplicaKey; +import org.apache.kafka.raft.ReplicaKey; import java.util.Collection; import java.util.Collections; diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java new file mode 100644 index 0000000000000..86701023eafca --- /dev/null +++ b/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.raft.utils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.message.EndQuorumEpochRequestData; +import org.apache.kafka.common.message.EndQuorumEpochResponseData; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.raft.Endpoints; +import org.apache.kafka.raft.ReplicaKey; + +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +public class EndQuorumEpochRpc { + public static EndQuorumEpochRequestData singletonEndQuorumEpochRequest( + TopicPartition topicPartition, + String clusterId, + int leaderEpoch, + int leaderId, + List preferredReplicaKeys + ) { + List preferredSuccessors = preferredReplicaKeys + .stream() + .map(ReplicaKey::id) + .collect(Collectors.toList()); + + List preferredCandidates = preferredReplicaKeys + .stream() + .map(replicaKey -> new EndQuorumEpochRequestData.ReplicaInfo() + .setCandidateId(replicaKey.id()) + .setCandidateDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + ) + .collect(Collectors.toList()); + + return new EndQuorumEpochRequestData() + .setClusterId(clusterId) + .setTopics( + Collections.singletonList( + new EndQuorumEpochRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new EndQuorumEpochRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setLeaderEpoch(leaderEpoch) + .setLeaderId(leaderId) + .setPreferredSuccessors(preferredSuccessors) + .setPreferredCandidates(preferredCandidates) + ) + ) + ) + ); + } + + public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + Endpoints endpoints + ) { + EndQuorumEpochResponseData response = new EndQuorumEpochResponseData() + .setErrorCode(topLevelError.code()) + .setTopics(Collections.singletonList( + new EndQuorumEpochResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions(Collections.singletonList( + new EndQuorumEpochResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + ))) + ); + + if (apiVersion >= 1) { + Optional address = endpoints.address(listenerName); + if (address.isPresent() && leaderId >= 0) { + // Populate the node endpoints + EndQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = + new EndQuorumEpochResponseData.NodeEndpointCollection(1); + nodeEndpoints.add( + new EndQuorumEpochResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) + ); + response.setNodeEndpoints(nodeEndpoints); + } + } + + return response; + } + + public static boolean hasValidTopicPartition(EndQuorumEpochRequestData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } + + public static boolean hasValidTopicPartition(EndQuorumEpochResponseData data, TopicPartition topicPartition) { + return data.topics().size() == 1 && + 
data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + } +} diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java index 1e369426c2874..30b3280efcf13 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java @@ -22,7 +22,7 @@ import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.raft.Endpoints; import org.apache.kafka.raft.OffsetAndEpoch; -import org.apache.kafka.raft.internals.ReplicaKey; +import org.apache.kafka.raft.ReplicaKey; import java.net.InetSocketAddress; import java.util.Collections; diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java index ccf925a927824..c75a61998e28c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java @@ -30,7 +30,7 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.raft.Endpoints; import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.raft.internals.ReplicaKey; +import org.apache.kafka.raft.ReplicaKey; import java.net.InetSocketAddress; import java.util.Collections; diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index 20025e6b3933a..365b6792b70fc 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -64,10 +64,11 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.raft.internals.BatchBuilder; import org.apache.kafka.raft.internals.StringSerde; +import org.apache.kafka.raft.utils.BeginQuorumEpochRpc; import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.EndQuorumEpochRpc; import org.apache.kafka.raft.utils.FetchRpc; import org.apache.kafka.raft.utils.FetchSnapshotRpc; -import org.apache.kafka.raft.utils.QuorumEpochRpc; import org.apache.kafka.raft.utils.VoteRpc; import org.apache.kafka.server.common.Features; import org.apache.kafka.server.common.KRaftVersion; @@ -1327,7 +1328,7 @@ EndQuorumEpochResponseData endEpochResponse( int epoch, OptionalInt leaderId ) { - return QuorumEpochRpc.singletonEndQuorumEpochResponse( + return EndQuorumEpochRpc.singletonEndQuorumEpochResponse( channel.listenerName(), endQuorumEpochRpcVersion(), Errors.NONE, @@ -1358,7 +1359,7 @@ EndQuorumEpochRequestData endEpochRequest( int leaderId, List preferredCandidates ) { - return QuorumEpochRpc.singletonEndQuorumEpochRequest( + return EndQuorumEpochRpc.singletonEndQuorumEpochRequest( metadataPartition, clusterId, epoch, @@ -1385,7 +1386,7 @@ BeginQuorumEpochRequestData beginEpochRequest( int leaderId, ReplicaKey voterKey ) { - return QuorumEpochRpc.singletonBeginQuorumEpochRequest( + return BeginQuorumEpochRpc.singletonBeginQuorumEpochRequest( metadataPartition, clusterId, epoch, @@ -1396,7 +1397,7 @@ BeginQuorumEpochRequestData beginEpochRequest( } BeginQuorumEpochResponseData beginEpochResponse(int epoch, int leaderId) { - return QuorumEpochRpc.singletonBeginQuorumEpochResponse( + return BeginQuorumEpochRpc.singletonBeginQuorumEpochResponse( 
channel.listenerName(), beginQuorumEpochRpcVersion(), Errors.NONE, From 8925ff7fb0a835256f8625250a33c4d19b4dec3c Mon Sep 17 00:00:00 2001 From: frankvicky Date: Wed, 18 Dec 2024 11:14:58 +0800 Subject: [PATCH 4/7] KAFKA-16907: Address comments --- .../apache/kafka/raft/KafkaRaftClient.java | 19 +- .../org/apache/kafka/raft/LeaderState.java | 6 +- .../java/org/apache/kafka/raft/RaftUtil.java | 768 ------------------ .../kafka/raft/internals/AddVoterHandler.java | 14 +- .../raft/internals/RemoveVoterHandler.java | 12 +- .../raft/internals/UpdateVoterHandler.java | 18 +- .../internals/UpdateVoterHandlerState.java | 4 +- .../kafka/raft/utils/DynamicReconfigRpc.java | 149 ++++ .../org/apache/kafka/raft/utils/VoteRpc.java | 138 +--- .../kafka/raft/KafkaNetworkChannelTest.java | 3 +- .../kafka/raft/RaftClientTestContext.java | 11 +- .../org/apache/kafka/raft/RaftUtilTest.java | 43 +- 12 files changed, 228 insertions(+), 957 deletions(-) delete mode 100644 raft/src/main/java/org/apache/kafka/raft/RaftUtil.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java diff --git a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java index 254c6e0ca89a9..a982832d0290b 100644 --- a/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java +++ b/raft/src/main/java/org/apache/kafka/raft/KafkaRaftClient.java @@ -83,6 +83,7 @@ import org.apache.kafka.raft.utils.ApiMessageUtils; import org.apache.kafka.raft.utils.BeginQuorumEpochRpc; import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.raft.utils.EndQuorumEpochRpc; import org.apache.kafka.raft.utils.FetchRpc; import org.apache.kafka.raft.utils.FetchSnapshotRpc; @@ -2129,7 +2130,7 @@ private CompletableFuture handleAddVoterRequest( ); } - Optional newVoter = VoteRpc.addVoterRequestVoterKey(data); + Optional newVoter = DynamicReconfigRpc.addVoterRequestVoterKey(data); if (newVoter.isEmpty() || newVoter.get().directoryId().isEmpty()) { return completedFuture( new AddRaftVoterResponseData() @@ -2212,7 +2213,7 @@ private CompletableFuture handleRemoveVoterRequest( ); } - Optional oldVoter = VoteRpc.removeVoterRequestVoterKey(data); + Optional oldVoter = DynamicReconfigRpc.removeVoterRequestVoterKey(data); if (oldVoter.isEmpty() || oldVoter.get().directoryId().isEmpty()) { return completedFuture( new RemoveRaftVoterResponseData() @@ -2236,7 +2237,7 @@ private CompletableFuture handleUpdateVoterRequest( if (!hasValidClusterId(data.clusterId())) { return completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.INCONSISTENT_CLUSTER_ID, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2248,7 +2249,7 @@ private CompletableFuture handleUpdateVoterRequest( Optional leaderValidationError = validateLeaderOnlyRequest(data.currentLeaderEpoch()); if (leaderValidationError.isPresent()) { return completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( leaderValidationError.get(), requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2257,10 +2258,10 @@ private CompletableFuture handleUpdateVoterRequest( ); } - Optional voter = VoteRpc.updateVoterRequestVoterKey(data); + Optional voter = DynamicReconfigRpc.updateVoterRequestVoterKey(data); if (voter.isEmpty() || voter.get().directoryId().isEmpty()) { return completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( 
Errors.INVALID_REQUEST, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2272,7 +2273,7 @@ private CompletableFuture handleUpdateVoterRequest( Endpoints voterEndpoints = Endpoints.fromUpdateVoterRequest(data.listeners()); if (voterEndpoints.address(channel.listenerName()).isEmpty()) { return completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -2287,7 +2288,7 @@ private CompletableFuture handleUpdateVoterRequest( supportedKraftVersions.maxSupportedVersion() < supportedKraftVersions.minSupportedVersion() ) { return completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestMetadata.listenerName(), quorum.leaderAndEpoch(), @@ -3138,7 +3139,7 @@ private long maybeSendFetchOrFetchSnapshot(FollowerState state, long currentTime } private UpdateRaftVoterRequestData buildUpdateVoterRequest() { - return VoteRpc.updateVoterRequest( + return DynamicReconfigRpc.updateVoterRequest( clusterId, quorum.localReplicaKeyOrThrow(), quorum.epoch(), diff --git a/raft/src/main/java/org/apache/kafka/raft/LeaderState.java b/raft/src/main/java/org/apache/kafka/raft/LeaderState.java index 3b4af435e94b6..2d4595be7d387 100644 --- a/raft/src/main/java/org/apache/kafka/raft/LeaderState.java +++ b/raft/src/main/java/org/apache/kafka/raft/LeaderState.java @@ -30,7 +30,7 @@ import org.apache.kafka.raft.internals.AddVoterHandlerState; import org.apache.kafka.raft.internals.BatchAccumulator; import org.apache.kafka.raft.internals.RemoveVoterHandlerState; -import org.apache.kafka.raft.utils.VoteRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.server.common.KRaftVersion; import org.slf4j.Logger; @@ -215,7 +215,7 @@ public void resetAddVoterHandlerState( addVoterHandlerState.ifPresent( handlerState -> handlerState .future() - .complete(VoteRpc.addVoterResponse(error, message)) + .complete(DynamicReconfigRpc.addVoterResponse(error, message)) ); addVoterHandlerState = state; } @@ -232,7 +232,7 @@ public void resetRemoveVoterHandlerState( removeVoterHandlerState.ifPresent( handlerState -> handlerState .future() - .complete(VoteRpc.removeVoterResponse(error, message)) + .complete(DynamicReconfigRpc.removeVoterResponse(error, message)) ); removeVoterHandlerState = state; } diff --git a/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java b/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java deleted file mode 100644 index 12c48955b39b7..0000000000000 --- a/raft/src/main/java/org/apache/kafka/raft/RaftUtil.java +++ /dev/null @@ -1,768 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.kafka.raft; - -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.feature.SupportedVersionRange; -import org.apache.kafka.common.message.AddRaftVoterRequestData; -import org.apache.kafka.common.message.AddRaftVoterResponseData; -import org.apache.kafka.common.message.ApiVersionsResponseData; -import org.apache.kafka.common.message.BeginQuorumEpochRequestData; -import org.apache.kafka.common.message.BeginQuorumEpochResponseData; -import org.apache.kafka.common.message.DescribeQuorumRequestData; -import org.apache.kafka.common.message.DescribeQuorumResponseData; -import org.apache.kafka.common.message.EndQuorumEpochRequestData; -import org.apache.kafka.common.message.EndQuorumEpochResponseData; -import org.apache.kafka.common.message.FetchRequestData; -import org.apache.kafka.common.message.FetchResponseData; -import org.apache.kafka.common.message.FetchSnapshotRequestData; -import org.apache.kafka.common.message.FetchSnapshotResponseData; -import org.apache.kafka.common.message.RemoveRaftVoterRequestData; -import org.apache.kafka.common.message.RemoveRaftVoterResponseData; -import org.apache.kafka.common.message.UpdateRaftVoterRequestData; -import org.apache.kafka.common.message.UpdateRaftVoterResponseData; -import org.apache.kafka.common.message.VoteRequestData; -import org.apache.kafka.common.message.VoteResponseData; -import org.apache.kafka.common.network.ListenerName; -import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ApiMessage; -import org.apache.kafka.common.protocol.Errors; - -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.function.Consumer; -import java.util.function.UnaryOperator; -import java.util.stream.Collectors; - -@SuppressWarnings({ "ClassDataAbstractionCoupling", "ClassFanOutComplexity" }) -public class RaftUtil { - - public static ApiMessage errorResponse(ApiKeys apiKey, Errors error) { - switch (apiKey) { - case VOTE: - return new VoteResponseData().setErrorCode(error.code()); - case BEGIN_QUORUM_EPOCH: - return new BeginQuorumEpochResponseData().setErrorCode(error.code()); - case END_QUORUM_EPOCH: - return new EndQuorumEpochResponseData().setErrorCode(error.code()); - case FETCH: - return new FetchResponseData().setErrorCode(error.code()); - case FETCH_SNAPSHOT: - return new FetchSnapshotResponseData().setErrorCode(error.code()); - case API_VERSIONS: - return new ApiVersionsResponseData().setErrorCode(error.code()); - case UPDATE_RAFT_VOTER: - return new UpdateRaftVoterResponseData().setErrorCode(error.code()); - default: - throw new IllegalArgumentException("Received response for unexpected request type: " + apiKey); - } - } - - public static FetchRequestData singletonFetchRequest( - TopicPartition topicPartition, - Uuid topicId, - Consumer partitionConsumer - ) { - FetchRequestData.FetchPartition fetchPartition = - new FetchRequestData.FetchPartition() - .setPartition(topicPartition.partition()); - partitionConsumer.accept(fetchPartition); - - FetchRequestData.FetchTopic fetchTopic = - new FetchRequestData.FetchTopic() - .setTopic(topicPartition.topic()) - .setTopicId(topicId) - .setPartitions(Collections.singletonList(fetchPartition)); - - return new FetchRequestData() - .setTopics(Collections.singletonList(fetchTopic)); - } - - public static FetchResponseData singletonFetchResponse( - ListenerName listenerName, - short 
apiVersion, - TopicPartition topicPartition, - Uuid topicId, - Errors topLevelError, - int leaderId, - Endpoints endpoints, - Consumer partitionConsumer - ) { - FetchResponseData.PartitionData fetchablePartition = - new FetchResponseData.PartitionData(); - - fetchablePartition.setPartitionIndex(topicPartition.partition()); - - partitionConsumer.accept(fetchablePartition); - - FetchResponseData.FetchableTopicResponse fetchableTopic = - new FetchResponseData.FetchableTopicResponse() - .setTopic(topicPartition.topic()) - .setTopicId(topicId) - .setPartitions(Collections.singletonList(fetchablePartition)); - - FetchResponseData response = new FetchResponseData(); - - if (apiVersion >= 17) { - Optional address = endpoints.address(listenerName); - if (address.isPresent() && leaderId >= 0) { - // Populate the node endpoints - FetchResponseData.NodeEndpointCollection nodeEndpoints = new FetchResponseData.NodeEndpointCollection(1); - nodeEndpoints.add( - new FetchResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) - ); - response.setNodeEndpoints(nodeEndpoints); - } - } - - return response - .setErrorCode(topLevelError.code()) - .setResponses(Collections.singletonList(fetchableTopic)); - } - - public static VoteRequestData singletonVoteRequest( - TopicPartition topicPartition, - String clusterId, - int replicaEpoch, - ReplicaKey replicaKey, - ReplicaKey voterKey, - int lastEpoch, - long lastEpochEndOffset, - boolean preVote - ) { - return new VoteRequestData() - .setClusterId(clusterId) - .setVoterId(voterKey.id()) - .setTopics( - Collections.singletonList( - new VoteRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new VoteRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setReplicaEpoch(replicaEpoch) - .setReplicaId(replicaKey.id()) - .setReplicaDirectoryId( - replicaKey - .directoryId() - .orElse(ReplicaKey.NO_DIRECTORY_ID) - ) - .setVoterDirectoryId( - voterKey - .directoryId() - .orElse(ReplicaKey.NO_DIRECTORY_ID) - ) - .setLastOffsetEpoch(lastEpoch) - .setLastOffset(lastEpochEndOffset) - .setPreVote(preVote) - ) - ) - ) - ); - } - - public static VoteResponseData singletonVoteResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - boolean voteGranted, - Endpoints endpoints - ) { - VoteResponseData.PartitionData partitionData = new VoteResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - .setVoteGranted(voteGranted); - - VoteResponseData response = new VoteResponseData() - .setErrorCode(topLevelError.code()) - .setTopics(Collections.singletonList( - new VoteResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions(Collections.singletonList(partitionData)))); - - if (apiVersion >= 1) { - Optional address = endpoints.address(listenerName); - if (address.isPresent() && leaderId >= 0) { - // Populate the node endpoints - VoteResponseData.NodeEndpointCollection nodeEndpoints = new VoteResponseData.NodeEndpointCollection(1); - nodeEndpoints.add( - new VoteResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) - ); - response.setNodeEndpoints(nodeEndpoints); - } - } - - return response; - } - - public static FetchSnapshotRequestData 
singletonFetchSnapshotRequest( - String clusterId, - ReplicaKey replicaKey, - TopicPartition topicPartition, - int epoch, - OffsetAndEpoch offsetAndEpoch, - int maxBytes, - long position - ) { - FetchSnapshotRequestData.SnapshotId snapshotId = new FetchSnapshotRequestData.SnapshotId() - .setEndOffset(offsetAndEpoch.offset()) - .setEpoch(offsetAndEpoch.epoch()); - - FetchSnapshotRequestData.PartitionSnapshot partitionSnapshot = new FetchSnapshotRequestData.PartitionSnapshot() - .setPartition(topicPartition.partition()) - .setCurrentLeaderEpoch(epoch) - .setSnapshotId(snapshotId) - .setPosition(position) - .setReplicaDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); - - return new FetchSnapshotRequestData() - .setClusterId(clusterId) - .setReplicaId(replicaKey.id()) - .setMaxBytes(maxBytes) - .setTopics( - Collections.singletonList( - new FetchSnapshotRequestData.TopicSnapshot() - .setName(topicPartition.topic()) - .setPartitions(Collections.singletonList(partitionSnapshot)) - ) - ); - } - - /** - * Creates a FetchSnapshotResponseData with a single PartitionSnapshot for the topic partition. - * - * The partition index will already be populated when calling operator. - * - * @param listenerName the listener used to accept the request - * @param apiVersion the api version of the request - * @param topicPartition the topic partition to include - * @param leaderId the id of the leader - * @param endpoints the endpoints of the leader - * @param operator unary operator responsible for populating all of the appropriate fields - * @return the created fetch snapshot response data - */ - public static FetchSnapshotResponseData singletonFetchSnapshotResponse( - ListenerName listenerName, - short apiVersion, - TopicPartition topicPartition, - int leaderId, - Endpoints endpoints, - UnaryOperator operator - ) { - FetchSnapshotResponseData.PartitionSnapshot partitionSnapshot = operator.apply( - new FetchSnapshotResponseData.PartitionSnapshot().setIndex(topicPartition.partition()) - ); - - FetchSnapshotResponseData response = new FetchSnapshotResponseData() - .setTopics( - Collections.singletonList( - new FetchSnapshotResponseData.TopicSnapshot() - .setName(topicPartition.topic()) - .setPartitions(Collections.singletonList(partitionSnapshot)) - ) - ); - - if (apiVersion >= 1) { - Optional address = endpoints.address(listenerName); - if (address.isPresent() && leaderId >= 0) { - // Populate the node endpoints - FetchSnapshotResponseData.NodeEndpointCollection nodeEndpoints = - new FetchSnapshotResponseData.NodeEndpointCollection(1); - nodeEndpoints.add( - new FetchSnapshotResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) - ); - response.setNodeEndpoints(nodeEndpoints); - } - } - - return response; - } - - public static BeginQuorumEpochRequestData singletonBeginQuorumEpochRequest( - TopicPartition topicPartition, - String clusterId, - int leaderEpoch, - int leaderId, - Endpoints leaderEndpoints, - ReplicaKey voterKey - ) { - return new BeginQuorumEpochRequestData() - .setClusterId(clusterId) - .setVoterId(voterKey.id()) - .setTopics( - Collections.singletonList( - new BeginQuorumEpochRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new BeginQuorumEpochRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setLeaderEpoch(leaderEpoch) - .setLeaderId(leaderId) - 
.setVoterDirectoryId(voterKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - ) - ) - ) - ) - .setLeaderEndpoints(leaderEndpoints.toBeginQuorumEpochRequest()); - } - - public static BeginQuorumEpochResponseData singletonBeginQuorumEpochResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - Endpoints endpoints - ) { - BeginQuorumEpochResponseData response = new BeginQuorumEpochResponseData() - .setErrorCode(topLevelError.code()) - .setTopics( - Collections.singletonList( - new BeginQuorumEpochResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new BeginQuorumEpochResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - ) - ) - ) - ); - - if (apiVersion >= 1) { - Optional address = endpoints.address(listenerName); - if (address.isPresent() && leaderId >= 0) { - // Populate the node endpoints - BeginQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = - new BeginQuorumEpochResponseData.NodeEndpointCollection(1); - nodeEndpoints.add( - new BeginQuorumEpochResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) - ); - response.setNodeEndpoints(nodeEndpoints); - } - } - - return response; - } - - public static EndQuorumEpochRequestData singletonEndQuorumEpochRequest( - TopicPartition topicPartition, - String clusterId, - int leaderEpoch, - int leaderId, - List preferredReplicaKeys - ) { - List preferredSuccessors = preferredReplicaKeys - .stream() - .map(ReplicaKey::id) - .collect(Collectors.toList()); - - List preferredCandidates = preferredReplicaKeys - .stream() - .map(replicaKey -> new EndQuorumEpochRequestData.ReplicaInfo() - .setCandidateId(replicaKey.id()) - .setCandidateDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - ) - .collect(Collectors.toList()); - - return new EndQuorumEpochRequestData() - .setClusterId(clusterId) - .setTopics( - Collections.singletonList( - new EndQuorumEpochRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new EndQuorumEpochRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setLeaderEpoch(leaderEpoch) - .setLeaderId(leaderId) - .setPreferredSuccessors(preferredSuccessors) - .setPreferredCandidates(preferredCandidates) - ) - ) - ) - ); - - } - - public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - Endpoints endpoints - ) { - EndQuorumEpochResponseData response = new EndQuorumEpochResponseData() - .setErrorCode(topLevelError.code()) - .setTopics(Collections.singletonList( - new EndQuorumEpochResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions(Collections.singletonList( - new EndQuorumEpochResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - ))) - ); - - if (apiVersion >= 1) { - Optional address = endpoints.address(listenerName); - if (address.isPresent() && leaderId >= 0) { - // Populate the node endpoints - EndQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = - new 
EndQuorumEpochResponseData.NodeEndpointCollection(1); - nodeEndpoints.add( - new EndQuorumEpochResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) - ); - response.setNodeEndpoints(nodeEndpoints); - } - } - - return response; - } - - - public static DescribeQuorumRequestData singletonDescribeQuorumRequest( - TopicPartition topicPartition - ) { - - return new DescribeQuorumRequestData() - .setTopics( - Collections.singletonList( - new DescribeQuorumRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new DescribeQuorumRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - ) - ) - ) - ); - } - - public static DescribeQuorumResponseData singletonDescribeQuorumResponse( - short apiVersion, - TopicPartition topicPartition, - int leaderId, - int leaderEpoch, - long highWatermark, - Collection voters, - Collection observers, - long currentTimeMs - ) { - DescribeQuorumResponseData response = new DescribeQuorumResponseData() - .setTopics( - Collections.singletonList( - new DescribeQuorumResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new DescribeQuorumResponseData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setErrorCode(Errors.NONE.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - .setHighWatermark(highWatermark) - .setCurrentVoters(toReplicaStates(apiVersion, leaderId, voters, currentTimeMs)) - .setObservers(toReplicaStates(apiVersion, leaderId, observers, currentTimeMs)))))); - if (apiVersion >= 2) { - DescribeQuorumResponseData.NodeCollection nodes = new DescribeQuorumResponseData.NodeCollection(voters.size()); - for (LeaderState.ReplicaState voter : voters) { - nodes.add( - new DescribeQuorumResponseData.Node() - .setNodeId(voter.replicaKey().id()) - .setListeners(voter.listeners().toDescribeQuorumResponseListeners()) - ); - } - response.setNodes(nodes); - } - return response; - } - - public static AddRaftVoterRequestData addVoterRequest( - String clusterId, - int timeoutMs, - ReplicaKey voter, - Endpoints listeners - ) { - return new AddRaftVoterRequestData() - .setClusterId(clusterId) - .setTimeoutMs(timeoutMs) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - .setListeners(listeners.toAddVoterRequest()); - } - - public static AddRaftVoterResponseData addVoterResponse( - Errors error, - String errorMessage - ) { - errorMessage = errorMessage == null ? error.message() : errorMessage; - - return new AddRaftVoterResponseData() - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static RemoveRaftVoterRequestData removeVoterRequest( - String clusterId, - ReplicaKey voter - ) { - return new RemoveRaftVoterRequestData() - .setClusterId(clusterId) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); - } - - public static RemoveRaftVoterResponseData removeVoterResponse( - Errors error, - String errorMessage - ) { - errorMessage = errorMessage == null ? 
error.message() : errorMessage; - - return new RemoveRaftVoterResponseData() - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static UpdateRaftVoterRequestData updateVoterRequest( - String clusterId, - ReplicaKey voter, - int epoch, - SupportedVersionRange supportedVersions, - Endpoints endpoints - ) { - UpdateRaftVoterRequestData request = new UpdateRaftVoterRequestData() - .setClusterId(clusterId) - .setCurrentLeaderEpoch(epoch) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - .setListeners(endpoints.toUpdateVoterRequest()); - - request.kRaftVersionFeature() - .setMinSupportedVersion(supportedVersions.min()) - .setMaxSupportedVersion(supportedVersions.max()); - - return request; - } - - public static UpdateRaftVoterResponseData updateVoterResponse( - Errors error, - ListenerName listenerName, - LeaderAndEpoch leaderAndEpoch, - Endpoints endpoints - ) { - UpdateRaftVoterResponseData response = new UpdateRaftVoterResponseData() - .setErrorCode(error.code()); - - response.currentLeader() - .setLeaderId(leaderAndEpoch.leaderId().orElse(-1)) - .setLeaderEpoch(leaderAndEpoch.epoch()); - - Optional address = endpoints.address(listenerName); - if (address.isPresent()) { - response.currentLeader() - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()); - } - - return response; - } - - private static List toReplicaStates( - short apiVersion, - int leaderId, - Collection states, - long currentTimeMs - ) { - return states - .stream() - .map(replicaState -> toReplicaState(apiVersion, leaderId, replicaState, currentTimeMs)) - .collect(Collectors.toList()); - } - - private static DescribeQuorumResponseData.ReplicaState toReplicaState( - short apiVersion, - int leaderId, - LeaderState.ReplicaState replicaState, - long currentTimeMs - ) { - final long lastCaughtUpTimestamp; - final long lastFetchTimestamp; - if (replicaState.replicaKey().id() == leaderId) { - lastCaughtUpTimestamp = currentTimeMs; - lastFetchTimestamp = currentTimeMs; - } else { - lastCaughtUpTimestamp = replicaState.lastCaughtUpTimestamp(); - lastFetchTimestamp = replicaState.lastFetchTimestamp(); - } - DescribeQuorumResponseData.ReplicaState replicaStateData = new DescribeQuorumResponseData.ReplicaState() - .setReplicaId(replicaState.replicaKey().id()) - .setLogEndOffset(replicaState.endOffset().map(LogOffsetMetadata::offset).orElse(-1L)) - .setLastCaughtUpTimestamp(lastCaughtUpTimestamp) - .setLastFetchTimestamp(lastFetchTimestamp); - - if (apiVersion >= 2) { - replicaStateData.setReplicaDirectoryId(replicaState.replicaKey().directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); - } - return replicaStateData; - } - - public static Optional voteRequestVoterKey( - VoteRequestData request, - VoteRequestData.PartitionData partition - ) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), partition.voterDirectoryId())); - } - } - - public static Optional beginQuorumEpochRequestVoterKey( - BeginQuorumEpochRequestData request, - BeginQuorumEpochRequestData.PartitionData partition - ) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), partition.voterDirectoryId())); - } - } - - public static Optional addVoterRequestVoterKey(AddRaftVoterRequestData request) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), 
request.voterDirectoryId())); - } - } - - public static Optional removeVoterRequestVoterKey(RemoveRaftVoterRequestData request) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); - } - } - - public static Optional updateVoterRequestVoterKey(UpdateRaftVoterRequestData request) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); - } - } - - static boolean hasValidTopicPartition(FetchRequestData data, TopicPartition topicPartition, Uuid topicId) { - return data.topics().size() == 1 && - data.topics().get(0).topicId().equals(topicId) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partition() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(FetchResponseData data, TopicPartition topicPartition, Uuid topicId) { - return data.responses().size() == 1 && - data.responses().get(0).topicId().equals(topicId) && - data.responses().get(0).partitions().size() == 1 && - data.responses().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(VoteResponseData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(VoteRequestData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(BeginQuorumEpochRequestData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(BeginQuorumEpochResponseData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(EndQuorumEpochRequestData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(EndQuorumEpochResponseData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } - - static boolean hasValidTopicPartition(DescribeQuorumRequestData data, TopicPartition topicPartition) { - return data.topics().size() == 1 && - 
data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); - } -} diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java index 5cd4070868ff4..7d512eb0174a9 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/AddVoterHandler.java @@ -30,7 +30,7 @@ import org.apache.kafka.raft.LogOffsetMetadata; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; -import org.apache.kafka.raft.utils.VoteRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.server.common.KRaftVersion; import org.slf4j.Logger; @@ -92,7 +92,7 @@ public CompletableFuture handleAddVoterRequest( // Check if there are any pending voter change requests if (leaderState.isOperationPending(currentTimeMs)) { return CompletableFuture.completedFuture( - VoteRpc.addVoterResponse( + DynamicReconfigRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to handle previous voter change request" ) @@ -103,7 +103,7 @@ public CompletableFuture handleAddVoterRequest( Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); if (highWatermark.isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.addVoterResponse( + DynamicReconfigRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to establish HWM and fence previous voter changes" ) @@ -114,7 +114,7 @@ public CompletableFuture handleAddVoterRequest( KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!kraftVersion.isReconfigSupported()) { return CompletableFuture.completedFuture( - VoteRpc.addVoterResponse( + DynamicReconfigRpc.addVoterResponse( Errors.UNSUPPORTED_VERSION, String.format( "Cluster doesn't support adding voter because the %s feature is %s", @@ -129,7 +129,7 @@ public CompletableFuture handleAddVoterRequest( Optional> votersEntry = partitionState.lastVoterSetEntry(); if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( - VoteRpc.addVoterResponse( + DynamicReconfigRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, String.format( "Request timed out waiting for voters to commit the latest voter change at %s with HWM %d", @@ -144,7 +144,7 @@ public CompletableFuture handleAddVoterRequest( VoterSet voters = votersEntry.get().value(); if (voters.voterIds().contains(voterKey.id())) { return CompletableFuture.completedFuture( - VoteRpc.addVoterResponse( + DynamicReconfigRpc.addVoterResponse( Errors.DUPLICATE_VOTER, String.format( "The voter id for %s is already part of the set of voters %s.", @@ -174,7 +174,7 @@ public CompletableFuture handleAddVoterRequest( ); if (timeout.isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.addVoterResponse( + DynamicReconfigRpc.addVoterResponse( Errors.REQUEST_TIMED_OUT, String.format("New voter %s is not ready to receive requests", voterKey) ) diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java index 40cbb92cb12f9..320e7c982ef7c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java +++ 
b/raft/src/main/java/org/apache/kafka/raft/internals/RemoveVoterHandler.java @@ -25,7 +25,7 @@ import org.apache.kafka.raft.LogOffsetMetadata; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; -import org.apache.kafka.raft.utils.VoteRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.server.common.KRaftVersion; import org.slf4j.Logger; @@ -82,7 +82,7 @@ public CompletableFuture handleRemoveVoterRequest( // Check if there are any pending voter change requests if (leaderState.isOperationPending(currentTimeMs)) { return CompletableFuture.completedFuture( - VoteRpc.removeVoterResponse( + DynamicReconfigRpc.removeVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to handle previous voter change request" ) @@ -93,7 +93,7 @@ public CompletableFuture handleRemoveVoterRequest( Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); if (highWatermark.isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.removeVoterResponse( + DynamicReconfigRpc.removeVoterResponse( Errors.REQUEST_TIMED_OUT, "Request timed out waiting for leader to establish HWM and fence previous voter changes" ) @@ -104,7 +104,7 @@ public CompletableFuture handleRemoveVoterRequest( KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!kraftVersion.isReconfigSupported()) { return CompletableFuture.completedFuture( - VoteRpc.removeVoterResponse( + DynamicReconfigRpc.removeVoterResponse( Errors.UNSUPPORTED_VERSION, String.format( "Cluster doesn't support removing voter because the %s feature is %s", @@ -119,7 +119,7 @@ public CompletableFuture handleRemoveVoterRequest( Optional> votersEntry = partitionState.lastVoterSetEntry(); if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( - VoteRpc.removeVoterResponse( + DynamicReconfigRpc.removeVoterResponse( Errors.REQUEST_TIMED_OUT, String.format( "Request timed out waiting for voters to commit the latest voter change at %s with HWM %d", @@ -134,7 +134,7 @@ public CompletableFuture handleRemoveVoterRequest( Optional newVoters = votersEntry.get().value().removeVoter(voterKey); if (newVoters.isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.removeVoterResponse( + DynamicReconfigRpc.removeVoterResponse( Errors.VOTER_NOT_FOUND, String.format( "Cannot remove voter %s from the set of voters %s", diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java index e4cf8846b15e8..2137d64211c7a 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandler.java @@ -28,7 +28,7 @@ import org.apache.kafka.raft.LogOffsetMetadata; import org.apache.kafka.raft.ReplicaKey; import org.apache.kafka.raft.VoterSet; -import org.apache.kafka.raft.utils.VoteRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.server.common.KRaftVersion; import java.util.Optional; @@ -83,7 +83,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check if there are any pending voter change requests if (leaderState.isOperationPending(currentTimeMs)) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.REQUEST_TIMED_OUT, requestListenerName, new LeaderAndEpoch( @@ -99,7 +99,7 @@ public 
CompletableFuture handleUpdateVoterRequest( Optional highWatermark = leaderState.highWatermark().map(LogOffsetMetadata::offset); if (highWatermark.isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.REQUEST_TIMED_OUT, requestListenerName, new LeaderAndEpoch( @@ -116,7 +116,7 @@ public CompletableFuture handleUpdateVoterRequest( KRaftVersion kraftVersion = partitionState.lastKraftVersion(); if (!kraftVersion.isReconfigSupported()) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.UNSUPPORTED_VERSION, requestListenerName, new LeaderAndEpoch( @@ -132,7 +132,7 @@ public CompletableFuture handleUpdateVoterRequest( Optional> votersEntry = partitionState.lastVoterSetEntry(); if (votersEntry.isEmpty() || votersEntry.get().offset() >= highWatermark.get()) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.REQUEST_TIMED_OUT, requestListenerName, new LeaderAndEpoch( @@ -147,7 +147,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check that the supported version range is valid if (!validVersionRange(kraftVersion, supportedKraftVersions)) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestListenerName, new LeaderAndEpoch( @@ -162,7 +162,7 @@ public CompletableFuture handleUpdateVoterRequest( // Check that endpoinds includes the default listener if (voterEndpoints.address(defaultListenerName).isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.INVALID_REQUEST, requestListenerName, new LeaderAndEpoch( @@ -190,7 +190,7 @@ public CompletableFuture handleUpdateVoterRequest( ); if (updatedVoters.isEmpty()) { return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.VOTER_NOT_FOUND, requestListenerName, new LeaderAndEpoch( @@ -206,7 +206,7 @@ public CompletableFuture handleUpdateVoterRequest( // Reply immediately and don't wait for the change to commit return CompletableFuture.completedFuture( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( Errors.NONE, requestListenerName, new LeaderAndEpoch( diff --git a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java index f4db6831375a4..e62f29f2a02bc 100644 --- a/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java +++ b/raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java @@ -22,7 +22,7 @@ import org.apache.kafka.common.utils.Timer; import org.apache.kafka.raft.Endpoints; import org.apache.kafka.raft.LeaderAndEpoch; -import org.apache.kafka.raft.utils.VoteRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import java.util.concurrent.CompletableFuture; @@ -57,7 +57,7 @@ public void completeFuture( Endpoints leaderEndpoints ) { future.complete( - VoteRpc.updateVoterResponse( + DynamicReconfigRpc.updateVoterResponse( error, requestListenerName, leaderAndEpoch, diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java new file mode 100644 index 0000000000000..4c2531ddbf8ef --- /dev/null +++ 
b/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.raft.utils;
+
+import org.apache.kafka.common.feature.SupportedVersionRange;
+import org.apache.kafka.common.message.AddRaftVoterRequestData;
+import org.apache.kafka.common.message.AddRaftVoterResponseData;
+import org.apache.kafka.common.message.RemoveRaftVoterRequestData;
+import org.apache.kafka.common.message.RemoveRaftVoterResponseData;
+import org.apache.kafka.common.message.UpdateRaftVoterRequestData;
+import org.apache.kafka.common.message.UpdateRaftVoterResponseData;
+import org.apache.kafka.common.network.ListenerName;
+import org.apache.kafka.common.protocol.Errors;
+import org.apache.kafka.raft.Endpoints;
+import org.apache.kafka.raft.LeaderAndEpoch;
+import org.apache.kafka.raft.ReplicaKey;
+
+import java.net.InetSocketAddress;
+import java.util.Optional;
+
+public class DynamicReconfigRpc {
+    public static AddRaftVoterRequestData addVoterRequest(
+        String clusterId,
+        int timeoutMs,
+        ReplicaKey voter,
+        Endpoints listeners
+    ) {
+        return new AddRaftVoterRequestData()
+            .setClusterId(clusterId)
+            .setTimeoutMs(timeoutMs)
+            .setVoterId(voter.id())
+            .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID))
+            .setListeners(listeners.toAddVoterRequest());
+    }
+
+    public static AddRaftVoterResponseData addVoterResponse(
+        Errors error,
+        String errorMessage
+    ) {
+        errorMessage = errorMessage == null ? error.message() : errorMessage;
+
+        return new AddRaftVoterResponseData()
+            .setErrorCode(error.code())
+            .setErrorMessage(errorMessage);
+    }
+
+    public static RemoveRaftVoterRequestData removeVoterRequest(
+        String clusterId,
+        ReplicaKey voter
+    ) {
+        return new RemoveRaftVoterRequestData()
+            .setClusterId(clusterId)
+            .setVoterId(voter.id())
+            .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID));
+    }
+
+    public static RemoveRaftVoterResponseData removeVoterResponse(
+        Errors error,
+        String errorMessage
+    ) {
+        errorMessage = errorMessage == null ? error.message() : errorMessage;
+
+        return new RemoveRaftVoterResponseData()
+            .setErrorCode(error.code())
+            .setErrorMessage(errorMessage);
+    }
+
+    public static UpdateRaftVoterRequestData updateVoterRequest(
+        String clusterId,
+        ReplicaKey voter,
+        int epoch,
+        SupportedVersionRange supportedVersions,
+        Endpoints endpoints
+    ) {
+        UpdateRaftVoterRequestData request = new UpdateRaftVoterRequestData()
+            .setClusterId(clusterId)
+            .setCurrentLeaderEpoch(epoch)
+            .setVoterId(voter.id())
+            .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID))
+            .setListeners(endpoints.toUpdateVoterRequest());
+
+        request.kRaftVersionFeature()
+            .setMinSupportedVersion(supportedVersions.min())
+            .setMaxSupportedVersion(supportedVersions.max());
+
+        return request;
+    }
+
+    public static UpdateRaftVoterResponseData updateVoterResponse(
+        Errors error,
+        ListenerName listenerName,
+        LeaderAndEpoch leaderAndEpoch,
+        Endpoints endpoints
+    ) {
+        UpdateRaftVoterResponseData response = new UpdateRaftVoterResponseData()
+            .setErrorCode(error.code());
+
+        response.currentLeader()
+            .setLeaderId(leaderAndEpoch.leaderId().orElse(-1))
+            .setLeaderEpoch(leaderAndEpoch.epoch());
+
+        Optional<InetSocketAddress> address = endpoints.address(listenerName);
+        if (address.isPresent()) {
+            response.currentLeader()
+                .setHost(address.get().getHostString())
+                .setPort(address.get().getPort());
+        }
+
+        return response;
+    }
+
+    public static Optional<ReplicaKey> addVoterRequestVoterKey(AddRaftVoterRequestData request) {
+        if (request.voterId() < 0) {
+            return Optional.empty();
+        } else {
+            return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId()));
+        }
+    }
+
+    public static Optional<ReplicaKey> removeVoterRequestVoterKey(RemoveRaftVoterRequestData request) {
+        if (request.voterId() < 0) {
+            return Optional.empty();
+        } else {
+            return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId()));
+        }
+    }
+
+    public static Optional<ReplicaKey> updateVoterRequestVoterKey(UpdateRaftVoterRequestData request) {
+        if (request.voterId() < 0) {
+            return Optional.empty();
+        } else {
+            return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId()));
+        }
+    }
+}
diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java
index c75a61998e28c..fd0ddbf8b4fcb 100644
--- a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java
+++ b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java
@@ -17,19 +17,11 @@
 package org.apache.kafka.raft.utils;
 
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.feature.SupportedVersionRange;
-import org.apache.kafka.common.message.AddRaftVoterRequestData;
-import org.apache.kafka.common.message.AddRaftVoterResponseData;
-import org.apache.kafka.common.message.RemoveRaftVoterRequestData;
-import org.apache.kafka.common.message.RemoveRaftVoterResponseData;
-import org.apache.kafka.common.message.UpdateRaftVoterRequestData;
-import org.apache.kafka.common.message.UpdateRaftVoterResponseData;
 import org.apache.kafka.common.message.VoteRequestData;
 import org.apache.kafka.common.message.VoteResponseData;
 import org.apache.kafka.common.network.ListenerName;
 import org.apache.kafka.common.protocol.Errors;
 import org.apache.kafka.raft.Endpoints;
-import org.apache.kafka.raft.LeaderAndEpoch;
 import org.apache.kafka.raft.ReplicaKey;
 
 import java.net.InetSocketAddress;
@@ -40,11 +32,12 @@ public class VoteRpc {
     public static VoteRequestData singletonVoteRequest(
         TopicPartition topicPartition,
         String
clusterId, - int candidateEpoch, - ReplicaKey candidateKey, + int replicaEpoch, + ReplicaKey replicaKey, ReplicaKey voterKey, int lastEpoch, - long lastEpochEndOffset + long lastEpochEndOffset, + boolean preVote ) { return new VoteRequestData() .setClusterId(clusterId) @@ -57,10 +50,10 @@ public static VoteRequestData singletonVoteRequest( Collections.singletonList( new VoteRequestData.PartitionData() .setPartitionIndex(topicPartition.partition()) - .setCandidateEpoch(candidateEpoch) - .setCandidateId(candidateKey.id()) - .setCandidateDirectoryId( - candidateKey + .setReplicaEpoch(replicaEpoch) + .setReplicaId(replicaKey.id()) + .setReplicaDirectoryId( + replicaKey .directoryId() .orElse(ReplicaKey.NO_DIRECTORY_ID) ) @@ -71,6 +64,7 @@ public static VoteRequestData singletonVoteRequest( ) .setLastOffsetEpoch(lastEpoch) .setLastOffset(lastEpochEndOffset) + .setPreVote(preVote) ) ) ) @@ -129,120 +123,6 @@ public static Optional voteRequestVoterKey( } } - public static AddRaftVoterRequestData addVoterRequest( - String clusterId, - int timeoutMs, - ReplicaKey voter, - Endpoints listeners - ) { - return new AddRaftVoterRequestData() - .setClusterId(clusterId) - .setTimeoutMs(timeoutMs) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - .setListeners(listeners.toAddVoterRequest()); - } - - public static AddRaftVoterResponseData addVoterResponse( - Errors error, - String errorMessage - ) { - errorMessage = errorMessage == null ? error.message() : errorMessage; - - return new AddRaftVoterResponseData() - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static RemoveRaftVoterRequestData removeVoterRequest( - String clusterId, - ReplicaKey voter - ) { - return new RemoveRaftVoterRequestData() - .setClusterId(clusterId) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); - } - - public static RemoveRaftVoterResponseData removeVoterResponse( - Errors error, - String errorMessage - ) { - errorMessage = errorMessage == null ? 
error.message() : errorMessage; - - return new RemoveRaftVoterResponseData() - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static UpdateRaftVoterRequestData updateVoterRequest( - String clusterId, - ReplicaKey voter, - int epoch, - SupportedVersionRange supportedVersions, - Endpoints endpoints - ) { - UpdateRaftVoterRequestData request = new UpdateRaftVoterRequestData() - .setClusterId(clusterId) - .setCurrentLeaderEpoch(epoch) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - .setListeners(endpoints.toUpdateVoterRequest()); - - request.kRaftVersionFeature() - .setMinSupportedVersion(supportedVersions.min()) - .setMaxSupportedVersion(supportedVersions.max()); - - return request; - } - - public static UpdateRaftVoterResponseData updateVoterResponse( - Errors error, - ListenerName listenerName, - LeaderAndEpoch leaderAndEpoch, - Endpoints endpoints - ) { - UpdateRaftVoterResponseData response = new UpdateRaftVoterResponseData() - .setErrorCode(error.code()); - - response.currentLeader() - .setLeaderId(leaderAndEpoch.leaderId().orElse(-1)) - .setLeaderEpoch(leaderAndEpoch.epoch()); - - Optional address = endpoints.address(listenerName); - if (address.isPresent()) { - response.currentLeader() - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()); - } - - return response; - } - - public static Optional addVoterRequestVoterKey(AddRaftVoterRequestData request) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); - } - } - - public static Optional removeVoterRequestVoterKey(RemoveRaftVoterRequestData request) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); - } - } - - public static Optional updateVoterRequestVoterKey(UpdateRaftVoterRequestData request) { - if (request.voterId() < 0) { - return Optional.empty(); - } else { - return Optional.of(ReplicaKey.of(request.voterId(), request.voterDirectoryId())); - } - } - public static boolean hasValidTopicPartition(VoteResponseData data, TopicPartition topicPartition) { return data.topics().size() == 1 && data.topics().get(0).topicName().equals(topicPartition.topic()) && diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java index 98ccad2532413..2b504fe9b970f 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaNetworkChannelTest.java @@ -50,6 +50,7 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.raft.utils.FetchRpc; import org.apache.kafka.raft.utils.FetchSnapshotRpc; @@ -311,7 +312,7 @@ private ApiMessage buildTestRequest(ApiKeys key) { ); case UPDATE_RAFT_VOTER: - return RaftUtil.updateVoterRequest( + return DynamicReconfigRpc.updateVoterRequest( clusterId, ReplicaKey.of(1, ReplicaKey.NO_DIRECTORY_ID), 5, diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index 698e7eecfb68e..764bbaadaf8d6 100644 --- 
a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -65,13 +65,14 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.raft.internals.BatchBuilder; import org.apache.kafka.raft.internals.StringSerde; -import org.apache.kafka.server.common.Feature; import org.apache.kafka.raft.utils.BeginQuorumEpochRpc; import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.DynamicReconfigRpc; import org.apache.kafka.raft.utils.EndQuorumEpochRpc; import org.apache.kafka.raft.utils.FetchRpc; import org.apache.kafka.raft.utils.FetchSnapshotRpc; import org.apache.kafka.raft.utils.VoteRpc; +import org.apache.kafka.server.common.Feature; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.server.common.serialization.RecordSerde; import org.apache.kafka.snapshot.RecordsSnapshotWriter; @@ -1780,7 +1781,7 @@ AddRaftVoterRequestData addVoterRequest( ReplicaKey voter, Endpoints endpoints ) { - return VoteRpc.addVoterRequest( + return DynamicReconfigRpc.addVoterRequest( clusterId, timeoutMs, voter, @@ -1793,7 +1794,7 @@ RemoveRaftVoterRequestData removeVoterRequest(ReplicaKey voter) { } RemoveRaftVoterRequestData removeVoterRequest(String cluster, ReplicaKey voter) { - return VoteRpc.removeVoterRequest(cluster, voter); + return DynamicReconfigRpc.removeVoterRequest(cluster, voter); } UpdateRaftVoterRequestData updateVoterRequest( @@ -1811,14 +1812,14 @@ UpdateRaftVoterRequestData updateVoterRequest( SupportedVersionRange supportedVersions, Endpoints endpoints ) { - return VoteRpc.updateVoterRequest(clusterId, voter, epoch, supportedVersions, endpoints); + return DynamicReconfigRpc.updateVoterRequest(clusterId, voter, epoch, supportedVersions, endpoints); } UpdateRaftVoterResponseData updateVoterResponse( Errors error, LeaderAndEpoch leaderAndEpoch ) { - return VoteRpc.updateVoterResponse( + return DynamicReconfigRpc.updateVoterResponse( error, channel.listenerName(), leaderAndEpoch, diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java b/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java index ca100fa523996..f8707a663c40f 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java @@ -51,6 +51,13 @@ import org.apache.kafka.common.record.Records; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.raft.utils.ApiMessageUtils; +import org.apache.kafka.raft.utils.BeginQuorumEpochRpc; +import org.apache.kafka.raft.utils.DescribeQuorumRpc; +import org.apache.kafka.raft.utils.EndQuorumEpochRpc; +import org.apache.kafka.raft.utils.FetchRpc; +import org.apache.kafka.raft.utils.FetchSnapshotRpc; +import org.apache.kafka.raft.utils.VoteRpc; import com.fasterxml.jackson.databind.JsonNode; @@ -81,16 +88,16 @@ public class RaftUtilTest { @Test public void testErrorResponse() { assertEquals(new VoteResponseData().setErrorCode(Errors.NONE.code()), - RaftUtil.errorResponse(ApiKeys.VOTE, Errors.NONE)); + ApiMessageUtils.parseErrorResponse(ApiKeys.VOTE, Errors.NONE)); assertEquals(new BeginQuorumEpochResponseData().setErrorCode(Errors.NONE.code()), - RaftUtil.errorResponse(ApiKeys.BEGIN_QUORUM_EPOCH, Errors.NONE)); + ApiMessageUtils.parseErrorResponse(ApiKeys.BEGIN_QUORUM_EPOCH, Errors.NONE)); assertEquals(new EndQuorumEpochResponseData().setErrorCode(Errors.NONE.code()), - 
RaftUtil.errorResponse(ApiKeys.END_QUORUM_EPOCH, Errors.NONE)); + ApiMessageUtils.parseErrorResponse(ApiKeys.END_QUORUM_EPOCH, Errors.NONE)); assertEquals(new FetchResponseData().setErrorCode(Errors.NONE.code()), - RaftUtil.errorResponse(ApiKeys.FETCH, Errors.NONE)); + ApiMessageUtils.parseErrorResponse(ApiKeys.FETCH, Errors.NONE)); assertEquals(new FetchSnapshotResponseData().setErrorCode(Errors.NONE.code()), - RaftUtil.errorResponse(ApiKeys.FETCH_SNAPSHOT, Errors.NONE)); - assertThrows(IllegalArgumentException.class, () -> RaftUtil.errorResponse(ApiKeys.PRODUCE, Errors.NONE)); + ApiMessageUtils.parseErrorResponse(ApiKeys.FETCH_SNAPSHOT, Errors.NONE)); + assertThrows(IllegalArgumentException.class, () -> ApiMessageUtils.parseErrorResponse(ApiKeys.PRODUCE, Errors.NONE)); } private static Stream singletonFetchRequestTestCases() { @@ -340,7 +347,7 @@ private static Stream describeQuorumResponseTestCases() { @ParameterizedTest @MethodSource("singletonFetchRequestTestCases") public void testSingletonFetchRequestForAllVersion(final FetchRequestTestCase testCase) { - FetchRequestData fetchRequestData = RaftUtil.singletonFetchRequest(topicPartition, Uuid.ONE_UUID, + FetchRequestData fetchRequestData = FetchRpc.singletonFetchRequest(topicPartition, Uuid.ONE_UUID, partition -> partition .setPartitionMaxBytes(10) .setCurrentLeaderEpoch(5) @@ -364,7 +371,7 @@ public void testSingletonFetchResponseForAllVersion(final FetchResponseTestCase final int producerId = 1; final int firstOffset = 10; - FetchResponseData fetchResponseData = RaftUtil.singletonFetchResponse( + FetchResponseData fetchResponseData = FetchRpc.singletonFetchResponse( listenerName, testCase.version, topicPartition, @@ -400,7 +407,7 @@ public void testSingletonVoteRequestForAllVersion(final short version, final Str int lastEpoch = 1000; long lastEpochOffset = 1000; - VoteRequestData voteRequestData = RaftUtil.singletonVoteRequest( + VoteRequestData voteRequestData = VoteRpc.singletonVoteRequest( topicPartition, clusterId, replicaEpoch, @@ -420,7 +427,7 @@ public void testSingletonVoteResponseForAllVersion(final short version, final St int leaderEpoch = 1; int leaderId = 1; - VoteResponseData voteResponseData = RaftUtil.singletonVoteResponse( + VoteResponseData voteResponseData = VoteRpc.singletonVoteResponse( listenerName, version, Errors.NONE, @@ -444,7 +451,7 @@ public void testSingletonFetchSnapshotRequestForAllVersion(final short version, int maxBytes = 1000; int position = 10; - FetchSnapshotRequestData fetchSnapshotRequestData = RaftUtil.singletonFetchSnapshotRequest( + FetchSnapshotRequestData fetchSnapshotRequestData = FetchSnapshotRpc.singletonFetchSnapshotRequest( clusterId, ReplicaKey.of(1, directoryId), topicPartition, @@ -462,7 +469,7 @@ public void testSingletonFetchSnapshotRequestForAllVersion(final short version, public void testSingletonFetchSnapshotResponseForAllVersion(final short version, final String expectedJson) { int leaderId = 1; - FetchSnapshotResponseData fetchSnapshotResponseData = RaftUtil.singletonFetchSnapshotResponse( + FetchSnapshotResponseData fetchSnapshotResponseData = FetchSnapshotRpc.singletonFetchSnapshotResponse( listenerName, version, topicPartition, @@ -481,7 +488,7 @@ public void testSingletonBeginQuorumEpochRequestForAllVersion(final short versio int leaderEpoch = 1; int leaderId = 1; - BeginQuorumEpochRequestData beginQuorumEpochRequestData = RaftUtil.singletonBeginQuorumEpochRequest( + BeginQuorumEpochRequestData beginQuorumEpochRequestData = 
BeginQuorumEpochRpc.singletonBeginQuorumEpochRequest(
             topicPartition,
             clusterId,
             leaderEpoch,
@@ -499,7 +506,7 @@ public void testSingletonBeginQuorumEpochResponseForAllVersion(final short versi
         int leaderEpoch = 1;
         int leaderId = 1;
 
-        BeginQuorumEpochResponseData beginQuorumEpochResponseData = RaftUtil.singletonBeginQuorumEpochResponse(
+        BeginQuorumEpochResponseData beginQuorumEpochResponseData = BeginQuorumEpochRpc.singletonBeginQuorumEpochResponse(
             listenerName,
             version,
             Errors.NONE,
@@ -519,7 +526,7 @@ public void testSingletonEndQuorumEpochRequestForAllVersion(final short version,
         int leaderEpoch = 1;
         int leaderId = 1;
 
-        EndQuorumEpochRequestData endQuorumEpochRequestData = RaftUtil.singletonEndQuorumEpochRequest(
+        EndQuorumEpochRequestData endQuorumEpochRequestData = EndQuorumEpochRpc.singletonEndQuorumEpochRequest(
             topicPartition,
             clusterId,
             leaderEpoch,
@@ -536,7 +543,7 @@ public void testSingletonEndQuorumEpochResponseForAllVersion(final short version
         int leaderEpoch = 1;
         int leaderId = 1;
 
-        EndQuorumEpochResponseData endQuorumEpochResponseData = RaftUtil.singletonEndQuorumEpochResponse(
+        EndQuorumEpochResponseData endQuorumEpochResponseData = EndQuorumEpochRpc.singletonEndQuorumEpochResponse(
             listenerName,
             version,
             Errors.NONE,
@@ -553,7 +560,7 @@ public void testSingletonEndQuorumEpochResponseForAllVersion
     @ParameterizedTest
     @MethodSource("describeQuorumRequestTestCases")
     public void testSingletonDescribeQuorumRequestForAllVersion(final short version, final String expectedJson) {
-        DescribeQuorumRequestData describeQuorumRequestData = RaftUtil.singletonDescribeQuorumRequest(topicPartition);
+        DescribeQuorumRequestData describeQuorumRequestData = DescribeQuorumRpc.singletonDescribeQuorumRequest(topicPartition);
         JsonNode json = DescribeQuorumRequestDataJsonConverter.write(describeQuorumRequestData, version);
         assertEquals(expectedJson, json.toString());
     }
@@ -568,7 +575,7 @@ public void testSingletonDescribeQuorumResponseForAllVersion(final short version
 
         LeaderState.ReplicaState replicaState = new LeaderState.ReplicaState(replicaKey, true, Endpoints.empty());
 
-        DescribeQuorumResponseData describeQuorumResponseData = RaftUtil.singletonDescribeQuorumResponse(
+        DescribeQuorumResponseData describeQuorumResponseData = DescribeQuorumRpc.singletonDescribeQuorumResponse(
             version,
             topicPartition,
             leaderId,

From 25bff77885fb64b149482fc8b9a79b02813bbeba Mon Sep 17 00:00:00 2001
From: frankvicky
Date: Tue, 14 Jan 2025 08:47:40 +0800
Subject: [PATCH 5/7] KAFKA-16907: reformat

---
 .../kafka/raft/utils/BeginQuorumEpochRpc.java | 114 +++++++--------
 .../kafka/raft/utils/DescribeQuorumRpc.java   | 110 +++++++-------
 .../kafka/raft/utils/DynamicReconfigRpc.java  |  86 +++++------
 .../kafka/raft/utils/EndQuorumEpochRpc.java   | 120 ++++++++--------
 .../org/apache/kafka/raft/utils/FetchRpc.java |  70 ++++-----
 .../kafka/raft/utils/FetchSnapshotRpc.java    |  98 ++++++-------
 .../org/apache/kafka/raft/utils/VoteRpc.java  | 134 +++++++---------
 .../org/apache/kafka/raft/RaftUtilTest.java   |   4 +-
 8 files changed, 368 insertions(+), 368 deletions(-)

diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java
index ad836fc751f5e..b09d4e4677f8c 100644
--- a/raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java
+++ b/raft/src/main/java/org/apache/kafka/raft/utils/BeginQuorumEpochRpc.java
@@ -31,72 +31,72 @@ public class BeginQuorumEpochRpc {
     public static
BeginQuorumEpochRequestData singletonBeginQuorumEpochRequest( - TopicPartition topicPartition, - String clusterId, - int leaderEpoch, - int leaderId, - Endpoints leaderEndpoints, - ReplicaKey voterKey + TopicPartition topicPartition, + String clusterId, + int leaderEpoch, + int leaderId, + Endpoints leaderEndpoints, + ReplicaKey voterKey ) { return new BeginQuorumEpochRequestData() - .setClusterId(clusterId) - .setVoterId(voterKey.id()) - .setTopics( - Collections.singletonList( - new BeginQuorumEpochRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new BeginQuorumEpochRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setLeaderEpoch(leaderEpoch) - .setLeaderId(leaderId) - .setVoterDirectoryId(voterKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - ) - ) + .setClusterId(clusterId) + .setVoterId(voterKey.id()) + .setTopics( + Collections.singletonList( + new BeginQuorumEpochRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new BeginQuorumEpochRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setLeaderEpoch(leaderEpoch) + .setLeaderId(leaderId) + .setVoterDirectoryId(voterKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + ) ) ) - .setLeaderEndpoints(leaderEndpoints.toBeginQuorumEpochRequest()); + ) + .setLeaderEndpoints(leaderEndpoints.toBeginQuorumEpochRequest()); } public static BeginQuorumEpochResponseData singletonBeginQuorumEpochResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - Endpoints endpoints + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + Endpoints endpoints ) { BeginQuorumEpochResponseData response = new BeginQuorumEpochResponseData() - .setErrorCode(topLevelError.code()) - .setTopics( - Collections.singletonList( - new BeginQuorumEpochResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new BeginQuorumEpochResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - ) - ) + .setErrorCode(topLevelError.code()) + .setTopics( + Collections.singletonList( + new BeginQuorumEpochResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new BeginQuorumEpochResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + ) ) - ); + ) + ); if (apiVersion >= 1) { Optional address = endpoints.address(listenerName); if (address.isPresent() && leaderId >= 0) { // Populate the node endpoints BeginQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = - new BeginQuorumEpochResponseData.NodeEndpointCollection(1); + new BeginQuorumEpochResponseData.NodeEndpointCollection(1); nodeEndpoints.add( - new BeginQuorumEpochResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) + new BeginQuorumEpochResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) ); response.setNodeEndpoints(nodeEndpoints); } @@ -107,21 +107,21 @@ public static 
BeginQuorumEpochResponseData singletonBeginQuorumEpochResponse( public static boolean hasValidTopicPartition(BeginQuorumEpochRequestData data, TopicPartition topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } public static boolean hasValidTopicPartition(BeginQuorumEpochResponseData data, TopicPartition topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } public static Optional beginQuorumEpochRequestVoterKey( - BeginQuorumEpochRequestData request, - BeginQuorumEpochRequestData.PartitionData partition + BeginQuorumEpochRequestData request, + BeginQuorumEpochRequestData.PartitionData partition ) { if (request.voterId() < 0) { return Optional.empty(); diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java index f0dc34a391a04..9b695513410a4 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java @@ -31,55 +31,55 @@ public class DescribeQuorumRpc { public static DescribeQuorumRequestData singletonDescribeQuorumRequest( - TopicPartition topicPartition + TopicPartition topicPartition ) { return new DescribeQuorumRequestData() - .setTopics( - Collections.singletonList( - new DescribeQuorumRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new DescribeQuorumRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - ) - ) + .setTopics( + Collections.singletonList( + new DescribeQuorumRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new DescribeQuorumRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + ) ) - ); + ) + ); } public static DescribeQuorumResponseData singletonDescribeQuorumResponse( - short apiVersion, - TopicPartition topicPartition, - int leaderId, - int leaderEpoch, - long highWatermark, - Collection voters, - Collection observers, - long currentTimeMs + short apiVersion, + TopicPartition topicPartition, + int leaderId, + int leaderEpoch, + long highWatermark, + Collection voters, + Collection observers, + long currentTimeMs ) { DescribeQuorumResponseData response = new DescribeQuorumResponseData() - .setTopics( - Collections.singletonList( - new DescribeQuorumResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new DescribeQuorumResponseData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setErrorCode(Errors.NONE.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - 
.setHighWatermark(highWatermark) - .setCurrentVoters(toReplicaStates(apiVersion, leaderId, voters, currentTimeMs)) - .setObservers(toReplicaStates(apiVersion, leaderId, observers, currentTimeMs)))))); + .setTopics( + Collections.singletonList( + new DescribeQuorumResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new DescribeQuorumResponseData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setErrorCode(Errors.NONE.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + .setHighWatermark(highWatermark) + .setCurrentVoters(toReplicaStates(apiVersion, leaderId, voters, currentTimeMs)) + .setObservers(toReplicaStates(apiVersion, leaderId, observers, currentTimeMs)))))); if (apiVersion >= 2) { DescribeQuorumResponseData.NodeCollection nodes = new DescribeQuorumResponseData.NodeCollection(voters.size()); for (LeaderState.ReplicaState voter : voters) { nodes.add( - new DescribeQuorumResponseData.Node() - .setNodeId(voter.replicaKey().id()) - .setListeners(voter.listeners().toDescribeQuorumResponseListeners()) + new DescribeQuorumResponseData.Node() + .setNodeId(voter.replicaKey().id()) + .setListeners(voter.listeners().toDescribeQuorumResponseListeners()) ); } response.setNodes(nodes); @@ -88,22 +88,22 @@ public static DescribeQuorumResponseData singletonDescribeQuorumResponse( } private static List toReplicaStates( - short apiVersion, - int leaderId, - Collection states, - long currentTimeMs + short apiVersion, + int leaderId, + Collection states, + long currentTimeMs ) { return states - .stream() - .map(replicaState -> toReplicaState(apiVersion, leaderId, replicaState, currentTimeMs)) - .collect(Collectors.toList()); + .stream() + .map(replicaState -> toReplicaState(apiVersion, leaderId, replicaState, currentTimeMs)) + .collect(Collectors.toList()); } private static DescribeQuorumResponseData.ReplicaState toReplicaState( - short apiVersion, - int leaderId, - LeaderState.ReplicaState replicaState, - long currentTimeMs + short apiVersion, + int leaderId, + LeaderState.ReplicaState replicaState, + long currentTimeMs ) { final long lastCaughtUpTimestamp; final long lastFetchTimestamp; @@ -115,10 +115,10 @@ private static DescribeQuorumResponseData.ReplicaState toReplicaState( lastFetchTimestamp = replicaState.lastFetchTimestamp(); } DescribeQuorumResponseData.ReplicaState replicaStateData = new DescribeQuorumResponseData.ReplicaState() - .setReplicaId(replicaState.replicaKey().id()) - .setLogEndOffset(replicaState.endOffset().map(LogOffsetMetadata::offset).orElse(-1L)) - .setLastCaughtUpTimestamp(lastCaughtUpTimestamp) - .setLastFetchTimestamp(lastFetchTimestamp); + .setReplicaId(replicaState.replicaKey().id()) + .setLogEndOffset(replicaState.endOffset().map(LogOffsetMetadata::offset).orElse(-1L)) + .setLastCaughtUpTimestamp(lastCaughtUpTimestamp) + .setLastFetchTimestamp(lastFetchTimestamp); if (apiVersion >= 2) { replicaStateData.setReplicaDirectoryId(replicaState.replicaKey().directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); @@ -128,8 +128,8 @@ private static DescribeQuorumResponseData.ReplicaState toReplicaState( public static boolean hasValidTopicPartition(DescribeQuorumRequestData data, TopicPartition topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + 
data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } } diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java index 4c2531ddbf8ef..78f1069995326 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/DynamicReconfigRpc.java @@ -34,90 +34,90 @@ public class DynamicReconfigRpc { public static AddRaftVoterRequestData addVoterRequest( - String clusterId, - int timeoutMs, - ReplicaKey voter, - Endpoints listeners + String clusterId, + int timeoutMs, + ReplicaKey voter, + Endpoints listeners ) { return new AddRaftVoterRequestData() - .setClusterId(clusterId) - .setTimeoutMs(timeoutMs) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - .setListeners(listeners.toAddVoterRequest()); + .setClusterId(clusterId) + .setTimeoutMs(timeoutMs) + .setVoterId(voter.id()) + .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + .setListeners(listeners.toAddVoterRequest()); } public static AddRaftVoterResponseData addVoterResponse( - Errors error, - String errorMessage + Errors error, + String errorMessage ) { errorMessage = errorMessage == null ? error.message() : errorMessage; return new AddRaftVoterResponseData() - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); } public static RemoveRaftVoterRequestData removeVoterRequest( - String clusterId, - ReplicaKey voter + String clusterId, + ReplicaKey voter ) { return new RemoveRaftVoterRequestData() - .setClusterId(clusterId) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); + .setClusterId(clusterId) + .setVoterId(voter.id()) + .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); } public static RemoveRaftVoterResponseData removeVoterResponse( - Errors error, - String errorMessage + Errors error, + String errorMessage ) { errorMessage = errorMessage == null ? 
error.message() : errorMessage; return new RemoveRaftVoterResponseData() - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); } public static UpdateRaftVoterRequestData updateVoterRequest( - String clusterId, - ReplicaKey voter, - int epoch, - SupportedVersionRange supportedVersions, - Endpoints endpoints + String clusterId, + ReplicaKey voter, + int epoch, + SupportedVersionRange supportedVersions, + Endpoints endpoints ) { UpdateRaftVoterRequestData request = new UpdateRaftVoterRequestData() - .setClusterId(clusterId) - .setCurrentLeaderEpoch(epoch) - .setVoterId(voter.id()) - .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - .setListeners(endpoints.toUpdateVoterRequest()); + .setClusterId(clusterId) + .setCurrentLeaderEpoch(epoch) + .setVoterId(voter.id()) + .setVoterDirectoryId(voter.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + .setListeners(endpoints.toUpdateVoterRequest()); request.kRaftVersionFeature() - .setMinSupportedVersion(supportedVersions.min()) - .setMaxSupportedVersion(supportedVersions.max()); + .setMinSupportedVersion(supportedVersions.min()) + .setMaxSupportedVersion(supportedVersions.max()); return request; } public static UpdateRaftVoterResponseData updateVoterResponse( - Errors error, - ListenerName listenerName, - LeaderAndEpoch leaderAndEpoch, - Endpoints endpoints + Errors error, + ListenerName listenerName, + LeaderAndEpoch leaderAndEpoch, + Endpoints endpoints ) { UpdateRaftVoterResponseData response = new UpdateRaftVoterResponseData() - .setErrorCode(error.code()); + .setErrorCode(error.code()); response.currentLeader() - .setLeaderId(leaderAndEpoch.leaderId().orElse(-1)) - .setLeaderEpoch(leaderAndEpoch.epoch()); + .setLeaderId(leaderAndEpoch.leaderId().orElse(-1)) + .setLeaderEpoch(leaderAndEpoch.epoch()); Optional address = endpoints.address(listenerName); if (address.isPresent()) { response.currentLeader() - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()); + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()); } return response; diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java index 86701023eafca..7ecf8ea8e73e5 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java @@ -32,79 +32,79 @@ public class EndQuorumEpochRpc { public static EndQuorumEpochRequestData singletonEndQuorumEpochRequest( - TopicPartition topicPartition, - String clusterId, - int leaderEpoch, - int leaderId, - List preferredReplicaKeys + TopicPartition topicPartition, + String clusterId, + int leaderEpoch, + int leaderId, + List preferredReplicaKeys ) { List preferredSuccessors = preferredReplicaKeys - .stream() - .map(ReplicaKey::id) - .collect(Collectors.toList()); + .stream() + .map(ReplicaKey::id) + .collect(Collectors.toList()); List preferredCandidates = preferredReplicaKeys - .stream() - .map(replicaKey -> new EndQuorumEpochRequestData.ReplicaInfo() - .setCandidateId(replicaKey.id()) - .setCandidateDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) - ) - .collect(Collectors.toList()); + .stream() + .map(replicaKey -> new EndQuorumEpochRequestData.ReplicaInfo() + .setCandidateId(replicaKey.id()) + .setCandidateDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)) + ) + 
.collect(Collectors.toList()); return new EndQuorumEpochRequestData() - .setClusterId(clusterId) - .setTopics( - Collections.singletonList( - new EndQuorumEpochRequestData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions( - Collections.singletonList( - new EndQuorumEpochRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setLeaderEpoch(leaderEpoch) - .setLeaderId(leaderId) - .setPreferredSuccessors(preferredSuccessors) - .setPreferredCandidates(preferredCandidates) - ) - ) + .setClusterId(clusterId) + .setTopics( + Collections.singletonList( + new EndQuorumEpochRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new EndQuorumEpochRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setLeaderEpoch(leaderEpoch) + .setLeaderId(leaderId) + .setPreferredSuccessors(preferredSuccessors) + .setPreferredCandidates(preferredCandidates) + ) ) - ); + ) + ); } public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - Endpoints endpoints + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + Endpoints endpoints ) { EndQuorumEpochResponseData response = new EndQuorumEpochResponseData() - .setErrorCode(topLevelError.code()) - .setTopics(Collections.singletonList( - new EndQuorumEpochResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions(Collections.singletonList( - new EndQuorumEpochResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - ))) - ); + .setErrorCode(topLevelError.code()) + .setTopics(Collections.singletonList( + new EndQuorumEpochResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions(Collections.singletonList( + new EndQuorumEpochResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + ))) + ); if (apiVersion >= 1) { Optional address = endpoints.address(listenerName); if (address.isPresent() && leaderId >= 0) { // Populate the node endpoints EndQuorumEpochResponseData.NodeEndpointCollection nodeEndpoints = - new EndQuorumEpochResponseData.NodeEndpointCollection(1); + new EndQuorumEpochResponseData.NodeEndpointCollection(1); nodeEndpoints.add( - new EndQuorumEpochResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) + new EndQuorumEpochResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) ); response.setNodeEndpoints(nodeEndpoints); } @@ -115,15 +115,15 @@ public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( public static boolean hasValidTopicPartition(EndQuorumEpochRequestData data, TopicPartition topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + 
data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } public static boolean hasValidTopicPartition(EndQuorumEpochResponseData data, TopicPartition topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } } diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java index 8e5ed5b685fa0..fb1e8d90cc76a 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/FetchRpc.java @@ -31,47 +31,47 @@ public class FetchRpc { public static FetchRequestData singletonFetchRequest( - TopicPartition topicPartition, - Uuid topicId, - Consumer partitionConsumer + TopicPartition topicPartition, + Uuid topicId, + Consumer partitionConsumer ) { FetchRequestData.FetchPartition fetchPartition = - new FetchRequestData.FetchPartition() - .setPartition(topicPartition.partition()); + new FetchRequestData.FetchPartition() + .setPartition(topicPartition.partition()); partitionConsumer.accept(fetchPartition); FetchRequestData.FetchTopic fetchTopic = - new FetchRequestData.FetchTopic() - .setTopic(topicPartition.topic()) - .setTopicId(topicId) - .setPartitions(Collections.singletonList(fetchPartition)); + new FetchRequestData.FetchTopic() + .setTopic(topicPartition.topic()) + .setTopicId(topicId) + .setPartitions(Collections.singletonList(fetchPartition)); return new FetchRequestData() - .setTopics(Collections.singletonList(fetchTopic)); + .setTopics(Collections.singletonList(fetchTopic)); } public static FetchResponseData singletonFetchResponse( - ListenerName listenerName, - short apiVersion, - TopicPartition topicPartition, - Uuid topicId, - Errors topLevelError, - int leaderId, - Endpoints endpoints, - Consumer partitionConsumer + ListenerName listenerName, + short apiVersion, + TopicPartition topicPartition, + Uuid topicId, + Errors topLevelError, + int leaderId, + Endpoints endpoints, + Consumer partitionConsumer ) { FetchResponseData.PartitionData fetchablePartition = - new FetchResponseData.PartitionData(); + new FetchResponseData.PartitionData(); fetchablePartition.setPartitionIndex(topicPartition.partition()); partitionConsumer.accept(fetchablePartition); FetchResponseData.FetchableTopicResponse fetchableTopic = - new FetchResponseData.FetchableTopicResponse() - .setTopic(topicPartition.topic()) - .setTopicId(topicId) - .setPartitions(Collections.singletonList(fetchablePartition)); + new FetchResponseData.FetchableTopicResponse() + .setTopic(topicPartition.topic()) + .setTopicId(topicId) + .setPartitions(Collections.singletonList(fetchablePartition)); FetchResponseData response = new FetchResponseData(); @@ -81,31 +81,31 @@ public static FetchResponseData singletonFetchResponse( // Populate the node endpoints FetchResponseData.NodeEndpointCollection nodeEndpoints = new FetchResponseData.NodeEndpointCollection(1); nodeEndpoints.add( - new FetchResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) + new FetchResponseData.NodeEndpoint() + 
.setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) ); response.setNodeEndpoints(nodeEndpoints); } } return response - .setErrorCode(topLevelError.code()) - .setResponses(Collections.singletonList(fetchableTopic)); + .setErrorCode(topLevelError.code()) + .setResponses(Collections.singletonList(fetchableTopic)); } public static boolean hasValidTopicPartition(FetchRequestData data, TopicPartition topicPartition, Uuid topicId) { return data.topics().size() == 1 && - data.topics().get(0).topicId().equals(topicId) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partition() == topicPartition.partition(); + data.topics().get(0).topicId().equals(topicId) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partition() == topicPartition.partition(); } public static boolean hasValidTopicPartition(FetchResponseData data, TopicPartition topicPartition, Uuid topicId) { return data.responses().size() == 1 && - data.responses().get(0).topicId().equals(topicId) && - data.responses().get(0).partitions().size() == 1 && - data.responses().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.responses().get(0).topicId().equals(topicId) && + data.responses().get(0).partitions().size() == 1 && + data.responses().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } } diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java index 30b3280efcf13..5a3568429dd47 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/FetchSnapshotRpc.java @@ -31,83 +31,83 @@ public class FetchSnapshotRpc { public static FetchSnapshotRequestData singletonFetchSnapshotRequest( - String clusterId, - ReplicaKey replicaKey, - TopicPartition topicPartition, - int epoch, - OffsetAndEpoch offsetAndEpoch, - int maxBytes, - long position + String clusterId, + ReplicaKey replicaKey, + TopicPartition topicPartition, + int epoch, + OffsetAndEpoch offsetAndEpoch, + int maxBytes, + long position ) { FetchSnapshotRequestData.SnapshotId snapshotId = new FetchSnapshotRequestData.SnapshotId() - .setEndOffset(offsetAndEpoch.offset()) - .setEpoch(offsetAndEpoch.epoch()); + .setEndOffset(offsetAndEpoch.offset()) + .setEpoch(offsetAndEpoch.epoch()); FetchSnapshotRequestData.PartitionSnapshot partitionSnapshot = new FetchSnapshotRequestData.PartitionSnapshot() - .setPartition(topicPartition.partition()) - .setCurrentLeaderEpoch(epoch) - .setSnapshotId(snapshotId) - .setPosition(position) - .setReplicaDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); + .setPartition(topicPartition.partition()) + .setCurrentLeaderEpoch(epoch) + .setSnapshotId(snapshotId) + .setPosition(position) + .setReplicaDirectoryId(replicaKey.directoryId().orElse(ReplicaKey.NO_DIRECTORY_ID)); return new FetchSnapshotRequestData() - .setClusterId(clusterId) - .setReplicaId(replicaKey.id()) - .setMaxBytes(maxBytes) - .setTopics( - Collections.singletonList( - new FetchSnapshotRequestData.TopicSnapshot() - .setName(topicPartition.topic()) - .setPartitions(Collections.singletonList(partitionSnapshot)) - ) - ); + .setClusterId(clusterId) + .setReplicaId(replicaKey.id()) + .setMaxBytes(maxBytes) + .setTopics( + Collections.singletonList( + new FetchSnapshotRequestData.TopicSnapshot() + 
.setName(topicPartition.topic()) + .setPartitions(Collections.singletonList(partitionSnapshot)) + ) + ); } /** * Creates a FetchSnapshotResponseData with a single PartitionSnapshot for the topic partition. - * + *
* The partition index will already be populated when calling operator. * - * @param listenerName the listener used to accept the request - * @param apiVersion the api version of the request + * @param listenerName the listener used to accept the request + * @param apiVersion the api version of the request * @param topicPartition the topic partition to include - * @param leaderId the id of the leader - * @param endpoints the endpoints of the leader - * @param operator unary operator responsible for populating all of the appropriate fields + * @param leaderId the id of the leader + * @param endpoints the endpoints of the leader + * @param operator unary operator responsible for populating all of the appropriate fields * @return the created fetch snapshot response data */ public static FetchSnapshotResponseData singletonFetchSnapshotResponse( - ListenerName listenerName, - short apiVersion, - TopicPartition topicPartition, - int leaderId, - Endpoints endpoints, - UnaryOperator operator + ListenerName listenerName, + short apiVersion, + TopicPartition topicPartition, + int leaderId, + Endpoints endpoints, + UnaryOperator operator ) { FetchSnapshotResponseData.PartitionSnapshot partitionSnapshot = operator.apply( - new FetchSnapshotResponseData.PartitionSnapshot().setIndex(topicPartition.partition()) + new FetchSnapshotResponseData.PartitionSnapshot().setIndex(topicPartition.partition()) ); FetchSnapshotResponseData response = new FetchSnapshotResponseData() - .setTopics( - Collections.singletonList( - new FetchSnapshotResponseData.TopicSnapshot() - .setName(topicPartition.topic()) - .setPartitions(Collections.singletonList(partitionSnapshot)) - ) - ); + .setTopics( + Collections.singletonList( + new FetchSnapshotResponseData.TopicSnapshot() + .setName(topicPartition.topic()) + .setPartitions(Collections.singletonList(partitionSnapshot)) + ) + ); if (apiVersion >= 1) { Optional address = endpoints.address(listenerName); if (address.isPresent() && leaderId >= 0) { // Populate the node endpoints FetchSnapshotResponseData.NodeEndpointCollection nodeEndpoints = - new FetchSnapshotResponseData.NodeEndpointCollection(1); + new FetchSnapshotResponseData.NodeEndpointCollection(1); nodeEndpoints.add( - new FetchSnapshotResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) + new FetchSnapshotResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) ); response.setNodeEndpoints(nodeEndpoints); } diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java index fd0ddbf8b4fcb..1085c90828bb2 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java @@ -30,69 +30,69 @@ public class VoteRpc { public static VoteRequestData singletonVoteRequest( - TopicPartition topicPartition, - String clusterId, - int replicaEpoch, - ReplicaKey replicaKey, - ReplicaKey voterKey, - int lastEpoch, - long lastEpochEndOffset, - boolean preVote + TopicPartition topicPartition, + String clusterId, + int replicaEpoch, + ReplicaKey replicaKey, + ReplicaKey voterKey, + int lastEpoch, + long lastEpochEndOffset, + boolean preVote ) { return new VoteRequestData() - .setClusterId(clusterId) - .setVoterId(voterKey.id()) - .setTopics( - Collections.singletonList( - new VoteRequestData.TopicData() - .setTopicName(topicPartition.topic()) - 
.setPartitions( - Collections.singletonList( - new VoteRequestData.PartitionData() - .setPartitionIndex(topicPartition.partition()) - .setReplicaEpoch(replicaEpoch) - .setReplicaId(replicaKey.id()) - .setReplicaDirectoryId( - replicaKey - .directoryId() - .orElse(ReplicaKey.NO_DIRECTORY_ID) - ) - .setVoterDirectoryId( - voterKey - .directoryId() - .orElse(ReplicaKey.NO_DIRECTORY_ID) - ) - .setLastOffsetEpoch(lastEpoch) - .setLastOffset(lastEpochEndOffset) - .setPreVote(preVote) - ) - ) + .setClusterId(clusterId) + .setVoterId(voterKey.id()) + .setTopics( + Collections.singletonList( + new VoteRequestData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions( + Collections.singletonList( + new VoteRequestData.PartitionData() + .setPartitionIndex(topicPartition.partition()) + .setReplicaEpoch(replicaEpoch) + .setReplicaId(replicaKey.id()) + .setReplicaDirectoryId( + replicaKey + .directoryId() + .orElse(ReplicaKey.NO_DIRECTORY_ID) + ) + .setVoterDirectoryId( + voterKey + .directoryId() + .orElse(ReplicaKey.NO_DIRECTORY_ID) + ) + .setLastOffsetEpoch(lastEpoch) + .setLastOffset(lastEpochEndOffset) + .setPreVote(preVote) + ) ) - ); + ) + ); } public static VoteResponseData singletonVoteResponse( - ListenerName listenerName, - short apiVersion, - Errors topLevelError, - TopicPartition topicPartition, - Errors partitionLevelError, - int leaderEpoch, - int leaderId, - boolean voteGranted, - Endpoints endpoints + ListenerName listenerName, + short apiVersion, + Errors topLevelError, + TopicPartition topicPartition, + Errors partitionLevelError, + int leaderEpoch, + int leaderId, + boolean voteGranted, + Endpoints endpoints ) { VoteResponseData response = new VoteResponseData() - .setErrorCode(topLevelError.code()) - .setTopics(Collections.singletonList( - new VoteResponseData.TopicData() - .setTopicName(topicPartition.topic()) - .setPartitions(Collections.singletonList( - new VoteResponseData.PartitionData() - .setErrorCode(partitionLevelError.code()) - .setLeaderId(leaderId) - .setLeaderEpoch(leaderEpoch) - .setVoteGranted(voteGranted))))); + .setErrorCode(topLevelError.code()) + .setTopics(Collections.singletonList( + new VoteResponseData.TopicData() + .setTopicName(topicPartition.topic()) + .setPartitions(Collections.singletonList( + new VoteResponseData.PartitionData() + .setErrorCode(partitionLevelError.code()) + .setLeaderId(leaderId) + .setLeaderEpoch(leaderEpoch) + .setVoteGranted(voteGranted))))); if (apiVersion >= 1) { Optional address = endpoints.address(listenerName); @@ -100,10 +100,10 @@ public static VoteResponseData singletonVoteResponse( // Populate the node endpoints VoteResponseData.NodeEndpointCollection nodeEndpoints = new VoteResponseData.NodeEndpointCollection(1); nodeEndpoints.add( - new VoteResponseData.NodeEndpoint() - .setNodeId(leaderId) - .setHost(address.get().getHostString()) - .setPort(address.get().getPort()) + new VoteResponseData.NodeEndpoint() + .setNodeId(leaderId) + .setHost(address.get().getHostString()) + .setPort(address.get().getPort()) ); response.setNodeEndpoints(nodeEndpoints); } @@ -113,8 +113,8 @@ public static VoteResponseData singletonVoteResponse( } public static Optional voteRequestVoterKey( - VoteRequestData request, - VoteRequestData.PartitionData partition + VoteRequestData request, + VoteRequestData.PartitionData partition ) { if (request.voterId() < 0) { return Optional.empty(); @@ -125,15 +125,15 @@ public static Optional voteRequestVoterKey( public static boolean hasValidTopicPartition(VoteResponseData data, TopicPartition 
topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } public static boolean hasValidTopicPartition(VoteRequestData data, TopicPartition topicPartition) { return data.topics().size() == 1 && - data.topics().get(0).topicName().equals(topicPartition.topic()) && - data.topics().get(0).partitions().size() == 1 && - data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); + data.topics().get(0).topicName().equals(topicPartition.topic()) && + data.topics().get(0).partitions().size() == 1 && + data.topics().get(0).partitions().get(0).partitionIndex() == topicPartition.partition(); } } diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java b/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java index 0b2d3c73174c0..c9a439a72bc68 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftUtilTest.java @@ -355,7 +355,7 @@ public void testSingletonFetchRequestForAllVersion(final FetchRequestTestCase te @ParameterizedTest @MethodSource("singletonFetchRequestTestCases") public void testFetchRequestV17Compatibility(final FetchRequestTestCase testCase) { - FetchRequestData fetchRequestData = RaftUtil.singletonFetchRequest( + FetchRequestData fetchRequestData = FetchRpc.singletonFetchRequest( topicPartition, Uuid.ONE_UUID, partition -> partition @@ -489,7 +489,7 @@ public void testSingletonFetchSnapshotRequestV1Compatibility( int maxBytes = 1000; int position = 10; - FetchSnapshotRequestData fetchSnapshotRequestData = RaftUtil.singletonFetchSnapshotRequest( + FetchSnapshotRequestData fetchSnapshotRequestData = FetchSnapshotRpc.singletonFetchSnapshotRequest( clusterId, ReplicaKey.of(1, directoryId), topicPartition, From f924828b3b54eb07cfce07d90885a2ff7a65044c Mon Sep 17 00:00:00 2001 From: frankvicky Date: Tue, 21 Jan 2025 11:20:45 +0800 Subject: [PATCH 6/7] Fix conflicts --- .../org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java | 5 +++-- .../java/org/apache/kafka/raft/RaftClientTestContext.java | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java index 43cfeb29fe1b0..2d0225f533a20 100644 --- a/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java +++ b/raft/src/test/java/org/apache/kafka/raft/KafkaRaftClientPreVoteTest.java @@ -22,6 +22,7 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.raft.RaftClientTestContext.RaftProtocol; +import org.apache.kafka.raft.utils.ApiMessageUtils; import org.apache.kafka.server.common.KRaftVersion; import org.junit.jupiter.params.ParameterizedTest; @@ -795,7 +796,7 @@ public void testPreVoteNotSupportedByRemote(KRaftVersion kraftVersion) throws Ex context.deliverResponse( voteRequests.get(0).correlationId(), voteRequests.get(0).destination(), - RaftUtil.errorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) + ApiMessageUtils.parseErrorResponse(ApiKeys.VOTE, 
Errors.UNSUPPORTED_VERSION) ); // Local should transition to Candidate since it realizes remote node does not support PreVote. @@ -837,7 +838,7 @@ public void testPreVoteNotSupportedByRemote(KRaftVersion kraftVersion) throws Ex context.deliverResponse( voteRequests.get(1).correlationId(), voteRequests.get(1).destination(), - RaftUtil.errorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) + ApiMessageUtils.parseErrorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) ); context.client.poll(); assertEquals(epoch + 2, context.currentEpoch()); diff --git a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java index 4f35802c97199..54574528163d4 100644 --- a/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java +++ b/raft/src/test/java/org/apache/kafka/raft/RaftClientTestContext.java @@ -65,6 +65,7 @@ import org.apache.kafka.common.utils.Utils; import org.apache.kafka.raft.internals.BatchBuilder; import org.apache.kafka.raft.internals.StringSerde; +import org.apache.kafka.raft.utils.ApiMessageUtils; import org.apache.kafka.raft.utils.BeginQuorumEpochRpc; import org.apache.kafka.raft.utils.DescribeQuorumRpc; import org.apache.kafka.raft.utils.DynamicReconfigRpc; @@ -622,7 +623,7 @@ void expectAndGrantPreVotes(int epoch) throws Exception { deliverResponse( request.correlationId(), request.destination(), - RaftUtil.errorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) + ApiMessageUtils.parseErrorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION) ); } else { VoteResponseData voteResponse = voteResponse(true, OptionalInt.empty(), epoch); From 25c6477f008d4696d7d31b838ed132a115df114c Mon Sep 17 00:00:00 2001 From: frankvicky Date: Sat, 1 Feb 2025 10:47:54 +0800 Subject: [PATCH 7/7] Address comments about style --- .../kafka/raft/utils/ApiMessageUtils.java | 28 +++++++------------ .../kafka/raft/utils/DescribeQuorumRpc.java | 7 ++++- .../kafka/raft/utils/EndQuorumEpochRpc.java | 3 +- .../org/apache/kafka/raft/utils/VoteRpc.java | 5 +++- 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java b/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java index e4bac676ca80b..7807ce3c6918c 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/ApiMessageUtils.java @@ -29,23 +29,15 @@ public class ApiMessageUtils { public static ApiMessage parseErrorResponse(ApiKeys apiKey, Errors error) { - switch (apiKey) { - case VOTE: - return new VoteResponseData().setErrorCode(error.code()); - case BEGIN_QUORUM_EPOCH: - return new BeginQuorumEpochResponseData().setErrorCode(error.code()); - case END_QUORUM_EPOCH: - return new EndQuorumEpochResponseData().setErrorCode(error.code()); - case FETCH: - return new FetchResponseData().setErrorCode(error.code()); - case FETCH_SNAPSHOT: - return new FetchSnapshotResponseData().setErrorCode(error.code()); - case API_VERSIONS: - return new ApiVersionsResponseData().setErrorCode(error.code()); - case UPDATE_RAFT_VOTER: - return new UpdateRaftVoterResponseData().setErrorCode(error.code()); - default: - throw new IllegalArgumentException("Received response for unexpected request type: " + apiKey); - } + return switch (apiKey) { + case VOTE -> new VoteResponseData().setErrorCode(error.code()); + case BEGIN_QUORUM_EPOCH -> new BeginQuorumEpochResponseData().setErrorCode(error.code()); + case END_QUORUM_EPOCH -> new 
EndQuorumEpochResponseData().setErrorCode(error.code()); + case FETCH -> new FetchResponseData().setErrorCode(error.code()); + case FETCH_SNAPSHOT -> new FetchSnapshotResponseData().setErrorCode(error.code()); + case API_VERSIONS -> new ApiVersionsResponseData().setErrorCode(error.code()); + case UPDATE_RAFT_VOTER -> new UpdateRaftVoterResponseData().setErrorCode(error.code()); + default -> throw new IllegalArgumentException("Received response for unexpected request type: " + apiKey); + }; } } diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java index 9b695513410a4..197302ab7733d 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/DescribeQuorumRpc.java @@ -72,7 +72,12 @@ public static DescribeQuorumResponseData singletonDescribeQuorumResponse( .setLeaderEpoch(leaderEpoch) .setHighWatermark(highWatermark) .setCurrentVoters(toReplicaStates(apiVersion, leaderId, voters, currentTimeMs)) - .setObservers(toReplicaStates(apiVersion, leaderId, observers, currentTimeMs)))))); + .setObservers(toReplicaStates(apiVersion, leaderId, observers, currentTimeMs)) + ) + ) + ) + ); + if (apiVersion >= 2) { DescribeQuorumResponseData.NodeCollection nodes = new DescribeQuorumResponseData.NodeCollection(voters.size()); for (LeaderState.ReplicaState voter : voters) { diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java index 7ecf8ea8e73e5..d0d90b00f6800 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/EndQuorumEpochRpc.java @@ -91,7 +91,8 @@ public static EndQuorumEpochResponseData singletonEndQuorumEpochResponse( .setErrorCode(partitionLevelError.code()) .setLeaderId(leaderId) .setLeaderEpoch(leaderEpoch) - ))) + )) + ) ); if (apiVersion >= 1) { diff --git a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java index 1085c90828bb2..30033cf283811 100644 --- a/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java +++ b/raft/src/main/java/org/apache/kafka/raft/utils/VoteRpc.java @@ -92,7 +92,10 @@ public static VoteResponseData singletonVoteResponse( .setErrorCode(partitionLevelError.code()) .setLeaderId(leaderId) .setLeaderEpoch(leaderEpoch) - .setVoteGranted(voteGranted))))); + .setVoteGranted(voteGranted)) + ) + ) + ); if (apiVersion >= 1) { Optional address = endpoints.address(listenerName);
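// A minimal usage sketch for the helpers introduced by this patch, assuming only the
// public APIs shown above (ApiMessageUtils.parseErrorResponse and
// VoteRpc.hasValidTopicPartition). It is illustrative and not part of the change;
// the class name RaftRpcUtilsExample and the "__cluster_metadata" topic/partition
// used below are assumptions made for the example.
import java.util.Collections;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.VoteResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.raft.utils.ApiMessageUtils;
import org.apache.kafka.raft.utils.VoteRpc;

public class RaftRpcUtilsExample {
    public static void main(String[] args) {
        // Build the bare error response a node returns when, for example, a remote
        // replica does not support the requested version of the Vote RPC.
        ApiMessage error = ApiMessageUtils.parseErrorResponse(ApiKeys.VOTE, Errors.UNSUPPORTED_VERSION);
        System.out.println("error code: " + ((VoteResponseData) error).errorCode());

        // Verify that a vote response addresses the expected topic partition.
        TopicPartition metadataPartition = new TopicPartition("__cluster_metadata", 0);
        VoteResponseData response = new VoteResponseData()
            .setTopics(Collections.singletonList(
                new VoteResponseData.TopicData()
                    .setTopicName(metadataPartition.topic())
                    .setPartitions(Collections.singletonList(
                        new VoteResponseData.PartitionData()
                            .setPartitionIndex(metadataPartition.partition())))));
        System.out.println("valid partition: " + VoteRpc.hasValidTopicPartition(response, metadataPartition));
    }
}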