lianetm commented on code in PR #19515:
URL: https://github.com/apache/kafka/pull/19515#discussion_r2084965325
##########
clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java:
##########
@@ -52,13 +54,37 @@ public static class Builder extends
AbstractRequest.Builder<OffsetFetchRequest>
private final OffsetFetchRequestData data;
private final boolean throwOnFetchStableOffsetsUnsupported;
- public Builder(OffsetFetchRequestData data, boolean
throwOnFetchStableOffsetsUnsupported) {
- super(ApiKeys.OFFSET_FETCH);
+ public static Builder forTopicIdsOrNames(OffsetFetchRequestData data,
boolean throwOnFetchStableOffsetsUnsupported, boolean
enableUnstableLastVersion) {
+ return new Builder(
+ data,
+ throwOnFetchStableOffsetsUnsupported,
+ ApiKeys.OFFSET_FETCH.oldestVersion(),
+ ApiKeys.OFFSET_FETCH.latestVersion(enableUnstableLastVersion)
+ );
+ }
+
+ public static Builder forTopicNames(OffsetFetchRequestData data,
boolean throwOnFetchStableOffsetsUnsupported) {
+ return new Builder(
+ data,
+ throwOnFetchStableOffsetsUnsupported,
+ ApiKeys.OFFSET_FETCH.oldestVersion(),
+ (short) (TOPIC_ID_MIN_VERSION - 1)
+ );
+ }
+
+ private Builder(
+ OffsetFetchRequestData data,
+ boolean throwOnFetchStableOffsetsUnsupported,
+ short oldestAllowedVersion,
+ short latestAllowedVersion
+ ) {
+ super(ApiKeys.OFFSET_FETCH, oldestAllowedVersion,
latestAllowedVersion);
this.data = data;
this.throwOnFetchStableOffsetsUnsupported =
throwOnFetchStableOffsetsUnsupported;
}
@Override
+ @SuppressWarnings("checkstyle:cyclomaticComplexity")
Review Comment:
Would it help if we moved the logic that validates topic ID and name usage into a separate function? (This method is getting wild indeed — maybe we could also consider encapsulating the other validation sections consistently: batching, require-stable.)
##########
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala:
##########
@@ -8269,6 +8276,7 @@ class KafkaApisTest extends Logging {
.setGroupId("group-1")
.setTopics(List(
new OffsetFetchRequestData.OffsetFetchRequestTopics()
+ .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID)
.setName("foo")
Review Comment:
Shouldn't we set the name in the request only if version < 10?
##########
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala:
##########
@@ -8320,11 +8347,12 @@ class KafkaApisTest extends Logging {
).asJava)
).asJava)
+
Review Comment:
nit: extra line
##########
core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala:
##########
@@ -1197,15 +1201,17 @@ class ConsumerProtocolMigrationTest(cluster:
ClusterInstance) extends GroupCoord
).asJava)
).asJava),
fetchOffsets(
- groupId = groupId,
- memberId = memberId1,
- memberEpoch = 1,
- partitions = List(
- new TopicPartition("foo", 0),
- new TopicPartition("foo", 1),
- new TopicPartition("foo", 2)
- ),
- requireStable = false,
+ group = new OffsetFetchRequestData.OffsetFetchRequestGroup()
+ .setGroupId(groupId)
+ .setMemberId(memberId1)
+ .setMemberEpoch(1)
+ .setTopics(List(
+ new OffsetFetchRequestData.OffsetFetchRequestTopics()
+ .setName("foo")
+ .setTopicId(topicId)
+ .setPartitionIndexes(List[Integer](0, 1, 2).asJava)
+ ).asJava),
+ requireStable = true,
Review Comment:
is this change to true intentional?
##########
core/src/test/scala/unit/kafka/server/KafkaApisTest.scala:
##########
@@ -8349,23 +8377,168 @@ class KafkaApisTest extends Logging {
.setGroupId("group-4")
.setErrorCode(Errors.INVALID_GROUP_ID.code)
- val expectedGroups = List(group1Response, group2Response,
group3Response, group4Response)
+ val expectedGroups = List(expectedGroup1Response, group2Response,
group3Response, group4Response)
group1Future.complete(group1Response)
group2Future.complete(group2Response)
group3Future.completeExceptionally(Errors.INVALID_GROUP_ID.exception)
group4Future.complete(group4Response)
val response =
verifyNoThrottling[OffsetFetchResponse](requestChannelRequest)
- assertEquals(expectedGroups.toSet, response.data.groups().asScala.toSet)
+ assertEquals(expectedGroups.toSet, response.data.groups.asScala.toSet)
}
}
@ParameterizedTest
- @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH)
+ // We only test with topic ids.
+ @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 10)
+ def testHandleOffsetFetchWithUnknownTopicIds(version: Short): Unit = {
+ val foo = "foo"
+ val bar = "bar"
+ val fooId = Uuid.randomUuid()
+ val barId = Uuid.randomUuid()
+ addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2)
+
+ def makeRequest(version: Short): RequestChannel.Request = {
+ buildRequest(
+ OffsetFetchRequest.Builder.forTopicIdsOrNames(
+ new OffsetFetchRequestData()
+ .setGroups(List(
+ new OffsetFetchRequestData.OffsetFetchRequestGroup()
+ .setGroupId("group-1")
+ .setTopics(List(
+ new OffsetFetchRequestData.OffsetFetchRequestTopics()
+ .setName(foo)
+ .setTopicId(fooId)
+ .setPartitionIndexes(List[Integer](0).asJava),
+ // bar does not exist so it must return UNKNOWN_TOPIC_ID.
+ new OffsetFetchRequestData.OffsetFetchRequestTopics()
+ .setName(bar)
+ .setTopicId(barId)
+ .setPartitionIndexes(List[Integer](0).asJava)
+ ).asJava),
+ new OffsetFetchRequestData.OffsetFetchRequestGroup()
+ .setGroupId("group-2")
+ .setTopics(null)
+ ).asJava),
+ false,
+ true
+ ).build(version)
+ )
+ }
+
+ val requestChannelRequest = makeRequest(version)
+
+ val group1Future = new
CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]()
+ when(groupCoordinator.fetchOffsets(
+ requestChannelRequest.context,
+ new OffsetFetchRequestData.OffsetFetchRequestGroup()
+ .setGroupId("group-1")
+ .setTopics(List(
+ new OffsetFetchRequestData.OffsetFetchRequestTopics()
+ .setTopicId(fooId)
+ .setName("foo")
+ .setPartitionIndexes(List[Integer](0).asJava)).asJava),
+ false
+ )).thenReturn(group1Future)
+
+ val group2Future = new
CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]()
+ when(groupCoordinator.fetchAllOffsets(
+ requestChannelRequest.context,
+ new OffsetFetchRequestData.OffsetFetchRequestGroup()
+ .setGroupId("group-2")
+ .setTopics(null),
+ false
+ )).thenReturn(group2Future)
+
+ kafkaApis = createKafkaApis()
+ kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching)
+
+ val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup()
+ .setGroupId("group-1")
+ .setTopics(List(
+ new OffsetFetchResponseData.OffsetFetchResponseTopics()
+ .setTopicId(fooId)
+ .setName(foo)
+ .setPartitions(List(
+ new OffsetFetchResponseData.OffsetFetchResponsePartitions()
+ .setPartitionIndex(0)
+ .setCommittedOffset(100)
+ .setCommittedLeaderEpoch(1)
+ ).asJava)
+ ).asJava)
+
+ val group2Response = new OffsetFetchResponseData.OffsetFetchResponseGroup()
+ .setGroupId("group-2")
+ .setTopics(List(
+ new OffsetFetchResponseData.OffsetFetchResponseTopics()
+ .setName(foo)
+ .setPartitions(List(
+ new OffsetFetchResponseData.OffsetFetchResponsePartitions()
+ .setPartitionIndex(0)
+ .setCommittedOffset(100)
+ .setCommittedLeaderEpoch(1)
+ ).asJava),
+ // bar does not exist so it must be filtered out.
+ new OffsetFetchResponseData.OffsetFetchResponseTopics()
+ .setName(bar)
+ .setPartitions(List(
+ new OffsetFetchResponseData.OffsetFetchResponsePartitions()
+ .setPartitionIndex(0)
+ .setCommittedOffset(100)
+ .setCommittedLeaderEpoch(1)
+ ).asJava)
Review Comment:
Just for my understanding: this would be the case where bar does exist when the coordinator fetches the offsets, but by the time we build the response in KafkaApis it no longer exists (so it is filtered out) — correct?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]