lianetm commented on code in PR #15766:
URL: https://github.com/apache/kafka/pull/15766#discussion_r1585359721
##########
tools/src/test/java/org/apache/kafka/tools/consumer/group/DeleteConsumerGroupsTest.java:
##########
@@ -17,279 +17,332 @@
package org.apache.kafka.tools.consumer.group;
import joptsimple.OptionException;
+import kafka.test.ClusterInstance;
+import kafka.test.annotation.ClusterConfigProperty;
+import kafka.test.annotation.ClusterTest;
+import kafka.test.annotation.ClusterTestDefaults;
+import kafka.test.annotation.Type;
+import kafka.test.junit.ClusterTestExtensions;
+import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.GroupProtocol;
import org.apache.kafka.clients.consumer.RangeAssignor;
+import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.errors.GroupIdNotFoundException;
import org.apache.kafka.common.errors.GroupNotEmptyException;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.test.TestUtils;
import org.apache.kafka.tools.ToolsTestUtils;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
+import org.junit.jupiter.api.extension.ExtendWith;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
-import java.util.Properties;
import java.util.Set;
import java.util.function.Function;
+import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+import static org.apache.kafka.clients.consumer.GroupProtocol.CLASSIC;
+import static org.apache.kafka.clients.consumer.GroupProtocol.CONSUMER;
+import static org.apache.kafka.common.ConsumerGroupState.EMPTY;
+import static org.apache.kafka.common.ConsumerGroupState.STABLE;
+import static
org.apache.kafka.coordinator.group.GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG;
+import static
org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG;
+import static
org.apache.kafka.coordinator.group.GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-public class DeleteConsumerGroupsTest extends ConsumerGroupCommandTest {
- @ParameterizedTest
- @ValueSource(strings = {"zk", "kraft"})
- public void testDeleteWithTopicOption(String quorum) {
- createOffsetsTopic(listenerName(), new Properties());
- String[] cgcArgs = new String[]{"--bootstrap-server",
bootstrapServers(listenerName()), "--delete", "--group", GROUP, "--topic"};
- assertThrows(OptionException.class, () ->
getConsumerGroupService(cgcArgs));
- }
-
- @ParameterizedTest
- @ValueSource(strings = {"zk", "kraft"})
- public void testDeleteCmdNonExistingGroup(String quorum) {
- createOffsetsTopic(listenerName(), new Properties());
- String missingGroup = "missing.group";
- String[] cgcArgs = new String[]{"--bootstrap-server",
bootstrapServers(listenerName()), "--delete", "--group", missingGroup};
- ConsumerGroupCommand.ConsumerGroupService service =
getConsumerGroupService(cgcArgs);
-
- String output =
ToolsTestUtils.grabConsoleOutput(service::deleteGroups);
- assertTrue(output.contains("Group '" + missingGroup + "' could not be
deleted due to:") && output.contains(Errors.GROUP_ID_NOT_FOUND.message()),
- "The expected error (" + Errors.GROUP_ID_NOT_FOUND + ") was not
detected while deleting consumer group");
+@ExtendWith(value = ClusterTestExtensions.class)
+@ClusterTestDefaults(clusterType = Type.ALL, serverProperties = {
+ @ClusterConfigProperty(key = OFFSETS_TOPIC_PARTITIONS_CONFIG, value =
"1"),
+ @ClusterConfigProperty(key = OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG,
value = "1"),
+ @ClusterConfigProperty(key = NEW_GROUP_COORDINATOR_ENABLE_CONFIG,
value = "true")
Review Comment:
Hey @chia7712, I agree that when `NEW_GROUP_COORDINATOR_ENABLE_CONFIG` is
true, the KRaft broker will use the new coordinator, with ZK out of the
picture — all good there. But
> we still create LegacyConsumer to test the legacy coordinator by setting
group.protocol=classic
`LegacyConsumer` does not make the legacy coordinator kick in unless
`isNewGroupCoordinatorEnabled` is false, because the classic protocol is
supported by both coordinators. So using the `LegacyConsumer` implies the
classic protocol (the only one it supports), but the decision of which
coordinator will serve the classic protocol is made based on the broker config
that enables the new coordinator or not. Since we're always enabling the new
coordinator for this test, I expect we're never using the legacy coordinator
created
[here](https://github.com/apache/kafka/blob/1e8415160f96eb579ceaa3f89b3362f1deeccf6b/core/src/main/scala/kafka/server/BrokerServer.scala#L602)
> is there any risk of enabling NEW_GROUP_COORDINATOR_ENABLE_CONFIG to test
LegacyConsumer
Not at all — actually we want to test it. My point is just that we should
also test the old coordinator + LegacyConsumer, and the way to achieve that is
to run the LegacyConsumer with NEW_GROUP_COORDINATOR_ENABLE_CONFIG=false.
Does that make sense?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]