fvaleri commented on code in PR #16475:
URL: https://github.com/apache/kafka/pull/16475#discussion_r1666527349
##########
core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala:
##########
@@ -243,6 +246,209 @@ class DumpLogSegmentsTest {
assertEquals(Map.empty, errors.shallowOffsetNotFound)
}
@Test
def testDumpRemoteLogMetadataEmpty(): Unit = {
  // Decoding an empty __remote_log_metadata segment should still print the starting offset.
  val config = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
  log = LogTestUtils.createLog(logDir, config, new BrokerTopicStats, time.scheduler, time)

  val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath))
  assertTrue(output.contains("Log starting offset: 0"))
}
+
@Test
def testDumpRemoteLogMetadataOneRecordOneBatch(): Unit = {
  val topicId = Uuid.randomUuid
  val topicName = "foo"

  // A single DELETE_PARTITION_MARKED event serialized with the remote-log-metadata serde.
  val serde = new RemoteLogMetadataSerde
  val partition = new TopicIdPartition(topicId, new TopicPartition(topicName, 0))
  val deleteEvent = new RemotePartitionDeleteMetadata(partition,
    RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0)
  val records = Array(new SimpleRecord(null, serde.serialize(deleteEvent)))

  val config = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
  log = LogTestUtils.createLog(logDir, config, new BrokerTopicStats, time.scheduler, time)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records: _*), leaderEpoch = 0)
  log.flush(false)

  // The decoder prints the event's toString form; the mock clock makes eventTimestampMs 0.
  val expectedDeletePayload = s"RemotePartitionDeleteMetadata{topicPartition=$topicId:$topicName-0, " +
    "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}"

  val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath))
  assertTrue(output.contains("Log starting offset: 0"))
  assertTrue(output.contains(expectedDeletePayload))
}
+
@Test
def testDumpRemoteLogMetadataMultipleRecordsOneBatch(): Unit = {
  val topicId = Uuid.randomUuid
  val topicName = "foo"
  val remoteSegmentId = Uuid.randomUuid

  val partition = new TopicIdPartition(topicId, new TopicPartition(topicName, 0))
  val segmentId = new RemoteLogSegmentId(partition, remoteSegmentId)

  // One segment-update event plus one partition-delete event, written as a single batch.
  val serde = new RemoteLogMetadataSerde
  val events = Seq(
    new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds,
      Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(Array[Byte](0, 1, 2, 3))),
      RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0),
    new RemotePartitionDeleteMetadata(partition,
      RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0)
  )
  val records = events.map(event => new SimpleRecord(null, serde.serialize(event))).toArray

  val config = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
  log = LogTestUtils.createLog(logDir, config, new BrokerTopicStats, time.scheduler, time)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records: _*), leaderEpoch = 0)
  log.flush(false)

  // Expected toString forms of both events as printed by the decoder.
  val expectedUpdatePayload = "RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" +
    s"RemoteLogSegmentId{topicIdPartition=$topicId:$topicName-0, id=$remoteSegmentId}, customMetadata=Optional[" +
    "CustomMetadata{4 bytes}], state=COPY_SEGMENT_FINISHED, eventTimestampMs=0, brokerId=0}"
  val expectedDeletePayload = s"RemotePartitionDeleteMetadata{topicPartition=$topicId:$topicName-0, " +
    "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}"

  val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath))
  assertTrue(output.contains("Log starting offset: 0"))
  assertTrue(output.contains(expectedUpdatePayload))
  assertTrue(output.contains(expectedDeletePayload))
}
+
@Test
def testDumpRemoteLogMetadataMultipleRecordsMultipleBatches(): Unit = {
  // Counts occurrences of `sub` in `str` (overlapping, like the previous sliding-window
  // version) via indexOf, avoiding a window allocation per character position.
  // Defined before use: forward references inside a block hurt readability.
  def countSubstring(str: String, sub: String): Int =
    Iterator.iterate(str.indexOf(sub))(i => str.indexOf(sub, i + 1)).takeWhile(_ >= 0).size

  val topicId = Uuid.randomUuid
  val topicName = "foo"
  val remoteSegmentId = Uuid.randomUuid

  val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topicName, 0))
  val remoteLogSegmentId = new RemoteLogSegmentId(topicIdPartition, remoteSegmentId)

  val metadata = Seq(
    new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId, time.milliseconds,
      Optional.of(new RemoteLogSegmentMetadata.CustomMetadata(Array[Byte](0, 1, 2, 3))),
      RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0),
    new RemotePartitionDeleteMetadata(topicIdPartition,
      RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0)
  )

  val records: Array[SimpleRecord] = metadata.map(message =>
    new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
  ).toArray

  val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
  log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time)
  // Two identical batches: each payload should therefore be printed exactly twice.
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records: _*), leaderEpoch = 0)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records: _*), leaderEpoch = 0)
  log.flush(false)

  val expectedUpdatePayload = String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" +
    "RemoteLogSegmentId{topicIdPartition=%s:%s-0, id=%s}, customMetadata=Optional[" +
    "CustomMetadata{4 bytes}], state=COPY_SEGMENT_FINISHED, eventTimestampMs=0, brokerId=0}",
    topicId, topicName, remoteSegmentId)
  val expectedDeletePayload = String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
    "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}", topicId, topicName)

  val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath))
  assertTrue(output.contains("Log starting offset: 0"))
  // assertEquals reports the actual count on failure, unlike assertTrue(count == 2).
  assertEquals(2, countSubstring(output, expectedUpdatePayload))
  assertEquals(2, countSubstring(output, expectedDeletePayload))
}
+
@Test
def testDumpRemoteLogMetadataNonZeroStartingOffset(): Unit = {
  val topicId = Uuid.randomUuid
  val topicName = "foo"

  val metadata = Seq(new RemotePartitionDeleteMetadata(
    new TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
    RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0))

  val metadataRecords: Array[SimpleRecord] = metadata.map(message =>
    new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
  ).toArray

  // Size the segment to exactly one batch so the second append rolls a new segment
  // whose base offset is 1.
  val memoryRecordsSizeInBytes = MemoryRecords.withRecords(Compression.NONE, metadataRecords: _*).sizeInBytes()
  val logConfig = LogTestUtils.createLogConfig(segmentBytes = memoryRecordsSizeInBytes)
  log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords: _*), leaderEpoch = 0)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords: _*), leaderEpoch = 0)
  log.flush(false)

  // File.listFiles returns entries in no guaranteed order, so sort before indexing.
  // Segment file names are zero-padded base offsets, so lexical order == offset order
  // and logPaths(1) is deterministically the segment starting at offset 1.
  val logPaths = logDir.listFiles.filter(_.getName.endsWith(".log")).map(_.getAbsolutePath).sorted
  val expectedDeletePayload = String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
    "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}", topicId, topicName)

  val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logPaths(1)))
  assertTrue(output.contains("Log starting offset: 1"))
  assertTrue(output.contains(expectedDeletePayload))
}
+
@Test
def testDumpRemoteLogMetadataWithCorruption(): Unit = {
  // A payload that is not valid remote-log metadata must be reported, not crash the tool.
  val corrupted = Array(new SimpleRecord(null, "corrupted".getBytes()))

  val config = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
  log = LogTestUtils.createLog(logDir, config, new BrokerTopicStats, time.scheduler, time)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, corrupted: _*), leaderEpoch = 0)
  log.flush(false)

  val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath))
  assertTrue(output.contains("Log starting offset: 0"))
  assertTrue(output.contains("Could not deserialize metadata record"))
}
+
@Test
def testDumpRemoteLogMetadataIoException(): Unit = {
  val topicId = Uuid.randomUuid
  val topicName = "foo"

  val metadata = Seq(new RemotePartitionDeleteMetadata(
    new TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
    RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds, 0))

  val metadataRecords: Array[SimpleRecord] = metadata.map(message =>
    new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
  ).toArray

  val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
  log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time)
  log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords: _*), leaderEpoch = 0)
  log.flush(false)

  // Make the segment write-only so the decoder hits an AccessDeniedException on read.
  // NOTE(review): relies on POSIX permissions, so this test cannot run on non-POSIX
  // filesystems (e.g. Windows) — confirm it is guarded or acceptable for CI.
  val path = Paths.get(logFilePath)
  val originalPermissions = Files.getPosixFilePermissions(path)
  Files.setPosixFilePermissions(path, PosixFilePermissions.fromString("-w-------"))
  try {
    assertThrows(classOf[AccessDeniedException],
      () => runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath)))
  } finally {
    // Restore the original permissions so tearDown (and other tests) can access the file.
    Files.setPosixFilePermissions(path, originalPermissions)
  }
}
+
+ // Presumably verifies behavior of --remote-log-metadata-decoder when --files is omitted.
+ // NOTE(review): the diff hunk is truncated here — the invocation and assertions for this
+ // test are not visible in this excerpt, so its intent cannot be confirmed from this view.
+ @Test
+ def testDumpRemoteLogMetadataNoFilesFlag(): Unit = {
+ val topicId = Uuid.randomUuid
+ val topicName = "foo"
+
+ val metadata = Seq(new RemotePartitionDeleteMetadata(new
TopicIdPartition(topicId, new TopicPartition(topicName, 0)),
+ RemotePartitionDeleteState.DELETE_PARTITION_MARKED, time.milliseconds,
0))
+
+ val records: Array[SimpleRecord] = metadata.map(message => {
+ new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
+ }).toArray
+
+ val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
+ log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats,
time.scheduler, time)
+ log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE,
records:_*), leaderEpoch = 0)
+ log.flush(false)
Review Comment:
Ah sorry, you are right, that's an oversight :)
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]