junrao commented on code in PR #19371:
URL: https://github.com/apache/kafka/pull/19371#discussion_r2046016358
##########
core/src/main/scala/kafka/raft/KafkaMetadataLog.scala:
##########
@@ -583,35 +584,72 @@ object KafkaMetadataLog extends Logging {
scheduler: Scheduler,
config: MetadataLogConfig
): KafkaMetadataLog = {
- val props = new Properties()
- props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG,
config.maxBatchSizeInBytes.toString)
- props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG,
config.logSegmentBytes.toString)
- props.setProperty(TopicConfig.SEGMENT_MS_CONFIG,
config.logSegmentMillis.toString)
- props.setProperty(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG,
ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT.toString)
-
- // Disable time and byte retention when deleting segments
- props.setProperty(TopicConfig.RETENTION_MS_CONFIG, "-1")
- props.setProperty(TopicConfig.RETENTION_BYTES_CONFIG, "-1")
+ val props: Properties = settingLogProperties(config)
LogConfig.validate(props)
val defaultLogConfig = new LogConfig(props)
- if (config.logSegmentBytes < config.logSegmentMinBytes) {
- throw new InvalidConfigurationException(
- s"Cannot set ${KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG} below
${config.logSegmentMinBytes}: ${config.logSegmentBytes}"
- )
- } else if (defaultLogConfig.retentionMs >= 0) {
- throw new InvalidConfigurationException(
- s"Cannot set ${TopicConfig.RETENTION_MS_CONFIG} above -1:
${defaultLogConfig.retentionMs}."
- )
- } else if (defaultLogConfig.retentionSize >= 0) {
- throw new InvalidConfigurationException(
- s"Cannot set ${TopicConfig.RETENTION_BYTES_CONFIG} above -1:
${defaultLogConfig.retentionSize}."
- )
+ validateConfig(config, defaultLogConfig)
+
+ val metadataLog: KafkaMetadataLog = createKafkaMetadataLog(topicPartition,
topicId, dataDir, time, scheduler, config, defaultLogConfig)
+
+ printWarningMessage(config, metadataLog)
+
+ // When recovering, truncate fully if the latest snapshot is after the log
end offset. This can happen to a follower
+ // when the follower crashes after downloading a snapshot from the leader
but before it could truncate the log fully.
+ metadataLog.truncateToLatestSnapshot()
+
+ metadataLog
+ }
+
+ private def printWarningMessage(config: MetadataLogConfig, metadataLog:
KafkaMetadataLog): Unit = {
+ // Print a warning if users have overridden the internal config
+ if (config.logSegmentMinBytes != KafkaRaftClient.MAX_BATCH_SIZE_BYTES) {
+ metadataLog.error(s"Overriding
${KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG} is only supported for
testing. Setting " +
+ s"this value too low may lead to an inability to write batches of
metadata records.")
}
+ }
+
+ // visible for testing
+ def internalApply(
Review Comment:
I am just saying that we now have two different ways to achieve the same
goal. In the metadata log approach, you set the desired value through the
original config, which is segment.bytes. You then set an internal config to
change the min constraint.
The approach in this PR is to set the desired value through a different
internal config.
It would be useful to choose the same approach for both the metadata log and
the regular log.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]