This is an automated email from the ASF dual-hosted git repository.
kharekartik pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/pinot.git
The following commit(s) were added to refs/heads/master by this push:
new 4abe0ac0f9d Modify AdaptiveSizeBasedWriter based on available capacity on disk (#16803)
4abe0ac0f9d is described below
commit 4abe0ac0f9db468230d7076ca425c980c71d810f
Author: Krishan Goyal <[email protected]>
AuthorDate: Tue Sep 23 11:28:44 2025 +0530
Modify AdaptiveSizeBasedWriter based on available capacity on disk (#16803)
* Modify AdaptiveSizeBasedWriter based on available capacity on disk
* cstyle fixes
* Fix test case
* Add default cluster config for maxDiskUsagePercentage
* Log partition space during construction for debugging purposes
* Fix bug due to flipping nature of canWrite()
* Add logs for disk usage
* Remove unnecessary logs
* checkstyle fixes
* Fix test case failure due to new default config
---
.../helix/core/minion/PinotTaskManager.java | 12 ++++
.../apache/pinot/core/common/MinionConstants.java | 8 +++
.../processing/framework/SegmentConfig.java | 17 ++++-
.../framework/SegmentProcessorFramework.java | 9 +--
.../genericrow/AdaptiveSizeBasedWriter.java | 84 ++++++++++++++++++++--
.../segment/processing/mapper/SegmentMapper.java | 3 +-
.../tests/SimpleMinionClusterIntegrationTest.java | 2 +-
.../pinot/plugin/minion/tasks/MergeTaskUtils.java | 5 ++
.../plugin/minion/tasks/MergeTaskUtilsTest.java | 5 +-
9 files changed, 129 insertions(+), 16 deletions(-)
diff --git a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/PinotTaskManager.java b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/PinotTaskManager.java
index 5f69377057c..8b9f33e9dbd 100644
--- a/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/PinotTaskManager.java
+++ b/pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/PinotTaskManager.java
@@ -258,6 +258,7 @@ public class PinotTaskManager extends ControllerPeriodicTask<Void> {
}
     pinotTaskConfigs.forEach(pinotTaskConfig -> pinotTaskConfig.getConfigs()
         .computeIfAbsent(MinionConstants.TRIGGERED_BY, k -> CommonConstants.TaskTriggers.ADHOC_TRIGGER.name()));
+ addDefaultsToTaskConfig(pinotTaskConfigs);
LOGGER.info("Submitting ad-hoc task for task type: {} with task configs:
{}", taskType, pinotTaskConfigs);
     _controllerMetrics.addMeteredTableValue(taskType, ControllerMeter.NUMBER_ADHOC_TASKS_SUBMITTED, 1);
responseMap.put(tableNameWithType,
@@ -832,6 +833,7 @@ public class PinotTaskManager extends ControllerPeriodicTask<Void> {
numTasks, taskType, pinotTaskConfigs, minionInstanceTag);
           throw new IllegalArgumentException("No valid minion instance found for tag: " + minionInstanceTag);
}
+ addDefaultsToTaskConfig(pinotTaskConfigs);
         // This might lead to a lot of logs; maybe sum it up and move outside the loop
LOGGER.info("Submitting {} tasks for task type: {} to
minionInstance: {} with task configs: {}", numTasks,
taskType, minionInstanceTag, pinotTaskConfigs);
@@ -937,4 +939,14 @@ public class PinotTaskManager extends ControllerPeriodicTask<Void> {
}
return true;
}
+
+  protected void addDefaultsToTaskConfig(List<PinotTaskConfig> taskConfigs) {
+    String maxDiskUsagePercentageStr = getClusterInfoAccessor().getClusterConfig(
+        MinionConstants.MAX_DISK_USAGE_PERCENTAGE_KEY);
+    for (PinotTaskConfig taskConfig : taskConfigs) {
+      Map<String, String> configs = taskConfig.getConfigs();
+      // Add default configs if not present
+      configs.putIfAbsent(MinionConstants.MergeTask.MAX_DISK_USAGE_PERCENTAGE, maxDiskUsagePercentageStr);
+    }
+  }
}
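The new addDefaultsToTaskConfig hook boils down to putIfAbsent semantics: the cluster-wide value fills a gap only when a task does not already carry its own setting, so an explicit task-level value always wins. A minimal, runnable restatement of that behavior, with plain maps standing in for PinotTaskConfig and the cluster-config lookup (those stand-ins are assumptions for illustration, not the Pinot API):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class TaskConfigDefaultsSketch {
      // Mirrors the putIfAbsent call in the diff above: the cluster-wide default
      // is stamped onto a task config only when the key is not already set.
      static void addDefaults(List<Map<String, String>> taskConfigs, String clusterDefault) {
        for (Map<String, String> configs : taskConfigs) {
          configs.putIfAbsent("maxDiskUsagePercentage", clusterDefault);
        }
      }

      public static void main(String[] args) {
        Map<String, String> explicit = new HashMap<>(Map.of("maxDiskUsagePercentage", "70"));
        Map<String, String> unset = new HashMap<>();
        addDefaults(List.of(explicit, unset), "90");
        System.out.println(explicit.get("maxDiskUsagePercentage")); // 70: explicit task value wins
        System.out.println(unset.get("maxDiskUsagePercentage"));    // 90: cluster default applied
      }
    }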
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/common/MinionConstants.java b/pinot-core/src/main/java/org/apache/pinot/core/common/MinionConstants.java
index 8c454d101f5..293f4ed5784 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/common/MinionConstants.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/common/MinionConstants.java
@@ -67,6 +67,10 @@ public class MinionConstants {
   // This is primarily used to prevent performance issues in helix leader controller when it creates
// more subtasks than it can support
   public static final String MAX_ALLOWED_SUB_TASKS_KEY = "minion.maxAllowedSubTasksPerTask";
+  // Cluster-level config for the maximum disk usage percentage on minion hosts before the task tries to gracefully
+  // handle the situation rather than getting killed due to lack of disk space.
+  // Note - Not all tasks may support this config. Currently, MergeTask and its variants support this config.
+  public static final String MAX_DISK_USAGE_PERCENTAGE_KEY = "minion.maxDiskUsagePercentage";
/**
* Table level configs
@@ -128,7 +132,11 @@ public class MinionConstants {
// Segment config
     public static final String MAX_NUM_RECORDS_PER_TASK_KEY = "maxNumRecordsPerTask";
     public static final String MAX_NUM_RECORDS_PER_SEGMENT_KEY = "maxNumRecordsPerSegment";
+
+ // See AdaptiveSizeBasedWriter for documentation of these configs
     public static final String SEGMENT_MAPPER_FILE_SIZE_IN_BYTES = "segmentMapperFileSizeThresholdInBytes";
+    public static final String MAX_DISK_USAGE_PERCENTAGE = "maxDiskUsagePercentage";
+
     public static final String MAX_NUM_PARALLEL_BUCKETS = "maxNumParallelBuckets";
public static final String SEGMENT_NAME_PREFIX_KEY = "segmentNamePrefix";
public static final String SEGMENT_NAME_POSTFIX_KEY = "segmentNamePostfix";
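To make the two scopes concrete: the cluster key above ("minion.maxDiskUsagePercentage") supplies a default that the controller stamps onto task configs, while the task-level key ("maxDiskUsagePercentage") carries the effective per-task value. A hedged sketch of how the value is interpreted, inferred from the builder default of 100 in SegmentConfig and the guard in isDiskUsageExceeded() later in this diff (diskCheckEnabled is an illustrative helper, not Pinot code):

    public class DiskUsageKnobSketch {
      // Values <= 0 or >= 100 effectively disable the disk-usage check,
      // matching the writer's guard; 100 is the "no limit" builder default.
      static boolean diskCheckEnabled(int maxDiskUsagePercentage) {
        return maxDiskUsagePercentage > 0 && maxDiskUsagePercentage < 100;
      }

      public static void main(String[] args) {
        System.out.println(diskCheckEnabled(80));  // true: writer halts once the store is 80% full
        System.out.println(diskCheckEnabled(100)); // false: default, no disk-usage limit
        System.out.println(diskCheckEnabled(0));   // false: invalid, treated as no limit
      }
    }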
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentConfig.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentConfig.java
index 95191b658b7..2b3605ecdd8 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentConfig.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentConfig.java
@@ -38,6 +38,7 @@ public class SegmentConfig {
private final String _segmentNamePostfix;
private final String _fixedSegmentName;
private final long _segmentMapperFileSizeThresholdInBytes;
+ private final int _maxDiskUsagePercentage;
@JsonCreator
   private SegmentConfig(@JsonProperty(value = "maxNumRecordsPerSegment", required = true) int maxNumRecordsPerSegment,
@@ -45,7 +46,8 @@ public class SegmentConfig {
@JsonProperty("segmentNamePostfix") @Nullable String segmentNamePostfix,
@JsonProperty("fixedSegmentName") @Nullable String fixedSegmentName,
@JsonProperty(value = "segmentMapperFileSizeThresholdInBytes", required
= true)
- long segmentMapperFileSizeThresholdInBytes) {
+ long segmentMapperFileSizeThresholdInBytes,
+ @JsonProperty("maxDiskUsagePercentage") int maxDiskUsagePercentage) {
     Preconditions.checkState(maxNumRecordsPerSegment > 0, "Max num records per segment must be > 0");
     Preconditions.checkState(segmentMapperFileSizeThresholdInBytes > 0, "Intermediate file size threshold must be > 0");
_maxNumRecordsPerSegment = maxNumRecordsPerSegment;
@@ -53,6 +55,7 @@ public class SegmentConfig {
_segmentNamePostfix = segmentNamePostfix;
_fixedSegmentName = fixedSegmentName;
     _segmentMapperFileSizeThresholdInBytes = segmentMapperFileSizeThresholdInBytes;
+ _maxDiskUsagePercentage = maxDiskUsagePercentage;
}
/**
@@ -81,12 +84,17 @@ public class SegmentConfig {
return _segmentMapperFileSizeThresholdInBytes;
}
+ public int getMaxDiskUsagePercentage() {
+ return _maxDiskUsagePercentage;
+ }
+
/**
* Builder for SegmentConfig
*/
public static class Builder {
private int _maxNumRecordsPerSegment = DEFAULT_MAX_NUM_RECORDS_PER_SEGMENT;
     private long _segmentMapperFileSizeThresholdInBytes = DEFAULT_SEGMENT_MAPPER_FILE_SIZE_IN_BYTES;
+ private int _maxDiskUsagePercentage = 100;
private String _segmentNamePrefix;
private String _segmentNamePostfix;
private String _fixedSegmentName;
@@ -115,13 +123,17 @@ public class SegmentConfig {
       _segmentMapperFileSizeThresholdInBytes = segmentMapperFileSizeThresholdInBytes;
return this;
}
+ public Builder setMaxDiskUsagePercentage(int maxDiskUsagePercentage) {
+ _maxDiskUsagePercentage = maxDiskUsagePercentage;
+ return this;
+ }
public SegmentConfig build() {
       Preconditions.checkState(_maxNumRecordsPerSegment > 0, "Max num records per segment must be > 0");
       Preconditions.checkState(_segmentMapperFileSizeThresholdInBytes > 0, "Intermediate file size threshold must be > 0");
       return new SegmentConfig(_maxNumRecordsPerSegment, _segmentNamePrefix, _segmentNamePostfix, _fixedSegmentName,
-          _segmentMapperFileSizeThresholdInBytes);
+          _segmentMapperFileSizeThresholdInBytes, _maxDiskUsagePercentage);
}
}
@@ -129,6 +141,7 @@ public class SegmentConfig {
public String toString() {
return "SegmentConfig{" + "_maxNumRecordsPerSegment=" +
_maxNumRecordsPerSegment
+ ", _segmentMapperFileSizeThresholdInBytes=" +
_segmentMapperFileSizeThresholdInBytes
+ + ", _maxDiskUsagePercentage=" + _maxDiskUsagePercentage
+ ", _segmentNamePrefix='" + _segmentNamePrefix + '\'' + ",
_segmentNamePostfix='" + _segmentNamePostfix + '\''
+ ", _fixedSegmentName='" + _fixedSegmentName + '\'' + '}';
}
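A short usage fragment for the extended builder (values are illustrative, and this is not standalone since it relies on the Pinot classes above):

    // Caps each intermediate mapper file at ~1 GB and stops writing once the
    // output file store is 80% full; omitting setMaxDiskUsagePercentage keeps
    // the default of 100, i.e. no disk-usage limit.
    SegmentConfig segmentConfig = new SegmentConfig.Builder()
        .setIntermediateFileSizeThreshold(1_000_000_000L)
        .setMaxDiskUsagePercentage(80)
        .build();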
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentProcessorFramework.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentProcessorFramework.java
index f902a9bfda4..d90c773dea0 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentProcessorFramework.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/framework/SegmentProcessorFramework.java
@@ -164,8 +164,9 @@ public class SegmentProcessorFramework {
int numRecordReaders = _recordReaderFileConfigs.size();
int nextRecordReaderIndexToBeProcessed = 0;
int iterationCount = 1;
-    boolean isMapperOutputSizeThresholdEnabled =
-        _segmentProcessorConfig.getSegmentConfig().getIntermediateFileSizeThreshold() != Long.MAX_VALUE;
+    boolean canMapperBeEarlyTerminated =
+        _segmentProcessorConfig.getSegmentConfig().getIntermediateFileSizeThreshold() != Long.MAX_VALUE
+            || _segmentProcessorConfig.getSegmentConfig().getMaxDiskUsagePercentage() < 100;
String logMessage;
while (nextRecordReaderIndexToBeProcessed < numRecordReaders) {
@@ -174,7 +175,7 @@ public class SegmentProcessorFramework {
           getSegmentMapper(_recordReaderFileConfigs.subList(nextRecordReaderIndexToBeProcessed, numRecordReaders));
       // Log start-of-iteration details only if the mapper can be early-terminated.
- if (isMapperOutputSizeThresholdEnabled) {
+ if (canMapperBeEarlyTerminated) {
         logMessage =
             String.format("Starting iteration %d with %d record readers. Starting index = %d, end index = %d",
                 iterationCount,
@@ -223,7 +224,7 @@ public class SegmentProcessorFramework {
       nextRecordReaderIndexToBeProcessed = getNextRecordReaderIndexToBeProcessed(nextRecordReaderIndexToBeProcessed);
       // Log the details between iterations only if the mapper can be early-terminated.
- if (isMapperOutputSizeThresholdEnabled) {
+ if (canMapperBeEarlyTerminated) {
         // Take care of logging the proper RecordReader index in case of the last iteration.
         int boundaryIndexToLog =
             nextRecordReaderIndexToBeProcessed == numRecordReaders ? nextRecordReaderIndexToBeProcessed
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/genericrow/AdaptiveSizeBasedWriter.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/genericrow/AdaptiveSizeBasedWriter.java
index d7db6509f3f..d2acbfed9bd 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/genericrow/AdaptiveSizeBasedWriter.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/genericrow/AdaptiveSizeBasedWriter.java
@@ -18,18 +18,63 @@
*/
package org.apache.pinot.core.segment.processing.genericrow;
+import java.io.File;
import java.io.IOException;
+import java.nio.file.FileStore;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import javax.annotation.Nullable;
import org.apache.pinot.spi.data.readers.GenericRow;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-
+/**
+ * AdaptiveSizeBasedWriter provides adaptive control over writing data based on configurable constraints.
+ * <p>
+ * It supports two main constraints:
+ * <ul>
+ *   <li><b>Byte Limit:</b> The maximum number of bytes that can be written by a given instance of the class.
+ *   Once this limit is reached, no further data is written.
+ *   This config doesn't take into account other writers writing to the same file store.
+ *   </li>
+ *   <li><b>Disk Usage Percentage:</b> The maximum allowed disk usage percentage for the underlying file store.
+ *   If the disk usage exceeds this threshold, writing is halted.
+ *   This config helps prevent the system from running out of disk space while considering other processes
+ *   that may be writing to the same file store.
+ *   </li>
+ * </ul>
+ */
 public class AdaptiveSizeBasedWriter implements AdaptiveConstraintsWriter<FileWriter<GenericRow>, GenericRow> {
-  private final long _bytesLimit;
-  private long _numBytesWritten;
+  private static final Logger LOGGER = LoggerFactory.getLogger(AdaptiveSizeBasedWriter.class);
+
+  private final long _bytesLimit; // Max number of bytes that can be written for this instance of the writer
+  private final int _maxDiskUsagePercentage; // Max disk usage percentage for the underlying file store
+
+  private long _numBytesWritten; // Number of bytes written so far by this instance of the writer
+  @Nullable
+  private final FileStore _fileStore;
+  private long _lastDiskUsageCheckTime = 0L;
+
+  private boolean _hasExceededSizeLimit = false;

-  public AdaptiveSizeBasedWriter(long bytesLimit) {
+  private static final long DISK_USAGE_CHECK_INTERVAL_MS = 10 * 1000L; // 10 seconds
+
+  public AdaptiveSizeBasedWriter(long bytesLimit, int maxDiskUsagePercentage, File outputDir) {
_bytesLimit = bytesLimit;
_numBytesWritten = 0;
+
+ FileStore fileStore;
+ try {
+ Path path = outputDir.toPath();
+ fileStore = Files.getFileStore(path);
+ } catch (Exception e) {
+ LOGGER.error("Failed to get the filestore for path: {}",
outputDir.getAbsolutePath(), e);
+ fileStore = null;
+ }
+ _fileStore = fileStore;
+ _maxDiskUsagePercentage = maxDiskUsagePercentage;
}
public long getBytesLimit() {
@@ -39,13 +84,42 @@ public class AdaptiveSizeBasedWriter implements AdaptiveConstraintsWriter<FileWr
return _numBytesWritten;
}
+ // Ensure canWrite() doesn't return true after it has returned false once
@Override
public boolean canWrite() {
- return _numBytesWritten < _bytesLimit;
+ return _numBytesWritten < _bytesLimit && !isDiskUsageExceeded();
}
@Override
   public void write(FileWriter<GenericRow> writer, GenericRow row) throws IOException {
_numBytesWritten += writer.writeData(row);
}
+
+ private boolean isDiskUsageExceeded() {
+    if (_fileStore == null || _maxDiskUsagePercentage <= 0 || _maxDiskUsagePercentage >= 100) {
+      // Unable to get the file store, or the max disk usage percentage is invalid or imposes no limit
+ return _hasExceededSizeLimit;
+ }
+ try {
+ long currentTime = System.currentTimeMillis();
+      if (currentTime - _lastDiskUsageCheckTime < DISK_USAGE_CHECK_INTERVAL_MS) {
+ return _hasExceededSizeLimit;
+ }
+ _lastDiskUsageCheckTime = currentTime;
+
+ long totalSpace = _fileStore.getTotalSpace();
+ long usableSpace = _fileStore.getUsableSpace();
+ long usedSpace = totalSpace - usableSpace;
+ int usedPercentage = (int) ((usedSpace * 100) / totalSpace);
+ if (usedPercentage >= _maxDiskUsagePercentage) {
+ LOGGER.warn("Disk usage percentage: {}% has exceeded the max limit:
{}%. Will stop writing more data",
+ usedPercentage, _maxDiskUsagePercentage);
+ _hasExceededSizeLimit = true;
+ }
+ return _hasExceededSizeLimit;
+ } catch (Exception e) {
+ LOGGER.error("Failed to get the disk usage info", e);
+ return _hasExceededSizeLimit;
+ }
+ }
}
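The probe inside isDiskUsageExceeded() is plain java.nio. The same computation as a self-contained sketch; the path and threshold are examples only, and the real writer additionally rate-limits the check to DISK_USAGE_CHECK_INTERVAL_MS and latches a breach via _hasExceededSizeLimit:

    import java.nio.file.FileStore;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class DiskUsageProbeSketch {
      public static void main(String[] args) throws Exception {
        int maxDiskUsagePercentage = 80;                        // example threshold
        FileStore fileStore = Files.getFileStore(Path.of(".")); // example path
        long totalSpace = fileStore.getTotalSpace();
        long usedSpace = totalSpace - fileStore.getUsableSpace();
        // Same integer math as the writer; bytes * 100 stays well inside a long
        // for realistic file-store sizes.
        int usedPercentage = (int) ((usedSpace * 100) / totalSpace);
        System.out.printf("Used %d%% of file store; limit %d%% -> exceeded: %b%n",
            usedPercentage, maxDiskUsagePercentage, usedPercentage >= maxDiskUsagePercentage);
      }
    }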
diff --git a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/mapper/SegmentMapper.java b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/mapper/SegmentMapper.java
index e81b1174686..49a60c52164 100644
--- a/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/mapper/SegmentMapper.java
+++ b/pinot-core/src/main/java/org/apache/pinot/core/segment/processing/mapper/SegmentMapper.java
@@ -129,7 +129,8 @@ public class SegmentMapper {
// initialize adaptive writer.
     _adaptiveSizeBasedWriter =
-        new AdaptiveSizeBasedWriter(processorConfig.getSegmentConfig().getIntermediateFileSizeThreshold());
+        new AdaptiveSizeBasedWriter(processorConfig.getSegmentConfig().getIntermediateFileSizeThreshold(),
+            processorConfig.getSegmentConfig().getMaxDiskUsagePercentage(), mapperOutputDir);
}
/**
diff --git a/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/SimpleMinionClusterIntegrationTest.java b/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/SimpleMinionClusterIntegrationTest.java
index 55846837728..222a9e9bcf4 100644
--- a/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/SimpleMinionClusterIntegrationTest.java
+++ b/pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/SimpleMinionClusterIntegrationTest.java
@@ -60,7 +60,7 @@ public class SimpleMinionClusterIntegrationTest extends ClusterTest {
public static final String TABLE_NAME_2 = "testTable2";
public static final String TABLE_NAME_3 = "testTable3";
public static final int NUM_TASKS = 2;
- public static final int NUM_CONFIGS = 4;
+ public static final int NUM_CONFIGS = 5;
public static final AtomicBoolean HOLD = new AtomicBoolean();
public static final AtomicBoolean TASK_START_NOTIFIED = new AtomicBoolean();
   public static final AtomicBoolean TASK_SUCCESS_NOTIFIED = new AtomicBoolean();
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java
index 34bf2e5ecf3..74ba586c838 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtils.java
@@ -139,6 +139,7 @@ public class MergeTaskUtils {
/**
* Returns the segment config based on the task config.
+   * TODO - Ensure all tasks that build SegmentConfig use this method so that all appropriate configs are set.
*/
   public static SegmentConfig getSegmentConfig(Map<String, String> taskConfig) {
SegmentConfig.Builder segmentConfigBuilder = new SegmentConfig.Builder();
@@ -150,6 +151,10 @@ public class MergeTaskUtils {
if (segmentMapperFileSizeThreshold != null) {
segmentConfigBuilder.setIntermediateFileSizeThreshold(Long.parseLong(segmentMapperFileSizeThreshold));
}
+    String maxDiskUsagePercentage = taskConfig.get(MergeTask.MAX_DISK_USAGE_PERCENTAGE);
+ if (maxDiskUsagePercentage != null) {
+      segmentConfigBuilder.setMaxDiskUsagePercentage(Integer.parseInt(maxDiskUsagePercentage));
+ }
segmentConfigBuilder.setSegmentNamePrefix(taskConfig.get(MergeTask.SEGMENT_NAME_PREFIX_KEY));
segmentConfigBuilder.setSegmentNamePostfix(taskConfig.get(MergeTask.SEGMENT_NAME_POSTFIX_KEY));
segmentConfigBuilder.setFixedSegmentName(taskConfig.get(MergeTask.FIXED_SEGMENT_NAME_KEY));
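End to end, a hedged usage fragment mirroring the updated unit test below (it relies on Pinot's MergeTask constants and MergeTaskUtils, plus java.util.HashMap/Map imports; the literal values are examples):

    Map<String, String> taskConfig = new HashMap<>();
    taskConfig.put(MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, "10000");
    taskConfig.put(MergeTask.MAX_DISK_USAGE_PERCENTAGE, "80");
    SegmentConfig segmentConfig = MergeTaskUtils.getSegmentConfig(taskConfig);
    // segmentConfig.getMaxDiskUsagePercentage() == 80; when the key is absent,
    // the builder default of 100 (no disk-usage limit) applies.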
diff --git a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java
index 4e41ddeb98f..2f8e7601773 100644
--- a/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java
+++ b/pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/test/java/org/apache/pinot/plugin/minion/tasks/MergeTaskUtilsTest.java
@@ -176,6 +176,7 @@ public class MergeTaskUtilsTest {
public void testGetSegmentConfig() {
Map<String, String> taskConfig = new HashMap<>();
taskConfig.put(MergeTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, "10000");
+ taskConfig.put(MergeTask.MAX_DISK_USAGE_PERCENTAGE, "80");
taskConfig.put(MergeTask.SEGMENT_NAME_PREFIX_KEY, "myPrefix");
taskConfig.put(MergeTask.SEGMENT_NAME_POSTFIX_KEY, "myPostfix");
taskConfig.put(MergeTask.FIXED_SEGMENT_NAME_KEY, "mySegment");
@@ -187,9 +188,7 @@ public class MergeTaskUtilsTest {
assertEquals(segmentConfig.getSegmentNamePostfix(), "myPostfix");
assertEquals(segmentConfig.getFixedSegmentName(), "mySegment");
     assertEquals(segmentConfig.getIntermediateFileSizeThreshold(), 1000000000L);
-    assertEquals(segmentConfig.toString(),
-        "SegmentConfig{_maxNumRecordsPerSegment=10000, _segmentMapperFileSizeThresholdInBytes=1000000000, "
-            + "_segmentNamePrefix='myPrefix', _segmentNamePostfix='myPostfix', _fixedSegmentName='mySegment'}");
+    assertEquals(segmentConfig.getMaxDiskUsagePercentage(), 80);
segmentConfig = MergeTaskUtils.getSegmentConfig(Collections.emptyMap());
     assertEquals(segmentConfig.getMaxNumRecordsPerSegment(), SegmentConfig.DEFAULT_MAX_NUM_RECORDS_PER_SEGMENT);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]