cshuo commented on code in PR #18348:
URL: https://github.com/apache/hudi/pull/18348#discussion_r3050400908
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -432,107 +412,78 @@ private boolean initializeFromFilesystem(String
dataTableInstantTime, List<Metad
partitionInfoList = Collections.emptyList();
}
}
- Map<String, Map<String, Long>> partitionIdToAllFilesMap =
partitionInfoList.stream()
- .map(p -> {
- String partitionName =
HoodieTableMetadataUtil.getPartitionIdentifierForFilesPartition(p.getRelativePath());
- return Pair.of(partitionName, p.getFilenameToSizeMap());
- })
- .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
-
- // validate that each index is eligible to be initialized
- Iterator<MetadataPartitionType> iterator = partitionsToInit.iterator();
- while (iterator.hasNext()) {
- MetadataPartitionType partitionType = iterator.next();
- if (partitionType == PARTITION_STATS &&
!dataMetaClient.getTableConfig().isTablePartitioned()) {
- // Partition stats index cannot be enabled for a non-partitioned table
- iterator.remove();
- this.enabledPartitionTypes.remove(partitionType);
- }
- }
+ Map<String, List<FileInfo>> partitionIdToAllFilesMap =
DirectoryInfo.getPartitionToFileInfo(partitionInfoList);
+ Lazy<List<FileSliceAndPartition>> lazyLatestMergedPartitionFileSliceList =
getLazyLatestMergedPartitionFileSliceList();
- // For a fresh table, defer RLI initialization
- if (dataWriteConfig.getMetadataConfig().shouldDeferRliInitForFreshTable()
&& this.enabledPartitionTypes.contains(RECORD_INDEX)
- &&
dataMetaClient.getActiveTimeline().filterCompletedInstants().countInstants() ==
0) {
- this.enabledPartitionTypes.remove(RECORD_INDEX);
- partitionsToInit.remove(RECORD_INDEX);
+ // FILES partition should always be initialized first if enabled
+ if (!filesPartitionAvailable) {
+ initializeMetadataPartition(FILES,
indexerMapForPartitionsToInit.get(FILES),
+ dataTableInstantTime, partitionIdToAllFilesMap,
lazyLatestMergedPartitionFileSliceList);
+ hasPartitionsStateChanged = true;
}
- Lazy<List<Pair<String, FileSlice>>> lazyLatestMergedPartitionFileSliceList
= getLazyLatestMergedPartitionFileSliceList();
- for (MetadataPartitionType partitionType : partitionsToInit) {
- // Find the commit timestamp to use for this partition. Each
initialization should use its own unique commit time.
- String instantTimeForPartition =
generateUniqueInstantTime(dataTableInstantTime);
- String partitionTypeName = partitionType.name();
- LOG.info("Initializing MDT partition {} at instant {}",
partitionTypeName, instantTimeForPartition);
- String relativePartitionPath;
- Pair<Integer, HoodieData<HoodieRecord>> fileGroupCountAndRecordsPair;
- Lazy<Option<HoodieSchema>> tableSchema = Lazy.lazily(() ->
HoodieTableMetadataUtil.tryResolveSchemaForTable(dataMetaClient));
- try {
- switch (partitionType) {
- case FILES:
- fileGroupCountAndRecordsPair =
initializeFilesPartition(partitionIdToAllFilesMap);
- initializeFilegroupsAndCommit(partitionType,
FILES.getPartitionPath(), fileGroupCountAndRecordsPair,
instantTimeForPartition);
- break;
- case BLOOM_FILTERS:
- fileGroupCountAndRecordsPair =
initializeBloomFiltersPartition(dataTableInstantTime, partitionIdToAllFilesMap);
- initializeFilegroupsAndCommit(partitionType,
BLOOM_FILTERS.getPartitionPath(), fileGroupCountAndRecordsPair,
instantTimeForPartition);
- break;
- case COLUMN_STATS:
- Pair<List<String>, Pair<Integer, HoodieData<HoodieRecord>>>
colStatsColumnsAndRecord =
initializeColumnStatsPartition(partitionIdToAllFilesMap, tableSchema);
- fileGroupCountAndRecordsPair = colStatsColumnsAndRecord.getValue();
- initializeFilegroupsAndCommit(partitionType,
COLUMN_STATS.getPartitionPath(), fileGroupCountAndRecordsPair,
instantTimeForPartition, colStatsColumnsAndRecord.getKey());
- break;
- case RECORD_INDEX:
- boolean isPartitionedRLI =
dataWriteConfig.isRecordLevelIndexEnabled();
-
initializeFilegroupsAndCommitToRecordIndexPartition(instantTimeForPartition,
lazyLatestMergedPartitionFileSliceList, isPartitionedRLI);
- break;
- case EXPRESSION_INDEX:
- Set<String> expressionIndexPartitionsToInit =
getExpressionIndexPartitionsToInit(partitionType,
dataWriteConfig.getMetadataConfig(), dataMetaClient);
- if (expressionIndexPartitionsToInit.size() != 1) {
- if (expressionIndexPartitionsToInit.size() > 1) {
- LOG.warn("Skipping expression index initialization as only one
expression index bootstrap at a time is supported for now. Provided: {}",
expressionIndexPartitionsToInit);
- }
- continue;
- }
- relativePartitionPath =
expressionIndexPartitionsToInit.iterator().next();
- fileGroupCountAndRecordsPair =
initializeExpressionIndexPartition(relativePartitionPath, dataTableInstantTime,
lazyLatestMergedPartitionFileSliceList, tableSchema);
- initializeFilegroupsAndCommit(partitionType,
relativePartitionPath, fileGroupCountAndRecordsPair, instantTimeForPartition);
- break;
- case PARTITION_STATS:
- // For PARTITION_STATS, COLUMN_STATS should also be enabled
- if (!dataWriteConfig.isMetadataColumnStatsIndexEnabled()) {
- LOG.debug("Skipping partition stats initialization as column
stats index is not enabled. Please enable {}",
-
HoodieMetadataConfig.ENABLE_METADATA_INDEX_COLUMN_STATS.key());
- continue;
- }
- fileGroupCountAndRecordsPair =
initializePartitionStatsIndex(lazyLatestMergedPartitionFileSliceList,
tableSchema);
- initializeFilegroupsAndCommit(partitionType,
PARTITION_STATS.getPartitionPath(), fileGroupCountAndRecordsPair,
instantTimeForPartition);
- break;
- case SECONDARY_INDEX:
- Set<String> secondaryIndexPartitionsToInit =
getSecondaryIndexPartitionsToInit(partitionType,
dataWriteConfig.getMetadataConfig(), dataMetaClient);
- if (secondaryIndexPartitionsToInit.size() != 1) {
- if (secondaryIndexPartitionsToInit.size() > 1) {
- LOG.warn("Skipping secondary index initialization as only one
secondary index bootstrap at a time is supported for now. Provided: {}",
secondaryIndexPartitionsToInit);
- }
- continue;
- }
- relativePartitionPath =
secondaryIndexPartitionsToInit.iterator().next();
- fileGroupCountAndRecordsPair =
initializeSecondaryIndexPartition(relativePartitionPath,
lazyLatestMergedPartitionFileSliceList);
- initializeFilegroupsAndCommit(partitionType,
relativePartitionPath, fileGroupCountAndRecordsPair, instantTimeForPartition);
- break;
- default:
- throw new HoodieMetadataException(String.format("Unsupported MDT
partition type: %s", partitionType));
- }
- } catch (Exception e) {
- String metricKey = partitionType.getPartitionPath() + "_" +
HoodieMetadataMetrics.BOOTSTRAP_ERR_STR;
- metrics.ifPresent(m -> m.setMetric(metricKey, 1));
- String errMsg = String.format("Bootstrap on %s partition failed for
%s",
- partitionType.getPartitionPath(),
metadataMetaClient.getBasePath());
- LOG.error(errMsg, e);
- throw new HoodieMetadataException(errMsg, e);
+ indexerMapForPartitionsToInit.entrySet().stream().filter(e -> e.getKey()
!= FILES).forEach(
+ e -> {
+ try {
+ initializeMetadataPartition(e.getKey(), e.getValue(),
dataTableInstantTime, partitionIdToAllFilesMap,
lazyLatestMergedPartitionFileSliceList);
+ } catch (IOException ex) {
+ throw new HoodieMetadataException("Failed to initialize metadata
partition: " + e.getKey(), ex);
+ }
+ hasPartitionsStateChanged = true;
+ });
+ return true;
+ }
+
+ private void initializeMetadataPartition(
+ MetadataPartitionType partitionType,
+ Indexer indexer,
+ String dataTableInstantTime,
+ Map<String, List<FileInfo>> partitionToAllFilesMap,
+ Lazy<List<FileSliceAndPartition>>
lazyLatestMergedPartitionFileSliceList) throws IOException {
+ String instantTimeForPartition =
generateUniqueInstantTime(dataTableInstantTime);
+ // initialize metadata partitions
+ List<IndexPartitionInitialization> initializationList;
+ try {
+ initializationList = indexer.buildInitialization(
+ dataTableInstantTime, instantTimeForPartition,
partitionToAllFilesMap, lazyLatestMergedPartitionFileSliceList);
+ if (initializationList.isEmpty()) {
+ LOG.info("Skip building {} index in metadata table",
partitionType.getPartitionPath());
+ return;
}
- hasPartitionsStateChanged = true;
+
+ ValidationUtils.checkArgument(initializationList.size() == 1,
Review Comment:
I checked the old partitioned RLI flow, and yes, it was still a single
record-index initialization for the `record_index` metadata partition, with
multiple per-data-partition entries handled inside it. The old code iterated
over each data partition, initialized its file groups separately, unioned the
records, and then did one bulk commit with a bucketized parser built from the
per-partition sizes. So the `initializationList.size() == 1` check is still
correct here: for partitioned RLI, that one `IndexPartitionInitialization` can
contain multiple `DataPartitionAndRecords` entries, and the loop over
`dataPartitionAndRecords()` preserves the old behavior.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]