SabrinaZhaozyf commented on code in PR #14300:
URL: https://github.com/apache/pinot/pull/14300#discussion_r1829878385


##########
pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentrefresh/SegmentRefreshTaskGenerator.java:
##########
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.plugin.minion.tasks.segmentrefresh;
+
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.pinot.common.data.Segment;
+import org.apache.pinot.common.metadata.segment.SegmentZKMetadata;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.helix.core.minion.generator.BaseTaskGenerator;
+import org.apache.pinot.controller.helix.core.minion.generator.TaskGeneratorUtils;
+import org.apache.pinot.core.common.MinionConstants;
+import org.apache.pinot.core.common.MinionConstants.SegmentRefreshTask;
+import org.apache.pinot.core.minion.PinotTaskConfig;
+import org.apache.pinot.spi.annotations.minion.TaskGenerator;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableTaskConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+@TaskGenerator
+public class SegmentRefreshTaskGenerator extends BaseTaskGenerator {
+  private static final Logger LOGGER = LoggerFactory.getLogger(SegmentRefreshTaskGenerator.class);
+
+  @Override
+  public String getTaskType() {
+    return MinionConstants.SegmentRefreshTask.TASK_TYPE;
+  }
+
+  @Override
+  public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
+    String taskType = MinionConstants.SegmentRefreshTask.TASK_TYPE;
+    List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
+    int tableNumTasks = 0;
+
+    for (TableConfig tableConfig : tableConfigs) {
+      String tableNameWithType = tableConfig.getTableName();
+      LOGGER.info("Start generating SegmentRefresh tasks for table: {}", 
tableNameWithType);
+
+      // Get the task configs for the table. This is used to restrict the maximum number of allowed tasks per table at
+      // any given point.
+      Map<String, String> taskConfigs;
+      TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig();
+      if (tableTaskConfig == null) {
+        LOGGER.warn("Failed to find task config for table: {}", 
tableNameWithType);
+        continue;
+      }
+      taskConfigs = tableTaskConfig.getConfigsForTaskType(MinionConstants.SegmentRefreshTask.TASK_TYPE);
+      Preconditions.checkNotNull(taskConfigs, "Task config shouldn't be null for Table: %s", tableNameWithType);
+      int tableMaxNumTasks = Integer.MAX_VALUE;
+      String tableMaxNumTasksConfig = taskConfigs.get(MinionConstants.TABLE_MAX_NUM_TASKS_KEY);
+      if (tableMaxNumTasksConfig != null) {
+        try {
+          tableMaxNumTasks = Integer.parseInt(tableMaxNumTasksConfig);
+        } catch (Exception e) {
+          tableMaxNumTasks = Integer.MAX_VALUE;
+          LOGGER.warn("MaxNumTasks have been wrongly set for table : {}, and 
task {}", tableNameWithType, taskType);
+        }
+      }
+
+      // Get the running segments for a table.
+      Set<Segment> runningSegments =
+          
TaskGeneratorUtils.getRunningSegments(MinionConstants.SegmentRefreshTask.TASK_TYPE,
 _clusterInfoAccessor);
+
+      // Make a single ZK call to get the segments.
+      List<SegmentZKMetadata> allSegments = 
_clusterInfoAccessor.getSegmentsZKMetadata(tableNameWithType);
+
+      for (SegmentZKMetadata segmentZKMetadata : allSegments) {
+        String segmentName = segmentZKMetadata.getSegmentName();
+
+        // Skip consuming segments.
+        if (tableConfig.getTableType() == TableType.REALTIME && !segmentZKMetadata.getStatus().isCompleted()) {
+          continue;
+        }
+
+        // Skip segments for which a task is already running.
+        if (runningSegments.contains(new Segment(tableNameWithType, segmentZKMetadata.getSegmentName()))) {
+          continue;
+        }
+
+        // Skip if we have reached the maximum number of permissible tasks per iteration.
+        if (tableNumTasks >= tableMaxNumTasks) {
+          break;
+        }
+
+        // Skip if the segment is already up-to-date and doesn't have to be refreshed.
+        if (!shouldRefreshSegment(segmentZKMetadata, tableConfig)) {
+          continue;
+        }
+
+        Map<String, String> configs = new 
HashMap<>(getBaseTaskConfigs(tableConfig, List.of(segmentName)));
+        configs.put(MinionConstants.DOWNLOAD_URL_KEY, 
segmentZKMetadata.getDownloadUrl());
+        configs.put(MinionConstants.UPLOAD_URL_KEY, 
_clusterInfoAccessor.getVipUrl() + "/segments");
+        configs.put(MinionConstants.ORIGINAL_SEGMENT_CRC_KEY, 
String.valueOf(segmentZKMetadata.getCrc()));
+        pinotTaskConfigs.add(new PinotTaskConfig(taskType, configs));
+        tableNumTasks++;
+      }
+
+      LOGGER.info("Finished generating {} tasks configs for table: {} " + "for 
task: {}", tableNumTasks,
+          tableNameWithType, taskType);
+    }
+
+    return pinotTaskConfigs;
+  }
+
+  /**
+   * We need not refresh a segment when there were no tableConfig or schema updates after the last time the segment
+   * was refreshed by this task.
+   *
+   * Note that segments newly created after the latest tableConfig/schema update will still need to be refreshed. This
+   * is because inverted index creation is disabled by default during segment generation. This can be added as an
+   * additional check in the future, if required.
+   */
+  private boolean shouldRefreshSegment(SegmentZKMetadata segmentZKMetadata, TableConfig tableConfig) {
+    String tableNameWithType = tableConfig.getTableName();
+    PinotHelixResourceManager pinotHelixResourceManager = _clusterInfoAccessor.getPinotHelixResourceManager();
+    String timestampKey = SegmentRefreshTask.TASK_TYPE + MinionConstants.TASK_TIME_SUFFIX;
+
+    long lastProcessedTime = 0L;
+    if (segmentZKMetadata.getCustomMap() != null && segmentZKMetadata.getCustomMap().containsKey(timestampKey)) {

Review Comment:
   Is it intentional to only skip refresh when the refresh was done by this task (e.g. what about segment reloads triggered by a server restart, or done manually outside of this task)?

   Have we considered checking against the segment modify time instead (are there concerns about missing some segments that way)?
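
   For illustration, a minimal sketch of what a modify-time based check could look like (hypothetical helper; `getPushTime()`/`getRefreshTime()` are assumed to be the `SegmentZKMetadata` accessors for when the segment was last written, and the tableConfig/schema mtimes would come from their ZK `Stat`s):

   ```java
   // Hypothetical alternative to the custom-map timestamp check: compare the
   // segment's own last-write time against the tableConfig/schema update times.
   private boolean isSegmentStale(SegmentZKMetadata segmentZKMetadata, long tableConfigMTime, long schemaMTime) {
     // Use whichever of push/refresh time is later as the segment's last-write time.
     long lastSegmentWriteTime = Math.max(segmentZKMetadata.getPushTime(), segmentZKMetadata.getRefreshTime());
     // Refresh only if the tableConfig or schema changed after the segment was last written.
     return lastSegmentWriteTime < Math.max(tableConfigMTime, schemaMTime);
   }
   ```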



##########
pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentrefresh/SegmentRefreshTaskExecutor.java:
##########
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.plugin.minion.tasks.segmentrefresh;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.apache.pinot.common.metadata.segment.SegmentZKMetadataCustomMapModifier;
+import org.apache.pinot.core.common.MinionConstants;
+import org.apache.pinot.core.minion.PinotTaskConfig;
+import org.apache.pinot.plugin.minion.tasks.BaseSingleSegmentConversionExecutor;
+import org.apache.pinot.plugin.minion.tasks.SegmentConversionResult;
+import org.apache.pinot.segment.local.indexsegment.immutable.ImmutableSegmentLoader;
+import org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.index.loader.IndexLoadingConfig;
+import org.apache.pinot.segment.local.segment.readers.PinotSegmentRecordReader;
+import org.apache.pinot.segment.spi.ColumnMetadata;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.segment.spi.index.metadata.SegmentMetadataImpl;
+import org.apache.pinot.segment.spi.loader.SegmentDirectoryLoaderContext;
+import org.apache.pinot.segment.spi.loader.SegmentDirectoryLoaderRegistry;
+import org.apache.pinot.segment.spi.store.SegmentDirectory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.env.PinotConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class SegmentRefreshTaskExecutor extends BaseSingleSegmentConversionExecutor {
+  private static final Logger LOGGER = LoggerFactory.getLogger(SegmentRefreshTaskExecutor.class);
+
+  private long _taskStartTime;
+
+  /**
+   * The code here currently covers segment refresh for the following cases:
+   * 1. Processing newly added columns.
+   * 2. Addition/removal of indexes.
+   * 3. Compatible datatype changes for existing columns.
+   */
+  @Override
+  protected SegmentConversionResult convert(PinotTaskConfig pinotTaskConfig, File indexDir, File workingDir)
+      throws Exception {
+    _eventObserver.notifyProgress(pinotTaskConfig, "Refreshing segment: " + indexDir);
+
+    // We set _taskStartTime before fetching the tableConfig. Task generation relies on tableConfig/schema updates
+    // happening after the last processed time, so we explicitly use the timestamp taken before fetching the
+    // tableConfig as the processedTime.
+    _taskStartTime = System.currentTimeMillis();
+    Map<String, String> configs = pinotTaskConfig.getConfigs();
+    String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY);
+    String segmentName = configs.get(MinionConstants.SEGMENT_NAME_KEY);
+    String taskType = pinotTaskConfig.getTaskType();
+
+    LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
+
+    TableConfig tableConfig = getTableConfig(tableNameWithType);
+    Schema schema = getSchema(tableNameWithType);
+
+    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(tableConfig, schema);
+    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir);
+    PinotConfiguration segmentDirectoryConfigs = indexLoadingConfig.getSegmentDirectoryConfigs();
+    SegmentDirectoryLoaderContext segmentLoaderContext =
+        new SegmentDirectoryLoaderContext.Builder().setTableConfig(indexLoadingConfig.getTableConfig())
+            .setSchema(schema)
+            .setInstanceId(indexLoadingConfig.getInstanceId())
+            .setSegmentName(segmentMetadata.getName())
+            .setSegmentCrc(segmentMetadata.getCrc())
+            .setSegmentDirectoryConfigs(segmentDirectoryConfigs)
+            .build();
+    SegmentDirectory segmentDirectory =
+        SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader().load(indexDir.toURI(), segmentLoaderContext);
+
+    // The BaseDefaultColumnHandler part of needPreprocess() does not process any changes to existing columns, like a
+    // datatype change or a change from dimension to metric, etc.
+    boolean needPreprocess = ImmutableSegmentLoader.needPreprocess(segmentDirectory, indexLoadingConfig, schema);
+    closeSegmentDirectoryQuietly(segmentDirectory);
+    Set<String> refreshColumnSet = new HashSet<>();
+
+    for (FieldSpec fieldSpecInSchema : schema.getAllFieldSpecs()) {

Review Comment:
   Why not push down these additional checks into `needPreprocess()` / `needProcess()`?
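
   For example, a hypothetical combined entry point (built only from the checks already inlined in this method) could look like:

   ```java
   // Hypothetical helper folding the inline schema-vs-segment comparison into the
   // same call as ImmutableSegmentLoader.needPreprocess(), per the suggestion above.
   private static boolean needRefresh(SegmentDirectory segmentDirectory, IndexLoadingConfig indexLoadingConfig,
       Schema schema, SegmentMetadataImpl segmentMetadata)
       throws Exception {
     if (ImmutableSegmentLoader.needPreprocess(segmentDirectory, indexLoadingConfig, schema)) {
       return true;
     }
     for (FieldSpec fieldSpec : schema.getAllFieldSpecs()) {
       if (fieldSpec.isVirtualColumn()) {
         continue;
       }
       ColumnMetadata columnMetadata = segmentMetadata.getColumnMetadataFor(fieldSpec.getName());
       // New column, or an existing column whose data type changed in the schema.
       if (columnMetadata == null || columnMetadata.getFieldSpec().getDataType() != fieldSpec.getDataType()) {
         return true;
       }
     }
     return false;
   }
   ```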



##########
pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentrefresh/SegmentRefreshTaskExecutor.java:
##########
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.plugin.minion.tasks.segmentrefresh;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.apache.pinot.common.metadata.segment.SegmentZKMetadataCustomMapModifier;
+import org.apache.pinot.core.common.MinionConstants;
+import org.apache.pinot.core.minion.PinotTaskConfig;
+import org.apache.pinot.plugin.minion.tasks.BaseSingleSegmentConversionExecutor;
+import org.apache.pinot.plugin.minion.tasks.SegmentConversionResult;
+import org.apache.pinot.segment.local.indexsegment.immutable.ImmutableSegmentLoader;
+import org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.index.loader.IndexLoadingConfig;
+import org.apache.pinot.segment.local.segment.readers.PinotSegmentRecordReader;
+import org.apache.pinot.segment.spi.ColumnMetadata;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.segment.spi.index.metadata.SegmentMetadataImpl;
+import org.apache.pinot.segment.spi.loader.SegmentDirectoryLoaderContext;
+import org.apache.pinot.segment.spi.loader.SegmentDirectoryLoaderRegistry;
+import org.apache.pinot.segment.spi.store.SegmentDirectory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.env.PinotConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class SegmentRefreshTaskExecutor extends BaseSingleSegmentConversionExecutor {
+  private static final Logger LOGGER = LoggerFactory.getLogger(SegmentRefreshTaskExecutor.class);
+
+  private long _taskStartTime;
+
+  /**
+   * The code here currently covers segment refresh for the following cases:
+   * 1. Processing newly added columns.
+   * 2. Addition/removal of indexes.
+   * 3. Compatible datatype changes for existing columns.
+   */
+  @Override
+  protected SegmentConversionResult convert(PinotTaskConfig pinotTaskConfig, File indexDir, File workingDir)
+      throws Exception {
+    _eventObserver.notifyProgress(pinotTaskConfig, "Refreshing segment: " + indexDir);
+
+    // We set _taskStartTime before fetching the tableConfig. Task generation relies on tableConfig/schema updates
+    // happening after the last processed time, so we explicitly use the timestamp taken before fetching the
+    // tableConfig as the processedTime.
+    _taskStartTime = System.currentTimeMillis();
+    Map<String, String> configs = pinotTaskConfig.getConfigs();
+    String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY);
+    String segmentName = configs.get(MinionConstants.SEGMENT_NAME_KEY);
+    String taskType = pinotTaskConfig.getTaskType();
+
+    LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
+
+    TableConfig tableConfig = getTableConfig(tableNameWithType);
+    Schema schema = getSchema(tableNameWithType);
+
+    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(tableConfig, schema);
+    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir);
+    PinotConfiguration segmentDirectoryConfigs = indexLoadingConfig.getSegmentDirectoryConfigs();
+    SegmentDirectoryLoaderContext segmentLoaderContext =
+        new SegmentDirectoryLoaderContext.Builder().setTableConfig(indexLoadingConfig.getTableConfig())
+            .setSchema(schema)
+            .setInstanceId(indexLoadingConfig.getInstanceId())
+            .setSegmentName(segmentMetadata.getName())
+            .setSegmentCrc(segmentMetadata.getCrc())
+            .setSegmentDirectoryConfigs(segmentDirectoryConfigs)
+            .build();
+    SegmentDirectory segmentDirectory =
+        SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader().load(indexDir.toURI(), segmentLoaderContext);
+
+    // The BaseDefaultColumnHandler part of needPreprocess() does not process any changes to existing columns, like a
+    // datatype change or a change from dimension to metric, etc.
+    boolean needPreprocess = ImmutableSegmentLoader.needPreprocess(segmentDirectory, indexLoadingConfig, schema);
+    closeSegmentDirectoryQuietly(segmentDirectory);
+    Set<String> refreshColumnSet = new HashSet<>();
+
+    for (FieldSpec fieldSpecInSchema : schema.getAllFieldSpecs()) {
+      // Virtual columns are constructed while loading the segment, thus they do not exist in the record and should
+      // not be persisted to disk.
+      if (fieldSpecInSchema.isVirtualColumn()) {
+        continue;
+      }
+
+      String column = fieldSpecInSchema.getName();
+      ColumnMetadata columnMetadata = segmentMetadata.getColumnMetadataFor(column);
+      if (columnMetadata != null) {
+        FieldSpec fieldSpecInSegment = columnMetadata.getFieldSpec();
+
+        // Check that the data type and default value match.
+        FieldSpec.DataType dataTypeInSegment = fieldSpecInSegment.getDataType();
+        FieldSpec.DataType dataTypeInSchema = fieldSpecInSchema.getDataType();
+
+        // Column exists in segment.
+        if (dataTypeInSegment != dataTypeInSchema) {
+          // Check if we need to update the data type. A datatype change depends on the segment generation code
+          // converting the object to the destination datatype. If the existing data in the column is not compatible
+          // with the destination datatype, the refresh task will fail.
+          refreshColumnSet.add(column);
+        }
+
+        // TODO: Maybe we can support single-value to multi-value conversions and vice versa.
+      } else {
+        refreshColumnSet.add(column);
+      }
+    }
+
+    if (!needPreprocess && refreshColumnSet.isEmpty()) {
+      LOGGER.info("Skipping segment={}, table={} as it is up-to-date with new 
table/schema", segmentName,
+          tableNameWithType);
+      return new 
SegmentConversionResult.Builder().setTableNameWithType(tableNameWithType)
+          .setSegmentName(segmentName)
+          .build();
+    }
+
+    // Refresh the segment by rebuilding it from the existing records with the latest tableConfig/schema.
+    try (PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader()) {
+      recordReader.init(indexDir, null, null);
+      SegmentGeneratorConfig config = getSegmentGeneratorConfig(workingDir, tableConfig, segmentMetadata, segmentName,
+          getSchema(tableNameWithType));
+      SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
+      driver.init(config, recordReader);
+      driver.build();
+    }
+
+    File refreshedSegmentFile = new File(workingDir, segmentName);
+    SegmentConversionResult result = new SegmentConversionResult.Builder().setFile(refreshedSegmentFile)
+        .setTableNameWithType(tableNameWithType)
+        .setSegmentName(segmentName)
+        .build();
+
+    long endMillis = System.currentTimeMillis();
+    LOGGER.info("Finished task: {} with configs: {}. Total time: {}ms", 
taskType, configs,
+        (endMillis - _taskStartTime));
+
+    return result;
+  }
+
+  private static SegmentGeneratorConfig getSegmentGeneratorConfig(File workingDir, TableConfig tableConfig,
+      SegmentMetadataImpl segmentMetadata, String segmentName, Schema schema) {
+    tableConfig.getIndexingConfig().setCreateInvertedIndexDuringSegmentGeneration(true);

Review Comment:
   Can we add a comment for the reason to enable inverted index creation here?
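
   Something along these lines, based on the rationale already stated in the generator's javadoc (exact wording is a suggestion):

   ```java
   // Inverted index creation is disabled by default during segment generation (the
   // index is normally built at load time on the server). Since the refreshed segment
   // is expected to be fully up-to-date with the table config when it is uploaded,
   // force inverted index creation while rebuilding the segment.
   tableConfig.getIndexingConfig().setCreateInvertedIndexDuringSegmentGeneration(true);
   ```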



##########
pinot-plugins/pinot-minion-tasks/pinot-minion-builtin-tasks/src/main/java/org/apache/pinot/plugin/minion/tasks/segmentrefresh/SegmentRefreshTaskExecutor.java:
##########
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.plugin.minion.tasks.segmentrefresh;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.apache.pinot.common.metadata.segment.SegmentZKMetadataCustomMapModifier;
+import org.apache.pinot.core.common.MinionConstants;
+import org.apache.pinot.core.minion.PinotTaskConfig;
+import org.apache.pinot.plugin.minion.tasks.BaseSingleSegmentConversionExecutor;
+import org.apache.pinot.plugin.minion.tasks.SegmentConversionResult;
+import org.apache.pinot.segment.local.indexsegment.immutable.ImmutableSegmentLoader;
+import org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.index.loader.IndexLoadingConfig;
+import org.apache.pinot.segment.local.segment.readers.PinotSegmentRecordReader;
+import org.apache.pinot.segment.spi.ColumnMetadata;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.segment.spi.index.metadata.SegmentMetadataImpl;
+import org.apache.pinot.segment.spi.loader.SegmentDirectoryLoaderContext;
+import org.apache.pinot.segment.spi.loader.SegmentDirectoryLoaderRegistry;
+import org.apache.pinot.segment.spi.store.SegmentDirectory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.env.PinotConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class SegmentRefreshTaskExecutor extends BaseSingleSegmentConversionExecutor {
+  private static final Logger LOGGER = LoggerFactory.getLogger(SegmentRefreshTaskExecutor.class);
+
+  private long _taskStartTime;
+
+  /**
+   * The code here currently covers segment refresh for the following cases:
+   * 1. Processing newly added columns.
+   * 2. Addition/removal of indexes.
+   * 3. Compatible datatype changes for existing columns.
+   */
+  @Override
+  protected SegmentConversionResult convert(PinotTaskConfig pinotTaskConfig, File indexDir, File workingDir)
+      throws Exception {
+    _eventObserver.notifyProgress(pinotTaskConfig, "Refreshing segment: " + indexDir);
+
+    // We set _taskStartTime before fetching the tableConfig. Task generation relies on tableConfig/schema updates
+    // happening after the last processed time, so we explicitly use the timestamp taken before fetching the
+    // tableConfig as the processedTime.
+    _taskStartTime = System.currentTimeMillis();
+    Map<String, String> configs = pinotTaskConfig.getConfigs();
+    String tableNameWithType = configs.get(MinionConstants.TABLE_NAME_KEY);
+    String segmentName = configs.get(MinionConstants.SEGMENT_NAME_KEY);
+    String taskType = pinotTaskConfig.getTaskType();
+
+    LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
+
+    TableConfig tableConfig = getTableConfig(tableNameWithType);
+    Schema schema = getSchema(tableNameWithType);
+
+    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(tableConfig, schema);
+    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir);
+    PinotConfiguration segmentDirectoryConfigs = indexLoadingConfig.getSegmentDirectoryConfigs();
+    SegmentDirectoryLoaderContext segmentLoaderContext =
+        new SegmentDirectoryLoaderContext.Builder().setTableConfig(indexLoadingConfig.getTableConfig())
+            .setSchema(schema)
+            .setInstanceId(indexLoadingConfig.getInstanceId())
+            .setSegmentName(segmentMetadata.getName())
+            .setSegmentCrc(segmentMetadata.getCrc())
+            .setSegmentDirectoryConfigs(segmentDirectoryConfigs)
+            .build();
+    SegmentDirectory segmentDirectory =
+        SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader().load(indexDir.toURI(), segmentLoaderContext);
+
+    // The BaseDefaultColumnHandler part of needPreprocess() does not process any changes to existing columns, like a
+    // datatype change or a change from dimension to metric, etc.
+    boolean needPreprocess = ImmutableSegmentLoader.needPreprocess(segmentDirectory, indexLoadingConfig, schema);
+    closeSegmentDirectoryQuietly(segmentDirectory);
+    Set<String> refreshColumnSet = new HashSet<>();
+
+    for (FieldSpec fieldSpecInSchema : schema.getAllFieldSpecs()) {
+      // Virtual columns are constructed while loading the segment, thus they do not exist in the record and should
+      // not be persisted to disk.
+      if (fieldSpecInSchema.isVirtualColumn()) {
+        continue;
+      }
+
+      String column = fieldSpecInSchema.getName();
+      ColumnMetadata columnMetadata = segmentMetadata.getColumnMetadataFor(column);
+      if (columnMetadata != null) {
+        FieldSpec fieldSpecInSegment = columnMetadata.getFieldSpec();
+
+        // Check that the data type and default value match.
+        FieldSpec.DataType dataTypeInSegment = fieldSpecInSegment.getDataType();
+        FieldSpec.DataType dataTypeInSchema = fieldSpecInSchema.getDataType();
+
+        // Column exists in segment.
+        if (dataTypeInSegment != dataTypeInSchema) {
+          // Check if we need to update the data type. A datatype change depends on the segment generation code
+          // converting the object to the destination datatype. If the existing data in the column is not compatible
+          // with the destination datatype, the refresh task will fail.
+          refreshColumnSet.add(column);
+        }
+
+        // TODO: Maybe we can support single-value to multi-value conversions and vice versa.
+      } else {
+        refreshColumnSet.add(column);
+      }
+    }
+
+    if (!needPreprocess && refreshColumnSet.isEmpty()) {
+      LOGGER.info("Skipping segment={}, table={} as it is up-to-date with new 
table/schema", segmentName,
+          tableNameWithType);
+      return new 
SegmentConversionResult.Builder().setTableNameWithType(tableNameWithType)
+          .setSegmentName(segmentName)
+          .build();
+    }
+
+    // Refresh the segment by rebuilding it from the existing records with the latest tableConfig/schema.
+    try (PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader()) {
+      recordReader.init(indexDir, null, null);
+      SegmentGeneratorConfig config = getSegmentGeneratorConfig(workingDir, tableConfig, segmentMetadata, segmentName,
+          getSchema(tableNameWithType));
+      SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
+      driver.init(config, recordReader);
+      driver.build();
+    }
+
+    File refreshedSegmentFile = new File(workingDir, segmentName);
+    SegmentConversionResult result = new SegmentConversionResult.Builder().setFile(refreshedSegmentFile)
+        .setTableNameWithType(tableNameWithType)
+        .setSegmentName(segmentName)
+        .build();
+
+    long endMillis = System.currentTimeMillis();
+    LOGGER.info("Finished task: {} with configs: {}. Total time: {}ms", 
taskType, configs,
+        (endMillis - _taskStartTime));
+
+    return result;
+  }
+
+  private static SegmentGeneratorConfig getSegmentGeneratorConfig(File workingDir, TableConfig tableConfig,

Review Comment:
   Consider extracting this function into a shared helper to avoid duplicating it with `UpsertCompactionTask`.
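
   For example, a minimal sketch of the extraction (class name and location are hypothetical; the body mirrors what this method appears to do):

   ```java
   // Hypothetical shared utility so SegmentRefreshTask and UpsertCompactionTask can
   // reuse one implementation instead of duplicating getSegmentGeneratorConfig().
   public final class SegmentGeneratorConfigUtils {
     private SegmentGeneratorConfigUtils() {
     }

     public static SegmentGeneratorConfig create(File workingDir, TableConfig tableConfig, Schema schema,
         String segmentName) {
       // Inverted index creation is forced here because it is disabled by default
       // during segment generation (see the earlier review comment).
       tableConfig.getIndexingConfig().setCreateInvertedIndexDuringSegmentGeneration(true);
       SegmentGeneratorConfig config = new SegmentGeneratorConfig(tableConfig, schema);
       config.setOutDir(workingDir.getPath());
       config.setSegmentName(segmentName);
       return config;
     }
   }
   ```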



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

