krishan1390 commented on code in PR #16857: URL: https://github.com/apache/pinot/pull/16857#discussion_r2413154419
########## pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/DistributedTaskLockManager.java: ########## @@ -0,0 +1,288 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pinot.controller.helix.core.minion; + +import com.google.common.annotations.VisibleForTesting; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.apache.helix.AccessOption; +import org.apache.helix.store.zk.ZkHelixPropertyStore; +import org.apache.helix.zookeeper.datamodel.ZNRecord; +import org.apache.pinot.common.metadata.ZKMetadataProvider; +import org.apache.pinot.common.metrics.ControllerMetrics; +import org.apache.pinot.common.metrics.ControllerTimer; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Manages distributed locks for minion task generation using ZooKeeper ephemeral nodes that automatically disappear + * when the controller session ends or when the lock is explicitly released. This approach provides automatic cleanup + * and is suitable for long-running task generation. 
+ * Locks are at the table level, to ensure that only one type of task can be generated per table at any given time. + * This is to prevent task types which shouldn't run in parallel from being generated at the same time. + * <p> + * ZK EPHEMERAL Lock Node: + * <ul> + * <li>Every lock is created at the table level with the name: {tableName}-Lock, under the base path + * MINION_TASK_METADATA within the PROPERTYSTORE. + * <li>If the propertyStore::create() call returns true, that means the lock node was successfully created and the + * lock belongs to the current controller, otherwise it was not. If the lock node already exists, this will return + * false. No clean-up of the lock node is needed if the propertyStore::create() call returns false. + * <li>The locks are EPHEMERAL in nature, meaning that once the session with ZK is lost, the lock is automatically + * cleaned up. Scenarios when the ZK session can be lost: a) controller shutdown, b) controller crash, c) ZK session + * expiry (e.g. long GC pauses can cause this). This property helps ensure that the lock is released under + * controller failure. 
+ * </ul> + * <p> + */ +public class DistributedTaskLockManager { + private static final Logger LOGGER = LoggerFactory.getLogger(DistributedTaskLockManager.class); + + // Lock paths are constructed using ZKMetadataProvider + private static final String LOCK_OWNER_KEY = "lockOwner"; + private static final String LOCK_CREATION_TIME_MS = "lockCreationTimeMs"; + + private final ZkHelixPropertyStore<ZNRecord> _propertyStore; + private final String _controllerInstanceId; + private final ControllerMetrics _controllerMetrics; + + public DistributedTaskLockManager(ZkHelixPropertyStore<ZNRecord> propertyStore, String controllerInstanceId) { + _propertyStore = propertyStore; + _controllerInstanceId = controllerInstanceId; + _controllerMetrics = ControllerMetrics.get(); + } + + /** + * Attempts to acquire a distributed lock at the table level for task generation using session-based locking. + * The lock is at the table level instead of the task level to ensure that only a single task can be generated for + * a given table at any time. Certain tasks depend on other tasks not being generated at the same time + * The lock is held until explicitly released or the controller session ends. 
+ * + * @param tableNameWithType the table name with type + * @return TaskLock object if successful, null if lock could not be acquired + */ + @Nullable + public TaskLock acquireLock(String tableNameWithType) { + LOGGER.info("Attempting to acquire task generation lock for table: {} by controller: {}", tableNameWithType, + _controllerInstanceId); + + try { + // Check if task generation is already in progress + if (isTaskGenerationInProgress(tableNameWithType)) { + LOGGER.info("Task generation already in progress for: {} by this or another controller", tableNameWithType); + return null; + } + + // Try to acquire the lock using ephemeral node + TaskLock lock = tryAcquireSessionBasedLock(tableNameWithType); + if (lock != null) { + LOGGER.info("Successfully acquired task generation lock for table: {} by controller: {}", tableNameWithType, + _controllerInstanceId); + return lock; + } else { + LOGGER.warn("Could not acquire lock for table: {} - another controller must hold it", tableNameWithType); + } + } catch (Exception e) { + LOGGER.error("Error while trying to acquire task lock for table: {}", tableNameWithType, e); + } + return null; + } + + private String getLockPath(String tableNameForPath) { + return ZKMetadataProvider.constructPropertyStorePathForMinionTaskGenerationLock(tableNameForPath); + } + + /** + * Releases a previously acquired session-based lock and marks task generation as completed. 
+ * + * @param lock the lock to release + * @return true if successfully released, false otherwise + */ + public boolean releaseLock(TaskLock lock) { + if (lock == null) { + return true; + } + + String tableNameWithType = lock.getTableNameWithType(); + String lockNode = lock.getLockZNodePath(); + + // Remove the ephemeral lock node + boolean status = true; + if (lockNode != null) { + try { + if (_propertyStore.exists(lockNode, AccessOption.EPHEMERAL)) { + status = _propertyStore.remove(lockNode, AccessOption.EPHEMERAL); Review Comment: shall we add retries (with some delay factor) here to handle temporary failures. I guess we don't need a separate alert if status = false because the metric MINION_TASK_GENERATION_LOCK_HELD_ELAPSED_TIME_MS can be used to catch such issues. ########## pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotTaskRestletResource.java: ########## @@ -785,4 +785,24 @@ public SuccessResponse deleteTask( _pinotHelixTaskResourceManager.deleteTask(taskName, forceDelete); return new SuccessResponse("Successfully deleted task: " + taskName); } + + @DELETE + @Path("/tasks/lock/forceRelease") + @Authorize(targetType = TargetType.TABLE, action = Actions.Table.FORCE_RELEASE_TASK_GENERATION_LOCK, + paramName = "tableNameWithType") + @Produces(MediaType.APPLICATION_JSON) + @Authenticate(AccessType.DELETE) + @ApiOperation("Force releases the task generation lock for a given table. Call this API with caution") + public SuccessResponse forceReleaseTaskGenerationLock( + @ApiParam(value = "Table name (with type suffix).", required = true) + @QueryParam("tableNameWithType") String tableNameWithType) { + boolean lockReleased = _pinotTaskManager.forceReleaseLock(tableNameWithType); Review Comment: can we add a test case for forceReleaseLock() when lock is held by another thread generating task? if the test case calls this API, that would be great. 
otherwise we can manually test this API to validate the API itself is callable correctly ########## pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/DistributedTaskLockManager.java: ########## @@ -0,0 +1,288 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.pinot.controller.helix.core.minion; + +import com.google.common.annotations.VisibleForTesting; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.apache.helix.AccessOption; +import org.apache.helix.store.zk.ZkHelixPropertyStore; +import org.apache.helix.zookeeper.datamodel.ZNRecord; +import org.apache.pinot.common.metadata.ZKMetadataProvider; +import org.apache.pinot.common.metrics.ControllerMetrics; +import org.apache.pinot.common.metrics.ControllerTimer; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * Manages distributed locks for minion task generation using ZooKeeper ephemeral nodes that automatically disappear + * when the controller session ends or when the lock is explicitly released. This approach provides automatic cleanup + * and is suitable for long-running task generation. 
+ * Locks are at the table level, to ensure that only one type of task can be generated per table at any given time. + * This is to prevent task types which shouldn't run in parallel from being generated at the same time. + * <p> + * ZK EPHEMERAL Lock Node: + * <ul> + * <li>Every lock is created at the table level with the name: {tableName}-Lock, under the base path + * MINION_TASK_METADATA within the PROPERTYSTORE. + * <li>If the propertyStore::create() call returns true, that means the lock node was successfully created and the + * lock belongs to the current controller, otherwise it was not. If the lock node already exists, this will return + * false. No clean-up of the lock node is needed if the propertyStore::create() call returns false. + * <li>The locks are EPHEMERAL in nature, meaning that once the session with ZK is lost, the lock is automatically + * cleaned up. Scenarios when the ZK session can be lost: a) controller shutdown, b) controller crash, c) ZK session + * expiry (e.g. long GC pauses can cause this). This property helps ensure that the lock is released under + * controller failure. 
+ * </ul> + * <p> + */ +public class DistributedTaskLockManager { + private static final Logger LOGGER = LoggerFactory.getLogger(DistributedTaskLockManager.class); + + // Lock paths are constructed using ZKMetadataProvider + private static final String LOCK_OWNER_KEY = "lockOwner"; + private static final String LOCK_CREATION_TIME_MS = "lockCreationTimeMs"; + + private final ZkHelixPropertyStore<ZNRecord> _propertyStore; + private final String _controllerInstanceId; + private final ControllerMetrics _controllerMetrics; + + public DistributedTaskLockManager(ZkHelixPropertyStore<ZNRecord> propertyStore, String controllerInstanceId) { + _propertyStore = propertyStore; + _controllerInstanceId = controllerInstanceId; + _controllerMetrics = ControllerMetrics.get(); + } + + /** + * Attempts to acquire a distributed lock at the table level for task generation using session-based locking. + * The lock is at the table level instead of the task level to ensure that only a single task can be generated for + * a given table at any time. Certain tasks depend on other tasks not being generated at the same time + * The lock is held until explicitly released or the controller session ends. 
+ * + * @param tableNameWithType the table name with type + * @return TaskLock object if successful, null if lock could not be acquired + */ + @Nullable + public TaskLock acquireLock(String tableNameWithType) { + LOGGER.info("Attempting to acquire task generation lock for table: {} by controller: {}", tableNameWithType, + _controllerInstanceId); + + try { + // Check if task generation is already in progress + if (isTaskGenerationInProgress(tableNameWithType)) { + LOGGER.info("Task generation already in progress for: {} by this or another controller", tableNameWithType); + return null; + } + + // Try to acquire the lock using ephemeral node + TaskLock lock = tryAcquireSessionBasedLock(tableNameWithType); + if (lock != null) { + LOGGER.info("Successfully acquired task generation lock for table: {} by controller: {}", tableNameWithType, + _controllerInstanceId); + return lock; + } else { + LOGGER.warn("Could not acquire lock for table: {} - another controller must hold it", tableNameWithType); + } + } catch (Exception e) { + LOGGER.error("Error while trying to acquire task lock for table: {}", tableNameWithType, e); + } + return null; + } + + private String getLockPath(String tableNameForPath) { + return ZKMetadataProvider.constructPropertyStorePathForMinionTaskGenerationLock(tableNameForPath); + } + + /** + * Releases a previously acquired session-based lock and marks task generation as completed. 
+ * + * @param lock the lock to release + * @return true if successfully released, false otherwise + */ + public boolean releaseLock(TaskLock lock) { + if (lock == null) { + return true; + } + + String tableNameWithType = lock.getTableNameWithType(); + String lockNode = lock.getLockZNodePath(); + + // Remove the ephemeral lock node + boolean status = true; + if (lockNode != null) { + try { + if (_propertyStore.exists(lockNode, AccessOption.EPHEMERAL)) { + status = _propertyStore.remove(lockNode, AccessOption.EPHEMERAL); + LOGGER.info("Tried to removed ephemeral lock node: {}, for table: {} by controller: {}, removal success: {}", + lockNode, tableNameWithType, _controllerInstanceId, status); + } else { + LOGGER.warn("Ephemeral lock node: {} does not exist for table: {}, nothing to remove", + lockNode, tableNameWithType); + } + } catch (Exception e) { + status = false; + LOGGER.warn("Exception while trying to remove ephemeral lock node: {}", lockNode, e); + } + } else { + LOGGER.warn("Lock node path seems to be null for task lock: {}, treating release as a no-op", lock); + } + + return status; + } + + /** + * Force release the lock without checking if any tasks are in progress + */ + public boolean forceReleaseLock(String tableNameWithType) { + LOGGER.info("Trying to force release the lock for table: {}", tableNameWithType); + String lockPath = getLockPath(tableNameWithType); + + boolean released = true; + if (!_propertyStore.exists(lockPath, AccessOption.EPHEMERAL)) { + LOGGER.info("No lock ZNode: {} found for table: {}, nothing to force release", lockPath, tableNameWithType); Review Comment: we can probably throw an exception. that way client will know what actually happened. otherwise returning either true / false in such a case might be a false signal. 
similarly an exception can be thrown if _propertyStore.remove returns false ########## pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/minion/PinotTaskManager.java: ########## @@ -239,40 +254,76 @@ public Map<String, String> createTask(String taskType, String tableName, @Nullab // Example usage in BaseTaskGenerator.getNumSubTasks() taskConfigs.put(MinionConstants.TRIGGERED_BY, CommonConstants.TaskTriggers.ADHOC_TRIGGER.name()); - List<PinotTaskConfig> pinotTaskConfigs = taskGenerator.generateTasks(tableConfig, taskConfigs); - if (pinotTaskConfigs.isEmpty()) { - LOGGER.warn("No ad-hoc task generated for task type: {}", taskType); - continue; + // Acquire distributed lock before proceeding with ad-hoc task generation + // Need locking to protect against: + // 1. Race conditions with periodic task generation + // 2. Multiple simultaneous ad-hoc requests + // 3. Leadership changes during task generation + DistributedTaskLockManager.TaskLock lock = null; + if (_distributedTaskLockManager != null) { + lock = _distributedTaskLockManager.acquireLock(tableNameWithType); + if (lock == null) { + String message = String.format("Could not acquire table level distributed lock for ad-hoc task type: %s, " + + "table: %s. Another controller is likely generating tasks for this table. Please try again later.", + taskType, tableNameWithType); + LOGGER.warn(message); + throw new RuntimeException(message); + } + LOGGER.info("Acquired table level distributed lock for ad-hoc task type: {} on table: {}", taskType, + tableNameWithType); } - int maxNumberOfSubTasks = taskGenerator.getMaxAllowedSubTasksPerTask(); - if (pinotTaskConfigs.size() > maxNumberOfSubTasks) { - String message = String.format( - "Number of tasks generated for task type: %s for table: %s is %d, which is greater than the " - + "maximum number of tasks to schedule: %d. 
This is " - + "controlled by the cluster config %s which is set based on controller's performance.", taskType, - tableName, pinotTaskConfigs.size(), maxNumberOfSubTasks, MinionConstants.MAX_ALLOWED_SUB_TASKS_KEY); - message += "Optimise the task config or reduce tableMaxNumTasks to avoid the error"; - // We throw an exception to notify the user - // This is to ensure that the user is aware of the task generation limit - throw new RuntimeException(message); + + try { + List<PinotTaskConfig> pinotTaskConfigs = taskGenerator.generateTasks(tableConfig, taskConfigs); + if (pinotTaskConfigs.isEmpty()) { + LOGGER.warn("No ad-hoc task generated for task type: {}, for table: {}", taskType, tableNameWithType); + continue; + } + int maxNumberOfSubTasks = taskGenerator.getMaxAllowedSubTasksPerTask(); + if (pinotTaskConfigs.size() > maxNumberOfSubTasks) { + String message = String.format( + "Number of tasks generated for task type: %s for table: %s is %d, which is greater than the " + + "maximum number of tasks to schedule: %d. 
This is controlled by the cluster config %s which is set " + + "based on controller's performance.", taskType, tableNameWithType, pinotTaskConfigs.size(), + maxNumberOfSubTasks, MinionConstants.MAX_ALLOWED_SUB_TASKS_KEY); + message += "Optimise the task config or reduce tableMaxNumTasks to avoid the error"; + // We throw an exception to notify the user + // This is to ensure that the user is aware of the task generation limit + throw new RuntimeException(message); + } + pinotTaskConfigs.forEach(pinotTaskConfig -> pinotTaskConfig.getConfigs() + .computeIfAbsent(MinionConstants.TRIGGERED_BY, k -> CommonConstants.TaskTriggers.ADHOC_TRIGGER.name())); + addDefaultsToTaskConfig(pinotTaskConfigs); + LOGGER.info("Submitting ad-hoc task for task type: {} with task configs: {}", taskType, pinotTaskConfigs); + _controllerMetrics.addMeteredTableValue(taskType, ControllerMeter.NUMBER_ADHOC_TASKS_SUBMITTED, 1); + responseMap.put(tableNameWithType, + _helixTaskResourceManager.submitTask(parentTaskName, pinotTaskConfigs, minionInstanceTag, + taskGenerator.getTaskTimeoutMs(minionInstanceTag), + taskGenerator.getNumConcurrentTasksPerInstance(minionInstanceTag), + taskGenerator.getMaxAttemptsPerTask(minionInstanceTag))); + } finally { + if (!responseMap.containsKey(tableNameWithType)) { + LOGGER.warn("No task submitted for tableNameWithType: {}", tableNameWithType); + } + if (lock != null) { + _distributedTaskLockManager.releaseLock(lock); + } } - pinotTaskConfigs.forEach(pinotTaskConfig -> pinotTaskConfig.getConfigs() - .computeIfAbsent(MinionConstants.TRIGGERED_BY, k -> CommonConstants.TaskTriggers.ADHOC_TRIGGER.name())); - addDefaultsToTaskConfig(pinotTaskConfigs); - LOGGER.info("Submitting ad-hoc task for task type: {} with task configs: {}", taskType, pinotTaskConfigs); - _controllerMetrics.addMeteredTableValue(taskType, ControllerMeter.NUMBER_ADHOC_TASKS_SUBMITTED, 1); - responseMap.put(tableNameWithType, - _helixTaskResourceManager.submitTask(parentTaskName, pinotTaskConfigs, 
minionInstanceTag, - taskGenerator.getTaskTimeoutMs(minionInstanceTag), - taskGenerator.getNumConcurrentTasksPerInstance(minionInstanceTag), - taskGenerator.getMaxAttemptsPerTask(minionInstanceTag))); } if (responseMap.isEmpty()) { LOGGER.warn("No task submitted for tableName: {}", tableName); } return responseMap; } + public boolean forceReleaseLock(String tableNameWithType) { + if (_distributedTaskLockManager == null) { + LOGGER.info("Distributed task lock manager is disabled, no locks to release"); Review Comment: same comment as earlier. might be useful to throw this as an exception -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
