vvivekiyer commented on code in PR #15109:
URL: https://github.com/apache/pinot/pull/15109#discussion_r2117357912


##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,295 @@
+/**

Review Comment:
   I think PinotQueryWorkloadRestletResource would be a better name.



##########
pinot-common/src/main/java/org/apache/pinot/common/utils/config/QueryWorkloadConfigUtils.java:
##########
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.utils.config;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.base.Preconditions;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http.HttpVersion;
+import org.apache.hc.core5.http.io.support.ClassicRequestBuilder;
+import org.apache.helix.zookeeper.datamodel.ZNRecord;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.common.utils.http.HttpClientConfig;
+import org.apache.pinot.common.utils.tls.TlsUtils;
+import org.apache.pinot.spi.config.workload.EnforcementProfile;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.PropagationScheme;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.apache.pinot.spi.utils.retry.RetryPolicy;
+import org.slf4j.Logger;
+
+
+public class QueryWorkloadConfigUtils {
+  private QueryWorkloadConfigUtils() {
+  }
+
+  private static final Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(QueryWorkloadConfigUtils.class);
+  private static final HttpClient HTTP_CLIENT = new 
HttpClient(HttpClientConfig.DEFAULT_HTTP_CLIENT_CONFIG,
+          TlsUtils.getSslContext());
+
+  /**
+   * Converts a ZNRecord into a QueryWorkloadConfig object by extracting 
mapFields.
+   *
+   * @param znRecord The ZNRecord containing workload config data.
+   * @return A QueryWorkloadConfig object.
+   */
+  public static QueryWorkloadConfig fromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String queryWorkloadName = 
znRecord.getSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME);
+    Preconditions.checkNotNull(queryWorkloadName, "queryWorkloadName cannot be 
null");
+    String nodeConfigsJson = 
znRecord.getSimpleField(QueryWorkloadConfig.NODE_CONFIGS);
+    Preconditions.checkNotNull(nodeConfigsJson, "nodeConfigs cannot be null");
+    try {
+      List<NodeConfig> nodeConfigs = JsonUtils.stringToObject(nodeConfigsJson, 
new TypeReference<>() { });
+      return new QueryWorkloadConfig(queryWorkloadName, nodeConfigs);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
QueryWorkloadConfig", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Updates a ZNRecord with the fields from a WorkloadConfig object.
+   *
+   * @param queryWorkloadConfig The QueryWorkloadConfig object to convert.
+   * @param znRecord The ZNRecord to update.
+   */
+  public static void updateZNRecordWithWorkloadConfig(ZNRecord znRecord, 
QueryWorkloadConfig queryWorkloadConfig) {
+    znRecord.setSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME, 
queryWorkloadConfig.getQueryWorkloadName());
+    try {
+      znRecord.setSimpleField(QueryWorkloadConfig.NODE_CONFIGS,
+          JsonUtils.objectToString(queryWorkloadConfig.getNodeConfigs()));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert 
QueryWorkloadConfig : %s to ZNRecord",
+          queryWorkloadConfig);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static void updateZNRecordWithInstanceCost(ZNRecord znRecord, String 
queryWorkloadName,
+      InstanceCost instanceCost) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    Preconditions.checkNotNull(instanceCost, "InstanceCost cannot be null");
+    try {
+      znRecord.setSimpleField(QueryWorkloadRefreshMessage.QUERY_WORKLOAD_NAME, 
queryWorkloadName);
+      znRecord.setSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST, 
JsonUtils.objectToString(instanceCost));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert InstanceCost : %s 
to ZNRecord",
+          instanceCost);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static InstanceCost getInstanceCostFromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String instanceCostJson = 
znRecord.getSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST);
+    Preconditions.checkNotNull(instanceCostJson, "InstanceCost cannot be 
null");
+    try {
+      return JsonUtils.stringToObject(instanceCostJson, InstanceCost.class);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
InstanceCost", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static List<QueryWorkloadConfig> 
getQueryWorkloadConfigsFromController(String controllerUrl, String instanceId,

Review Comment:
   unused function? Is it for some testing?



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.helix.model.InstanceConfig;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The QueryWorkloadManager is responsible for managing the query workload 
configuration and propagating/computing
+ * the cost to be enforced by relevant instances based on the propagation 
scheme.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private final PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: To make this configurable once we have multiple cost splitters 
implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkloadUpdateMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, InstanceCost> instanceCostMap = 
_costSplitter.computeInstanceCostMap(nodeConfig, instances);
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instanceCostMap.entrySet().stream()
+          .collect(Collectors.toMap(Map.Entry::getKey, entry -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.REFRESH_QUERY_WORKLOAD_MSG_SUB_TYPE, 
entry.getValue())));
+      // Send the QueryWorkloadRefreshMessage to the instances
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate delete workload refresh message for the given 
queryWorkloadConfig
+   * @param queryWorkloadConfig The query workload configuration to delete
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Send the {@link QueryWorkloadRefreshMessage} with 
DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE to the instances
+   */
+  public void propagateDeleteWorkloadMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instances.stream()
+          .collect(Collectors.toMap(instance -> instance, instance -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE, 
null)));
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate the workload for the given table name, it does fast exits if 
queryWorkloadConfigs is empty
+   * @param tableName The table name to propagate the workload for, it can be 
a rawTableName or a tableNameWithType
+   * if rawTableName is provided, it will resolve all available tableTypes and 
propagate the workload for each tableType
+   *
+   * This method performs the following steps:
+   * 1. Find all the helix tags associated with the table
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Propagate the workload cost for instances associated with the workloads
+   */
+  public void propagateWorkloadFor(String tableName) {
+    try {
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+          return;
+      }
+      // Get the helixTags associated with the table
+      List<String> helixTags = 
PropagationUtils.getHelixTagsForTable(_pinotHelixResourceManager, tableName);
+      // Find all workloads associated with the helix tags
+      Set<QueryWorkloadConfig> queryWorkloadConfigsForTags =
+          
PropagationUtils.getQueryWorkloadConfigsForTags(_pinotHelixResourceManager, 
helixTags, queryWorkloadConfigs);
+      // Propagate the workload for each QueryWorkloadConfig
+      for (QueryWorkloadConfig queryWorkloadConfig : 
queryWorkloadConfigsForTags) {
+        propagateWorkloadUpdateMessage(queryWorkloadConfig);
+      }
+    } catch (Exception e) {
+      String errorMsg = String.format("Failed to propagate workload for table: 
%s", tableName);
+      LOGGER.error(errorMsg, e);
+      throw new RuntimeException(errorMsg, e);
+    }
+  }
+
+  /**
+   * Get all the workload costs associated with the given instance and node 
type
+   * 1. Find all the helix tags associated with the instance
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Find the instance associated with the {@link QueryWorkloadConfig} and 
node type
+   *
+   * @param instanceName The instance name to get the workload costs for
+   * @param nodeType {@link NodeConfig.Type} The node type to get the workload 
costs for
+   * @return A map of workload name to {@link InstanceCost} for the given 
instance and node type
+   */
+  public Map<String, InstanceCost> getWorkloadToInstanceCostFor(String 
instanceName, NodeConfig.Type nodeType) {

Review Comment:
   This method seems out of place here. Can we move this to QueryWorkloadConfigUtils?



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.helix.model.InstanceConfig;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The QueryWorkloadManager is responsible for managing the query workload 
configuration and propagating/computing
+ * the cost to be enforced by relevant instances based on the propagation 
scheme.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private final PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: To make this configurable once we have multiple cost splitters 
implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkloadUpdateMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, InstanceCost> instanceCostMap = 
_costSplitter.computeInstanceCostMap(nodeConfig, instances);
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instanceCostMap.entrySet().stream()
+          .collect(Collectors.toMap(Map.Entry::getKey, entry -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.REFRESH_QUERY_WORKLOAD_MSG_SUB_TYPE, 
entry.getValue())));
+      // Send the QueryWorkloadRefreshMessage to the instances
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate delete workload refresh message for the given 
queryWorkloadConfig
+   * @param queryWorkloadConfig The query workload configuration to delete
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Send the {@link QueryWorkloadRefreshMessage} with 
DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE to the instances
+   */
+  public void propagateDeleteWorkloadMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instances.stream()
+          .collect(Collectors.toMap(instance -> instance, instance -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE, 
null)));
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate the workload for the given table name, it does fast exits if 
queryWorkloadConfigs is empty
+   * @param tableName The table name to propagate the workload for, it can be 
a rawTableName or a tableNameWithType
+   * if rawTableName is provided, it will resolve all available tableTypes and 
propagate the workload for each tableType
+   *
+   * This method performs the following steps:
+   * 1. Find all the helix tags associated with the table
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Propagate the workload cost for instances associated with the workloads
+   */
+  public void propagateWorkloadFor(String tableName) {
+    try {
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+          return;
+      }
+      // Get the helixTags associated with the table
+      List<String> helixTags = 
PropagationUtils.getHelixTagsForTable(_pinotHelixResourceManager, tableName);
+      // Find all workloads associated with the helix tags
+      Set<QueryWorkloadConfig> queryWorkloadConfigsForTags =
+          
PropagationUtils.getQueryWorkloadConfigsForTags(_pinotHelixResourceManager, 
helixTags, queryWorkloadConfigs);
+      // Propagate the workload for each QueryWorkloadConfig
+      for (QueryWorkloadConfig queryWorkloadConfig : 
queryWorkloadConfigsForTags) {
+        propagateWorkloadUpdateMessage(queryWorkloadConfig);
+      }
+    } catch (Exception e) {
+      String errorMsg = String.format("Failed to propagate workload for table: 
%s", tableName);
+      LOGGER.error(errorMsg, e);
+      throw new RuntimeException(errorMsg, e);
+    }
+  }
+
+  /**
+   * Get all the workload costs associated with the given instance and node 
type
+   * 1. Find all the helix tags associated with the instance
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Find the instance associated with the {@link QueryWorkloadConfig} and 
node type
+   *
+   * @param instanceName The instance name to get the workload costs for
+   * @param nodeType {@link NodeConfig.Type} The node type to get the workload 
costs for
+   * @return A map of workload name to {@link InstanceCost} for the given 
instance and node type
+   */
+  public Map<String, InstanceCost> getWorkloadToInstanceCostFor(String 
instanceName, NodeConfig.Type nodeType) {
+    try {
+      Map<String, InstanceCost> workloadToInstanceCostMap = new HashMap<>();
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {

Review Comment:
   Isn't this guaranteed to be an empty list (and never null)? Same in the 
RestletResource class. 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java:
##########
@@ -2209,6 +2219,8 @@ public void setExistingTableConfig(TableConfig 
tableConfig, int expectedVersion)
 
     // Send update query quota message if quota is specified
     sendTableConfigRefreshMessage(tableNameWithType);
+    // TODO: Propagate workload for tables if there is change is change 
instance characteristics
+    _queryWorkloadManager.propagateWorkloadFor(tableNameWithType);
   }
 
   public void deleteUser(String username) {

Review Comment:
   We should propagate workload costs even when a table is deleted, right? Because the instances change?



##########
pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java:
##########
@@ -3837,6 +3849,7 @@ public RebalanceResult rebalanceTable(String 
tableNameWithType, TableConfig tabl
     TableRebalancer tableRebalancer =
         new TableRebalancer(_helixZkManager, zkBasedTableRebalanceObserver, 
_controllerMetrics, _rebalancePreChecker,
             _tableSizeReader);
+    _queryWorkloadManager.propagateWorkloadFor(tableNameWithType);

Review Comment:
   Hmmm, we need to also update our impact-free rebalancer to propagate costs. Can you please take that AI?



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.common.utils.config.QueryWorkloadConfigUtils;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+        return JsonUtils.objectToString(Map.of());
+      }
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to specific query workload config

Review Comment:
   Typo: API to **fetch** a specific...



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.common.utils.config.QueryWorkloadConfigUtils;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+        return JsonUtils.objectToString(Map.of());
+      }
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "brokerNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "serverNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=serverNode
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,
+      @QueryParam("nodeType") String nodeTypeString, @Context HttpHeaders 
httpHeaders) {
+    try {
+      NodeConfig.Type nodeType = NodeConfig.Type.forValue(nodeTypeString);
+      Map<String, InstanceCost> workloadToInstanceCostMap = 
_pinotHelixResourceManager.getQueryWorkloadManager()
+          .getWorkloadToInstanceCostFor(instanceName, nodeType);
+      if (workloadToInstanceCostMap == null || 
workloadToInstanceCostMap.isEmpty()) {
+        throw new ControllerApplicationException(LOGGER, "No workload configs 
found for instance: " + instanceName,
+            Response.Status.NOT_FOUND, null);
+      }
+      return JsonUtils.objectToString(workloadToInstanceCostMap);
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for instance: %s, error: %s",
+            instanceName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+  /**
+   * Updates the query workload config
+   * @param requestString JSON string representing the QueryWorkloadConfig
+   * Example request:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *    {
+   *       "nodeType" : "brokerNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "serverNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   *
+   */
+  @POST
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.UPDATE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.UPDATE)
+  @ApiOperation(value = "Update query workload config", notes = "Update 
workload config for the workload name")
+  public Response updateQueryWorkloadConfig(String requestString, @Context 
HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to update queryWorkloadConfig with 
request: {}", requestString);
+      QueryWorkloadConfig queryWorkloadConfig = 
JsonUtils.stringToObject(requestString, QueryWorkloadConfig.class);
+      List<String> validationErrors = 
QueryWorkloadConfigUtils.validateQueryWorkloadConfig(queryWorkloadConfig);
+      if (!validationErrors.isEmpty()) {
+        String errorMessage = String.format("Invalid query workload config: 
%s", validationErrors);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.BAD_REQUEST, null);
+      }
+      _pinotHelixResourceManager.setQueryWorkloadConfig(queryWorkloadConfig);
+      String successMessage = String.format("Query Workload config updated 
successfully for workload: %s",
+          queryWorkloadConfig.getQueryWorkloadName());
+      LOGGER.info(successMessage);
+      return Response.ok().entity(successMessage).build();
+    } catch (Exception e) {
+      String errorMessage = String.format("Error when updating query workload 
request: %s, error: %s",
+          requestString, e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * Deletes the query workload config
+   * @param queryWorkloadName Name of the query workload to be deleted
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   */
+  @DELETE
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.DELETE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.DELETE)
+  @ApiOperation(value = "Delete query workload config", notes = "Delete 
workload config for the workload name")
+  public Response deleteQueryWorkloadConfig(@PathParam("queryWorkloadName") 
String queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      _pinotHelixResourceManager.deleteQueryWorkloadConfig(queryWorkloadName);
+      String successMessage = String.format("Query Workload config deleted 
successfully for workload: %s",
+          queryWorkloadName);
+      LOGGER.info(successMessage);
+      return Response.ok().entity(successMessage).build();
+    } catch (Exception e) {
+      String errorMessage = String.format("Error when deleting query workload 
for workload: %s, error: %s",
+          queryWorkloadName, e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+}

Review Comment:
   Can we add another API to force the propagation of costs for a queryWorkload 
(or all queryWorkloads)? This will be useful when more instances are added to a 
tenant but the cost is not updated. 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "NonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=LEAF_NODE
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,

Review Comment:
   Given that we've renamed to brokerNode and serverNode now, do we still need 
this NodeType param here? We can just return the instanceCost? And we can 
also return whether this is a broker or server node? 



##########
pinot-common/src/main/java/org/apache/pinot/common/utils/config/QueryWorkloadConfigUtils.java:
##########
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.utils.config;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.base.Preconditions;
+import java.net.URI;
+import java.util.Collections;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http.HttpVersion;
+import org.apache.hc.core5.http.io.support.ClassicRequestBuilder;
+import org.apache.helix.zookeeper.datamodel.ZNRecord;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.common.utils.http.HttpClientConfig;
+import org.apache.pinot.common.utils.tls.TlsUtils;
+import org.apache.pinot.spi.config.workload.EnforcementProfile;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.PropagationScheme;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.apache.pinot.spi.utils.retry.RetryPolicy;
+import org.slf4j.Logger;
+
+
+public class QueryWorkloadConfigUtils {
+  private QueryWorkloadConfigUtils() {
+  }
+
+  private static final Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(QueryWorkloadConfigUtils.class);
+  private static final HttpClient HTTP_CLIENT = new 
HttpClient(HttpClientConfig.DEFAULT_HTTP_CLIENT_CONFIG,
+          TlsUtils.getSslContext());
+
+  /**
+   * Converts a ZNRecord into a QueryWorkloadConfig object by extracting 
mapFields.
+   *
+   * @param znRecord The ZNRecord containing workload config data.
+   * @return A QueryWorkloadConfig object.
+   */
+  public static QueryWorkloadConfig fromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String queryWorkloadName = 
znRecord.getSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME);
+    Preconditions.checkNotNull(queryWorkloadName, "queryWorkloadName cannot be 
null");
+    String nodeConfigsJson = 
znRecord.getSimpleField(QueryWorkloadConfig.NODE_CONFIGS);
+    Preconditions.checkNotNull(nodeConfigsJson, "nodeConfigs cannot be null");
+    try {
+      List<NodeConfig> nodeConfigs = JsonUtils.stringToObject(nodeConfigsJson, 
new TypeReference<>() { });
+      return new QueryWorkloadConfig(queryWorkloadName, nodeConfigs);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
QueryWorkloadConfig", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Updates a ZNRecord with the fields from a WorkloadConfig object.
+   *
+   * @param queryWorkloadConfig The QueryWorkloadConfig object to convert.
+   * @param znRecord The ZNRecord to update.
+   */
+  public static void updateZNRecordWithWorkloadConfig(ZNRecord znRecord, 
QueryWorkloadConfig queryWorkloadConfig) {
+    znRecord.setSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME, 
queryWorkloadConfig.getQueryWorkloadName());
+    try {
+      znRecord.setSimpleField(QueryWorkloadConfig.NODE_CONFIGS,
+          JsonUtils.objectToString(queryWorkloadConfig.getNodeConfigs()));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert 
QueryWorkloadConfig : %s to ZNRecord",
+          queryWorkloadConfig);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static void updateZNRecordWithInstanceCost(ZNRecord znRecord, String 
queryWorkloadName,
+      InstanceCost instanceCost) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    Preconditions.checkNotNull(instanceCost, "InstanceCost cannot be null");
+    try {
+      znRecord.setSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME, 
queryWorkloadName);
+      znRecord.setSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST, 
JsonUtils.objectToString(instanceCost));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert InstanceCost : %s 
to ZNRecord",
+          instanceCost);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static InstanceCost getInstanceCostFromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String instanceCostJson = 
znRecord.getSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST);
+    Preconditions.checkNotNull(instanceCostJson, "InstanceCost cannot be 
null");
+    try {
+      return JsonUtils.stringToObject(instanceCostJson, InstanceCost.class);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
InstanceCost", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
  /**
   * Fetches the query workload configs for an instance from the controller's
   * {@code /queryWorkloadConfigs/instance/<instanceId>?nodeType=<nodeType>} endpoint, retrying
   * with exponential backoff on failure.
   *
   * @param controllerUrl base URL of the controller; an empty/null URL short-circuits to an empty list
   * @param instanceId Helix instance id whose workload configs are requested
   * @param nodeType node type used as a query parameter (its {@code toString()} is appended verbatim)
   * @return the fetched configs; an empty list when the controller returns 404, the URL is missing,
   *         or all attempts fail with an exception
   */
  public static List<QueryWorkloadConfig> getQueryWorkloadConfigsFromController(String controllerUrl, String instanceId,
                                                                                NodeConfig.Type nodeType) {
    try {
      if (controllerUrl == null || controllerUrl.isEmpty()) {
        LOGGER.warn("Controller URL is empty, cannot fetch query workload configs for instance: {}", instanceId);
        return Collections.emptyList();
      }
      // NOTE(review): instanceId/nodeType are concatenated without URL-encoding — assumes they
      // contain no characters needing escaping; confirm against allowed instance-id formats.
      URI queryWorkloadURI = new URI(controllerUrl + "/queryWorkloadConfigs/instance/" + instanceId + "?nodeType="
              + nodeType);
      ClassicHttpRequest request = ClassicRequestBuilder.get(queryWorkloadURI)
              .setVersion(HttpVersion.HTTP_1_1)
              .setHeader(HttpHeaders.CONTENT_TYPE, HttpClient.JSON_CONTENT_TYPE)
              .build();
      // Holds the result produced inside the retry lambda (lambda-captured locals must be effectively final).
      AtomicReference<List<QueryWorkloadConfig>> workloadConfigs = new AtomicReference<>(null);
      // Up to 3 attempts, ~3s initial delay, 1.2x backoff.
      RetryPolicy retryPolicy = RetryPolicies.exponentialBackoffRetryPolicy(3, 3000L, 1.2f);
      retryPolicy.attempt(() -> {
        try {
          SimpleHttpResponse response = HttpClient.wrapAndThrowHttpException(
                  HTTP_CLIENT.sendRequest(request, HttpClient.DEFAULT_SOCKET_TIMEOUT_MS)
          );
          if (response.getStatusCode() == HttpStatus.SC_OK) {
            workloadConfigs.set(QueryWorkloadConfigUtils.getQueryWorkloadConfigs(response.getResponse()));
            LOGGER.info("Successfully fetched query workload configs from controller: {}, Instance: {}",
                    controllerUrl, instanceId);
            return true;
          } else if (response.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
            // 404 means no configs exist for this instance — a definitive answer, so stop retrying.
            LOGGER.info("No query workload configs found for controller: {}, Instance: {}", controllerUrl, instanceId);
            workloadConfigs.set(Collections.emptyList());
            return true;
          } else {
            // Any other status is treated as transient; returning false triggers another attempt.
            LOGGER.warn("Failed to fetch query workload configs from controller: {}, Instance: {}, Status: {}",
                    controllerUrl, instanceId, response.getStatusCode());
            return false;
          }
        } catch (Exception e) {
          // Swallow per-attempt exceptions so the retry policy can back off and try again.
          LOGGER.warn("Failed to fetch query workload configs from controller: {}, Instance: {}",
                  controllerUrl, instanceId, e);
          return false;
        }
      });
      // Assumes attempt() throws when all retries are exhausted, landing in the catch below,
      // so a non-null value has been set whenever we reach this line — TODO confirm RetryPolicy contract.
      return workloadConfigs.get();
    } catch (Exception e) {
      // Exhausted retries (or URI construction failure): degrade to an empty list rather than propagate.
      LOGGER.warn("Failed to fetch query workload configs from controller: {}, Instance: {}",
              controllerUrl, instanceId, e);
      return Collections.emptyList();
    }
  }
+
+  public static List<QueryWorkloadConfig> getQueryWorkloadConfigs(String 
queryWorkloadConfigsJson) {
+    Preconditions.checkNotNull(queryWorkloadConfigsJson, "Query workload 
configs JSON cannot be null");
+    try {
+      return JsonUtils.stringToObject(queryWorkloadConfigsJson, new 
TypeReference<>() { });
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert query workload 
configs: %s to list of QueryWorkloadConfig",
+          queryWorkloadConfigsJson);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Validates the given QueryWorkloadConfig and returns a list of validation 
error messages.
+   *
+   * @param config the QueryWorkloadConfig to validate
+   * @return a list of validation errors; empty if config is valid
+   */
+  public static List<String> validateQueryWorkloadConfig(QueryWorkloadConfig 
config) {

Review Comment:
   This is similar to tableConfig right? If there's an in-progress upgrade that 
adds a new field it should be used only after all the controllers are upgraded 
to the new version. 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org

Reply via email to