praveenc7 commented on code in PR #15109:
URL: https://github.com/apache/pinot/pull/15109#discussion_r2118336729


##########
pinot-common/src/main/java/org/apache/pinot/common/utils/config/QueryWorkloadConfigUtils.java:
##########
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.utils.config;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.base.Preconditions;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http.HttpVersion;
+import org.apache.hc.core5.http.io.support.ClassicRequestBuilder;
+import org.apache.helix.zookeeper.datamodel.ZNRecord;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.common.utils.http.HttpClientConfig;
+import org.apache.pinot.common.utils.tls.TlsUtils;
+import org.apache.pinot.spi.config.workload.EnforcementProfile;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.PropagationScheme;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.apache.pinot.spi.utils.retry.RetryPolicy;
+import org.slf4j.Logger;
+
+
+public class QueryWorkloadConfigUtils {
+  private QueryWorkloadConfigUtils() {
+  }
+
+  private static final Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(QueryWorkloadConfigUtils.class);
+  private static final HttpClient HTTP_CLIENT = new 
HttpClient(HttpClientConfig.DEFAULT_HTTP_CLIENT_CONFIG,
+          TlsUtils.getSslContext());
+
+  /**
+   * Converts a ZNRecord into a QueryWorkloadConfig object by extracting 
mapFields.
+   *
+   * @param znRecord The ZNRecord containing workload config data.
+   * @return A QueryWorkloadConfig object.
+   */
+  public static QueryWorkloadConfig fromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String queryWorkloadName = 
znRecord.getSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME);
+    Preconditions.checkNotNull(queryWorkloadName, "queryWorkloadName cannot be 
null");
+    String nodeConfigsJson = 
znRecord.getSimpleField(QueryWorkloadConfig.NODE_CONFIGS);
+    Preconditions.checkNotNull(nodeConfigsJson, "nodeConfigs cannot be null");
+    try {
+      List<NodeConfig> nodeConfigs = JsonUtils.stringToObject(nodeConfigsJson, 
new TypeReference<>() { });
+      return new QueryWorkloadConfig(queryWorkloadName, nodeConfigs);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
QueryWorkloadConfig", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Updates a ZNRecord with the fields from a WorkloadConfig object.
+   *
+   * @param queryWorkloadConfig The QueryWorkloadConfig object to convert.
+   * @param znRecord The ZNRecord to update.
+   */
+  public static void updateZNRecordWithWorkloadConfig(ZNRecord znRecord, 
QueryWorkloadConfig queryWorkloadConfig) {
+    znRecord.setSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME, 
queryWorkloadConfig.getQueryWorkloadName());
+    try {
+      znRecord.setSimpleField(QueryWorkloadConfig.NODE_CONFIGS,
+          JsonUtils.objectToString(queryWorkloadConfig.getNodeConfigs()));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert 
QueryWorkloadConfig : %s to ZNRecord",
+          queryWorkloadConfig);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static void updateZNRecordWithInstanceCost(ZNRecord znRecord, String 
queryWorkloadName,
+      InstanceCost instanceCost) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    Preconditions.checkNotNull(instanceCost, "InstanceCost cannot be null");
+    try {
+      znRecord.setSimpleField(QueryWorkloadRefreshMessage.QUERY_WORKLOAD_NAME, 
queryWorkloadName);
+      znRecord.setSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST, 
JsonUtils.objectToString(instanceCost));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert InstanceCost : %s 
to ZNRecord",
+          instanceCost);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static InstanceCost getInstanceCostFromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String instanceCostJson = 
znRecord.getSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST);
+    Preconditions.checkNotNull(instanceCostJson, "InstanceCost cannot be 
null");
+    try {
+      return JsonUtils.stringToObject(instanceCostJson, InstanceCost.class);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
InstanceCost", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static List<QueryWorkloadConfig> 
getQueryWorkloadConfigsFromController(String controllerUrl, String instanceId,

Review Comment:
   This would be used by the server/broker to fetch the workload cost relevant to it during startup; it is added here for completeness of the propagation flow.
   
   Added comments on this as well.
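   To illustrate, a minimal sketch of the intended startup call (the controller URL and instance id below are placeholders, and the node-type string follows the `forValue` helper used elsewhere in this PR):
   
   ```java
   // Hedged sketch: a server pulling the workload costs relevant to it at startup.
   List<QueryWorkloadConfig> configs = QueryWorkloadConfigUtils.getQueryWorkloadConfigsFromController(
       "http://localhost:9000", "Server_localhost_8098", NodeConfig.Type.forValue("serverNode"));
   for (QueryWorkloadConfig config : configs) {
     // Apply the per-workload cost budgets locally.
   }
   ```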



##########
pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java:
##########
@@ -3837,6 +3849,7 @@ public RebalanceResult rebalanceTable(String 
tableNameWithType, TableConfig tabl
     TableRebalancer tableRebalancer =
         new TableRebalancer(_helixZkManager, zkBasedTableRebalanceObserver, 
_controllerMetrics, _rebalancePreChecker,
             _tableSizeReader);
+    _queryWorkloadManager.propagateWorkloadFor(tableNameWithType);

Review Comment:
   Sure, I will take that AI (action item).



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.helix.model.InstanceConfig;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The QueryWorkloadManager is responsible for managing the query workload 
configuration and propagating/computing
+ * the cost to be enforced by relevant instances based on the propagation 
scheme.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private final PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: Make this configurable once we have multiple cost splitter implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkloadUpdateMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, InstanceCost> instanceCostMap = 
_costSplitter.computeInstanceCostMap(nodeConfig, instances);
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instanceCostMap.entrySet().stream()
+          .collect(Collectors.toMap(Map.Entry::getKey, entry -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.REFRESH_QUERY_WORKLOAD_MSG_SUB_TYPE, 
entry.getValue())));
+      // Send the QueryWorkloadRefreshMessage to the instances
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate delete workload refresh message for the given 
queryWorkloadConfig
+   * @param queryWorkloadConfig The query workload configuration to delete
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Send the {@link QueryWorkloadRefreshMessage} with 
DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE to the instances
+   */
+  public void propagateDeleteWorkloadMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instances.stream()
+          .collect(Collectors.toMap(instance -> instance, instance -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE, 
null)));
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate the workload for the given table name; exits early if queryWorkloadConfigs is empty
+   * @param tableName The table name to propagate the workload for, it can be 
a rawTableName or a tableNameWithType
+   * if rawTableName is provided, it will resolve all available tableTypes and 
propagate the workload for each tableType
+   *
+   * This method performs the following steps:
+   * 1. Find all the helix tags associated with the table
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Propagate the workload cost for instances associated with the workloads
+   */
+  public void propagateWorkloadFor(String tableName) {
+    try {
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+          return;
+      }
+      // Get the helixTags associated with the table
+      List<String> helixTags = 
PropagationUtils.getHelixTagsForTable(_pinotHelixResourceManager, tableName);
+      // Find all workloads associated with the helix tags
+      Set<QueryWorkloadConfig> queryWorkloadConfigsForTags =
+          
PropagationUtils.getQueryWorkloadConfigsForTags(_pinotHelixResourceManager, 
helixTags, queryWorkloadConfigs);
+      // Propagate the workload for each QueryWorkloadConfig
+      for (QueryWorkloadConfig queryWorkloadConfig : 
queryWorkloadConfigsForTags) {
+        propagateWorkloadUpdateMessage(queryWorkloadConfig);
+      }
+    } catch (Exception e) {
+      String errorMsg = String.format("Failed to propagate workload for table: 
%s", tableName);
+      LOGGER.error(errorMsg, e);
+      throw new RuntimeException(errorMsg, e);
+    }
+  }
+
+  /**
+   * Get all the workload costs associated with the given instance and node 
type
+   * 1. Find all the helix tags associated with the instance
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Find the instance associated with the {@link QueryWorkloadConfig} and 
node type
+   *
+   * @param instanceName The instance name to get the workload costs for
+   * @param nodeType {@link NodeConfig.Type} The node type to get the workload 
costs for
+   * @return A map of workload name to {@link InstanceCost} for the given 
instance and node type
+   */
+  public Map<String, InstanceCost> getWorkloadToInstanceCostFor(String 
instanceName, NodeConfig.Type nodeType) {
+    try {
+      Map<String, InstanceCost> workloadToInstanceCostMap = new HashMap<>();
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {

Review Comment:
   Good catch, I missed cleaning this up.



##########
pinot-common/src/main/java/org/apache/pinot/common/utils/config/QueryWorkloadConfigUtils.java:
##########
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.utils.config;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.base.Preconditions;
+import java.net.URI;
+import java.util.Collections;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http.HttpVersion;
+import org.apache.hc.core5.http.io.support.ClassicRequestBuilder;
+import org.apache.helix.zookeeper.datamodel.ZNRecord;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.common.utils.http.HttpClientConfig;
+import org.apache.pinot.common.utils.tls.TlsUtils;
+import org.apache.pinot.spi.config.workload.EnforcementProfile;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.PropagationScheme;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.apache.pinot.spi.utils.retry.RetryPolicy;
+import org.slf4j.Logger;
+
+
+public class QueryWorkloadConfigUtils {
+  private QueryWorkloadConfigUtils() {
+  }
+
+  private static final Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(QueryWorkloadConfigUtils.class);
+  private static final HttpClient HTTP_CLIENT = new 
HttpClient(HttpClientConfig.DEFAULT_HTTP_CLIENT_CONFIG,
+          TlsUtils.getSslContext());
+
+  /**
+   * Converts a ZNRecord into a QueryWorkloadConfig object by extracting 
mapFields.
+   *
+   * @param znRecord The ZNRecord containing workload config data.
+   * @return A QueryWorkloadConfig object.
+   */
+  public static QueryWorkloadConfig fromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String queryWorkloadName = 
znRecord.getSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME);
+    Preconditions.checkNotNull(queryWorkloadName, "queryWorkloadName cannot be 
null");
+    String nodeConfigsJson = 
znRecord.getSimpleField(QueryWorkloadConfig.NODE_CONFIGS);
+    Preconditions.checkNotNull(nodeConfigsJson, "nodeConfigs cannot be null");
+    try {
+      List<NodeConfig> nodeConfigs = JsonUtils.stringToObject(nodeConfigsJson, 
new TypeReference<>() { });
+      return new QueryWorkloadConfig(queryWorkloadName, nodeConfigs);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
QueryWorkloadConfig", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Updates a ZNRecord with the fields from a WorkloadConfig object.
+   *
+   * @param queryWorkloadConfig The QueryWorkloadConfig object to convert.
+   * @param znRecord The ZNRecord to update.
+   */
+  public static void updateZNRecordWithWorkloadConfig(ZNRecord znRecord, 
QueryWorkloadConfig queryWorkloadConfig) {
+    znRecord.setSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME, 
queryWorkloadConfig.getQueryWorkloadName());
+    try {
+      znRecord.setSimpleField(QueryWorkloadConfig.NODE_CONFIGS,
+          JsonUtils.objectToString(queryWorkloadConfig.getNodeConfigs()));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert 
QueryWorkloadConfig : %s to ZNRecord",
+          queryWorkloadConfig);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static void updateZNRecordWithInstanceCost(ZNRecord znRecord, String 
queryWorkloadName,
+      InstanceCost instanceCost) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    Preconditions.checkNotNull(instanceCost, "InstanceCost cannot be null");
+    try {
+      znRecord.setSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME, 
queryWorkloadName);
+      znRecord.setSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST, 
JsonUtils.objectToString(instanceCost));
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert InstanceCost : %s 
to ZNRecord",
+          instanceCost);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static InstanceCost getInstanceCostFromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String instanceCostJson = 
znRecord.getSimpleField(QueryWorkloadRefreshMessage.INSTANCE_COST);
+    Preconditions.checkNotNull(instanceCostJson, "InstanceCost cannot be 
null");
+    try {
+      return JsonUtils.stringToObject(instanceCostJson, InstanceCost.class);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
InstanceCost", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  public static List<QueryWorkloadConfig> 
getQueryWorkloadConfigsFromController(String controllerUrl, String instanceId,
+                                                                               
 NodeConfig.Type nodeType) {
+    try {
+      if (controllerUrl == null || controllerUrl.isEmpty()) {
+        LOGGER.warn("Controller URL is empty, cannot fetch query workload 
configs for instance: {}", instanceId);
+        return Collections.emptyList();
+      }
+      URI queryWorkloadURI = new URI(controllerUrl + 
"/queryWorkloadConfigs/instance/" + instanceId + "?nodeType="
+              + nodeType);
+      ClassicHttpRequest request = ClassicRequestBuilder.get(queryWorkloadURI)
+              .setVersion(HttpVersion.HTTP_1_1)
+              .setHeader(HttpHeaders.CONTENT_TYPE, 
HttpClient.JSON_CONTENT_TYPE)
+              .build();
+      AtomicReference<List<QueryWorkloadConfig>> workloadConfigs = new 
AtomicReference<>(null);
+      RetryPolicy retryPolicy = RetryPolicies.exponentialBackoffRetryPolicy(3, 
3000L, 1.2f);
+      retryPolicy.attempt(() -> {
+        try {
+          SimpleHttpResponse response = HttpClient.wrapAndThrowHttpException(
+                  HTTP_CLIENT.sendRequest(request, 
HttpClient.DEFAULT_SOCKET_TIMEOUT_MS)
+          );
+          if (response.getStatusCode() == HttpStatus.SC_OK) {
+            
workloadConfigs.set(QueryWorkloadConfigUtils.getQueryWorkloadConfigs(response.getResponse()));
+            LOGGER.info("Successfully fetched query workload configs from 
controller: {}, Instance: {}",
+                    controllerUrl, instanceId);
+            return true;
+          } else if (response.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
+            LOGGER.info("No query workload configs found for controller: {}, 
Instance: {}", controllerUrl, instanceId);
+            workloadConfigs.set(Collections.emptyList());
+            return true;
+          } else {
+            LOGGER.warn("Failed to fetch query workload configs from 
controller: {}, Instance: {}, Status: {}",
+                    controllerUrl, instanceId, response.getStatusCode());
+            return false;
+          }
+        } catch (Exception e) {
+          LOGGER.warn("Failed to fetch query workload configs from controller: 
{}, Instance: {}",
+                  controllerUrl, instanceId, e);
+          return false;
+        }
+      });
+      return workloadConfigs.get();
+    } catch (Exception e) {
+      LOGGER.warn("Failed to fetch query workload configs from controller: {}, 
Instance: {}",
+              controllerUrl, instanceId, e);
+      return Collections.emptyList();
+    }
+  }
+
+  public static List<QueryWorkloadConfig> getQueryWorkloadConfigs(String 
queryWorkloadConfigsJson) {
+    Preconditions.checkNotNull(queryWorkloadConfigsJson, "Query workload 
configs JSON cannot be null");
+    try {
+      return JsonUtils.stringToObject(queryWorkloadConfigsJson, new 
TypeReference<>() { });
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert query workload 
configs: %s to list of QueryWorkloadConfig",
+          queryWorkloadConfigsJson);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Validates the given QueryWorkloadConfig and returns a list of validation 
error messages.
+   *
+   * @param config the QueryWorkloadConfig to validate
+   * @return a list of validation errors; empty if config is valid
+   */
+  public static List<String> validateQueryWorkloadConfig(QueryWorkloadConfig 
config) {

Review Comment:
   Right, we can think of it that way.



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,194 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.helix.model.InstanceConfig;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The QueryWorkloadManager is responsible for managing the query workload 
configuration and propagating/computing
+ * the cost to be enforced by relevant instances based on the propagation 
scheme.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private final PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: Make this configurable once we have multiple cost splitter implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkloadUpdateMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, InstanceCost> instanceCostMap = 
_costSplitter.computeInstanceCostMap(nodeConfig, instances);
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instanceCostMap.entrySet().stream()
+          .collect(Collectors.toMap(Map.Entry::getKey, entry -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.REFRESH_QUERY_WORKLOAD_MSG_SUB_TYPE, 
entry.getValue())));
+      // Send the QueryWorkloadRefreshMessage to the instances
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate delete workload refresh message for the given 
queryWorkloadConfig
+   * @param queryWorkloadConfig The query workload configuration to delete
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Send the {@link QueryWorkloadRefreshMessage} with 
DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE to the instances
+   */
+  public void propagateDeleteWorkloadMessage(QueryWorkloadConfig 
queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        continue;
+      }
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instances.stream()
+          .collect(Collectors.toMap(instance -> instance, instance -> new 
QueryWorkloadRefreshMessage(queryWorkloadName,
+              QueryWorkloadRefreshMessage.DELETE_QUERY_WORKLOAD_MSG_SUB_TYPE, 
null)));
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate the workload for the given table name; exits early if queryWorkloadConfigs is empty
+   * @param tableName The table name to propagate the workload for, it can be 
a rawTableName or a tableNameWithType
+   * if rawTableName is provided, it will resolve all available tableTypes and 
propagate the workload for each tableType
+   *
+   * This method performs the following steps:
+   * 1. Find all the helix tags associated with the table
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Propagate the workload cost for instances associated with the workloads
+   */
+  public void propagateWorkloadFor(String tableName) {
+    try {
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+          return;
+      }
+      // Get the helixTags associated with the table
+      List<String> helixTags = 
PropagationUtils.getHelixTagsForTable(_pinotHelixResourceManager, tableName);
+      // Find all workloads associated with the helix tags
+      Set<QueryWorkloadConfig> queryWorkloadConfigsForTags =
+          
PropagationUtils.getQueryWorkloadConfigsForTags(_pinotHelixResourceManager, 
helixTags, queryWorkloadConfigs);
+      // Propagate the workload for each QueryWorkloadConfig
+      for (QueryWorkloadConfig queryWorkloadConfig : 
queryWorkloadConfigsForTags) {
+        propagateWorkloadUpdateMessage(queryWorkloadConfig);
+      }
+    } catch (Exception e) {
+      String errorMsg = String.format("Failed to propagate workload for table: 
%s", tableName);
+      LOGGER.error(errorMsg, e);
+      throw new RuntimeException(errorMsg, e);
+    }
+  }
+
+  /**
+   * Get all the workload costs associated with the given instance and node 
type
+   * 1. Find all the helix tags associated with the instance
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Find the instance associated with the {@link QueryWorkloadConfig} and 
node type
+   *
+   * @param instanceName The instance name to get the workload costs for
+   * @param nodeType {@link NodeConfig.Type} The node type to get the workload 
costs for
+   * @return A map of workload name to {@link InstanceCost} for the given 
instance and node type
+   */
+  public Map<String, InstanceCost> getWorkloadToInstanceCostFor(String 
instanceName, NodeConfig.Type nodeType) {

Review Comment:
   This method is invoked by `PinotQueryWorkloadRestletResource` during broker/server startup to retrieve the instance's workload cost.
   
   This aligns with how other methods in this class resolve workload cost. Since it needs to be easily accessible in that context, keeping it here makes sense.
   
   In contrast, `QueryWorkloadConfigUtils` is intended primarily for serialization/deserialization and request construction.
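   For reference, the call path from the resource layer (mirroring this PR's diff):
   
   ```java
   // The resource resolves the node type from the query param and delegates to
   // the manager, which owns the workload-cost resolution logic.
   NodeConfig.Type nodeType = NodeConfig.Type.forValue(nodeTypeString);
   Map<String, InstanceCost> workloadToInstanceCostMap = _pinotHelixResourceManager
       .getQueryWorkloadManager().getWorkloadToInstanceCostFor(instanceName, nodeType);
   ```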



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to get a specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "NonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   ]
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=LEAF_NODE
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,

Review Comment:
   We can get away without asking for the nodeType, since the instance is either a broker or a server, which can be inferred from the instanceConfig.
   
   Given the caller already knows whether it is a server or a broker, I think it is okay if we don't return that info. Since this is additive, we can always add more info to the response later.
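   A rough sketch of the inference (the `Server_`/`Broker_` prefix checks assume Pinot's Helix instance naming, e.g. `Server_localhost_1234`; this is illustrative, not part of this PR):
   
   ```java
   // Hedged sketch: infer the node type from the Helix instance name prefix
   // instead of requiring an explicit nodeType query parameter.
   private static NodeConfig.Type inferNodeType(String instanceName) {
     if (instanceName.startsWith("Server_")) {
       return NodeConfig.Type.forValue("serverNode");
     }
     if (instanceName.startsWith("Broker_")) {
       return NodeConfig.Type.forValue("brokerNode");
     }
     throw new IllegalArgumentException("Cannot infer node type from instance: " + instanceName);
   }
   ```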



##########
pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java:
##########
@@ -2209,6 +2219,8 @@ public void setExistingTableConfig(TableConfig 
tableConfig, int expectedVersion)
 
     // Send update query quota message if quota is specified
     sendTableConfigRefreshMessage(tableNameWithType);
+    // TODO: Propagate workload for tables only if there is a change in instance characteristics
+    _queryWorkloadManager.propagateWorkloadFor(tableNameWithType);
   }
 
   public void deleteUser(String username) {

Review Comment:
   I think we discussed this offline as well, on what actions we should take on table deletion, such as cleaning up the QueryWorkloadConfig associated with the table.
   
   This would require updating the QueryWorkloadConfig to remove all table/tenant entries in the workload config. Currently the query workload config is only updated on the POST operation, so there may be some edge conditions. In order to handle that well, I will take an AI to do this in a follow-up PR.
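   Roughly, the follow-up cleanup could look like the sketch below (the PropagationScheme getters and the `rawTableName` variable are assumed names for illustration):
   
   ```java
   // Hedged sketch for the follow-up PR: on table deletion, drop the table from
   // any TABLE-scoped propagation scheme and re-persist the workload config.
   for (QueryWorkloadConfig config : _pinotHelixResourceManager.getAllQueryWorkloadConfigs()) {
     for (NodeConfig nodeConfig : config.getNodeConfigs()) {
       PropagationScheme scheme = nodeConfig.getPropagationScheme(); // assumed getter
       if (scheme.getPropagationType() == PropagationScheme.Type.TABLE) { // assumed enum/getter
         scheme.getValues().remove(rawTableName); // assumes a mutable values list
       }
     }
     _pinotHelixResourceManager.setQueryWorkloadConfig(config);
   }
   ```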



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,295 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.common.utils.config.QueryWorkloadConfigUtils;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+        return JsonUtils.objectToString(Map.of());
+      }
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to get a specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "brokerNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "serverNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   ]
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=serverNode
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,
+      @QueryParam("nodeType") String nodeTypeString, @Context HttpHeaders 
httpHeaders) {
+    try {
+      NodeConfig.Type nodeType = NodeConfig.Type.forValue(nodeTypeString);
+      Map<String, InstanceCost> workloadToInstanceCostMap = 
_pinotHelixResourceManager.getQueryWorkloadManager()
+          .getWorkloadToInstanceCostFor(instanceName, nodeType);
+      if (workloadToInstanceCostMap == null || 
workloadToInstanceCostMap.isEmpty()) {
+        throw new ControllerApplicationException(LOGGER, "No workload configs 
found for instance: " + instanceName,
+            Response.Status.NOT_FOUND, null);
+      }
+      return JsonUtils.objectToString(workloadToInstanceCostMap);
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for instance: %s, error: %s",
+            instanceName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+  /**
+   * Updates the query workload config
+   * @param requestString JSON string representing the QueryWorkloadConfig
+   * Example request:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *    {
+   *       "nodeType" : "brokerNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "serverNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   ]
+   * }
+   *
+   */
+  @POST
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.UPDATE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.UPDATE)
+  @ApiOperation(value = "Update query workload config", notes = "Update 
workload config for the workload name")
+  public Response updateQueryWorkloadConfig(String requestString, @Context 
HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to update queryWorkloadConfig with 
request: {}", requestString);
+      QueryWorkloadConfig queryWorkloadConfig = 
JsonUtils.stringToObject(requestString, QueryWorkloadConfig.class);
+      List<String> validationErrors = 
QueryWorkloadConfigUtils.validateQueryWorkloadConfig(queryWorkloadConfig);
+      if (!validationErrors.isEmpty()) {
+        String errorMessage = String.format("Invalid query workload config: 
%s", validationErrors);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.BAD_REQUEST, null);
+      }
+      _pinotHelixResourceManager.setQueryWorkloadConfig(queryWorkloadConfig);
+      String successMessage = String.format("Query Workload config updated 
successfully for workload: %s",
+          queryWorkloadConfig.getQueryWorkloadName());
+      LOGGER.info(successMessage);
+      return Response.ok().entity(successMessage).build();
+    } catch (Exception e) {
+      String errorMessage = String.format("Error when updating query workload 
request: %s, error: %s",
+          requestString, e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * Deletes the query workload config
+   * @param queryWorkloadName Name of the query workload to be deleted
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   */
+  @DELETE
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.DELETE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.DELETE)
+  @ApiOperation(value = "Delete query workload config", notes = "Delete 
workload config for the workload name")
+  public Response deleteQueryWorkloadConfig(@PathParam("queryWorkloadName") 
String queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      _pinotHelixResourceManager.deleteQueryWorkloadConfig(queryWorkloadName);
+      String successMessage = String.format("Query Workload config deleted 
successfully for workload: %s",
+          queryWorkloadName);
+      LOGGER.info(successMessage);
+      return Response.ok().entity(successMessage).build();
+    } catch (Exception e) {
+      String errorMessage = String.format("Error when deleting query workload 
for workload: %s, error: %s",
+          queryWorkloadName, e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+}

Review Comment:
   Ideally, adding new instances would at some point trigger a rebalance, and we already have logic in place to handle updates in that scenario. However, I understand there may be cases where we want to initiate this process manually. To accommodate that, I will add a new API endpoint specifically for triggering a refresh and propagating changes.
   
   While we could reuse the existing POST API with a "force reload" option, doing so would require resending the full workload configuration each time. Since the goal here is simply to refresh, it makes more sense to introduce a dedicated endpoint for this purpose.
   
   For now, the API will be per workload, since accidentally calling it for all workloads at once might trigger multiple ZK calls.
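   Something along these lines for the dedicated endpoint (the path and wiring are a sketch, not final):
   
   ```java
   // Hedged sketch of the proposed per-workload refresh endpoint: re-read the
   // stored config and re-propagate it, without the caller resending the payload.
   @POST
   @Produces(MediaType.APPLICATION_JSON)
   @Path("/queryWorkloadConfigs/{queryWorkloadName}/refresh")
   @Authorize(targetType = TargetType.CLUSTER, action = Actions.Cluster.UPDATE_QUERY_WORKLOAD_CONFIG)
   @Authenticate(AccessType.UPDATE)
   @ApiOperation(value = "Re-propagate an existing query workload config")
   public Response refreshQueryWorkloadConfig(@PathParam("queryWorkloadName") String queryWorkloadName) {
     QueryWorkloadConfig queryWorkloadConfig = _pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
     if (queryWorkloadConfig == null) {
       throw new ControllerApplicationException(LOGGER, "Workload config not found for workload: " + queryWorkloadName,
           Response.Status.NOT_FOUND, null);
     }
     _pinotHelixResourceManager.getQueryWorkloadManager().propagateWorkloadUpdateMessage(queryWorkloadConfig);
     return Response.ok().entity("Propagated workload: " + queryWorkloadName).build();
   }
   ```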



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

