vvivekiyer commented on code in PR #15109:
URL: https://github.com/apache/pinot/pull/15109#discussion_r2103334634


##########
pinot-spi/src/main/java/org/apache/pinot/spi/config/workload/QueryWorkloadConfig.java:
##########
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.spi.config.workload;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+import java.util.List;
+import org.apache.pinot.spi.config.BaseJsonConfig;
+
+/**
+ * Class to represent the query workload configuration.
+ * A QueryWorkload is applied to a collection of queries at the Helix Cluster 
Level, queries specify the workload they
+ * belong to by specifying the query workload name in the query options
+ */
+public class QueryWorkloadConfig extends BaseJsonConfig {
+
+  public static final String QUERY_WORKLOAD_NAME = "queryWorkloadName";

Review Comment:
   Please add detailed comments for all SPI functions. 
   



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);

Review Comment:
   If no workloads are found, do you want to return a meaningful response 
rather than "null"? 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The PropagationManager class is responsible for propagating the query 
workload

Review Comment:
   Update comment. 



##########
pinot-common/src/main/java/org/apache/pinot/common/metadata/ZKMetadataProvider.java:
##########
@@ -834,6 +845,54 @@ private static Map<String, Double> 
toApplicationQpsQuotas(Map<String, String> qu
     }
   }
 
+  @Nullable
+  public static List<QueryWorkloadConfig> 
getAllQueryWorkloadConfigs(ZkHelixPropertyStore<ZNRecord> propertyStore) {
+    List<ZNRecord> znRecords =
+        propertyStore.getChildren(getPropertyStoreWorkloadConfigsPrefix(), 
null, AccessOption.PERSISTENT,
+            CommonConstants.Helix.ZkClient.RETRY_COUNT, 
CommonConstants.Helix.ZkClient.RETRY_INTERVAL_MS);
+    if (znRecords != null) {

Review Comment:
   Enhances readability if organized like this, with fewer if blocks to read. 
   ```
   if (znRecords == null) {
      return null;
   }
   
   rest of the code 
   ```



##########
pinot-spi/src/main/java/org/apache/pinot/spi/config/workload/NodeConfig.java:
##########
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.spi.config.workload;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+import com.fasterxml.jackson.annotation.JsonValue;
+import javax.annotation.Nullable;
+import org.apache.pinot.spi.config.BaseJsonConfig;
+
+
+public class NodeConfig extends BaseJsonConfig {
+
+  public enum Type {
+    LEAF_NODE("leafNode"),
+    NON_LEAF_NODE("nonLeafNode");

Review Comment:
   Currently this only supports Brokers? What about intermediate server 
processing for MSE? If this is not tackled as a part of this PR and coming as a 
followup, can we add a TODO? 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "NonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=LEAF_NODE
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,
+      @QueryParam("nodeType") String nodeTypeString, @Context HttpHeaders 
httpHeaders) {
+    try {
+      NodeConfig.Type nodeType = NodeConfig.Type.forValue(nodeTypeString);
+      Map<String, InstanceCost> workloadToInstanceCostMap = 
_pinotHelixResourceManager.getQueryWorkloadManager()
+          .getWorkloadToInstanceCostFor(instanceName, nodeType);
+      if (workloadToInstanceCostMap == null || 
workloadToInstanceCostMap.isEmpty()) {
+        throw new ControllerApplicationException(LOGGER, "No workload configs 
found for instance: " + instanceName,
+            Response.Status.NOT_FOUND, null);
+      }
+      return JsonUtils.objectToString(workloadToInstanceCostMap);
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for instance: %s, error: %s",
+            instanceName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+  /**
+   * Updates the query workload config
+   * @param requestString JSON string representing the QueryWorkloadConfig
+   * Example request:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *    {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "nonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   *
+   */
+  @POST
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.UPDATE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.UPDATE)
+  @ApiOperation(value = "Update query workload config", notes = "Update 
workload config for the workload name")
+  public Response updateQueryWorkloadConfig(String requestString, @Context 
HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to update queryWorkloadConfig with 
request: {}", requestString);
+      QueryWorkloadConfig queryWorkloadConfig = 
JsonUtils.stringToObject(requestString, QueryWorkloadConfig.class);
+      _pinotHelixResourceManager.setQueryWorkloadConfig(queryWorkloadConfig);

Review Comment:
   Can we add a validation function to validate configs to see if it's set 
properly? 
   
   For example:
   1. There should only be one NodeConfig for each type - leaf/non-leaf.
   2. There should only be one propagationScheme defined.
   3. It should only contain 3 high-level fields - nodeType, propagationScheme, enforcementProfile.



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "NonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=LEAF_NODE
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,

Review Comment:
   Curious: Why is the nodeType required here? 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The PropagationManager class is responsible for propagating the query 
workload
+ * refresh message to the relevant instances based on the node configurations.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: To make this configurable once we have multiple cost splitters 
implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkload(QueryWorkloadConfig queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        return;
+      }
+      // Calculate the instance cost for each instance
+      Map<String, InstanceCost> instanceCostMap = 
_costSplitter.computeInstanceCostMap(nodeConfig, instances);
+      Map<String, QueryWorkloadRefreshMessage> instanceToRefreshMessageMap = 
instanceCostMap.entrySet().stream()
+              .collect(Collectors.toMap(Map.Entry::getKey,
+                      entry -> new 
QueryWorkloadRefreshMessage(queryWorkloadName, entry.getValue())));
+      // Send the QueryWorkloadRefreshMessage to the instances
+      
_pinotHelixResourceManager.sendQueryWorkloadRefreshMessage(instanceToRefreshMessageMap);
+    }
+  }
+
+  /**
+   * Propagate the workload for the given table name, it does fast exits if 
queryWorkloadConfigs is empty
+   * @param tableName The table name to propagate the workload for, it can be 
a rawTableName or a tableNameWithType
+   * if rawTableName is provided, it will resolve all available tableTypes and 
propagate the workload for each tableType
+   *
+   * This method performs the following steps:
+   * 1. Find all the helix tags associated with the table
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Propagate the workload cost for instances associated with the workloads
+   */
+  public void propagateWorkloadFor(String tableName) {
+    try {
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+          return;
+      }
+      // Get the helixTags associated with the table
+      Set<String> helixTags = 
PropagationUtils.getHelixTagsForTable(_pinotHelixResourceManager, tableName);
+      // Find all workloads associated with the helix tags
+      Set<QueryWorkloadConfig> queryWorkloadConfigsForTags =
+          
PropagationUtils.getQueryWorkloadConfigsForTags(_pinotHelixResourceManager, 
helixTags, queryWorkloadConfigs);
+      // Propagate the workload for each QueryWorkloadConfig
+      for (QueryWorkloadConfig queryWorkloadConfig : 
queryWorkloadConfigsForTags) {
+        propagateWorkload(queryWorkloadConfig);
+      }
+    } catch (Exception e) {
+      String errorMsg = String.format("Failed to propagate workload for table: 
%s", tableName);
+      LOGGER.error(errorMsg, e);
+      throw new RuntimeException(errorMsg, e);
+    }
+  }
+
+  /**
+   * Get all the workload costs associated with the given instance and node 
type
+   * 1. Find all the helix tags associated with the instance
+   * 2. Find all the {@link QueryWorkloadConfig} associated with the helix tags
+   * 3. Find the instance associated with the {@link QueryWorkloadConfig} and 
node type
+   *
+   * @param instanceName The instance name to get the workload costs for
+   * @param nodeType {@link NodeConfig.Type} The node type to get the workload 
costs for
+   * @return A map of workload name to {@link InstanceCost} for the given 
instance and node type
+   */
+  public Map<String, InstanceCost> getWorkloadToInstanceCostFor(String 
instanceName, NodeConfig.Type nodeType) {
+    try {
+      Map<String, InstanceCost> workloadToInstanceCostMap = new HashMap<>();
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      if (queryWorkloadConfigs == null || queryWorkloadConfigs.isEmpty()) {
+        LOGGER.warn("No query workload configs found in zookeeper");
+        return workloadToInstanceCostMap;
+      }
+      // Find all the helix tags associated with the instance
+      Map<String, Set<String>> instanceToHelixTags
+          = 
PropagationUtils.getInstanceToHelixTags(_pinotHelixResourceManager);

Review Comment:
   This seems to be getting the InstanceConfig for all instances unnecessarily. 
Can't we use `getHelixInstanceConfig` instead of `getAllHelixInstanceConfigs` 
(used inside `getInstanceToHelixTags()`)? 
   
   You also don't need the getAllHelixInstanceConfigs function anymore. 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/scheme/PropagationUtils.java:
##########
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload.scheme;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.helix.model.InstanceConfig;
+import org.apache.pinot.common.utils.config.TagNameUtils;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.config.table.TenantConfig;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.PropagationScheme;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.builder.TableNameBuilder;
+
+
+/**
+ * This class provides utility methods for workload propagation.
+ */
+public class PropagationUtils {
+
+  private PropagationUtils() {
+  }
+
+  /**
+   * Get the mapping tableNameWithType → {NON_LEAF_NODE→brokerTag, 
LEAF_NODE→(serverTag + overrides)}
+   * 1. Get all table configs from the PinotHelixResourceManager
+   * 2. For each table config, extract the tenant config
+   * 3. For each tenant config, get the broker and server tags
+   * 4. Populate the helix tags for NON_LEAF_NODE and LEAF_NODE separately
+   */
+  public static Map<String, Map<NodeConfig.Type, Set<String>>> 
getTableToHelixTags(
+          PinotHelixResourceManager pinotResourceManager) {
+    Map<String, Map<NodeConfig.Type, Set<String>>> tableToTags = new 
HashMap<>();
+    for (TableConfig tableConfig : pinotResourceManager.getAllTableConfigs()) {
+      TenantConfig tenantConfig = tableConfig.getTenantConfig();
+      TableType tableType = tableConfig.getTableType();
+
+      // Gather all relevant tags for this tenant
+      Set<String> tenantTags = new HashSet<>();
+      collectHelixTagsForTable(tenantTags, tenantConfig, tableType);

Review Comment:
   Can you verify if this also fetches the tenant from tagOverrideConfig for 
realtime tables? 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/scheme/PropagationUtils.java:
##########
@@ -0,0 +1,208 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload.scheme;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.helix.model.InstanceConfig;
+import org.apache.pinot.common.utils.config.TagNameUtils;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.config.table.TenantConfig;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.PropagationScheme;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.builder.TableNameBuilder;
+
+
+/**
+ * This class provides utility methods for workload propagation.
+ */
+public class PropagationUtils {
+
+  private PropagationUtils() {
+  }
+
+  /**
+   * Get the mapping tableNameWithType → {NON_LEAF_NODE→brokerTag, 
LEAF_NODE→(serverTag + overrides)}
+   * 1. Get all table configs from the PinotHelixResourceManager
+   * 2. For each table config, extract the tenant config
+   * 3. For each tenant config, get the broker and server tags
+   * 4. Populate the helix tags for NON_LEAF_NODE and LEAF_NODE separately
+   */
+  public static Map<String, Map<NodeConfig.Type, Set<String>>> 
getTableToHelixTags(
+          PinotHelixResourceManager pinotResourceManager) {
+    Map<String, Map<NodeConfig.Type, Set<String>>> tableToTags = new 
HashMap<>();
+    for (TableConfig tableConfig : pinotResourceManager.getAllTableConfigs()) {
+      TenantConfig tenantConfig = tableConfig.getTenantConfig();
+      TableType tableType = tableConfig.getTableType();
+
+      // Gather all relevant tags for this tenant
+      Set<String> tenantTags = new HashSet<>();
+      collectHelixTagsForTable(tenantTags, tenantConfig, tableType);
+
+      // Populate the helix tags for NON_LEAF_NODE and LEAF_NODE separately to 
provide flexibility
+      // in workload propagation to either leaf nodes or non-leaf nodes
+      String brokerTag = 
TagNameUtils.getBrokerTagForTenant(tenantConfig.getBroker());

Review Comment:
   Hmmm, we want even intermediate servers in multi-stage engine to fall under 
non-leaf nodes right? 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/api/resources/PinotQueryWorkloadConfigRestletResource.java:
##########
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.api.resources;
+
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiKeyAuthDefinition;
+import io.swagger.annotations.ApiOperation;
+import io.swagger.annotations.Authorization;
+import io.swagger.annotations.SecurityDefinition;
+import io.swagger.annotations.SwaggerDefinition;
+import java.util.List;
+import java.util.Map;
+import javax.inject.Inject;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.controller.api.access.AccessType;
+import org.apache.pinot.controller.api.access.Authenticate;
+import 
org.apache.pinot.controller.api.exception.ControllerApplicationException;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.core.auth.Actions;
+import org.apache.pinot.core.auth.Authorize;
+import org.apache.pinot.core.auth.TargetType;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.CommonConstants;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.pinot.spi.utils.CommonConstants.SWAGGER_AUTHORIZATION_KEY;
+
+@Api(tags = Constants.QUERY_WORKLOAD_TAG, authorizations = 
{@Authorization(value = SWAGGER_AUTHORIZATION_KEY)})
+@SwaggerDefinition(securityDefinition = 
@SecurityDefinition(apiKeyAuthDefinitions = {
+    @ApiKeyAuthDefinition(name = HttpHeaders.AUTHORIZATION, in = 
ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key =
+        SWAGGER_AUTHORIZATION_KEY, description =
+        "The format of the key is  ```\"Basic <token>\" or \"Bearer "
+            + "<token>\"```"), @ApiKeyAuthDefinition(name = 
CommonConstants.QUERY_WORKLOAD, in =
+    ApiKeyAuthDefinition.ApiKeyLocation.HEADER, key = 
CommonConstants.QUERY_WORKLOAD, description =
+    "Workload context passed through http header. If no context is provided 
'default' workload "
+        + "context will be considered.")
+}))
+@Path("/")
+public class PinotQueryWorkloadConfigRestletResource {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(PinotQueryWorkloadConfigRestletResource.class);
+
+  @Inject
+  PinotHelixResourceManager _pinotHelixResourceManager;
+
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all query workload configs", notes = "Get all 
workload configs")
+  public String getQueryWorkloadConfigs(@Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get all queryWorkloadConfigs");
+      List<QueryWorkloadConfig> queryWorkloadConfigs = 
_pinotHelixResourceManager.getAllQueryWorkloadConfigs();
+      String response = JsonUtils.objectToString(queryWorkloadConfigs);
+      LOGGER.info("Successfully fetched all queryWorkloadConfigs");
+      return response;
+    } catch (Exception e) {
+      String errorMessage = String.format("Error while getting all workload 
configs, error: %s", e);
+      throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+    }
+  }
+
+  /**
+   * API to specific query workload config
+   * @param queryWorkloadName Name of the query workload
+   * Example request:
+   * /queryWorkloadConfigs/workload-foo1
+   * Example response:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *   {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "NonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/{queryWorkloadName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get query workload config", notes = "Get workload 
configs for the workload name")
+  public String getQueryWorkloadConfig(@PathParam("queryWorkloadName") String 
queryWorkloadName,
+      @Context HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to get workload config for workload: {}", 
queryWorkloadName);
+      QueryWorkloadConfig queryWorkloadConfig = 
_pinotHelixResourceManager.getQueryWorkloadConfig(queryWorkloadName);
+      if (queryWorkloadConfig == null) {
+        throw new ControllerApplicationException(LOGGER, "Workload config not 
found for workload: " + queryWorkloadName,
+            Response.Status.NOT_FOUND, null);
+      }
+      String response = queryWorkloadConfig.toJsonString();
+      LOGGER.info("Successfully fetched workload config for workload: {}", 
queryWorkloadName);
+      return response;
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for workload: %s, error: %s",
+            queryWorkloadName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * API to get all workload configs associated with the instance
+   * @param instanceName Helix instance name
+   * @param nodeTypeString  {@link NodeConfig.Type} string representation of 
the instance
+   * @return Map of workload name to instance cost
+   * Example request:
+   * /queryWorkloadConfigs/instance/Server_localhost_1234?nodeType=LEAF_NODE
+   * Example response:
+   * {
+   *  "workload1": {
+   *    "cpuCostNs": 100,
+   *    "memoryCostBytes":100
+   *  },
+   *  "workload2": {
+   *    "cpuCostNs": 50,
+   *    "memoryCostBytes": 50
+   *  }
+   */
+  @GET
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs/instance/{instanceName}")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.GET_INSTANCE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.READ)
+  @ApiOperation(value = "Get all workload configs associated with the 
instance",
+      notes = "Get all workload configs associated with the instance")
+  public String getQueryWorkloadConfigForInstance(@PathParam("instanceName") 
String instanceName,
+      @QueryParam("nodeType") String nodeTypeString, @Context HttpHeaders 
httpHeaders) {
+    try {
+      NodeConfig.Type nodeType = NodeConfig.Type.forValue(nodeTypeString);
+      Map<String, InstanceCost> workloadToInstanceCostMap = 
_pinotHelixResourceManager.getQueryWorkloadManager()
+          .getWorkloadToInstanceCostFor(instanceName, nodeType);
+      if (workloadToInstanceCostMap == null || 
workloadToInstanceCostMap.isEmpty()) {
+        throw new ControllerApplicationException(LOGGER, "No workload configs 
found for instance: " + instanceName,
+            Response.Status.NOT_FOUND, null);
+      }
+      return JsonUtils.objectToString(workloadToInstanceCostMap);
+    } catch (Exception e) {
+      if (e instanceof ControllerApplicationException) {
+        throw (ControllerApplicationException) e;
+      } else {
+        String errorMessage = String.format("Error while getting workload 
config for instance: %s, error: %s",
+            instanceName, e);
+        throw new ControllerApplicationException(LOGGER, errorMessage, 
Response.Status.INTERNAL_SERVER_ERROR, e);
+      }
+    }
+  }
+
+  /**
+   * Updates the query workload config
+   * @param requestString JSON string representing the QueryWorkloadConfig
+   * Example request:
+   * {
+   *   "queryWorkloadName" : "workload-foo1",
+   *   "nodeConfigs" : {
+   *    {
+   *       "nodeType" : "LeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 500,
+   *         "memoryCostBytes": 1000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TABLE",
+   *         "values": ["airlineStats"]
+   *       }
+   *     },
+   *     {
+   *       "nodeType" : "nonLeafNode",
+   *       "enforcementProfile": {
+   *         "cpuCostNs": 1500,
+   *         "memoryCostBytes": 12000
+   *       },
+   *       "propagationScheme": {
+   *         "propagationType": "TENANT",
+   *         "values": ["DefaultTenant"]
+   *       }
+   *     }
+   *   }
+   * }
+   *
+   */
+  @POST
+  @Produces(MediaType.APPLICATION_JSON)
+  @Path("/queryWorkloadConfigs")
+  @Authorize(targetType = TargetType.CLUSTER, action = 
Actions.Cluster.UPDATE_QUERY_WORKLOAD_CONFIG)
+  @Authenticate(AccessType.UPDATE)
+  @ApiOperation(value = "Update query workload config", notes = "Update 
workload config for the workload name")
+  public Response updateQueryWorkloadConfig(String requestString, @Context 
HttpHeaders httpHeaders) {
+    try {
+      LOGGER.info("Received request to update queryWorkloadConfig with 
request: {}", requestString);
+      QueryWorkloadConfig queryWorkloadConfig = 
JsonUtils.stringToObject(requestString, QueryWorkloadConfig.class);
+      _pinotHelixResourceManager.setQueryWorkloadConfig(queryWorkloadConfig);
+      String successMessage = String.format("Query Workload config updated 
successfully for workload: %s",
+          queryWorkloadConfig.getQueryWorkloadName());
+      LOGGER.info(successMessage);
+      return Response.ok().entity(successMessage).build();
+    } catch (Exception e) {
+      String errorMessage = String.format("Error when updating query workload 
request: %s, error: %s",

Review Comment:
   We should be returning BAD_REQUEST here if the request is malformed. 



##########
pinot-common/src/main/java/org/apache/pinot/common/utils/config/QueryWorkloadConfigUtils.java:
##########
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.common.utils.config;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.google.common.base.Preconditions;
+import java.net.URI;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hc.core5.http.ClassicHttpRequest;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.HttpStatus;
+import org.apache.hc.core5.http.HttpVersion;
+import org.apache.hc.core5.http.io.support.ClassicRequestBuilder;
+import org.apache.helix.zookeeper.datamodel.ZNRecord;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.common.utils.http.HttpClientConfig;
+import org.apache.pinot.common.utils.tls.TlsUtils;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.apache.pinot.spi.utils.retry.RetryPolicy;
+import org.slf4j.Logger;
+
+
+public class QueryWorkloadConfigUtils {
+  private QueryWorkloadConfigUtils() {
+  }
+
+  private static final Logger LOGGER = 
org.slf4j.LoggerFactory.getLogger(QueryWorkloadConfigUtils.class);
+  private static final HttpClient HTTP_CLIENT = new 
HttpClient(HttpClientConfig.DEFAULT_HTTP_CLIENT_CONFIG,
+          TlsUtils.getSslContext());
+
+  /**
+   * Converts a ZNRecord into a QueryWorkloadConfig object by extracting 
mapFields.
+   *
+   * @param znRecord The ZNRecord containing workload config data.
+   * @return A QueryWorkloadConfig object.
+   */
+  public static QueryWorkloadConfig fromZNRecord(ZNRecord znRecord) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");
+    String queryWorkloadName = 
znRecord.getSimpleField(QueryWorkloadConfig.QUERY_WORKLOAD_NAME);
+    Preconditions.checkNotNull(queryWorkloadName, "queryWorkloadName cannot be 
null");
+    String nodeConfigsJson = 
znRecord.getSimpleField(QueryWorkloadConfig.NODE_CONFIGS);
+    Preconditions.checkNotNull(nodeConfigsJson, "nodeConfigs cannot be null");
+    try {
+      List<NodeConfig> nodeConfigs = JsonUtils.stringToObject(nodeConfigsJson, 
new TypeReference<>() { });
+      return new QueryWorkloadConfig(queryWorkloadName, nodeConfigs);
+    } catch (Exception e) {
+      String errorMessage = String.format("Failed to convert ZNRecord : %s to 
QueryWorkloadConfig", znRecord);
+      throw new RuntimeException(errorMessage, e);
+    }
+  }
+
+  /**
+   * Updates a ZNRecord with the fields from a WorkloadConfig object.
+   *
+   * @param queryWorkloadConfig The QueryWorkloadConfig object to convert.
+   * @param znRecord The ZNRecord to update.
+   */
+  public static void updateZNRecordWithWorkloadConfig(ZNRecord znRecord, 
QueryWorkloadConfig queryWorkloadConfig) {
+    Preconditions.checkNotNull(znRecord, "ZNRecord cannot be null");

Review Comment:
   Would prefer that we have all these conditions (in addition to other ones) 
in a common validation function that checks whether an incoming 
QueryWorkloadConfig (including the NodeConfig, EnforcementProfile, 
PropagationScheme) is valid. 
   



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The QueryWorkloadManager class is responsible for propagating the query 
workload
+ * refresh message to the relevant instances based on the node configurations.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: To make this configurable once we have multiple cost splitters 
implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkload(QueryWorkloadConfig queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        return;
+      }
+      // Calculate the instance cost for each instance
+      Map<String, InstanceCost> instanceCostMap = 
_costSplitter.computeInstanceCostMap(nodeConfig, instances);

Review Comment:
   Que: This is with the assumption that one instance can have only one Cost. 
If a given server is used both for leaf stage and intermediate stage 
processing, do we plan to update both leaf+non-leaf costs against the same 
workload?  



##########
pinot-controller/src/main/java/org/apache/pinot/controller/helix/core/PinotHelixResourceManager.java:
##########
@@ -4764,6 +4777,48 @@ public Map<String, Integer> 
minimumInstancesRequiredForTags() {
     return tagMinInstanceMap;
   }
 
+  @Nullable
+  public List<QueryWorkloadConfig> getAllQueryWorkloadConfigs() {
+    return ZKMetadataProvider.getAllQueryWorkloadConfigs(_propertyStore);
+  }
+
+  @Nullable
+  public QueryWorkloadConfig getQueryWorkloadConfig(String queryWorkloadName) {
+    return ZKMetadataProvider.getQueryWorkloadConfig(_propertyStore, 
queryWorkloadName);
+  }
+
+  public void setQueryWorkloadConfig(QueryWorkloadConfig queryWorkloadConfig) {
+    if (!ZKMetadataProvider.setQueryWorkloadConfig(_propertyStore, 
queryWorkloadConfig)) {
+      throw new RuntimeException("Failed to set workload config for 
queryWorkloadName: "
+          + queryWorkloadConfig.getQueryWorkloadName());
+    }
+    _queryWorkloadManager.propagateWorkload(queryWorkloadConfig);
+  }
+
+  public void sendQueryWorkloadRefreshMessage(Map<String, 
QueryWorkloadRefreshMessage> instanceToRefreshMessageMap) {
+    instanceToRefreshMessageMap.forEach((instance, message) -> {
+      Criteria criteria = new Criteria();
+      criteria.setRecipientInstanceType(InstanceType.PARTICIPANT);
+      criteria.setInstanceName(instance);
+      criteria.setSessionSpecific(true);
+
+      int numMessagesSent = 
_helixZkManager.getMessagingService().send(criteria, message, null, -1);
+      if (numMessagesSent > 0) {
+        LOGGER.info("Sent {} query workload config refresh messages to 
instance: {}", numMessagesSent, instance);
+      } else {
+        LOGGER.warn("No query workload config refresh message sent to 
instance: {}", instance);
+      }
+    });
+  }
+
+  public void deleteQueryWorkloadConfig(String workload) {
+    ZKMetadataProvider.deleteQueryWorkloadConfig(_propertyStore, workload);

Review Comment:
   Shouldn't this send a message to all the instances to remove the workload 
costs? 



##########
pinot-controller/src/main/java/org/apache/pinot/controller/workload/QueryWorkloadManager.java:
##########
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.controller.workload;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.pinot.common.messages.QueryWorkloadRefreshMessage;
+import org.apache.pinot.controller.helix.core.PinotHelixResourceManager;
+import org.apache.pinot.controller.workload.scheme.PropagationScheme;
+import org.apache.pinot.controller.workload.scheme.PropagationSchemeProvider;
+import org.apache.pinot.controller.workload.scheme.PropagationUtils;
+import org.apache.pinot.controller.workload.splitter.CostSplitter;
+import org.apache.pinot.controller.workload.splitter.DefaultCostSplitter;
+import org.apache.pinot.spi.config.workload.InstanceCost;
+import org.apache.pinot.spi.config.workload.NodeConfig;
+import org.apache.pinot.spi.config.workload.QueryWorkloadConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * The QueryWorkloadManager class is responsible for propagating the query 
workload
+ * refresh message to the relevant instances based on the node configurations.
+ */
+public class QueryWorkloadManager {
+  public static final Logger LOGGER = 
LoggerFactory.getLogger(QueryWorkloadManager.class);
+
+  private final PinotHelixResourceManager _pinotHelixResourceManager;
+  private PropagationSchemeProvider _propagationSchemeProvider;
+  private final CostSplitter _costSplitter;
+
+  public QueryWorkloadManager(PinotHelixResourceManager 
pinotHelixResourceManager) {
+    _pinotHelixResourceManager = pinotHelixResourceManager;
+    _propagationSchemeProvider = new 
PropagationSchemeProvider(pinotHelixResourceManager);
+    // TODO: To make this configurable once we have multiple cost splitters 
implementations
+    _costSplitter = new DefaultCostSplitter();
+  }
+
+  /**
+   * Propagate the workload to the relevant instances based on the 
PropagationScheme
+   * @param queryWorkloadConfig The query workload configuration to propagate
+   * 1. Resolve the instances based on the node type and propagation scheme
+   * 2. Calculate the instance cost for each instance
+   * 3. Send the {@link QueryWorkloadRefreshMessage} to the instances
+   */
+  public void propagateWorkload(QueryWorkloadConfig queryWorkloadConfig) {
+    String queryWorkloadName = queryWorkloadConfig.getQueryWorkloadName();
+    for (NodeConfig nodeConfig: queryWorkloadConfig.getNodeConfigs()) {
+      // Resolve the instances based on the node type and propagation scheme
+      Set<String> instances = resolveInstances(nodeConfig);
+      if (instances.isEmpty()) {
+        String errorMsg = String.format("No instances found for Workload: %s", 
queryWorkloadName);
+        LOGGER.warn(errorMsg);
+        return;

Review Comment:
   Shouldn't we continue here for other nodeConfig definitions? 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org

Reply via email to