snleee commented on code in PR #9295:
URL: https://github.com/apache/pinot/pull/9295#discussion_r959249986


##########
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/utils/ConsistentDataPushUtils.java:
##########
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.segment.local.utils;
+
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.common.auth.AuthProviderUtils;
+import org.apache.pinot.common.exception.HttpErrorStatusException;
+import org.apache.pinot.common.restlet.resources.StartReplaceSegmentsRequest;
+import org.apache.pinot.common.segment.generation.SegmentGenerationUtils;
+import org.apache.pinot.common.utils.FileUploadDownloadClient;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.spi.auth.AuthProvider;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.ingestion.batch.spec.PinotClusterSpec;
+import org.apache.pinot.spi.ingestion.batch.spec.SegmentGenerationJobSpec;
+import org.apache.pinot.spi.ingestion.batch.spec.SegmentNameGeneratorSpec;
+import org.apache.pinot.spi.utils.IngestionConfigUtils;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class ConsistentDataPushUtils {
+  private ConsistentDataPushUtils() {
+  }
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(SegmentPushUtils.class);
+  private static final FileUploadDownloadClient FILE_UPLOAD_DOWNLOAD_CLIENT = 
new FileUploadDownloadClient();
+  public static final String SEGMENT_NAME_POSTFIX = "segment.name.postfix";
+
+  /**
+   * Checks for enablement of consistent data push. If enabled, fetch the list 
of segments to be replaced, then
+   * invoke startReplaceSegments API and returns a map of controller URI to 
lineage entry IDs.
+   * If not, returns an empty hashmap.
+   */
+  public static Map<URI, String> preUpload(SegmentGenerationJobSpec spec, 
List<String> segmentsTo)
+      throws Exception {
+    String rawTableName = spec.getTableSpec().getTableName();
+    boolean consistentDataPushEnabled = consistentDataPushEnabled(spec);
+    LOGGER.info("Consistent data push is: {}", consistentDataPushEnabled ? 
"enabled" : "disabled");
+    Map<URI, String> uriToLineageEntryIdMap = null;
+    if (consistentDataPushEnabled) {
+      LOGGER.info("Start consistent push for table: " + rawTableName);
+      Map<URI, List<String>> uriToExistingOfflineSegments = 
getSegmentsToReplace(spec, rawTableName);
+      LOGGER.info("Existing segments for table {}: " + 
uriToExistingOfflineSegments, rawTableName);
+      LOGGER.info("New segments for table: {}: " + segmentsTo, rawTableName);
+      uriToLineageEntryIdMap = startReplaceSegments(spec, 
uriToExistingOfflineSegments, segmentsTo);
+    }
+    return uriToLineageEntryIdMap;
+  }
+
+  /**
+   * uriToLineageEntryIdMap is non-empty if and only if consistent data push 
is enabled.
+   * If uriToLineageEntryIdMap is non-empty, end the consistent data push 
protocol for each controller.
+   */
+  public static void postUpload(SegmentGenerationJobSpec spec, Map<URI, 
String> uriToLineageEntryIdMap) {
+    String rawTableName = spec.getTableSpec().getTableName();
+    if (uriToLineageEntryIdMap != null && !uriToLineageEntryIdMap.isEmpty()) {
+      LOGGER.info("End consistent push for table: " + rawTableName);
+      endReplaceSegments(spec, uriToLineageEntryIdMap);
+    }
+  }
+
+  /**
+   * Builds a map of controller URI to startReplaceSegments URI for each Pinot 
cluster in the spec.
+   */
+  public static Map<URI, URI> 
getStartReplaceSegmentUris(SegmentGenerationJobSpec spec, String rawTableName) {
+    Map<URI, URI> baseUriToStartReplaceSegmentUriMap = new HashMap<>();
+    for (PinotClusterSpec pinotClusterSpec : spec.getPinotClusterSpecs()) {
+      URI controllerURI;
+      try {
+        controllerURI = new URI(pinotClusterSpec.getControllerURI());
+        baseUriToStartReplaceSegmentUriMap.put(controllerURI,
+            FileUploadDownloadClient.getStartReplaceSegmentsURI(controllerURI, 
rawTableName,
+                TableType.OFFLINE.toString(), true));
+      } catch (URISyntaxException e) {
+        throw new RuntimeException("Got invalid controller uri - '" + 
pinotClusterSpec.getControllerURI() + "'");
+      }
+    }
+    return baseUriToStartReplaceSegmentUriMap;
+  }
+
+  /**
+   * Starts consistent data push protocol for each Pinot cluster in the spec.
+   * Returns a map of controller URI to segment lineage entry ID.
+   */
+  public static Map<URI, String> startReplaceSegments(SegmentGenerationJobSpec 
spec,
+      Map<URI, List<String>> uriToSegmentsFrom, List<String> segmentsTo)
+      throws Exception {
+    Map<URI, String> uriToLineageEntryIdMap = new HashMap<>();
+    String rawTableName = spec.getTableSpec().getTableName();
+    Map<URI, URI> segmentsUris = getStartReplaceSegmentUris(spec, 
rawTableName);
+    AuthProvider authProvider = 
AuthProviderUtils.makeAuthProvider(spec.getAuthToken());
+    LOGGER.info("Start replace segment URIs: " + segmentsUris);
+
+    int attempts = 1;
+    long retryWaitMs = 1000L;
+
+    for (Map.Entry<URI, URI> entry : segmentsUris.entrySet()) {
+      URI controllerUri = entry.getKey();
+      URI startSegmentUri = entry.getValue();
+      List<String> segmentsFrom = uriToSegmentsFrom.get(controllerUri);
+
+      if (!Collections.disjoint(segmentsFrom, segmentsTo)) {
+        String errorMsg =
+            String.format("Found same segment names when attempting to enable 
consistent push for table: %s",
+                rawTableName);
+        LOGGER.error("SegmentsFrom: {}", segmentsFrom);
+        LOGGER.error("SegmentsTo: {}", segmentsTo);
+        LOGGER.error(errorMsg);
+        throw new RuntimeException(errorMsg);
+      }
+
+      StartReplaceSegmentsRequest startReplaceSegmentsRequest =
+          new StartReplaceSegmentsRequest(segmentsFrom, segmentsTo);
+      RetryPolicies.exponentialBackoffRetryPolicy(attempts, retryWaitMs, 
5).attempt(() -> {
+        try {
+          SimpleHttpResponse response =
+              
FILE_UPLOAD_DOWNLOAD_CLIENT.startReplaceSegments(startSegmentUri, 
startReplaceSegmentsRequest,
+                  authProvider);
+
+          String responseString = response.getResponse();
+          LOGGER.info(
+              "Got response {}: {} while sending start replace segment request 
for table: {}, uploadURI: {}, request:"
+                  + " {}", response.getStatusCode(), responseString, 
rawTableName, startSegmentUri,
+              startReplaceSegmentsRequest);
+          String segmentLineageEntryId =
+              
JsonUtils.stringToJsonNode(responseString).get("segmentLineageEntryId").asText();
+          uriToLineageEntryIdMap.put(controllerUri, segmentLineageEntryId);
+          return true;
+        } catch (SocketTimeoutException se) {
+          // In case of the timeout, we should re-try.
+          return false;
+        } catch (HttpErrorStatusException e) {
+          if (e.getStatusCode() >= 500) {
+            return false;
+          } else {
+            if (e.getStatusCode() == 
Response.Status.NOT_FOUND.getStatusCode()) {
+              LOGGER.error("Table: {} not found when sending request: {}", 
rawTableName, startSegmentUri);
+            }
+            throw e;
+          }
+        }
+      });
+    }
+    return uriToLineageEntryIdMap;
+  }
+
+  /**
+   * Ends consistent data push protocol for each Pinot cluster in the spec.
+   */
+  public static void endReplaceSegments(SegmentGenerationJobSpec spec, 
Map<URI, String> uriToLineageEntryIdMap) {
+    AuthProvider authProvider = 
AuthProviderUtils.makeAuthProvider(spec.getAuthToken());
+    String rawTableName = spec.getTableSpec().getTableName();
+    for (URI uri : uriToLineageEntryIdMap.keySet()) {
+      String segmentLineageEntryId = uriToLineageEntryIdMap.get(uri);
+      try {
+        FILE_UPLOAD_DOWNLOAD_CLIENT.endReplaceSegments(

Review Comment:
   Can we add the retry here as well, to keep consistency with the 
startReplaceSegments call?



##########
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/utils/ConsistentDataPushUtils.java:
##########
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.segment.local.utils;
+
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.common.auth.AuthProviderUtils;
+import org.apache.pinot.common.exception.HttpErrorStatusException;
+import org.apache.pinot.common.restlet.resources.StartReplaceSegmentsRequest;
+import org.apache.pinot.common.segment.generation.SegmentGenerationUtils;
+import org.apache.pinot.common.utils.FileUploadDownloadClient;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.spi.auth.AuthProvider;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.ingestion.batch.spec.PinotClusterSpec;
+import org.apache.pinot.spi.ingestion.batch.spec.SegmentGenerationJobSpec;
+import org.apache.pinot.spi.ingestion.batch.spec.SegmentNameGeneratorSpec;
+import org.apache.pinot.spi.utils.IngestionConfigUtils;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class ConsistentDataPushUtils {

Review Comment:
   I think that we should have retries for GET API calls to the controller. 
Can you double-check how `PinotClusterAccessor:GetRequestCallable` 
achieves this in our internal push job (pinot-build-push)?



##########
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/utils/ConsistentDataPushUtils.java:
##########
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.segment.local.utils;
+
+import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import javax.ws.rs.core.Response;
+import org.apache.pinot.common.auth.AuthProviderUtils;
+import org.apache.pinot.common.exception.HttpErrorStatusException;
+import org.apache.pinot.common.restlet.resources.StartReplaceSegmentsRequest;
+import org.apache.pinot.common.segment.generation.SegmentGenerationUtils;
+import org.apache.pinot.common.utils.FileUploadDownloadClient;
+import org.apache.pinot.common.utils.SimpleHttpResponse;
+import org.apache.pinot.common.utils.http.HttpClient;
+import org.apache.pinot.spi.auth.AuthProvider;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.ingestion.batch.spec.PinotClusterSpec;
+import org.apache.pinot.spi.ingestion.batch.spec.SegmentGenerationJobSpec;
+import org.apache.pinot.spi.ingestion.batch.spec.SegmentNameGeneratorSpec;
+import org.apache.pinot.spi.utils.IngestionConfigUtils;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.retry.RetryPolicies;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class ConsistentDataPushUtils {
+  private ConsistentDataPushUtils() {
+  }
+
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(SegmentPushUtils.class);
+  private static final FileUploadDownloadClient FILE_UPLOAD_DOWNLOAD_CLIENT = 
new FileUploadDownloadClient();
+  public static final String SEGMENT_NAME_POSTFIX = "segment.name.postfix";
+
+  /**
+   * Checks for enablement of consistent data push. If enabled, fetch the list 
of segments to be replaced, then
+   * invoke startReplaceSegments API and returns a map of controller URI to 
lineage entry IDs.
+   * If not, returns an empty hashmap.
+   */
+  public static Map<URI, String> preUpload(SegmentGenerationJobSpec spec, 
List<String> segmentsTo)
+      throws Exception {
+    String rawTableName = spec.getTableSpec().getTableName();
+    boolean consistentDataPushEnabled = consistentDataPushEnabled(spec);
+    LOGGER.info("Consistent data push is: {}", consistentDataPushEnabled ? 
"enabled" : "disabled");
+    Map<URI, String> uriToLineageEntryIdMap = null;
+    if (consistentDataPushEnabled) {
+      LOGGER.info("Start consistent push for table: " + rawTableName);
+      Map<URI, List<String>> uriToExistingOfflineSegments = 
getSegmentsToReplace(spec, rawTableName);
+      LOGGER.info("Existing segments for table {}: " + 
uriToExistingOfflineSegments, rawTableName);
+      LOGGER.info("New segments for table: {}: " + segmentsTo, rawTableName);
+      uriToLineageEntryIdMap = startReplaceSegments(spec, 
uriToExistingOfflineSegments, segmentsTo);
+    }
+    return uriToLineageEntryIdMap;
+  }
+
+  /**
+   * uriToLineageEntryIdMap is non-empty if and only if consistent data push 
is enabled.
+   * If uriToLineageEntryIdMap is non-empty, end the consistent data push 
protocol for each controller.
+   */
+  public static void postUpload(SegmentGenerationJobSpec spec, Map<URI, 
String> uriToLineageEntryIdMap) {
+    String rawTableName = spec.getTableSpec().getTableName();
+    if (uriToLineageEntryIdMap != null && !uriToLineageEntryIdMap.isEmpty()) {
+      LOGGER.info("End consistent push for table: " + rawTableName);
+      endReplaceSegments(spec, uriToLineageEntryIdMap);
+    }
+  }
+
+  /**
+   * Builds a map of controller URI to startReplaceSegments URI for each Pinot 
cluster in the spec.
+   */
+  public static Map<URI, URI> 
getStartReplaceSegmentUris(SegmentGenerationJobSpec spec, String rawTableName) {
+    Map<URI, URI> baseUriToStartReplaceSegmentUriMap = new HashMap<>();
+    for (PinotClusterSpec pinotClusterSpec : spec.getPinotClusterSpecs()) {
+      URI controllerURI;
+      try {
+        controllerURI = new URI(pinotClusterSpec.getControllerURI());
+        baseUriToStartReplaceSegmentUriMap.put(controllerURI,
+            FileUploadDownloadClient.getStartReplaceSegmentsURI(controllerURI, 
rawTableName,
+                TableType.OFFLINE.toString(), true));
+      } catch (URISyntaxException e) {
+        throw new RuntimeException("Got invalid controller uri - '" + 
pinotClusterSpec.getControllerURI() + "'");
+      }
+    }
+    return baseUriToStartReplaceSegmentUriMap;
+  }
+
+  /**
+   * Starts consistent data push protocol for each Pinot cluster in the spec.
+   * Returns a map of controller URI to segment lineage entry ID.
+   */
+  public static Map<URI, String> startReplaceSegments(SegmentGenerationJobSpec 
spec,
+      Map<URI, List<String>> uriToSegmentsFrom, List<String> segmentsTo)
+      throws Exception {
+    Map<URI, String> uriToLineageEntryIdMap = new HashMap<>();
+    String rawTableName = spec.getTableSpec().getTableName();
+    Map<URI, URI> segmentsUris = getStartReplaceSegmentUris(spec, 
rawTableName);
+    AuthProvider authProvider = 
AuthProviderUtils.makeAuthProvider(spec.getAuthToken());
+    LOGGER.info("Start replace segment URIs: " + segmentsUris);
+
+    int attempts = 1;

Review Comment:
   Can we use a number larger than 1 for the retry attempts? What's the 
reasoning behind retrying only once?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org

Reply via email to