Copilot commented on code in PR #17806:
URL: https://github.com/apache/pinot/pull/17806#discussion_r2901333951


##########
pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/ADLSGen2PinotFS.java:
##########
@@ -637,42 +683,127 @@ private boolean copySrcToDst(URI srcUri, URI dstUri)
    PathProperties pathProperties =
        _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToAzureStylePath(srcUri)).getProperties();
     try (InputStream inputStream = open(srcUri)) {
-      return copyInputStreamToDst(inputStream, dstUri, pathProperties.getContentMd5());
+      return copyInputStreamToDst(inputStream, dstUri, pathProperties.getContentMd5(),
+          pathProperties.getFileSize());
     }
   }
 
   /**
    * Helper function to copy input stream to destination URI.
    *
+   * <p>Uses the Azure Blob API for uploads, which is compatible with storage accounts that have Blob Soft Delete
+   * enabled. The DFS (Data Lake) API does not support Soft Delete and will fail with 409
+   * EndpointUnsupportedAccountFeatures on such accounts.</p>
+   *
    * NOTE: the caller has to close the input stream.
    *
    * @param inputStream input stream that will be written to dstUri
    * @param dstUri destination URI
+   * @param contentMd5 optional MD5 hash of the content
+   * @param contentLength length of the content in bytes
    * @return true if the copy succeeds
    */
-  private boolean copyInputStreamToDst(InputStream inputStream, URI dstUri, byte[] contentMd5)
+  private boolean copyInputStreamToDst(InputStream inputStream, URI dstUri, byte[] contentMd5, long contentLength)
+      throws IOException {
+    String path = AzurePinotFSUtil.convertUriToAzureStylePath(dstUri);
+
+    if (_blobContainerClient != null) {
+      return copyInputStreamToDstViaBlob(inputStream, dstUri, path, contentMd5);
+    }
+    return copyInputStreamToDstViaDfs(inputStream, dstUri, path, contentMd5);
+  }

Review Comment:
   `contentLength` is passed into `copyInputStreamToDst(...)` but never used. 
Either remove it to avoid dead parameters, or use it to select/parameterize the 
Blob upload call (e.g., provide the known length to the SDK upload options).
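   
   A minimal sketch of the second option, assuming the current azure-storage-blob / azure-core SDKs: the `BinaryData.fromStream(InputStream, Long)` overload carries the known length, so the client can size the transfer up front instead of buffering the stream to discover it.
   
   ```java
   // Sketch only: thread the known contentLength through to the Blob upload.
   // Requires: import com.azure.core.util.BinaryData;
   BlobParallelUploadOptions uploadOptions =
       new BlobParallelUploadOptions(BinaryData.fromStream(inputStream, contentLength));
   if (blobHttpHeaders != null) {
     uploadOptions.setHeaders(blobHttpHeaders);
   }
   blobClient.uploadWithResponse(uploadOptions, null, Context.NONE);
   ```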



##########
pinot-plugins/pinot-file-system/pinot-adls/src/main/java/org/apache/pinot/plugin/filesystem/ADLSGen2PinotFS.java:
##########
@@ -637,42 +683,127 @@ private boolean copySrcToDst(URI srcUri, URI dstUri)
    PathProperties pathProperties =
        _fileSystemClient.getFileClient(AzurePinotFSUtil.convertUriToAzureStylePath(srcUri)).getProperties();
     try (InputStream inputStream = open(srcUri)) {
-      return copyInputStreamToDst(inputStream, dstUri, pathProperties.getContentMd5());
+      return copyInputStreamToDst(inputStream, dstUri, pathProperties.getContentMd5(),
+          pathProperties.getFileSize());
     }
   }
 
   /**
    * Helper function to copy input stream to destination URI.
    *
+   * <p>Uses the Azure Blob API for uploads, which is compatible with storage accounts that have Blob Soft Delete
+   * enabled. The DFS (Data Lake) API does not support Soft Delete and will fail with 409
+   * EndpointUnsupportedAccountFeatures on such accounts.</p>
+   *
    * NOTE: the caller has to close the input stream.
    *
    * @param inputStream input stream that will be written to dstUri
    * @param dstUri destination URI
+   * @param contentMd5 optional MD5 hash of the content
+   * @param contentLength length of the content in bytes
    * @return true if the copy succeeds
    */
-  private boolean copyInputStreamToDst(InputStream inputStream, URI dstUri, byte[] contentMd5)
+  private boolean copyInputStreamToDst(InputStream inputStream, URI dstUri, byte[] contentMd5, long contentLength)
+      throws IOException {
+    String path = AzurePinotFSUtil.convertUriToAzureStylePath(dstUri);
+
+    if (_blobContainerClient != null) {
+      return copyInputStreamToDstViaBlob(inputStream, dstUri, path, contentMd5);
+    }
+    return copyInputStreamToDstViaDfs(inputStream, dstUri, path, contentMd5);
+  }
+
+  /**
+   * Upload via Azure Blob API. Compatible with Blob Soft Delete.
+   */
+  private boolean copyInputStreamToDstViaBlob(InputStream inputStream, URI dstUri, String path, byte[] contentMd5)
+      throws IOException {
+    try {
+      BlobClient blobClient = _blobContainerClient.getBlobClient(path);
+      BlobHttpHeaders blobHttpHeaders = contentMd5 != null ? getBlobHttpHeadersWithContentMd5(contentMd5)
+          : null;
+      if (_enableChecksum) {
+        uploadWithBlockLevelChecksum(blobClient.getBlockBlobClient(), inputStream, blobHttpHeaders);
+      } else {
+        BlobParallelUploadOptions uploadOptions = new BlobParallelUploadOptions(inputStream);
+        if (blobHttpHeaders != null) {
+          uploadOptions.setHeaders(blobHttpHeaders);
+        }
+        blobClient.uploadWithResponse(uploadOptions, null, Context.NONE);
+      }
+      return true;
+    } catch (BlobStorageException e) {
+      LOGGER.error("Exception thrown while uploading to destination via Blob API (dstUri={}, errorStatus={})", dstUri,
+          e.getStatusCode(), e);
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * Uploads stream using block staging with per-block MD5 for transactional integrity.
+   */
+  private void uploadWithBlockLevelChecksum(BlockBlobClient blockBlobClient, InputStream inputStream,
+      BlobHttpHeaders blobHttpHeaders)
+      throws IOException, BlobStorageException {
+    int bytesRead;
+    int blockIdCounter = 0;
+    byte[] buffer = new byte[BUFFER_SIZE];
+    List<String> blockIds = new ArrayList<>();
+
+    try {
+      MessageDigest md5Block = MessageDigest.getInstance("MD5");
+      while ((bytesRead = inputStream.read(buffer)) != -1) {
+        md5Block.reset();
+        md5Block.update(buffer, 0, bytesRead);
+        byte[] md5BlockHash = md5Block.digest();
+
+        String blockId = Base64.getEncoder()
+            .encodeToString(String.format("%08d", blockIdCounter).getBytes(StandardCharsets.UTF_8));
+        blockIdCounter++;
+        blockIds.add(blockId);
+
+        try (ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(buffer, 0, bytesRead)) {
+          blockBlobClient.stageBlockWithResponse(blockId, byteArrayInputStream, bytesRead, md5BlockHash, null, null,
+              Context.NONE);
+        }
+      }
+      if (blobHttpHeaders != null) {
+        blockBlobClient.commitBlockListWithResponse(blockIds, blobHttpHeaders, null, null, null, null, Context.NONE);
+      } else {
+        blockBlobClient.commitBlockList(blockIds);
+      }

Review Comment:
   `uploadWithBlockLevelChecksum(...)` reuses the DFS buffer size (4MB) for 
Blob block staging. For Block Blobs, small blocks can dramatically increase 
block count and risk hitting Azure's max-blocks-per-blob limit (which 
effectively caps max upload size at ~4MB * maxBlocks). Consider using a larger 
block size (or making it configurable) when staging blocks via the Blob API.
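   
   A hedged sketch of a dedicated staging block size; the constant name below is illustrative, not part of this PR:
   
   ```java
   // Azure Block Blobs allow at most 50,000 committed blocks per blob, so the
   // upload ceiling is blockSize * 50,000: ~200 GB at 4 MB blocks, ~1.5 TB at 32 MB.
   // Hypothetical constant; could instead be read from PinotConfiguration in init().
   private static final int DEFAULT_BLOB_BLOCK_SIZE_BYTES = 32 * 1024 * 1024;
   
   // In uploadWithBlockLevelChecksum(...), size the staging buffer independently
   // of the 4 MB DFS BUFFER_SIZE:
   byte[] buffer = new byte[DEFAULT_BLOB_BLOCK_SIZE_BYTES];
   ```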



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

