This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 3157f705d5c HDDS-13626. Update hadoop to 3.4.2 (#8980)
3157f705d5c is described below
commit 3157f705d5cff96bc8708adc5a492874e5971fd3
Author: slfan1989 <[email protected]>
AuthorDate: Mon Oct 6 21:33:50 2025 +0800
HDDS-13626. Update hadoop to 3.4.2 (#8980)
---
.../AbstractContractMultipartUploaderTest.java | 38 +++++++++++++---------
pom.xml | 2 +-
2 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index 8b77338037d..ba902986eee 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
@@ -252,7 +252,7 @@ public void testSingleUpload() throws Exception {
// was interpreted as an inconsistent write.
MultipartUploader completer = uploader0;
// and upload with uploader 1 to validate cross-uploader uploads
- PartHandle partHandle = putPart(file, uploadHandle, 1, payload);
+ PartHandle partHandle = putPart(file, uploadHandle, 1, true, payload);
partHandles.put(1, partHandle);
PathHandle fd = complete(completer, uploadHandle, file,
partHandles);
@@ -321,12 +321,13 @@ protected PartHandle buildAndPutPart(
final Path file,
final UploadHandle uploadHandle,
final int index,
+ final boolean isLastPart,
final MessageDigest origDigest) throws IOException {
byte[] payload = generatePayload(index);
if (origDigest != null) {
origDigest.update(payload);
}
- return putPart(file, uploadHandle, index, payload);
+ return putPart(file, uploadHandle, index, isLastPart, payload);
}
/**
@@ -335,6 +336,7 @@ protected PartHandle buildAndPutPart(
* @param file destination
* @param uploadHandle handle
* @param index index of part
+ * @param isLastPart is last part of the upload ?
* @param payload byte array of payload
* @return the part handle
* @throws IOException IO failure.
@@ -342,6 +344,7 @@ protected PartHandle buildAndPutPart(
protected PartHandle putPart(final Path file,
final UploadHandle uploadHandle,
final int index,
+ final boolean isLastPart,
final byte[] payload) throws IOException {
ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
PartHandle partHandle;
@@ -351,7 +354,7 @@ protected PartHandle putPart(final Path file,
payload.length,
file)) {
partHandle = awaitFuture(getUploader(index)
- .putPart(uploadHandle, index, file,
+ .putPart(uploadHandle, index, isLastPart, file,
new ByteArrayInputStream(payload),
payload.length));
}
@@ -492,7 +495,7 @@ public void testMultipartUpload() throws Exception {
MessageDigest origDigest = DigestUtils.getMd5Digest();
int payloadCount = getTestPayloadCount();
for (int i = 1; i <= payloadCount; ++i) {
- PartHandle partHandle = buildAndPutPart(file, uploadHandle, i,
+ PartHandle partHandle = buildAndPutPart(file, uploadHandle, i, i == payloadCount,
origDigest);
partHandles.put(i, partHandle);
}
@@ -508,7 +511,7 @@ public void testMultipartUpload() throws Exception {
@Test
public void testMultipartUploadEmptyPart() throws Exception {
FileSystem fs = getFileSystem();
- Path file = path("testMultipartUpload");
+ Path file = path("testMultipartUploadEmptyPart");
try (MultipartUploader uploader =
fs.createMultipartUploader(file).build()) {
UploadHandle uploadHandle = uploader.startUpload(file).get();
@@ -519,7 +522,7 @@ public void testMultipartUploadEmptyPart() throws Exception {
origDigest.update(payload);
InputStream is = new ByteArrayInputStream(payload);
PartHandle partHandle = awaitFuture(
- uploader.putPart(uploadHandle, 1, file, is, payload.length));
+ uploader.putPart(uploadHandle, 1, true, file, is, payload.length));
partHandles.put(1, partHandle);
completeUpload(file, uploadHandle, partHandles, origDigest, 0);
}
@@ -534,7 +537,7 @@ public void testUploadEmptyBlock() throws Exception {
Path file = methodPath();
UploadHandle uploadHandle = startUpload(file);
Map<Integer, PartHandle> partHandles = new HashMap<>();
- partHandles.put(1, putPart(file, uploadHandle, 1, new byte[0]));
+ partHandles.put(1, putPart(file, uploadHandle, 1, true, new byte[0]));
completeUpload(file, uploadHandle, partHandles, null, 0);
}
@@ -554,7 +557,8 @@ public void testMultipartUploadReverseOrder() throws Exception {
origDigest.update(payload);
}
for (int i = payloadCount; i > 0; --i) {
- partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null));
+ partHandles.put(i, buildAndPutPart(file, uploadHandle, i, i == payloadCount,
+ null));
}
completeUpload(file, uploadHandle, partHandles, origDigest,
payloadCount * partSizeInBytes());
@@ -578,7 +582,8 @@ public void testMultipartUploadReverseOrderNonContiguousPartNumbers()
}
Map<Integer, PartHandle> partHandles = new HashMap<>();
for (int i = payloadCount; i > 0; i -= 2) {
- partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null));
+ partHandles.put(i, buildAndPutPart(file, uploadHandle, i, i == payloadCount,
+ null));
}
completeUpload(file, uploadHandle, partHandles, origDigest,
getTestPayloadCount() * partSizeInBytes());
@@ -595,7 +600,7 @@ public void testMultipartUploadAbort() throws Exception {
UploadHandle uploadHandle = startUpload(file);
Map<Integer, PartHandle> partHandles = new HashMap<>();
for (int i = 12; i > 10; i--) {
- partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null));
+ partHandles.put(i, buildAndPutPart(file, uploadHandle, i, i == 12, null));
}
abortUpload(uploadHandle, file);
@@ -605,7 +610,7 @@ public void testMultipartUploadAbort() throws Exception {
intercept(IOException.class,
() -> awaitFuture(
- uploader0.putPart(uploadHandle, 49, file, is, len)));
+ uploader0.putPart(uploadHandle, 49, true, file, is, len)));
intercept(IOException.class,
() -> complete(uploader0, uploadHandle, file, partHandles));
@@ -705,7 +710,8 @@ public void testPutPartEmptyUploadID() throws Exception {
byte[] payload = generatePayload(1);
InputStream is = new ByteArrayInputStream(payload);
intercept(IllegalArgumentException.class,
- () -> uploader0.putPart(emptyHandle, 1, dest, is, payload.length));
+ () -> uploader0.putPart(emptyHandle, 1, true, dest, is,
+ payload.length));
}
/**
@@ -719,7 +725,7 @@ public void testCompleteEmptyUploadID() throws Exception {
UploadHandle emptyHandle =
BBUploadHandle.from(ByteBuffer.wrap(new byte[0]));
Map<Integer, PartHandle> partHandles = new HashMap<>();
- PartHandle partHandle = putPart(dest, realHandle, 1,
+ PartHandle partHandle = putPart(dest, realHandle, 1, true,
generatePayload(1, SMALL_FILE));
partHandles.put(1, partHandle);
@@ -747,7 +753,7 @@ public void testDirectoryInTheWay() throws Exception {
UploadHandle uploadHandle = startUpload(file);
Map<Integer, PartHandle> partHandles = new HashMap<>();
int size = SMALL_FILE;
- PartHandle partHandle = putPart(file, uploadHandle, 1,
+ PartHandle partHandle = putPart(file, uploadHandle, 1, true,
generatePayload(1, size));
partHandles.put(1, partHandle);
@@ -808,10 +814,10 @@ public void testConcurrentUploads() throws Throwable {
.isNotEqualTo(upload1);
// put part 1
- partHandles1.put(partId1, putPart(file, upload1, partId1, payload1));
+ partHandles1.put(partId1, putPart(file, upload1, partId1, false, payload1));
// put part2
- partHandles2.put(partId2, putPart(file, upload2, partId2, payload2));
+ partHandles2.put(partId2, putPart(file, upload2, partId2, true, payload2));
// complete part u1. expect its size and digest to
// be as expected.
diff --git a/pom.xml b/pom.xml
index c7d46b3239b..09f916ec61e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -97,7 +97,7 @@
<guava.version>33.5.0-jre</guava.version>
<guice.version>6.0.0</guice.version>
<hadoop-thirdparty.version>1.4.0</hadoop-thirdparty.version>
- <hadoop.version>3.4.1</hadoop.version>
+ <hadoop.version>3.4.2</hadoop.version>
<hadoop2.version>2.10.2</hadoop2.version>
<hamcrest.version>2.2</hamcrest.version>
<hdds.rocks.native.version>${hdds.version}</hdds.rocks.native.version>
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]