This is an automated email from the ASF dual-hosted git repository.

ggregory pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/commons-compress.git


The following commit(s) were added to refs/heads/master by this push:
     new 1b056105a No need to track the record size separately
1b056105a is described below

commit 1b056105a2a534bfd8e2b9eea6fd9bb1398f5eb1
Author: Gary Gregory <garydgreg...@gmail.com>
AuthorDate: Wed Feb 28 16:06:25 2024 -0500

    No need to track the record size separately
    
    The record size is always the record buffer length
---
 .../archivers/tar/TarArchiveInputStream.java       | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)
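
The idea behind the change: the record size is already encoded in the allocation of recordBuffer, so a separate recordSize field only duplicates that state. A minimal stand-alone sketch of the pattern (hypothetical class name, not the actual Commons Compress source):

    // Derive the value from the buffer that already encodes it,
    // rather than tracking it in a second field that could drift.
    final class RecordReader {

        /** Sized to exactly one TAR record; its length is the record size. */
        private final byte[] recordBuffer;

        RecordReader(final int recordSize) {
            this.recordBuffer = new byte[recordSize];
        }

        int getRecordSize() {
            return recordBuffer.length;
        }
    }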

diff --git a/src/main/java/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java b/src/main/java/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
index ce35ed9f6..f1b3f2702 100644
--- a/src/main/java/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
+++ b/src/main/java/org/apache/commons/compress/archivers/tar/TarArchiveInputStream.java
@@ -82,9 +82,6 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
 
     private final byte[] smallBuf = new byte[SMALL_BUFFER_SIZE];
 
-    /** The size the TAR header. */
-    private final int recordSize;
-
     /** The buffer to store the TAR header. **/
     private final byte[] recordBuffer;
 
@@ -190,7 +187,6 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
         super(inputStream, encoding);
         this.atEof = false;
         this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
-        this.recordSize = recordSize;
         this.recordBuffer = new byte[recordSize];
         this.blockSize = blockSize;
         this.lenient = lenient;
@@ -514,7 +510,7 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
      * @return The TarBuffer record size.
      */
     public int getRecordSize() {
-        return recordSize;
+        return recordBuffer.length;
     }
 
     protected final boolean isAtEOF() {
@@ -532,7 +528,7 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
      * @return true if the record data is an End of Archive
      */
     protected boolean isEOFRecord(final byte[] record) {
-        return record == null || ArchiveUtils.isArrayZero(record, recordSize);
+        return record == null || ArchiveUtils.isArrayZero(record, getRecordSize());
     }
 
     /**
@@ -587,7 +583,7 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
 
         // for 1.0 PAX Format, the sparse map is stored in the file data block
         if (currEntry.isPaxGNU1XSparse()) {
-            sparseHeaders = TarUtils.parsePAX1XSparseHeaders(in, recordSize);
+            sparseHeaders = TarUtils.parsePAX1XSparseHeaders(in, getRecordSize());
             currEntry.setSparseHeaders(sparseHeaders);
         }
 
@@ -689,7 +685,7 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
     protected byte[] readRecord() throws IOException {
         final int readCount = IOUtils.readFully(in, recordBuffer);
         count(readCount);
-        if (readCount != recordSize) {
+        if (readCount != getRecordSize()) {
             return null;
         }
 
@@ -798,10 +794,10 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
      * @throws IOException if a truncated tar archive is detected
      */
     private void skipRecordPadding() throws IOException {
-        if (!isDirectory() && this.entrySize > 0 && this.entrySize % this.recordSize != 0) {
+        if (!isDirectory() && this.entrySize > 0 && this.entrySize % getRecordSize() != 0) {
             final long available = in.available();
-            final long numRecords = this.entrySize / this.recordSize + 1;
-            final long padding = numRecords * this.recordSize - this.entrySize;
+            final long numRecords = this.entrySize / getRecordSize() + 1;
+            final long padding = numRecords * getRecordSize() - this.entrySize;
             long skipped = org.apache.commons.io.IOUtils.skip(in, padding);
 
             skipped = getActuallySkipped(available, skipped, padding);
@@ -846,13 +842,13 @@ public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {
         boolean shouldReset = true;
         final boolean marked = in.markSupported();
         if (marked) {
-            in.mark(recordSize);
+            in.mark(getRecordSize());
         }
         try {
             shouldReset = !isEOFRecord(readRecord());
         } finally {
             if (shouldReset && marked) {
-                pushedBackBytes(recordSize);
+                pushedBackBytes(getRecordSize());
                 in.reset();
             }
         }
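
The observable behaviour of getRecordSize() should be unchanged: it still reports the record size passed to the constructor, now read from recordBuffer.length. A small usage sketch, assuming the existing TarArchiveInputStream(InputStream, int blockSize, int recordSize) constructor and the TarConstants defaults:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
    import org.apache.commons.compress.archivers.tar.TarConstants;

    public class RecordSizeExample {
        public static void main(final String[] args) throws IOException {
            // getRecordSize() reports the configured record size, derived from the buffer length.
            try (TarArchiveInputStream tis = new TarArchiveInputStream(
                    new ByteArrayInputStream(new byte[0]),
                    TarConstants.DEFAULT_BLKSIZE,
                    TarConstants.DEFAULT_RCDSIZE)) {
                System.out.println(tis.getRecordSize()); // prints 512
            }
        }
    }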
