Skip to content
Commits on Source (5)
......@@ -5,6 +5,44 @@ compression and archive formats. These include: bzip2, gzip, pack200,
lzma, xz, Snappy, traditional Unix Compress, DEFLATE, DEFLATE64, LZ4,
Brotli, Zstandard and ar, cpio, jar, tar, zip, dump, 7z, arj.
Release 1.18
------------
New features:
o It is now possible to specify the arguments of zstd-jni's
ZstdOutputStream constructors via Commons Compress as well.
Issue: COMPRESS-460.
Thanks to Carmi Grushko.
Fixed Bugs:
o The example Expander class has been vulnerable to a path
traversal in the edge case that happens when the target
directory has a sibling directory and the name of the target
directory is a prefix of the sibling directory's name.
Thanks to Didier Loiseau.
o Changed the OSGi Import-Package to also optionally import
javax.crypto so encrypted archives can be read.
Issue: COMPRESS-456.
o Changed various implementations of the close method to better
ensure all held resources get closed even if exceptions are
thrown while closing the stream.
Issue: COMPRESS-457.
o ZipArchiveInputStream can now detect the APK Signing Block
used in signed Android APK files and treats it as an "end of
archive" marker.
Issue: COMPRESS-455.
o The cpio streams didn't handle archives using a multi-byte
encoding properly.
Issue: COMPRESS-459.
Thanks to Jens Reimann.
o ZipArchiveInputStream#read would silently return -1 on a
corrupted stored entry and even return > 0 after hitting the
end of the archive.
Issue: COMPRESS-463.
o ArArchiveInputStream#read would allow to read from the stream
without opening an entry at all.
Issue: COMPRESS-462.
Release 1.17
------------
......@@ -12,8 +50,8 @@ New features:
o Added a unit test that is supposed to fail if we break the
OSGi manifest entries again.
Issue: COMPRESS-443.
o Add a new SkipShieldingInputStream class that can be used wit
streams that throw an IOException whne skip is invoked.
o Add a new SkipShieldingInputStream class that can be used with
streams that throw an IOException when skip is invoked.
Issue: COMPRESS-449.
o New constructors have been added to SevenZFile that accept
char[]s rather than byte[]s in order to avoid a common error
......
libcommons-compress-java (1.18-1) unstable; urgency=medium
* Team upload.
* New upstream version 1.18.
- Fix CVE-2018-11771.
When reading a specially crafted ZIP archive, the read method of Apache
Commons Compress ZipArchiveInputStream can fail to return the correct EOF
indication after the end of the stream has been reached. When combined
with a java.io.InputStreamReader this can lead to an infinite stream,
which can be used to mount a denial of service attack against services
that use Compress' zip package. Thanks to Salvatore Bonaccorso for the
report. (Closes: #906301)
* Declare compliance with Debian Policy 4.2.0.
-- Markus Koschany <apo@debian.org> Wed, 22 Aug 2018 21:43:55 +0200
libcommons-compress-java (1.17-1) unstable; urgency=medium
* Team upload.
......
......@@ -18,7 +18,7 @@ Build-Depends:
libpowermock-java,
libxz-java (>= 1.5),
maven-debian-helper
Standards-Version: 4.1.5
Standards-Version: 4.2.0
Vcs-Git: https://salsa.debian.org/java-team/libcommons-compress-java.git
Vcs-Browser: https://salsa.debian.org/java-team/libcommons-compress-java
Homepage: https://commons.apache.org/proper/commons-compress/
......
......@@ -10,3 +10,4 @@ org.codehaus.mojo cobertura-maven-plugin * * * *
org.codehaus.mojo findbugs-maven-plugin * * * *
org.eluder.coveralls coveralls-maven-plugin * * * *
org.ops4j.pax.exam * * * * *
com.github.siom79.japicmp japicmp-maven-plugin * * * *
......@@ -20,11 +20,11 @@
<parent>
<groupId>org.apache.commons</groupId>
<artifactId>commons-parent</artifactId>
<version>46</version>
<version>47</version>
</parent>
<artifactId>commons-compress</artifactId>
<version>1.17</version>
<version>1.18</version>
<name>Apache Commons Compress</name>
<url>https://commons.apache.org/proper/commons-compress/</url>
<!-- The description is not indented to make it look better in the release notes -->
......@@ -50,12 +50,21 @@ Brotli, Zstandard and ar, cpio, jar, tar, zip, dump, 7z, arj.
<commons.manifestlocation>${project.build.outputDirectory}/META-INF</commons.manifestlocation>
<commons.manifestfile>${commons.manifestlocation}/MANIFEST.MF</commons.manifestfile>
<commons.osgi.import>
org.tukaani.xz;resolution:=optional,
org.brotli.dec;resolution:=optional,
com.github.luben.zstd;resolution:=optional,
javax.crypto.*;resolution:=optional,
*
</commons.osgi.import>
<!-- only show issues of the current version -->
<commons.changes.onlyCurrentVersion>true</commons.changes.onlyCurrentVersion>
<!-- generate report even if there are binary incompatible changes -->
<commons.japicmp.breakBuildOnBinaryIncompatibleModifications>false</commons.japicmp.breakBuildOnBinaryIncompatibleModifications>
<!-- 0.12.0 dies with a NullPointerException -->
<commons.japicmp.version>0.11.1</commons.japicmp.version>
<pax.exam.version>4.11.0</pax.exam.version>
<slf4j.version>1.7.21</slf4j.version>
......@@ -310,6 +319,14 @@ Brotli, Zstandard and ar, cpio, jar, tar, zip, dump, 7z, arj.
<artifactId>maven-bundle-plugin</artifactId>
<version>${commons.felix.version}</version>
</plugin>
<!-- override skip property of parent pom -->
<plugin>
<groupId>com.github.siom79.japicmp</groupId>
<artifactId>japicmp-maven-plugin</artifactId>
<configuration>
<skip>false</skip>
</configuration>
</plugin>
</plugins>
</pluginManagement>
<plugins>
......@@ -341,9 +358,6 @@ Brotli, Zstandard and ar, cpio, jar, tar, zip, dump, 7z, arj.
<artifactId>maven-bundle-plugin</artifactId>
<configuration>
<manifestLocation>${commons.manifestlocation}</manifestLocation>
<instructions>
<Import-Package>org.tukaani.xz;resolution:=optional,org.brotli.dec;resolution:=optional,com.github.luben.zstd;resolution:=optional</Import-Package>
</instructions>
</configuration>
</plugin>
<plugin>
......@@ -504,9 +518,9 @@ Brotli, Zstandard and ar, cpio, jar, tar, zip, dump, 7z, arj.
</build>
</profile>
<profile>
<id>java9</id>
<id>java9+</id>
<activation>
<jdk>9</jdk>
<jdk>[9,)</jdk>
</activation>
<properties>
<maven.compiler.release>9</maven.compiler.release>
......
......@@ -42,7 +42,49 @@ The <action> type attribute can be add,update,fix,remove.
<title>commons-compress</title>
</properties>
<body>
<release version="1.17" date="not released, yet"
<release version="1.18" date="not released, yet"
description="Release 1.18">
<action type="fix" date="2018-06-15" due-to="DidierLoiseau">
The example Expander class has been vulnerable to a path
traversal in the edge case that happens when the target
directory has a sibling directory and the name of the target
directory is a prefix of the sibling directory's name.
</action>
<action issue="COMPRESS-456" type="fix" date="2018-06-19">
Changed the OSGi Import-Package to also optionally import
javax.crypto so encrypted archives can be read.
</action>
<action issue="COMPRESS-457" type="fix" date="2018-07-01">
Changed various implementations of the close method to better
ensure all held resources get closed even if exceptions are
thrown while closing the stream.
</action>
<action issue="COMPRESS-455" type="fix" date="2018-07-01">
ZipArchiveInputStream can now detect the APK Signing Block
used in signed Android APK files and treats it as an "end of
archive" marker.
</action>
<action issue="COMPRESS-459" type="fix" date="2018-07-11"
due-to="Jens Reimann">
The cpio streams didn't handle archives using a multi-byte
encoding properly.
</action>
<action issue="COMPRESS-460" type="add" date="2018-07-28"
due-to="Carmi Grushko">
It is now possible to specify the arguments of zstd-jni's
ZstdOutputStream constructors via Commons Compress as well.
</action>
<action issue="COMPRESS-463" type="fix" date="2018-08-09">
ZipArchiveInputStream#read would silently return -1 on a
corrupted stored entry and even return > 0 after hitting the
end of the archive.
</action>
<action issue="COMPRESS-462" type="fix" date="2018-08-10">
ArArchiveInputStream#read would allow to read from the stream
without opening an entry at all.
</action>
</release>
<release version="1.17" date="2018-06-03"
description="Release 1.17">
<action type="fix" date="2018-02-06">
Removed the objenesis dependency from the pom as it is not
......@@ -75,8 +117,8 @@ The <action> type attribute can be add,update,fix,remove.
OSGi manifest entries again.
</action>
<action issue="COMPRESS-449" type="add" date="2018-05-02">
Add a new SkipShieldingInputStream class that can be used wit
streams that throw an IOException whne skip is invoked.
Add a new SkipShieldingInputStream class that can be used with
streams that throw an IOException when skip is invoked.
</action>
<action issue="COMPRESS-451" type="fix" date="2018-05-04">
IOUtils.copy now verifies the buffer size is bigger than 0.
......
......@@ -54,12 +54,23 @@ public class ArArchiveInputStream extends ArchiveInputStream {
*/
private long entryOffset = -1;
// cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
private final byte[] nameBuf = new byte[16];
private final byte[] lastModifiedBuf = new byte[12];
private final byte[] idBuf = new byte[6];
private final byte[] fileModeBuf = new byte[8];
private final byte[] lengthBuf = new byte[10];
// offsets and length of meta data parts
private static final int NAME_OFFSET = 0;
private static final int NAME_LEN = 16;
private static final int LAST_MODIFIED_OFFSET = NAME_LEN;
private static final int LAST_MODIFIED_LEN = 12;
private static final int USER_ID_OFFSET = LAST_MODIFIED_OFFSET + LAST_MODIFIED_LEN;
private static final int USER_ID_LEN = 6;
private static final int GROUP_ID_OFFSET = USER_ID_OFFSET + USER_ID_LEN;
private static final int GROUP_ID_LEN = 6;
private static final int FILE_MODE_OFFSET = GROUP_ID_OFFSET + GROUP_ID_LEN;
private static final int FILE_MODE_LEN = 8;
private static final int LENGTH_OFFSET = FILE_MODE_OFFSET + FILE_MODE_LEN;
private static final int LENGTH_LEN = 10;
// cached buffer for meta data - must only be used locally in the class (COMPRESS-172 - reduce garbage collection)
private final byte[] metaData =
new byte[NAME_LEN + LAST_MODIFIED_LEN + USER_ID_LEN + GROUP_ID_LEN + FILE_MODE_LEN + LENGTH_LEN];
/**
* Constructs an Ar input stream with the referenced stream
......@@ -82,14 +93,16 @@ public class ArArchiveInputStream extends ArchiveInputStream {
public ArArchiveEntry getNextArEntry() throws IOException {
if (currentEntry != null) {
final long entryEnd = entryOffset + currentEntry.getLength();
IOUtils.skip(this, entryEnd - offset);
long skipped = IOUtils.skip(input, entryEnd - offset);
trackReadBytes(skipped);
currentEntry = null;
}
if (offset == 0) {
final byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.HEADER);
final byte[] realized = new byte[expected.length];
final int read = IOUtils.readFully(this, realized);
final int read = IOUtils.readFully(input, realized);
trackReadBytes(read);
if (read != expected.length) {
throw new IOException("failed to read header. Occured at byte: " + getBytesRead());
}
......@@ -100,27 +113,31 @@ public class ArArchiveInputStream extends ArchiveInputStream {
}
}
if (offset % 2 != 0 && read() < 0) {
if (offset % 2 != 0) {
if (input.read() < 0) {
// hit eof
return null;
}
trackReadBytes(1);
}
if (input.available() == 0) {
return null;
}
IOUtils.readFully(this, nameBuf);
IOUtils.readFully(this, lastModifiedBuf);
IOUtils.readFully(this, idBuf);
final int userId = asInt(idBuf, true);
IOUtils.readFully(this, idBuf);
IOUtils.readFully(this, fileModeBuf);
IOUtils.readFully(this, lengthBuf);
{
final int read = IOUtils.readFully(input, metaData);
trackReadBytes(read);
if (read < metaData.length) {
throw new IOException("truncated ar archive");
}
}
{
final byte[] expected = ArchiveUtils.toAsciiBytes(ArArchiveEntry.TRAILER);
final byte[] realized = new byte[expected.length];
final int read = IOUtils.readFully(this, realized);
final int read = IOUtils.readFully(input, realized);
trackReadBytes(read);
if (read != expected.length) {
throw new IOException("failed to read entry trailer. Occured at byte: " + getBytesRead());
}
......@@ -136,13 +153,13 @@ public class ArArchiveInputStream extends ArchiveInputStream {
// GNU ar uses a '/' to mark the end of the filename; this allows for the use of spaces without the use of an extended filename.
// entry name is stored as ASCII string
String temp = ArchiveUtils.toAsciiString(nameBuf).trim();
String temp = ArchiveUtils.toAsciiString(metaData, NAME_OFFSET, NAME_LEN).trim();
if (isGNUStringTable(temp)) { // GNU extended filenames entry
currentEntry = readGNUStringTable(lengthBuf);
currentEntry = readGNUStringTable(metaData, LENGTH_OFFSET, LENGTH_LEN);
return getNextArEntry();
}
long len = asLong(lengthBuf);
long len = asLong(metaData, LENGTH_OFFSET, LENGTH_LEN);
if (temp.endsWith("/")) { // GNU terminator
temp = temp.substring(0, temp.length() - 1);
} else if (isGNULongName(temp)) {
......@@ -158,10 +175,11 @@ public class ArArchiveInputStream extends ArchiveInputStream {
entryOffset += nameLen;
}
currentEntry = new ArArchiveEntry(temp, len, userId,
asInt(idBuf, true),
asInt(fileModeBuf, 8),
asLong(lastModifiedBuf));
currentEntry = new ArArchiveEntry(temp, len,
asInt(metaData, USER_ID_OFFSET, USER_ID_LEN, true),
asInt(metaData, GROUP_ID_OFFSET, GROUP_ID_LEN, true),
asInt(metaData, FILE_MODE_OFFSET, FILE_MODE_LEN, 8),
asLong(metaData, LAST_MODIFIED_OFFSET, LAST_MODIFIED_LEN));
return currentEntry;
}
......@@ -187,24 +205,24 @@ public class ArArchiveInputStream extends ArchiveInputStream {
throw new IOException("Failed to read entry: " + offset);
}
private long asLong(final byte[] byteArray) {
return Long.parseLong(ArchiveUtils.toAsciiString(byteArray).trim());
private long asLong(final byte[] byteArray, int offset, int len) {
return Long.parseLong(ArchiveUtils.toAsciiString(byteArray, offset, len).trim());
}
private int asInt(final byte[] byteArray) {
return asInt(byteArray, 10, false);
private int asInt(final byte[] byteArray, int offset, int len) {
return asInt(byteArray, offset, len, 10, false);
}
private int asInt(final byte[] byteArray, final boolean treatBlankAsZero) {
return asInt(byteArray, 10, treatBlankAsZero);
private int asInt(final byte[] byteArray, int offset, int len, final boolean treatBlankAsZero) {
return asInt(byteArray, offset, len, 10, treatBlankAsZero);
}
private int asInt(final byte[] byteArray, final int base) {
return asInt(byteArray, base, false);
private int asInt(final byte[] byteArray, int offset, int len, final int base) {
return asInt(byteArray, offset, len, base, false);
}
private int asInt(final byte[] byteArray, final int base, final boolean treatBlankAsZero) {
final String string = ArchiveUtils.toAsciiString(byteArray).trim();
private int asInt(final byte[] byteArray, int offset, int len, final int base, final boolean treatBlankAsZero) {
final String string = ArchiveUtils.toAsciiString(byteArray, offset, len).trim();
if (string.length() == 0 && treatBlankAsZero) {
return 0;
}
......@@ -243,18 +261,18 @@ public class ArArchiveInputStream extends ArchiveInputStream {
*/
@Override
public int read(final byte[] b, final int off, final int len) throws IOException {
if (currentEntry == null) {
throw new IllegalStateException("No current ar entry");
}
int toRead = len;
if (currentEntry != null) {
final long entryEnd = entryOffset + currentEntry.getLength();
if (len > 0 && entryEnd > offset) {
toRead = (int) Math.min(len, entryEnd - offset);
} else {
return -1;
}
}
final int ret = this.input.read(b, off, toRead);
count(ret);
offset += ret > 0 ? ret : 0;
trackReadBytes(ret);
return ret;
}
......@@ -322,7 +340,8 @@ public class ArArchiveInputStream extends ArchiveInputStream {
final int nameLen =
Integer.parseInt(bsdLongName.substring(BSD_LONGNAME_PREFIX_LEN));
final byte[] name = new byte[nameLen];
final int read = IOUtils.readFully(this, name);
final int read = IOUtils.readFully(input, name);
trackReadBytes(read);
if (read != nameLen) {
throw new EOFException();
}
......@@ -352,15 +371,23 @@ public class ArArchiveInputStream extends ArchiveInputStream {
return GNU_STRING_TABLE_NAME.equals(name);
}
private void trackReadBytes(final long read) {
count(read);
if (read > 0) {
offset += read;
}
}
/**
* Reads the GNU archive String Table.
*
* @see #isGNUStringTable
*/
private ArArchiveEntry readGNUStringTable(final byte[] length) throws IOException {
final int bufflen = asInt(length); // Assume length will fit in an int
private ArArchiveEntry readGNUStringTable(final byte[] length, final int offset, final int len) throws IOException {
final int bufflen = asInt(length, offset, len); // Assume length will fit in an int
namebuffer = new byte[bufflen];
final int read = IOUtils.readFully(this, namebuffer, 0, bufflen);
final int read = IOUtils.readFully(input, namebuffer, 0, bufflen);
trackReadBytes(read);
if (read != bufflen){
throw new IOException("Failed to read complete // record: expected="
+ bufflen + " read=" + read);
......
......@@ -206,12 +206,15 @@ public class ArArchiveOutputStream extends ArchiveOutputStream {
*/
@Override
public void close() throws IOException {
try {
if (!finished) {
finish();
}
} finally {
out.close();
prevEntry = null;
}
}
@Override
public ArchiveEntry createArchiveEntry(final File inputFile, final String entryName)
......
......@@ -19,6 +19,7 @@
package org.apache.commons.compress.archivers.cpio;
import java.io.File;
import java.nio.charset.Charset;
import java.util.Date;
import org.apache.commons.compress.archivers.ArchiveEntry;
......@@ -466,13 +467,49 @@ public class CpioArchiveEntry implements CpioConstants, ArchiveEntry {
/**
* Get the number of bytes needed to pad the header to the alignment boundary.
*
* @deprecated This method doesn't properly work for multi-byte encodings and
*             creates corrupt archives. Use {@link #getHeaderPadCount(Charset)}
* or {@link #getHeaderPadCount(long)} in any case.
* @return the number of bytes needed to pad the header (0,1,2,3)
*/
@Deprecated
public int getHeaderPadCount(){
return getHeaderPadCount(null);
}
/**
* Get the number of bytes needed to pad the header to the alignment boundary.
*
* @param charset
* The character set used to encode the entry name in the stream.
* @return the number of bytes needed to pad the header (0,1,2,3)
* @since 1.18
*/
public int getHeaderPadCount(Charset charset) {
if (name == null) {
return 0;
}
if (charset == null) {
return getHeaderPadCount(name.length());
}
return getHeaderPadCount(name.getBytes(charset).length);
}
/**
* Get the number of bytes needed to pad the header to the alignment boundary.
*
* @param namesize
* The length of the name in bytes, as read in the stream.
* Without the trailing zero byte.
* @return the number of bytes needed to pad the header (0,1,2,3)
*
* @since 1.18
*/
public int getHeaderPadCount(long namesize) {
if (this.alignmentBoundary == 0) { return 0; }
int size = this.headerSize + 1; // Name has terminating null
if (name != null) {
size += name.length();
size += namesize;
}
final int remain = size % this.alignmentBoundary;
if (remain > 0) {
......
......@@ -332,7 +332,9 @@ public class CpioArchiveInputStream extends ArchiveInputStream implements
this.crc &= 0xFFFFFFFFL;
}
}
if (tmpread > 0) {
this.entryBytesRead += tmpread;
}
return tmpread;
}
......@@ -393,7 +395,7 @@ public class CpioArchiveInputStream extends ArchiveInputStream implements
+ ArchiveUtils.sanitize(name)
+ " Occured at byte: " + getBytesRead());
}
skip(ret.getHeaderPadCount());
skip(ret.getHeaderPadCount(namesize - 1));
return ret;
}
......@@ -449,7 +451,7 @@ public class CpioArchiveInputStream extends ArchiveInputStream implements
+ ArchiveUtils.sanitize(name)
+ "Occured at byte: " + getBytesRead());
}
skip(ret.getHeaderPadCount());
skip(ret.getHeaderPadCount(namesize - 1));
return ret;
}
......
......@@ -22,6 +22,7 @@ import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import org.apache.commons.compress.archivers.ArchiveEntry;
......@@ -299,10 +300,11 @@ public class CpioArchiveOutputStream extends ArchiveOutputStream implements
writeAsciiLong(devMin, 8, 16);
writeAsciiLong(entry.getRemoteDeviceMaj(), 8, 16);
writeAsciiLong(entry.getRemoteDeviceMin(), 8, 16);
writeAsciiLong(entry.getName().length() + 1L, 8, 16);
byte[] name = encode(entry.getName());
writeAsciiLong(name.length + 1L, 8, 16);
writeAsciiLong(entry.getChksum(), 8, 16);
writeCString(entry.getName());
pad(entry.getHeaderPadCount());
writeCString(name);
pad(entry.getHeaderPadCount(name.length));
}
private void writeOldAsciiEntry(final CpioArchiveEntry entry)
......@@ -330,9 +332,10 @@ public class CpioArchiveOutputStream extends ArchiveOutputStream implements
writeAsciiLong(entry.getNumberOfLinks(), 6, 8);
writeAsciiLong(entry.getRemoteDevice(), 6, 8);
writeAsciiLong(entry.getTime(), 11, 8);
writeAsciiLong(entry.getName().length() + 1L, 6, 8);
byte[] name = encode(entry.getName());
writeAsciiLong(name.length + 1L, 6, 8);
writeAsciiLong(entry.getSize(), 11, 8);
writeCString(entry.getName());
writeCString(name);
}
private void writeOldBinaryEntry(final CpioArchiveEntry entry,
......@@ -360,10 +363,11 @@ public class CpioArchiveOutputStream extends ArchiveOutputStream implements
writeBinaryLong(entry.getNumberOfLinks(), 2, swapHalfWord);
writeBinaryLong(entry.getRemoteDevice(), 2, swapHalfWord);
writeBinaryLong(entry.getTime(), 4, swapHalfWord);
writeBinaryLong(entry.getName().length() + 1L, 2, swapHalfWord);
byte[] name = encode(entry.getName());
writeBinaryLong(name.length + 1L, 2, swapHalfWord);
writeBinaryLong(entry.getSize(), 4, swapHalfWord);
writeCString(entry.getName());
pad(entry.getHeaderPadCount());
writeCString(name);
pad(entry.getHeaderPadCount(name.length));
}
/*(non-Javadoc)
......@@ -482,15 +486,17 @@ public class CpioArchiveOutputStream extends ArchiveOutputStream implements
*/
@Override
public void close() throws IOException {
try {
if (!finished) {
finish();
}
} finally {
if (!this.closed) {
out.close();
this.closed = true;
}
}
}
private void pad(final int count) throws IOException{
if (count > 0){
......@@ -534,16 +540,27 @@ public class CpioArchiveOutputStream extends ArchiveOutputStream implements
}
/**
* Writes an ASCII string to the stream followed by \0
* Encodes the given string using the configured encoding.
*
* @param str the String to write
* @throws IOException if the string couldn't be written
* @return result of encoding the string
*/
private void writeCString(final String str) throws IOException {
private byte[] encode(final String str) throws IOException {
final ByteBuffer buf = zipEncoding.encode(str);
final int len = buf.limit() - buf.position();
out.write(buf.array(), buf.arrayOffset(), len);
return Arrays.copyOfRange(buf.array(), buf.arrayOffset(), buf.arrayOffset() + len);
}
/**
* Writes an encoded string to the stream followed by \0
* @param str the String to write
* @throws IOException if the string couldn't be written
*/
private void writeCString(byte[] str) throws IOException {
out.write(str);
out.write('\0');
count(len + 1);
count(str.length + 1);
}
/**
......
......@@ -239,6 +239,9 @@ public class Expander {
private void expand(ArchiveEntrySupplier supplier, EntryWriter writer, File targetDirectory)
throws IOException {
String targetDirPath = targetDirectory.getCanonicalPath();
if (!targetDirPath.endsWith(File.separator)) {
targetDirPath += File.separatorChar;
}
ArchiveEntry nextEntry = supplier.getNextReadableEntry();
while (nextEntry != null) {
File f = new File(targetDirectory, nextEntry.getName());
......
......@@ -130,11 +130,14 @@ public class SevenZOutputFile implements Closeable {
*/
@Override
public void close() throws IOException {
try {
if (!finished) {
finish();
}
} finally {
channel.close();
}
}
/**
* Create an archive entry using the inputFile and entryName provided.
......
......@@ -302,15 +302,17 @@ public class TarArchiveOutputStream extends ArchiveOutputStream {
*/
@Override
public void close() throws IOException {
try {
if (!finished) {
finish();
}
} finally {
if (!closed) {
out.close();
closed = true;
}
}
}
/**
* Get the record size being used by this stream's TarBuffer.
......
......@@ -124,9 +124,12 @@ public class ScatterZipOutputStream implements Closeable {
*/
@Override
public void close() throws IOException {
try {
backingStore.close();
} finally {
streamCompressor.close();
}
}
/**
* Create a {@link ScatterZipOutputStream} with default compression level that is backed by a file
......
......@@ -24,7 +24,9 @@ import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
......@@ -249,12 +251,12 @@ public class ZipArchiveInputStream extends ArchiveInputStream implements InputSt
}
final ZipLong sig = new ZipLong(lfhBuf);
if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG)) {
if (!sig.equals(ZipLong.LFH_SIG)) {
if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG) || isApkSigningBlock(lfhBuf)) {
hitCentralDirectory = true;
skipRemainderOfArchive();
return null;
}
if (!sig.equals(ZipLong.LFH_SIG)) {
throw new ZipException(String.format("Unexpected record signature: 0X%X", sig.getValue()));
}
......@@ -523,7 +525,8 @@ public class ZipArchiveInputStream extends ArchiveInputStream implements InputSt
buf.position(0);
final int l = in.read(buf.array());
if (l == -1) {
return -1;
buf.limit(0);
throw new IOException("Truncated ZIP file");
}
buf.limit(l);
......@@ -789,9 +792,14 @@ public class ZipArchiveInputStream extends ArchiveInputStream implements InputSt
}
private void readFully(final byte[] b) throws IOException {
final int count = IOUtils.readFully(in, b);
readFully(b, 0);
}
private void readFully(final byte[] b, final int off) throws IOException {
final int len = b.length - off;
final int count = IOUtils.readFully(in, b, off, len);
count(count);
if (count < b.length) {
if (count < len) {
throw new EOFException();
}
}
......@@ -1087,6 +1095,62 @@ public class ZipArchiveInputStream extends ArchiveInputStream implements InputSt
return b == ZipArchiveOutputStream.EOCD_SIG[0];
}
private static final byte[] APK_SIGNING_BLOCK_MAGIC = new byte[] {
'A', 'P', 'K', ' ', 'S', 'i', 'g', ' ', 'B', 'l', 'o', 'c', 'k', ' ', '4', '2',
};
private static final BigInteger LONG_MAX = BigInteger.valueOf(Long.MAX_VALUE);
/**
* Checks whether this might be an APK Signing Block.
*
* <p>Unfortunately the APK signing block does not start with some kind of signature, it rather ends with one. It
* starts with a length, so what we do is parse the suspect length, skip ahead far enough, look for the signature
* and if we've found it, return true.</p>
*
* @param suspectLocalFileHeader the bytes read from the underlying stream in the expectation that they would hold
* the local file header of the next entry.
*
* @return true if this looks like an APK signing block
*
* @see <a href="https://source.android.com/security/apksigning/v2">https://source.android.com/security/apksigning/v2</a>
*/
private boolean isApkSigningBlock(byte[] suspectLocalFileHeader) throws IOException {
// length of block excluding the size field itself
BigInteger len = ZipEightByteInteger.getValue(suspectLocalFileHeader);
// LFH has already been read and all but the first eight bytes contain (part of) the APK signing block,
// also subtract 16 bytes in order to position us at the magic string
BigInteger toSkip = len.add(BigInteger.valueOf(DWORD - suspectLocalFileHeader.length
- APK_SIGNING_BLOCK_MAGIC.length));
byte[] magic = new byte[APK_SIGNING_BLOCK_MAGIC.length];
try {
if (toSkip.signum() < 0) {
// suspectLocalFileHeader contains the start of suspect magic string
int off = suspectLocalFileHeader.length + toSkip.intValue();
// length was shorter than magic length
if (off < DWORD) {
return false;
}
int bytesInBuffer = Math.abs(toSkip.intValue());
System.arraycopy(suspectLocalFileHeader, off, magic, 0, Math.min(bytesInBuffer, magic.length));
if (bytesInBuffer < magic.length) {
readFully(magic, bytesInBuffer);
}
} else {
while (toSkip.compareTo(LONG_MAX) > 0) {
realSkip(Long.MAX_VALUE);
toSkip = toSkip.add(LONG_MAX.negate());
}
realSkip(toSkip.longValue());
readFully(magic);
}
} catch (EOFException ex) {
// length was invalid
return false;
}
return Arrays.equals(magic, APK_SIGNING_BLOCK_MAGIC);
}
/**
* Structure collecting information for the entry that is
* currently being read.
......
......@@ -958,11 +958,14 @@ public class ZipArchiveOutputStream extends ArchiveOutputStream {
*/
@Override
public void close() throws IOException {
try {
if (!finished) {
finish();
}
} finally {
destroy();
}
}
/**
* Flushes this output stream and forces any buffered output bytes
......@@ -1597,13 +1600,16 @@ public class ZipArchiveOutputStream extends ArchiveOutputStream {
* corrupt archives so they can clean up any temporary files.</p>
*/
void destroy() throws IOException {
try {
if (channel != null) {
channel.close();
}
} finally {
if (out != null) {
out.close();
}
}
}
/**
* enum that represents the possible policies for creating Unicode
......
......@@ -501,10 +501,13 @@ public class BZip2CompressorOutputStream extends CompressorOutputStream
public void close() throws IOException {
if (!closed) {
final OutputStream outShadow = this.out;
try {
finish();
} finally {
outShadow.close();
}
}
}
@Override
public void flush() throws IOException {
......
......@@ -95,12 +95,15 @@ public class Deflate64CompressorInputStream extends CompressorInputStream implem
@Override
public void close() throws IOException {
try {
closeDecoder();
} finally {
if (originalStream != null) {
originalStream.close();
originalStream = null;
}
}
}
/**
* @since 1.17
......
......@@ -204,11 +204,14 @@ public class GzipCompressorOutputStream extends CompressorOutputStream {
@Override
public void close() throws IOException {
if (!closed) {
try {
finish();
} finally {
deflater.end();
out.close();
closed = true;
}
}
}
}