update code style

This commit is contained in:
huangyuhui
2016-01-01 11:03:09 +08:00
parent 1f7eb04215
commit b82243a9c0
298 changed files with 3902 additions and 3998 deletions

View File

@@ -21,14 +21,17 @@ abstract class BCJCoder implements FilterCoder {
return filterID >= 0x04 && filterID <= 0x09;
}
@Override
public boolean changesSize() {
return false;
}
@Override
public boolean nonLastOK() {
return true;
}
@Override
public boolean lastOK() {
return false;
}

View File

@@ -17,27 +17,32 @@ class BCJDecoder extends BCJCoder implements FilterDecoder {
private final int startOffset;
BCJDecoder(long filterID, byte[] props)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
assert isBCJFilterID(filterID);
this.filterID = filterID;
if (props.length == 0)
switch (props.length) {
case 0:
startOffset = 0;
else if (props.length == 4) {
break;
case 4:
int n = 0;
for (int i = 0; i < 4; ++i)
n |= (props[i] & 0xFF) << (i * 8);
startOffset = n;
} else
break;
default:
throw new UnsupportedOptionsException(
"Unsupported BCJ filter properties");
"Unsupported BCJ filter properties");
}
}
@Override
public int getMemoryUsage() {
return SimpleInputStream.getMemoryUsage();
}
@Override
public InputStream getInputStream(InputStream in) {
SimpleFilter simpleFilter = null;

View File

@@ -18,15 +18,15 @@ abstract class BCJOptions extends FilterOptions {
}
/**
* Sets the start offset for the address conversions.
* Normally this is useless so you shouldn't use this function.
* The default value is <code>0</code>.
* Sets the start offset for the address conversions. Normally this is
* useless so you shouldn't use this function. The default value is
* <code>0</code>.
*/
public void setStartOffset(int startOffset)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
if ((startOffset & (alignment - 1)) != 0)
throw new UnsupportedOptionsException(
"Start offset must be a multiple of " + alignment);
"Start offset must be a multiple of " + alignment);
this.startOffset = startOffset;
}

View File

@@ -35,7 +35,7 @@ class BlockInputStream extends InputStream {
public BlockInputStream(InputStream in, Check check, int memoryLimit,
long unpaddedSizeInIndex,
long uncompressedSizeInIndex)
throws IOException, IndexIndicatorException {
throws IOException, IndexIndicatorException {
this.check = check;
inData = new DataInputStream(in);
@@ -59,7 +59,7 @@ class BlockInputStream extends InputStream {
// Check for reserved bits in Block Flags.
if ((buf[1] & 0x3C) != 0)
throw new UnsupportedOptionsException(
"Unsupported options in XZ Block Header");
"Unsupported options in XZ Block Header");
// Memory for the Filter Flags field
int filterCount = (buf[1] & 0x03) + 1;
@@ -69,7 +69,7 @@ class BlockInputStream extends InputStream {
// Use a stream to parse the fields after the Block Flags field.
// Exclude the CRC32 field at the end.
ByteArrayInputStream bufStream = new ByteArrayInputStream(
buf, 2, headerSize - 6);
buf, 2, headerSize - 6);
try {
// Set the maximum valid compressed size. This is overridden
@@ -114,7 +114,7 @@ class BlockInputStream extends InputStream {
for (int i = bufStream.available(); i > 0; --i)
if (bufStream.read() != 0x00)
throw new UnsupportedOptionsException(
"Unsupported options in XZ Block Header");
"Unsupported options in XZ Block Header");
// Validate the Block Header against the Index when doing
// random access reading.
@@ -125,7 +125,7 @@ class BlockInputStream extends InputStream {
int headerAndCheckSize = headerSize + check.getSize();
if (headerAndCheckSize >= unpaddedSizeInIndex)
throw new CorruptedInputException(
"XZ Index does not match a Block Header");
"XZ Index does not match a Block Header");
// The compressed size calculated from Unpadded Size must
// match the value stored in the Compressed Size field in
@@ -136,7 +136,7 @@ class BlockInputStream extends InputStream {
|| (compressedSizeInHeader != -1
&& compressedSizeInHeader != compressedSizeFromIndex))
throw new CorruptedInputException(
"XZ Index does not match a Block Header");
"XZ Index does not match a Block Header");
// The uncompressed size stored in the Index must match
// the value stored in the Uncompressed Size field in
@@ -144,7 +144,7 @@ class BlockInputStream extends InputStream {
if (uncompressedSizeInHeader != -1
&& uncompressedSizeInHeader != uncompressedSizeInIndex)
throw new CorruptedInputException(
"XZ Index does not match a Block Header");
"XZ Index does not match a Block Header");
// For further validation, pretend that the values from the Index
// were stored in the Block Header.
@@ -170,7 +170,7 @@ class BlockInputStream extends InputStream {
else
throw new UnsupportedOptionsException(
"Unknown Filter ID " + filterIDs[i]);
"Unknown Filter ID " + filterIDs[i]);
RawCoder.validate(filters);

View File

@@ -9,25 +9,25 @@
package org.tukaani.xz;
/**
* Thrown when the compressed input data is corrupt.
* However, it is possible that some or all of the data
* already read from the input stream was corrupt too.
* Thrown when the compressed input data is corrupt. However, it is possible
* that some or all of the data already read from the input stream was corrupt
* too.
*/
public class CorruptedInputException extends XZIOException {
private static final long serialVersionUID = 3L;
/**
* Creates a new CorruptedInputException with
* the default error detail message.
* Creates a new CorruptedInputException with the default error detail
* message.
*/
public CorruptedInputException() {
super("Compressed data is corrupt");
}
/**
* Creates a new CorruptedInputException with
* the specified error detail message.
* Creates a new CorruptedInputException with the specified error detail
* message.
*
* @param s error detail message
*/

View File

@@ -17,15 +17,17 @@ class DeltaDecoder extends DeltaCoder implements FilterDecoder {
DeltaDecoder(byte[] props) throws UnsupportedOptionsException {
if (props.length != 1)
throw new UnsupportedOptionsException(
"Unsupported Delta filter properties");
"Unsupported Delta filter properties");
distance = (props[0] & 0xFF) + 1;
}
@Override
public int getMemoryUsage() {
return 1;
}
@Override
public InputStream getInputStream(InputStream in) {
return new DeltaInputStream(in, distance);
}

View File

@@ -15,9 +15,9 @@ import org.tukaani.xz.delta.DeltaDecoder;
/**
* Decodes raw Delta-filtered data (no XZ headers).
* <p>
* The delta filter doesn't change the size of the data and thus it
* cannot have an end-of-payload marker. It will simply decode until
* its input stream indicates end of input.
* The delta filter doesn't change the size of the data and thus it cannot have
* an end-of-payload marker. It will simply decode until its input stream
* indicates end of input.
*/
public class DeltaInputStream extends InputStream {
@@ -41,12 +41,10 @@ public class DeltaInputStream extends InputStream {
/**
* Creates a new Delta decoder with the given delta calculation distance.
*
* @param in input stream from which Delta filtered data
* is read
* @param in input stream from which Delta filtered data is read
*
* @param distance delta calculation distance, must be in the
* range [<code>DISTANCE_MIN</code>,
* <code>DISTANCE_MAX</code>]
* @param distance delta calculation distance, must be in the range
* [<code>DISTANCE_MIN</code>, <code>DISTANCE_MAX</code>]
*/
public DeltaInputStream(InputStream in, int distance) {
// Check for null because otherwise null isn't detected
@@ -61,11 +59,12 @@ public class DeltaInputStream extends InputStream {
/**
* Decode the next byte from this input stream.
*
* @return the next decoded byte, or <code>-1</code> to indicate
* the end of input on the input stream <code>in</code>
* @return the next decoded byte, or <code>-1</code> to indicate the end of
* input on the input stream <code>in</code>
*
* @throws IOException may be thrown by <code>in</code>
*/
@Override
public int read() throws IOException {
return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
}
@@ -73,21 +72,22 @@ public class DeltaInputStream extends InputStream {
/**
* Decode into an array of bytes.
* <p>
* This calls <code>in.read(buf, off, len)</code> and defilters the
* returned data.
* This calls <code>in.read(buf, off, len)</code> and defilters the returned
* data.
*
* @param buf target buffer for decoded data
* @param off start offset in <code>buf</code>
* @param len maximum number of bytes to read
*
* @return number of bytes read, or <code>-1</code> to indicate
* the end of the input stream <code>in</code>
* @return number of bytes read, or <code>-1</code> to indicate the end of
* the input stream <code>in</code>
*
* @throws XZIOException if the stream has been closed
*
* @throws IOException may be thrown by underlying input
* stream <code>in</code>
* @throws IOException may be thrown by underlying input stream
* <code>in</code>
*/
@Override
public int read(byte[] buf, int off, int len) throws IOException {
if (len == 0)
return 0;
@@ -118,6 +118,7 @@ public class DeltaInputStream extends InputStream {
*
* @return the value returned by <code>in.available()</code>
*/
@Override
public int available() throws IOException {
if (in == null)
throw new XZIOException("Stream closed");
@@ -129,11 +130,12 @@ public class DeltaInputStream extends InputStream {
}
/**
* Closes the stream and calls <code>in.close()</code>.
* If the stream was already closed, this does nothing.
* Closes the stream and calls <code>in.close()</code>. If the stream was
* already closed, this does nothing.
*
* @throws IOException if thrown by <code>in.close()</code>
*/
@Override
public void close() throws IOException {
if (in != null)
try {

View File

@@ -11,19 +11,19 @@ package org.tukaani.xz;
import java.io.InputStream;
/**
* Delta filter options. The Delta filter can be used only as a non-last
* filter in the chain, for example Delta + LZMA2.
* Delta filter options. The Delta filter can be used only as a non-last filter
* in the chain, for example Delta + LZMA2.
* <p>
* Currently only simple byte-wise delta is supported. The only option
* is the delta distance, which you should set to match your data.
* It's not possible to provide a generic default value for it.
* Currently only simple byte-wise delta is supported. The only option is the
* delta distance, which you should set to match your data. It's not possible to
* provide a generic default value for it.
* <p>
* For example, with distance = 2 and eight-byte input
* A1 B1 A2 B3 A3 B5 A4 B7, the output will be A1 B1 01 02 01 02 01 02.
* For example, with distance = 2 and eight-byte input A1 B1 A2 B3 A3 B5 A4 B7,
* the output will be A1 B1 01 02 01 02 01 02.
* <p>
* The Delta filter can be good with uncompressed bitmap images. It can
* also help with PCM audio, although special-purpose compressors like
* FLAC will give much smaller result at much better compression speed.
* The Delta filter can be good with uncompressed bitmap images. It can also
* help with PCM audio, although special-purpose compressors like FLAC will give
* much smaller result at much better compression speed.
*/
public class DeltaOptions extends FilterOptions {
@@ -53,14 +53,14 @@ public class DeltaOptions extends FilterOptions {
}
/**
* Sets the delta distance in bytes. The new distance must be in
* the range [DISTANCE_MIN, DISTANCE_MAX].
* Sets the delta distance in bytes. The new distance must be in the range
* [DISTANCE_MIN, DISTANCE_MAX].
*/
public void setDistance(int distance) throws UnsupportedOptionsException {
if (distance < DISTANCE_MIN || distance > DISTANCE_MAX)
throw new UnsupportedOptionsException(
"Delta distance must be in the range [" + DISTANCE_MIN
+ ", " + DISTANCE_MAX + "]: " + distance);
"Delta distance must be in the range [" + DISTANCE_MIN
+ ", " + DISTANCE_MAX + "]: " + distance);
this.distance = distance;
}
@@ -72,26 +72,32 @@ public class DeltaOptions extends FilterOptions {
return distance;
}
@Override
public int getEncoderMemoryUsage() {
return DeltaOutputStream.getMemoryUsage();
}
@Override
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return new DeltaOutputStream(out, this);
}
@Override
public int getDecoderMemoryUsage() {
return 1;
}
@Override
public InputStream getInputStream(InputStream in) {
return new DeltaInputStream(in, distance);
}
@Override
FilterEncoder getFilterEncoder() {
return new DeltaEncoder(this);
}
@Override
public Object clone() {
try {
return super.clone();

View File

@@ -18,10 +18,9 @@ import java.io.IOException;
public abstract class FilterOptions implements Cloneable {
/**
* Gets how much memory the encoder will need with
* the given filter chain. This function simply calls
* <code>getEncoderMemoryUsage()</code> for every filter
* in the array and returns the sum of the returned values.
* Gets how much memory the encoder will need with the given filter chain.
* This function simply calls <code>getEncoderMemoryUsage()</code> for every
* filter in the array and returns the sum of the returned values.
*/
public static int getEncoderMemoryUsage(FilterOptions[] options) {
int m = 0;
@@ -33,10 +32,9 @@ public abstract class FilterOptions implements Cloneable {
}
/**
* Gets how much memory the decoder will need with
* the given filter chain. This function simply calls
* <code>getDecoderMemoryUsage()</code> for every filter
* in the array and returns the sum of the returned values.
* Gets how much memory the decoder will need with the given filter chain.
* This function simply calls <code>getDecoderMemoryUsage()</code> for every
* filter in the array and returns the sum of the returned values.
*/
public static int getDecoderMemoryUsage(FilterOptions[] options) {
int m = 0;
@@ -53,18 +51,18 @@ public abstract class FilterOptions implements Cloneable {
public abstract int getEncoderMemoryUsage();
/**
* Gets a raw (no XZ headers) encoder output stream using these options.
* Raw streams are an advanced feature. In most cases you want to store
* the compressed data in the .xz container format instead of using
* a raw stream. To use this filter in a .xz file, pass this object
* to XZOutputStream.
* Gets a raw (no XZ headers) encoder output stream using these options. Raw
* streams are an advanced feature. In most cases you want to store the
* compressed data in the .xz container format instead of using a raw
* stream. To use this filter in a .xz file, pass this object to
* XZOutputStream.
*/
public abstract FinishableOutputStream getOutputStream(
FinishableOutputStream out);
FinishableOutputStream out);
/**
* Gets how much memory the decoder will need to decompress the data
* that was encoded with these options.
* Gets how much memory the decoder will need to decompress the data that
* was encoded with these options.
*/
public abstract int getDecoderMemoryUsage();
@@ -72,7 +70,7 @@ public abstract class FilterOptions implements Cloneable {
* Gets a raw (no XZ headers) decoder input stream using these options.
*/
public abstract InputStream getInputStream(InputStream in)
throws IOException;
throws IOException;
abstract FilterEncoder getFilterEncoder();

View File

@@ -35,6 +35,7 @@ public class FinishableWrapperOutputStream extends FinishableOutputStream {
/**
* Calls {@link java.io.OutputStream#write(int) out.write(b)}.
*/
@Override
public void write(int b) throws IOException {
out.write(b);
}
@@ -42,6 +43,7 @@ public class FinishableWrapperOutputStream extends FinishableOutputStream {
/**
* Calls {@link java.io.OutputStream#write(byte[]) out.write(buf)}.
*/
@Override
public void write(byte[] buf) throws IOException {
out.write(buf);
}
@@ -50,6 +52,7 @@ public class FinishableWrapperOutputStream extends FinishableOutputStream {
* Calls {@link java.io.OutputStream#write(byte[],int,int)
* out.write(buf, off, len)}.
*/
@Override
public void write(byte[] buf, int off, int len) throws IOException {
out.write(buf, off, len);
}
@@ -57,6 +60,7 @@ public class FinishableWrapperOutputStream extends FinishableOutputStream {
/**
* Calls {@link java.io.OutputStream#flush() out.flush()}.
*/
@Override
public void flush() throws IOException {
out.flush();
}
@@ -64,6 +68,7 @@ public class FinishableWrapperOutputStream extends FinishableOutputStream {
/**
* Calls {@link java.io.OutputStream#close() out.close()}.
*/
@Override
public void close() throws IOException {
out.close();
}

View File

@@ -19,16 +19,18 @@ class LZMA2Decoder extends LZMA2Coder implements FilterDecoder {
// are too big for int.
if (props.length != 1 || (props[0] & 0xFF) > 37)
throw new UnsupportedOptionsException(
"Unsupported LZMA2 properties");
"Unsupported LZMA2 properties");
dictSize = 2 | (props[0] & 1);
dictSize <<= (props[0] >>> 1) + 11;
}
@Override
public int getMemoryUsage() {
return LZMA2InputStream.getMemoryUsage(dictSize);
}
@Override
public InputStream getInputStream(InputStream in) {
return new LZMA2InputStream(in, dictSize);
}

View File

@@ -18,7 +18,7 @@ class LZMA2Encoder extends LZMA2Coder implements FilterEncoder {
LZMA2Encoder(LZMA2Options options) {
if (options.getPresetDict() != null)
throw new IllegalArgumentException(
"XZ doesn't support a preset dictionary for now");
"XZ doesn't support a preset dictionary for now");
if (options.getMode() == LZMA2Options.MODE_UNCOMPRESSED)
props[0] = (byte) 0;
@@ -32,18 +32,22 @@ class LZMA2Encoder extends LZMA2Coder implements FilterEncoder {
this.options = (LZMA2Options) options.clone();
}
@Override
public long getFilterID() {
return FILTER_ID;
}
@Override
public byte[] getFilterProps() {
return props;
}
@Override
public boolean supportsFlushing() {
return true;
}
@Override
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return options.getOutputStream(out);
}

View File

@@ -24,8 +24,8 @@ public class LZMA2InputStream extends InputStream {
/**
* Smallest valid LZMA2 dictionary size.
* <p>
* Very tiny dictionaries would be a performance problem, so
* the minimum is 4 KiB.
* Very tiny dictionaries would be a performance problem, so the minimum is
* 4 KiB.
*/
public static final int DICT_SIZE_MIN = 4096;
@@ -33,11 +33,11 @@ public class LZMA2InputStream extends InputStream {
* Largest dictionary size supported by this implementation.
* <p>
* The LZMA2 algorithm allows dictionaries up to one byte less than 4 GiB.
* This implementation supports only 16 bytes less than 2 GiB for raw
* LZMA2 streams, and for .xz files the maximum is 1.5 GiB. This
* limitation is due to Java using signed 32-bit integers for array
* indexing. The limitation shouldn't matter much in practice since so
* huge dictionaries are not normally used.
* This implementation supports only 16 bytes less than 2 GiB for raw LZMA2
* streams, and for .xz files the maximum is 1.5 GiB. This limitation is due
* to Java using signed 32-bit integers for array indexing. The limitation
* shouldn't matter much in practice since so huge dictionaries are not
* normally used.
*/
public static final int DICT_SIZE_MAX = Integer.MAX_VALUE & ~15;
@@ -62,12 +62,11 @@ public class LZMA2InputStream extends InputStream {
private final byte[] tempBuf = new byte[1];
/**
* Gets approximate decompressor memory requirements as kibibytes for
* the given dictionary size.
* Gets approximate decompressor memory requirements as kibibytes for the
* given dictionary size.
*
* @param dictSize LZMA2 dictionary size as bytes, must be
* in the range [<code>DICT_SIZE_MIN</code>,
* <code>DICT_SIZE_MAX</code>]
* @param dictSize LZMA2 dictionary size as bytes, must be in the range
* [<code>DICT_SIZE_MIN</code>, <code>DICT_SIZE_MAX</code>]
*
* @return approximate memory requirements as kibibytes (KiB)
*/
@@ -81,7 +80,7 @@ public class LZMA2InputStream extends InputStream {
private static int getDictSize(int dictSize) {
if (dictSize < DICT_SIZE_MIN || dictSize > DICT_SIZE_MAX)
throw new IllegalArgumentException(
"Unsupported dictionary size " + dictSize);
"Unsupported dictionary size " + dictSize);
// Round dictionary size upward to a multiple of 16. This way LZMA
// can use LZDecoder.getPos() for calculating LZMA's posMask.
@@ -91,27 +90,25 @@ public class LZMA2InputStream extends InputStream {
}
/**
* Creates a new input stream that decompresses raw LZMA2 data
* from <code>in</code>.
* Creates a new input stream that decompresses raw LZMA2 data from
* <code>in</code>.
* <p>
* The caller needs to know the dictionary size used when compressing;
* the dictionary size isn't stored as part of a raw LZMA2 stream.
* The caller needs to know the dictionary size used when compressing; the
* dictionary size isn't stored as part of a raw LZMA2 stream.
* <p>
* Specifying a too small dictionary size will prevent decompressing
* the stream. Specifying a too big dictionary is waste of memory but
* Specifying a too small dictionary size will prevent decompressing the
* stream. Specifying a too big dictionary is waste of memory but
* decompression will work.
* <p>
* There is no need to specify a dictionary bigger than
* the uncompressed size of the data even if a bigger dictionary
* was used when compressing. If you know the uncompressed size
* of the data, this might allow saving some memory.
* There is no need to specify a dictionary bigger than the uncompressed
* size of the data even if a bigger dictionary was used when compressing.
* If you know the uncompressed size of the data, this might allow saving
* some memory.
*
* @param in input stream from which LZMA2-compressed
* data is read
* @param in input stream from which LZMA2-compressed data is read
*
* @param dictSize LZMA2 dictionary size as bytes, must be
* in the range [<code>DICT_SIZE_MIN</code>,
* <code>DICT_SIZE_MAX</code>]
* @param dictSize LZMA2 dictionary size as bytes, must be in the range
* [<code>DICT_SIZE_MIN</code>, <code>DICT_SIZE_MAX</code>]
*/
public LZMA2InputStream(InputStream in, int dictSize) {
this(in, dictSize, null);
@@ -120,20 +117,18 @@ public class LZMA2InputStream extends InputStream {
/**
* Creates a new LZMA2 decompressor using a preset dictionary.
* <p>
* This is like <code>LZMA2InputStream(InputStream, int)</code> except
* that the dictionary may be initialized using a preset dictionary.
* If a preset dictionary was used when compressing the data, the
* same preset dictionary must be provided when decompressing.
* This is like <code>LZMA2InputStream(InputStream, int)</code> except that
* the dictionary may be initialized using a preset dictionary. If a preset
* dictionary was used when compressing the data, the same preset dictionary
* must be provided when decompressing.
*
* @param in input stream from which LZMA2-compressed
* data is read
* @param in input stream from which LZMA2-compressed data is read
*
* @param dictSize LZMA2 dictionary size as bytes, must be
* in the range [<code>DICT_SIZE_MIN</code>,
* <code>DICT_SIZE_MAX</code>]
* @param dictSize LZMA2 dictionary size as bytes, must be in the range
* [<code>DICT_SIZE_MIN</code>, <code>DICT_SIZE_MAX</code>]
*
* @param presetDict preset dictionary or <code>null</code>
* to use no preset dictionary
* @param presetDict preset dictionary or <code>null</code> to use no preset
* dictionary
*/
public LZMA2InputStream(InputStream in, int dictSize, byte[] presetDict) {
// Check for null because otherwise null isn't detected
@@ -151,19 +146,18 @@ public class LZMA2InputStream extends InputStream {
/**
* Decompresses the next byte from this input stream.
* <p>
* Reading lots of data with <code>read()</code> from this input stream
* may be inefficient. Wrap it in <code>java.io.BufferedInputStream</code>
* if you need to read lots of data one byte at a time.
* Reading lots of data with <code>read()</code> from this input stream may
* be inefficient. Wrap it in <code>java.io.BufferedInputStream</code> if
* you need to read lots of data one byte at a time.
*
* @return the next decompressed byte, or <code>-1</code>
* to indicate the end of the compressed stream
* @return the next decompressed byte, or <code>-1</code> to indicate the
* end of the compressed stream
*
* @throws CorruptedInputException
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -174,24 +168,23 @@ public class LZMA2InputStream extends InputStream {
/**
* Decompresses into an array of bytes.
* <p>
* If <code>len</code> is zero, no bytes are read and <code>0</code>
* is returned. Otherwise this will block until <code>len</code>
* bytes have been decompressed, the end of the LZMA2 stream is reached,
* or an exception is thrown.
* If <code>len</code> is zero, no bytes are read and <code>0</code> is
* returned. Otherwise this will block until <code>len</code> bytes have
* been decompressed, the end of the LZMA2 stream is reached, or an
* exception is thrown.
*
* @param buf target buffer for uncompressed data
* @param off start offset in <code>buf</code>
* @param len maximum number of uncompressed bytes to read
*
* @return number of bytes read, or <code>-1</code> to indicate
* the end of the compressed stream
* @return number of bytes read, or <code>-1</code> to indicate the end of
* the compressed stream
*
* @throws CorruptedInputException
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -311,20 +304,18 @@ public class LZMA2InputStream extends InputStream {
}
/**
* Returns the number of uncompressed bytes that can be read
* without blocking. The value is returned with an assumption
* that the compressed input data will be valid. If the compressed
* data is corrupt, <code>CorruptedInputException</code> may get
* thrown before the number of bytes claimed to be available have
* been read from this input stream.
* Returns the number of uncompressed bytes that can be read without
* blocking. The value is returned with an assumption that the compressed
* input data will be valid. If the compressed data is corrupt,
* <code>CorruptedInputException</code> may get thrown before the number of
* bytes claimed to be available have been read from this input stream.
* <p>
* In LZMA2InputStream, the return value will be non-zero when the
* decompressor is in the middle of an LZMA2 chunk. The return value
* will then be the number of uncompressed bytes remaining from that
* chunk.
* decompressor is in the middle of an LZMA2 chunk. The return value will
* then be the number of uncompressed bytes remaining from that chunk.
*
* @return the number of uncompressed bytes that can be read
* without blocking
* @return the number of uncompressed bytes that can be read without
* blocking
*/
public int available() throws IOException {
if (in == null)
@@ -337,8 +328,8 @@ public class LZMA2InputStream extends InputStream {
}
/**
* Closes the stream and calls <code>in.close()</code>.
* If the stream was already closed, this does nothing.
* Closes the stream and calls <code>in.close()</code>. If the stream was
* already closed, this does nothing.
*
* @throws IOException if thrown by <code>in.close()</code>
*/

View File

@@ -16,9 +16,8 @@ import org.tukaani.xz.lzma.LZMAEncoder;
/**
* LZMA2 compression options.
* <p>
* While this allows setting the LZMA2 compression options in detail,
* often you only need <code>LZMA2Options()</code> or
* <code>LZMA2Options(int)</code>.
* While this allows setting the LZMA2 compression options in detail, often you
* only need <code>LZMA2Options()</code> or <code>LZMA2Options(int)</code>.
*/
public class LZMA2Options extends FilterOptions {
@@ -45,13 +44,13 @@ public class LZMA2Options extends FilterOptions {
/**
* Maximum dictionary size for compression is 768 MiB.
* <p>
* The decompressor supports bigger dictionaries, up to almost 2 GiB.
* With HC4 the encoder would support dictionaries bigger than 768 MiB.
* The 768 MiB limit comes from the current implementation of BT4 where
* we would otherwise hit the limits of signed ints in array indexing.
* The decompressor supports bigger dictionaries, up to almost 2 GiB. With
* HC4 the encoder would support dictionaries bigger than 768 MiB. The 768
* MiB limit comes from the current implementation of BT4 where we would
* otherwise hit the limits of signed ints in array indexing.
* <p>
* If you really need bigger dictionary for decompression,
* use {@link LZMA2InputStream} directly.
* If you really need bigger dictionary for decompression, use
* {@link LZMA2InputStream} directly.
*/
public static final int DICT_SIZE_MAX = 768 << 20;
@@ -86,20 +85,20 @@ public class LZMA2Options extends FilterOptions {
public static final int PB_DEFAULT = 2;
/**
* Compression mode: uncompressed.
* The data is wrapped into a LZMA2 stream without compression.
* Compression mode: uncompressed. The data is wrapped into a LZMA2 stream
* without compression.
*/
public static final int MODE_UNCOMPRESSED = 0;
/**
* Compression mode: fast.
* This is usually combined with a hash chain match finder.
* Compression mode: fast. This is usually combined with a hash chain match
* finder.
*/
public static final int MODE_FAST = LZMAEncoder.MODE_FAST;
/**
* Compression mode: normal.
* This is usually combined with a binary tree match finder.
* Compression mode: normal. This is usually combined with a binary tree
* match finder.
*/
public static final int MODE_NORMAL = LZMAEncoder.MODE_NORMAL;
@@ -125,9 +124,9 @@ public class LZMA2Options extends FilterOptions {
private static final int[] presetToDictSize = {
1 << 18, 1 << 20, 1 << 21, 1 << 22, 1 << 22,
1 << 23, 1 << 23, 1 << 24, 1 << 25, 1 << 26};
1 << 23, 1 << 23, 1 << 24, 1 << 25, 1 << 26 };
private static final int[] presetToDepthLimit = {4, 8, 24, 48};
private static final int[] presetToDepthLimit = { 4, 8, 24, 48 };
private int dictSize;
private byte[] presetDict = null;
@@ -140,8 +139,8 @@ public class LZMA2Options extends FilterOptions {
private int depthLimit;
/**
* Creates new LZMA2 options and sets them to the default values.
* This is equivalent to <code>LZMA2Options(PRESET_DEFAULT)</code>.
* Creates new LZMA2 options and sets them to the default values. This is
* equivalent to <code>LZMA2Options(PRESET_DEFAULT)</code>.
*/
public LZMA2Options() {
try {
@@ -155,8 +154,7 @@ public class LZMA2Options extends FilterOptions {
/**
* Creates new LZMA2 options and sets them to the given preset.
*
* @throws UnsupportedOptionsException
* <code>preset</code> is not supported
* @throws UnsupportedOptionsException <code>preset</code> is not supported
*/
public LZMA2Options(int preset) throws UnsupportedOptionsException {
setPreset(preset);
@@ -165,12 +163,11 @@ public class LZMA2Options extends FilterOptions {
/**
* Creates new LZMA2 options and sets them to the given custom values.
*
* @throws UnsupportedOptionsException
* unsupported options were specified
* @throws UnsupportedOptionsException unsupported options were specified
*/
public LZMA2Options(int dictSize, int lc, int lp, int pb, int mode,
int niceLen, int mf, int depthLimit)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
setDictSize(dictSize);
setLcLp(lc, lp);
setPb(pb);
@@ -183,23 +180,22 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the compression options to the given preset.
* <p>
* The presets 0-3 are fast presets with medium compression.
* The presets 4-6 are fairly slow presets with high compression.
* The default preset (<code>PRESET_DEFAULT</code>) is 6.
* The presets 0-3 are fast presets with medium compression. The presets 4-6
* are fairly slow presets with high compression. The default preset
* (<code>PRESET_DEFAULT</code>) is 6.
* <p>
* The presets 7-9 are like the preset 6 but use bigger dictionaries
* and have higher compressor and decompressor memory requirements.
* Unless the uncompressed size of the file exceeds 8&nbsp;MiB,
* 16&nbsp;MiB, or 32&nbsp;MiB, it is waste of memory to use the
* presets 7, 8, or 9, respectively.
* The presets 7-9 are like the preset 6 but use bigger dictionaries and
* have higher compressor and decompressor memory requirements. Unless the
* uncompressed size of the file exceeds 8&nbsp;MiB, 16&nbsp;MiB, or
* 32&nbsp;MiB, it is waste of memory to use the presets 7, 8, or 9,
* respectively.
*
* @throws UnsupportedOptionsException
* <code>preset</code> is not supported
* @throws UnsupportedOptionsException <code>preset</code> is not supported
*/
public void setPreset(int preset) throws UnsupportedOptionsException {
if (preset < 0 || preset > 9)
throw new UnsupportedOptionsException(
"Unsupported preset: " + preset);
"Unsupported preset: " + preset);
lc = LC_DEFAULT;
lp = LP_DEFAULT;
@@ -227,23 +223,22 @@ public class LZMA2Options extends FilterOptions {
* However, using a dictionary bigger than the size of the uncompressed
* data is a waste of memory.
* <p>
* Any value in the range [DICT_SIZE_MIN, DICT_SIZE_MAX] is valid,
* but sizes of 2^n and 2^n&nbsp;+&nbsp;2^(n-1) bytes are somewhat
* recommended.
* Any value in the range [DICT_SIZE_MIN, DICT_SIZE_MAX] is valid, but sizes
* of 2^n and 2^n&nbsp;+&nbsp;2^(n-1) bytes are somewhat recommended.
*
* @throws UnsupportedOptionsException
* <code>dictSize</code> is not supported
* @throws UnsupportedOptionsException <code>dictSize</code> is not
* supported
*/
public void setDictSize(int dictSize) throws UnsupportedOptionsException {
if (dictSize < DICT_SIZE_MIN)
throw new UnsupportedOptionsException(
"LZMA2 dictionary size must be at least 4 KiB: "
+ dictSize + " B");
"LZMA2 dictionary size must be at least 4 KiB: "
+ dictSize + " B");
if (dictSize > DICT_SIZE_MAX)
throw new UnsupportedOptionsException(
"LZMA2 dictionary size must not exceed "
+ (DICT_SIZE_MAX >> 20) + " MiB: " + dictSize + " B");
"LZMA2 dictionary size must not exceed "
+ (DICT_SIZE_MAX >> 20) + " MiB: " + dictSize + " B");
this.dictSize = dictSize;
}
@@ -256,18 +251,18 @@ public class LZMA2Options extends FilterOptions {
}
/**
* Sets a preset dictionary. Use null to disable the use of
* a preset dictionary. By default there is no preset dictionary.
* Sets a preset dictionary. Use null to disable the use of a preset
* dictionary. By default there is no preset dictionary.
* <p>
* <b>The .xz format doesn't support a preset dictionary for now.
* Do not set a preset dictionary unless you use raw LZMA2.</b>
* <b>The .xz format doesn't support a preset dictionary for now. Do not set
* a preset dictionary unless you use raw LZMA2.</b>
* <p>
* Preset dictionary can be useful when compressing many similar,
* relatively small chunks of data independently from each other.
* A preset dictionary should contain typical strings that occur in
* the files being compressed. The most probable strings should be
* near the end of the preset dictionary. The preset dictionary used
* for compression is also needed for decompression.
* Preset dictionary can be useful when compressing many similar, relatively
* small chunks of data independently from each other. A preset dictionary
* should contain typical strings that occur in the files being compressed.
* The most probable strings should be near the end of the preset
* dictionary. The preset dictionary used for compression is also needed for
* decompression.
*/
public void setPresetDict(byte[] presetDict) {
this.presetDict = presetDict;
@@ -283,20 +278,19 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the number of literal context bits and literal position bits.
* <p>
* The sum of <code>lc</code> and <code>lp</code> is limited to 4.
* Trying to exceed it will throw an exception. This function lets
* you change both at the same time.
* The sum of <code>lc</code> and <code>lp</code> is limited to 4. Trying to
* exceed it will throw an exception. This function lets you change both at
* the same time.
*
* @throws UnsupportedOptionsException
* <code>lc</code> and <code>lp</code>
* @throws UnsupportedOptionsException <code>lc</code> and <code>lp</code>
* are invalid
*/
public void setLcLp(int lc, int lp) throws UnsupportedOptionsException {
if (lc < 0 || lp < 0 || lc > LC_LP_MAX || lp > LC_LP_MAX
|| lc + lp > LC_LP_MAX)
throw new UnsupportedOptionsException(
"lc + lp must not exceed " + LC_LP_MAX + ": "
+ lc + " + " + lp);
"lc + lp must not exceed " + LC_LP_MAX + ": "
+ lc + " + " + lp);
this.lc = lc;
this.lp = lp;
@@ -305,28 +299,25 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the number of literal context bits.
* <p>
* All bytes that cannot be encoded as matches are encoded as literals.
* That is, literals are simply 8-bit bytes that are encoded one at
* a time.
* All bytes that cannot be encoded as matches are encoded as literals. That
* is, literals are simply 8-bit bytes that are encoded one at a time.
* <p>
* The literal coding makes an assumption that the highest <code>lc</code>
* bits of the previous uncompressed byte correlate with the next byte.
* For example, in typical English text, an upper-case letter is often
* followed by a lower-case letter, and a lower-case letter is usually
* followed by another lower-case letter. In the US-ASCII character set,
* the highest three bits are 010 for upper-case letters and 011 for
* lower-case letters. When <code>lc</code> is at least 3, the literal
* coding can take advantage of this property in the uncompressed data.
* bits of the previous uncompressed byte correlate with the next byte. For
* example, in typical English text, an upper-case letter is often followed
* by a lower-case letter, and a lower-case letter is usually followed by
* another lower-case letter. In the US-ASCII character set, the highest
* three bits are 010 for upper-case letters and 011 for lower-case letters.
* When <code>lc</code> is at least 3, the literal coding can take advantage
* of this property in the uncompressed data.
* <p>
* The default value (3) is usually good. If you want maximum compression,
* try <code>setLc(4)</code>. Sometimes it helps a little, and sometimes it
* makes compression worse. If it makes it worse, test for example
* <code>setLc(2)</code> too.
*
* @throws UnsupportedOptionsException
* <code>lc</code> is invalid, or the sum
* of <code>lc</code> and <code>lp</code>
* exceed LC_LP_MAX
* @throws UnsupportedOptionsException <code>lc</code> is invalid, or the
* sum of <code>lc</code> and <code>lp</code> exceed LC_LP_MAX
*/
public void setLc(int lc) throws UnsupportedOptionsException {
setLcLp(lc, lp);
@@ -335,14 +326,12 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the number of literal position bits.
* <p>
* This affects what kind of alignment in the uncompressed data is
* assumed when encoding literals. See {@link #setPb(int) setPb} for
* more information about alignment.
* This affects what kind of alignment in the uncompressed data is assumed
* when encoding literals. See {@link #setPb(int) setPb} for more
* information about alignment.
*
* @throws UnsupportedOptionsException
* <code>lp</code> is invalid, or the sum
* of <code>lc</code> and <code>lp</code>
* exceed LC_LP_MAX
* @throws UnsupportedOptionsException <code>lp</code> is invalid, or the
* sum of <code>lc</code> and <code>lp</code> exceed LC_LP_MAX
*/
public void setLp(int lp) throws UnsupportedOptionsException {
setLcLp(lc, lp);
@@ -365,31 +354,28 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the number of position bits.
* <p>
* This affects what kind of alignment in the uncompressed data is
* assumed in general. The default (2) means four-byte alignment
* (2^<code>pb</code> = 2^2 = 4), which is often a good choice when
* there's no better guess.
* This affects what kind of alignment in the uncompressed data is assumed
* in general. The default (2) means four-byte alignment (2^<code>pb</code>
* = 2^2 = 4), which is often a good choice when there's no better guess.
* <p>
* When the alignment is known, setting the number of position bits
* accordingly may reduce the file size a little. For example with text
* files having one-byte alignment (US-ASCII, ISO-8859-*, UTF-8), using
* <code>setPb(0)</code> can improve compression slightly. For UTF-16
* text, <code>setPb(1)</code> is a good choice. If the alignment is
* an odd number like 3 bytes, <code>setPb(0)</code> might be the best
* choice.
* <code>setPb(0)</code> can improve compression slightly. For UTF-16 text,
* <code>setPb(1)</code> is a good choice. If the alignment is an odd number
* like 3 bytes, <code>setPb(0)</code> might be the best choice.
* <p>
* Even though the assumed alignment can be adjusted with
* <code>setPb</code> and <code>setLp</code>, LZMA2 still slightly favors
* 16-byte alignment. It might be worth taking into account when designing
* file formats that are likely to be often compressed with LZMA2.
* Even though the assumed alignment can be adjusted with <code>setPb</code>
* and <code>setLp</code>, LZMA2 still slightly favors 16-byte alignment. It
* might be worth taking into account when designing file formats that are
* likely to be often compressed with LZMA2.
*
* @throws UnsupportedOptionsException
* <code>pb</code> is invalid
* @throws UnsupportedOptionsException <code>pb</code> is invalid
*/
public void setPb(int pb) throws UnsupportedOptionsException {
if (pb < 0 || pb > PB_MAX)
throw new UnsupportedOptionsException(
"pb must not exceed " + PB_MAX + ": " + pb);
"pb must not exceed " + PB_MAX + ": " + pb);
this.pb = pb;
}
@@ -404,25 +390,24 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the compression mode.
* <p>
* This specifies the method to analyze the data produced by
* a match finder. The default is <code>MODE_FAST</code> for presets
* 0-3 and <code>MODE_NORMAL</code> for presets 4-9.
* This specifies the method to analyze the data produced by a match finder.
* The default is <code>MODE_FAST</code> for presets 0-3 and
* <code>MODE_NORMAL</code> for presets 4-9.
* <p>
* Usually <code>MODE_FAST</code> is used with Hash Chain match finders
* and <code>MODE_NORMAL</code> with Binary Tree match finders. This is
* also what the presets do.
* Usually <code>MODE_FAST</code> is used with Hash Chain match finders and
* <code>MODE_NORMAL</code> with Binary Tree match finders. This is also
* what the presets do.
* <p>
* The special mode <code>MODE_UNCOMPRESSED</code> doesn't try to
* compress the data at all (and doesn't use a match finder) and will
* simply wrap it in uncompressed LZMA2 chunks.
* The special mode <code>MODE_UNCOMPRESSED</code> doesn't try to compress
* the data at all (and doesn't use a match finder) and will simply wrap it
* in uncompressed LZMA2 chunks.
*
* @throws UnsupportedOptionsException
* <code>mode</code> is not supported
* @throws UnsupportedOptionsException <code>mode</code> is not supported
*/
public void setMode(int mode) throws UnsupportedOptionsException {
if (mode < MODE_UNCOMPRESSED || mode > MODE_NORMAL)
throw new UnsupportedOptionsException(
"Unsupported compression mode: " + mode);
"Unsupported compression mode: " + mode);
this.mode = mode;
}
@@ -435,25 +420,23 @@ public class LZMA2Options extends FilterOptions {
}
/**
* Sets the nice length of matches.
* Once a match of at least <code>niceLen</code> bytes is found,
* the algorithm stops looking for better matches. Higher values tend
* to give better compression at the expense of speed. The default
* depends on the preset.
* Sets the nice length of matches. Once a match of at least
* <code>niceLen</code> bytes is found, the algorithm stops looking for
* better matches. Higher values tend to give better compression at the
* expense of speed. The default depends on the preset.
*
* @throws UnsupportedOptionsException
* <code>niceLen</code> is invalid
* @throws UnsupportedOptionsException <code>niceLen</code> is invalid
*/
public void setNiceLen(int niceLen) throws UnsupportedOptionsException {
if (niceLen < NICE_LEN_MIN)
throw new UnsupportedOptionsException(
"Minimum nice length of matches is "
+ NICE_LEN_MIN + " bytes: " + niceLen);
"Minimum nice length of matches is "
+ NICE_LEN_MIN + " bytes: " + niceLen);
if (niceLen > NICE_LEN_MAX)
throw new UnsupportedOptionsException(
"Maximum nice length of matches is " + NICE_LEN_MAX
+ ": " + niceLen);
"Maximum nice length of matches is " + NICE_LEN_MAX
+ ": " + niceLen);
this.niceLen = niceLen;
}
@@ -468,18 +451,17 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the match finder type.
* <p>
* Match finder has a major effect on compression speed, memory usage,
* and compression ratio. Usually Hash Chain match finders are faster
* than Binary Tree match finders. The default depends on the preset:
* 0-3 use <code>MF_HC4</code> and 4-9 use <code>MF_BT4</code>.
* Match finder has a major effect on compression speed, memory usage, and
* compression ratio. Usually Hash Chain match finders are faster than
* Binary Tree match finders. The default depends on the preset: 0-3 use
* <code>MF_HC4</code> and 4-9 use <code>MF_BT4</code>.
*
* @throws UnsupportedOptionsException
* <code>mf</code> is not supported
* @throws UnsupportedOptionsException <code>mf</code> is not supported
*/
public void setMatchFinder(int mf) throws UnsupportedOptionsException {
if (mf != MF_HC4 && mf != MF_BT4)
throw new UnsupportedOptionsException(
"Unsupported match finder: " + mf);
"Unsupported match finder: " + mf);
this.mf = mf;
}
@@ -494,24 +476,23 @@ public class LZMA2Options extends FilterOptions {
/**
* Sets the match finder search depth limit.
* <p>
* The default is a special value of <code>0</code> which indicates that
* the depth limit should be automatically calculated by the selected
* match finder from the nice length of matches.
* The default is a special value of <code>0</code> which indicates that the
* depth limit should be automatically calculated by the selected match
* finder from the nice length of matches.
* <p>
* Reasonable depth limit for Hash Chain match finders is 4-100 and
* 16-1000 for Binary Tree match finders. Using very high values can
* make the compressor extremely slow with some files. Avoid settings
* higher than 1000 unless you are prepared to interrupt the compression
* in case it is taking far too long.
* Reasonable depth limit for Hash Chain match finders is 4-100 and 16-1000
* for Binary Tree match finders. Using very high values can make the
* compressor extremely slow with some files. Avoid settings higher than
* 1000 unless you are prepared to interrupt the compression in case it is
* taking far too long.
*
* @throws UnsupportedOptionsException
* <code>depthLimit</code> is invalid
* @throws UnsupportedOptionsException <code>depthLimit</code> is invalid
*/
public void setDepthLimit(int depthLimit)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
if (depthLimit < 0)
throw new UnsupportedOptionsException(
"Depth limit cannot be negative: " + depthLimit);
"Depth limit cannot be negative: " + depthLimit);
this.depthLimit = depthLimit;
}
@@ -540,12 +521,12 @@ public class LZMA2Options extends FilterOptions {
* Gets how much memory the LZMA2 decoder will need to decompress the data
* that was encoded with these options and stored in a .xz file.
* <p>
* The returned value may be bigger than the value returned by a direct call
* to {@link LZMA2InputStream#getMemoryUsage(int)} if the dictionary size
* is not 2^n or 2^n&nbsp;+&nbsp;2^(n-1) bytes. This is because the .xz
* headers store the dictionary size in such a format and other values
* are rounded up to the next such value. Such rounding is harmless except
* it might waste some memory if an unusual dictionary size is used.
* The returned value may be bigger than the value returned by a direct call to
* {@link LZMA2InputStream#getMemoryUsage(int)} if the dictionary size is
* not 2^n or 2^n&nbsp;+&nbsp;2^(n-1) bytes. This is because the .xz headers
* store the dictionary size in such a format and other values are rounded
* up to the next such value. Such rounding is harmless except it might waste
* some memory if an unusual dictionary size is used.
* <p>
* If you use raw LZMA2 streams and an unusual dictionary size, call
* {@link LZMA2InputStream#getMemoryUsage} directly to get raw decoder

View File

@@ -133,7 +133,7 @@ class LZMA2OutputStream extends FinishableOutputStream {
}
private void writeLZMA(int uncompressedSize, int compressedSize)
throws IOException {
throws IOException {
int control;
if (propsNeeded)

View File

@@ -20,16 +20,16 @@ import org.tukaani.xz.lzma.LZMADecoder;
* Decompresses legacy .lzma files and raw LZMA streams (no .lzma header).
* <p>
* <b>IMPORTANT:</b> In contrast to other classes in this package, this class
* reads data from its input stream one byte at a time. If the input stream
* is for example {@link java.io.FileInputStream}, wrapping it into
* {@link java.io.BufferedInputStream} tends to improve performance a lot.
* This is not automatically done by this class because there may be use
* cases where it is desired that this class won't read any bytes past
* the end of the LZMA stream.
* reads data from its input stream one byte at a time. If the input stream is
* for example {@link java.io.FileInputStream}, wrapping it into
* {@link java.io.BufferedInputStream} tends to improve performance a lot. This
* is not automatically done by this class because there may be use cases where
* it is desired that this class won't read any bytes past the end of the LZMA
* stream.
* <p>
* Even when using <code>BufferedInputStream</code>, the performance tends
* to be worse (maybe 10-20&nbsp;% slower) than with {@link LZMA2InputStream}
* or {@link XZInputStream} (when the .xz file contains LZMA2-compressed data).
* Even when using <code>BufferedInputStream</code>, the performance tends to be
* worse (maybe 10-20&nbsp;% slower) than with {@link LZMA2InputStream} or
* {@link XZInputStream} (when the .xz file contains LZMA2-compressed data).
*
* @since 1.4
*/
@@ -39,10 +39,10 @@ public class LZMAInputStream extends InputStream {
* Largest dictionary size supported by this implementation.
* <p>
* LZMA allows dictionaries up to one byte less than 4 GiB. This
* implementation supports only 16 bytes less than 2 GiB. This
* limitation is due to Java using signed 32-bit integers for array
* indexing. The limitation shouldn't matter much in practice since so
* huge dictionaries are not normally used.
* implementation supports only 16 bytes less than 2 GiB. This limitation is
* due to Java using signed 32-bit integers for array indexing. The
* limitation shouldn't matter much in practice since so huge dictionaries
* are not normally used.
*/
public static final int DICT_SIZE_MAX = Integer.MAX_VALUE & ~15;
@@ -56,39 +56,35 @@ public class LZMAInputStream extends InputStream {
private final byte[] tempBuf = new byte[1];
/**
* Number of uncompressed bytes left to be decompressed, or -1 if
* the end marker is used.
* Number of uncompressed bytes left to be decompressed, or -1 if the end
* marker is used.
*/
private long remainingSize;
private IOException exception = null;
/**
* Gets approximate decompressor memory requirements as kibibytes for
* the given dictionary size and LZMA properties byte (lc, lp, and pb).
* Gets approximate decompressor memory requirements as kibibytes for the
* given dictionary size and LZMA properties byte (lc, lp, and pb).
*
* @param dictSize LZMA dictionary size as bytes, should be
* in the range [<code>0</code>,
* <code>DICT_SIZE_MAX</code>]
* @param dictSize LZMA dictionary size as bytes, should be in the range
* [<code>0</code>, <code>DICT_SIZE_MAX</code>]
*
* @param propsByte LZMA properties byte that encodes the values
* of lc, lp, and pb
* @param propsByte LZMA properties byte that encodes the values of lc, lp,
* and pb
*
* @return approximate memory requirements as kibibytes (KiB)
*
* @throws UnsupportedOptionsException
* if <code>dictSize</code> is outside
* the range [<code>0</code>,
* <code>DICT_SIZE_MAX</code>]
* @throws UnsupportedOptionsException if <code>dictSize</code> is outside
* the range [<code>0</code>, <code>DICT_SIZE_MAX</code>]
*
* @throws CorruptedInputException
* if <code>propsByte</code> is invalid
* @throws CorruptedInputException if <code>propsByte</code> is invalid
*/
public static int getMemoryUsage(int dictSize, byte propsByte)
throws UnsupportedOptionsException, CorruptedInputException {
throws UnsupportedOptionsException, CorruptedInputException {
if (dictSize < 0 || dictSize > DICT_SIZE_MAX)
throw new UnsupportedOptionsException(
"LZMA dictionary is too big for this implementation");
"LZMA dictionary is too big for this implementation");
int props = propsByte & 0xFF;
if (props > (4 * 5 + 4) * 9 + 8)
@@ -102,18 +98,17 @@ public class LZMAInputStream extends InputStream {
}
/**
* Gets approximate decompressor memory requirements as kibibytes for
* the given dictionary size, lc, and lp. Note that pb isn't needed.
* Gets approximate decompressor memory requirements as kibibytes for the
* given dictionary size, lc, and lp. Note that pb isn't needed.
*
* @param dictSize LZMA dictionary size as bytes, must be
* in the range [<code>0</code>,
* <code>DICT_SIZE_MAX</code>]
* @param dictSize LZMA dictionary size as bytes, must be in the range
* [<code>0</code>, <code>DICT_SIZE_MAX</code>]
*
* @param lc number of literal context bits, must be
* in the range [0, 8]
* @param lc number of literal context bits, must be in the range [0,
* 8]
*
* @param lp number of literal position bits, must be
* in the range [0, 4]
* @param lp number of literal position bits, must be in the range [0,
* 4]
*
* @return approximate memory requirements as kibibytes (KiB)
*/
@@ -135,7 +130,7 @@ public class LZMAInputStream extends InputStream {
private static int getDictSize(int dictSize) {
if (dictSize < 0 || dictSize > DICT_SIZE_MAX)
throw new IllegalArgumentException(
"LZMA dictionary is too big for this implementation");
"LZMA dictionary is too big for this implementation");
// For performance reasons, use a 4 KiB dictionary if something
// smaller was requested. It's a rare situation and the performance
@@ -156,25 +151,22 @@ public class LZMAInputStream extends InputStream {
}
/**
* Creates a new .lzma file format decompressor without
* a memory usage limit.
* Creates a new .lzma file format decompressor without a memory usage
* limit.
*
* @param in input stream from which .lzma data is read;
* it might be a good idea to wrap it in
* <code>BufferedInputStream</code>, see the
* note at the top of this page
* @param in input stream from which .lzma data is read; it might be a good
* idea to wrap it in <code>BufferedInputStream</code>, see the note at the
* top of this page
*
* @throws CorruptedInputException
* file is corrupt or perhaps not in
* the .lzma format at all
* @throws CorruptedInputException file is corrupt or perhaps not in the
* .lzma format at all
*
* @throws UnsupportedOptionsException
* dictionary size or uncompressed size is too
* big for this implementation
* @throws UnsupportedOptionsException dictionary size or uncompressed size
* is too big for this implementation
*
* @throws EOFException
* file is truncated or perhaps not in
* the .lzma format at all
* @throws EOFException file is truncated or perhaps not in
* the .lzma format
* at all
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -183,37 +175,33 @@ public class LZMAInputStream extends InputStream {
}
/**
* Creates a new .lzma file format decompressor with an optional
* memory usage limit.
* Creates a new .lzma file format decompressor with an optional memory
* usage limit.
*
* @param in input stream from which .lzma data is read;
* it might be a good idea to wrap it in
* <code>BufferedInputStream</code>, see the
* note at the top of this page
* @param in input stream from which .lzma data is read; it might
* be a good
* idea to wrap it in <code>BufferedInputStream</code>, see the note at the
* top of this page
*
* @param memoryLimit memory usage limit in kibibytes (KiB)
* or <code>-1</code> to impose no
* memory usage limit
* @param memoryLimit memory usage limit in kibibytes (KiB) or
* <code>-1</code> to impose no memory usage limit
*
* @throws CorruptedInputException
* file is corrupt or perhaps not in
* the .lzma format at all
* @throws CorruptedInputException file is corrupt or perhaps not in the
* .lzma format at all
*
* @throws UnsupportedOptionsException
* dictionary size or uncompressed size is too
* big for this implementation
* @throws UnsupportedOptionsException dictionary size or uncompressed size
* is too big for this implementation
*
* @throws MemoryLimitException
* memory usage limit was exceeded
* @throws MemoryLimitException memory usage limit was exceeded
*
* @throws EOFException
* file is truncated or perhaps not in
* the .lzma format at all
* @throws EOFException file is truncated or perhaps not in
* the .lzma format
* at all
*
* @throws IOException may be thrown by <code>in</code>
*/
public LZMAInputStream(InputStream in, int memoryLimit)
throws IOException {
throws IOException {
DataInputStream inData = new DataInputStream(in);
// Properties byte (lc, lp, and pb)
@@ -244,45 +232,43 @@ public class LZMAInputStream extends InputStream {
* Creates a new input stream that decompresses raw LZMA data (no .lzma
* header) from <code>in</code>.
* <p>
* The caller needs to know if the "end of payload marker (EOPM)" alias
* "end of stream marker (EOS marker)" alias "end marker" is present.
* If the end marker isn't used, the caller must know the exact
* uncompressed size of the stream.
* The caller needs to know if the "end of payload marker (EOPM)" alias "end
* of stream marker (EOS marker)" alias "end marker" is present. If the end
* marker isn't used, the caller must know the exact uncompressed size of
* the stream.
* <p>
* The caller also needs to provide the LZMA properties byte that encodes
* the number of literal context bits (lc), literal position bits (lp),
* and position bits (pb).
* the number of literal context bits (lc), literal position bits (lp), and
* position bits (pb).
* <p>
* The dictionary size used when compressing is also needed. Specifying
* a too small dictionary size will prevent decompressing the stream.
* Specifying a too big dictionary is a waste of memory but decompression
* will work.
* The dictionary size used when compressing is also needed. Specifying a
* too small dictionary size will prevent decompressing the stream.
* Specifying a too big dictionary is a waste of memory but decompression will
* work.
* <p>
* There is no need to specify a dictionary bigger than
* the uncompressed size of the data even if a bigger dictionary
* was used when compressing. If you know the uncompressed size
* of the data, this might allow saving some memory.
* There is no need to specify a dictionary bigger than the uncompressed
* size of the data even if a bigger dictionary was used when compressing.
* If you know the uncompressed size of the data, this might allow saving
* some memory.
*
* @param in input stream from which compressed
* data is read
* @param in input stream from which compressed data is read
*
* @param uncompSize uncompressed size of the LZMA stream or -1
* if the end marker is used in the LZMA stream
* @param uncompSize uncompressed size of the LZMA stream or -1 if the end
* marker is used in the LZMA stream
*
* @param propsByte LZMA properties byte that has the encoded
* values for literal context bits (lc), literal
* position bits (lp), and position bits (pb)
* @param propsByte LZMA properties byte that has the encoded values for
* literal context bits (lc), literal position bits (lp), and position bits
* (pb)
*
* @param dictSize dictionary size as bytes, must be in the range
* [<code>0</code>, <code>DICT_SIZE_MAX</code>]
*
* @throws CorruptedInputException
* if <code>propsByte</code> is invalid or
* @throws CorruptedInputException if <code>propsByte</code> is invalid
* or
* the first input byte is not 0x00
*
* @throws UnsupportedOptionsException
* dictionary size or uncompressed size is too
* big for this implementation
* @throws UnsupportedOptionsException dictionary size or uncompressed size
* is too big for this implementation
*
*
*/
@@ -295,29 +281,27 @@ public class LZMAInputStream extends InputStream {
* Creates a new input stream that decompresses raw LZMA data (no .lzma
* header) from <code>in</code> optionally with a preset dictionary.
*
* @param in input stream from which LZMA-compressed
* data is read
* @param in input stream from which LZMA-compressed data is read
*
* @param uncompSize uncompressed size of the LZMA stream or -1
* if the end marker is used in the LZMA stream
* @param uncompSize uncompressed size of the LZMA stream or -1 if the end
* marker is used in the LZMA stream
*
* @param propsByte LZMA properties byte that has the encoded
* values for literal context bits (lc), literal
* position bits (lp), and position bits (pb)
* @param propsByte LZMA properties byte that has the encoded values for
* literal context bits (lc), literal position bits (lp), and position bits
* (pb)
*
* @param dictSize dictionary size as bytes, must be in the range
* [<code>0</code>, <code>DICT_SIZE_MAX</code>]
*
* @param presetDict preset dictionary or <code>null</code>
* to use no preset dictionary
* @param presetDict preset dictionary or <code>null</code> to use no preset
* dictionary
*
* @throws CorruptedInputException
* if <code>propsByte</code> is invalid or
* @throws CorruptedInputException if <code>propsByte</code> is invalid
* or
* the first input byte is not 0x00
*
* @throws UnsupportedOptionsException
* dictionary size or uncompressed size is too
* big for this implementation
* @throws UnsupportedOptionsException dictionary size or uncompressed size
* is too big for this implementation
*
* @throws EOFException file is truncated or corrupt
*
@@ -325,7 +309,7 @@ public class LZMAInputStream extends InputStream {
*/
public LZMAInputStream(InputStream in, long uncompSize, byte propsByte,
int dictSize, byte[] presetDict)
throws IOException {
throws IOException {
initialize(in, uncompSize, propsByte, dictSize, presetDict);
}
@@ -333,29 +317,26 @@ public class LZMAInputStream extends InputStream {
* Creates a new input stream that decompresses raw LZMA data (no .lzma
* header) from <code>in</code> optionally with a preset dictionary.
*
* @param in input stream from which LZMA-compressed
* data is read
* @param in input stream from which LZMA-compressed data is read
*
* @param uncompSize uncompressed size of the LZMA stream or -1
* if the end marker is used in the LZMA stream
* @param uncompSize uncompressed size of the LZMA stream or -1 if the end
* marker is used in the LZMA stream
*
* @param lc number of literal context bits, must be
* in the range [0, 8]
* @param lc number of literal context bits, must be in the range
* [0, 8]
*
* @param lp number of literal position bits, must be
* in the range [0, 4]
* @param lp number of literal position bits, must be in the range
* [0, 4]
*
* @param pb number of position bits, must be
* in the range [0, 4]
* @param pb number of position bits, must be in the range [0, 4]
*
* @param dictSize dictionary size as bytes, must be in the range
* [<code>0</code>, <code>DICT_SIZE_MAX</code>]
*
* @param presetDict preset dictionary or <code>null</code>
* to use no preset dictionary
* @param presetDict preset dictionary or <code>null</code> to use no preset
* dictionary
*
* @throws CorruptedInputException
* if the first input byte is not 0x00
* @throws CorruptedInputException if the first input byte is not 0x00
*
* @throws EOFException file is truncated or corrupt
*
@@ -364,18 +345,18 @@ public class LZMAInputStream extends InputStream {
public LZMAInputStream(InputStream in, long uncompSize,
int lc, int lp, int pb,
int dictSize, byte[] presetDict)
throws IOException {
throws IOException {
initialize(in, uncompSize, lc, lp, pb, dictSize, presetDict);
}
private void initialize(InputStream in, long uncompSize, byte propsByte,
int dictSize, byte[] presetDict)
throws IOException {
throws IOException {
// Validate the uncompressed size since the other "initialize" throws
// IllegalArgumentException if uncompSize < -1.
if (uncompSize < -1)
throw new UnsupportedOptionsException(
"Uncompressed size is too big");
"Uncompressed size is too big");
// Decode the properties byte. In contrast to LZMA2, there is no
// limit of lc + lp <= 4.
@@ -392,7 +373,7 @@ public class LZMAInputStream extends InputStream {
// IllegalArgumentException if dictSize is not supported.
if (dictSize < 0 || dictSize > DICT_SIZE_MAX)
throw new UnsupportedOptionsException(
"LZMA dictionary is too big for this implementation");
"LZMA dictionary is too big for this implementation");
initialize(in, uncompSize, lc, lp, pb, dictSize, presetDict);
}
@@ -400,7 +381,7 @@ public class LZMAInputStream extends InputStream {
private void initialize(InputStream in, long uncompSize,
int lc, int lp, int pb,
int dictSize, byte[] presetDict)
throws IOException {
throws IOException {
// getDictSize validates dictSize and gives a message in
// the exception too, so skip validating dictSize here.
if (uncompSize < -1 || lc < 0 || lc > 8 || lp < 0 || lp > 4
@@ -424,19 +405,18 @@ public class LZMAInputStream extends InputStream {
/**
* Decompresses the next byte from this input stream.
* <p>
* Reading lots of data with <code>read()</code> from this input stream
* may be inefficient. Wrap it in <code>java.io.BufferedInputStream</code>
* if you need to read lots of data one byte at a time.
* Reading lots of data with <code>read()</code> from this input stream may
* be inefficient. Wrap it in <code>java.io.BufferedInputStream</code> if
* you need to read lots of data one byte at a time.
*
* @return the next decompressed byte, or <code>-1</code>
* to indicate the end of the compressed stream
* @return the next decompressed byte, or <code>-1</code> to indicate the
* end of the compressed stream
*
* @throws CorruptedInputException
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -447,17 +427,17 @@ public class LZMAInputStream extends InputStream {
/**
* Decompresses into an array of bytes.
* <p>
* If <code>len</code> is zero, no bytes are read and <code>0</code>
* is returned. Otherwise this will block until <code>len</code>
* bytes have been decompressed, the end of the LZMA stream is reached,
* or an exception is thrown.
* If <code>len</code> is zero, no bytes are read and <code>0</code> is
* returned. Otherwise this will block until <code>len</code> bytes have
* been decompressed, the end of the LZMA stream is reached, or an exception
* is thrown.
*
* @param buf target buffer for uncompressed data
* @param off start offset in <code>buf</code>
* @param len maximum number of uncompressed bytes to read
*
* @return number of bytes read, or <code>-1</code> to indicate
* the end of the compressed stream
* @return number of bytes read, or <code>-1</code> to indicate the end of
* the compressed stream
*
* @throws CorruptedInputException
*
@@ -552,8 +532,8 @@ public class LZMAInputStream extends InputStream {
}
/**
* Closes the stream and calls <code>in.close()</code>.
* If the stream was already closed, this does nothing.
* Closes the stream and calls <code>in.close()</code>. If the stream was
* already closed, this does nothing.
*
* @throws IOException if thrown by <code>in.close()</code>
*/

View File

@@ -9,11 +9,11 @@
package org.tukaani.xz;
/**
* Thrown when the memory usage limit given to the XZ decompressor
* would be exceeded.
* Thrown when the memory usage limit given to the XZ decompressor would be
* exceeded.
* <p>
* The amount of memory required and the memory usage limit are
* included in the error detail message in human readable format.
* The amount of memory required and the memory usage limit are included in the
* error detail message in human readable format.
*/
public class MemoryLimitException extends XZIOException {
@@ -25,8 +25,8 @@ public class MemoryLimitException extends XZIOException {
/**
* Creates a new MemoryLimitException.
* <p>
* The amount of memory needed and the memory usage limit are
* included in the error detail message.
* The amount of memory needed and the memory usage limit are included in
* the error detail message.
*
* @param memoryNeeded amount of memory needed as kibibytes (KiB)
* @param memoryLimit specified memory usage limit as kibibytes (KiB)
@@ -49,8 +49,8 @@ public class MemoryLimitException extends XZIOException {
}
/**
* Gets what the memory usage limit was at the time the exception
* was created.
* Gets what the memory usage limit was at the time the exception was
* created.
*
* @return memory usage limit as kibibytes (KiB)
*/

View File

@@ -11,15 +11,15 @@ package org.tukaani.xz;
class RawCoder {
static void validate(FilterCoder[] filters)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
for (int i = 0; i < filters.length - 1; ++i)
if (!filters[i].nonLastOK())
throw new UnsupportedOptionsException(
"Unsupported XZ filter chain");
"Unsupported XZ filter chain");
if (!filters[filters.length - 1].lastOK())
throw new UnsupportedOptionsException(
"Unsupported XZ filter chain");
"Unsupported XZ filter chain");
int changesSizeCount = 0;
for (int i = 0; i < filters.length; ++i)
@@ -28,6 +28,6 @@ class RawCoder {
if (changesSizeCount > 3)
throw new UnsupportedOptionsException(
"Unsupported XZ filter chain");
"Unsupported XZ filter chain");
}
}

View File

@@ -14,14 +14,14 @@ import java.io.IOException;
import java.io.FileNotFoundException;
/**
* Wraps a {@link java.io.RandomAccessFile RandomAccessFile}
* in a SeekableInputStream.
* Wraps a {@link java.io.RandomAccessFile RandomAccessFile} in a
* SeekableInputStream.
*/
public class SeekableFileInputStream extends SeekableInputStream {
/**
* The RandomAccessFile that has been wrapped
* into a SeekableFileInputStream.
* The RandomAccessFile that has been wrapped into a
* SeekableFileInputStream.
*/
protected RandomAccessFile randomAccessFile;
@@ -33,8 +33,8 @@ public class SeekableFileInputStream extends SeekableInputStream {
}
/**
* Creates a new seekable input stream that reads from a file with
* the specified name.
* Creates a new seekable input stream that reads from a file with the
* specified name.
*/
public SeekableFileInputStream(String name) throws FileNotFoundException {
randomAccessFile = new RandomAccessFile(name, "r");
@@ -63,8 +63,7 @@ public class SeekableFileInputStream extends SeekableInputStream {
}
/**
* Calls
* {@link RandomAccessFile#read(byte[],int,int)
* Calls {@link RandomAccessFile#read(byte[],int,int)
* randomAccessFile.read(buf, off, len)}.
*/
public int read(byte[] buf, int off, int len) throws IOException {

View File

@@ -19,22 +19,21 @@ public abstract class SeekableInputStream extends InputStream {
/**
* Seeks <code>n</code> bytes forward in this stream.
* <p>
* This will not seek past the end of the file. If the current position
* is already at or past the end of the file, this doesn't seek at all
* and returns <code>0</code>. Otherwise, if skipping <code>n</code> bytes
* would cause the position to exceed the stream size, this will do
* equivalent of <code>seek(length())</code> and the return value will
* be adjusted accordingly.
* This will not seek past the end of the file. If the current position is
* already at or past the end of the file, this doesn't seek at all and
* returns <code>0</code>. Otherwise, if skipping <code>n</code> bytes would
* cause the position to exceed the stream size, this will do equivalent of
* <code>seek(length())</code> and the return value will be adjusted
* accordingly.
* <p>
* If <code>n</code> is negative, the position isn't changed and
* the return value is <code>0</code>. It doesn't seek backward
* because it would conflict with the specification of
* If <code>n</code> is negative, the position isn't changed and the return
* value is <code>0</code>. It doesn't seek backward because it would
* conflict with the specification of
* {@link java.io.InputStream#skip(long) InputStream.skip}.
*
* @return <code>0</code> if <code>n</code> is negative,
* less than <code>n</code> if skipping <code>n</code>
* bytes would seek past the end of the file,
* <code>n</code> otherwise
* @return <code>0</code> if <code>n</code> is negative, less than
* <code>n</code> if skipping <code>n</code> bytes would seek past the end
* of the file, <code>n</code> otherwise
*
* @throws IOException might be thrown by {@link #seek(long)}
*/
@@ -68,14 +67,14 @@ public abstract class SeekableInputStream extends InputStream {
* Seeks to the specified absolute position in the stream.
* <p>
* Seeking past the end of the file should be supported by the subclasses
* unless there is a good reason to do otherwise. If one has seeked
* past the end of the stream, <code>read</code> will return
* <code>-1</code> to indicate end of stream.
* unless there is a good reason to do otherwise. If one has seeked past the
* end of the stream, <code>read</code> will return <code>-1</code> to
* indicate end of stream.
*
* @param pos new read position in the stream
*
* @throws IOException if <code>pos</code> is negative or if
* a stream-specific I/O error occurs
* @throws IOException if <code>pos</code> is negative or if a
* stream-specific I/O error occurs
*/
public abstract void seek(long pos) throws IOException;
}

View File

@@ -20,53 +20,52 @@ import org.tukaani.xz.index.IndexDecoder;
import org.tukaani.xz.index.BlockInfo;
/**
* Decompresses a .xz file in random access mode.
* This supports decompressing concatenated .xz files.
* Decompresses a .xz file in random access mode. This supports decompressing
* concatenated .xz files.
* <p>
* Each .xz file consist of one or more Streams. Each Stream consist of zero
* or more Blocks. Each Stream contains an Index of Streams' Blocks.
* The Indexes from all Streams are loaded in RAM by a constructor of this
* class. A typical .xz file has only one Stream, and parsing its Index will
* need only three or four seeks.
* Each .xz file consists of one or more Streams. Each Stream consists of zero or
* more Blocks. Each Stream contains an Index of Streams' Blocks. The Indexes
* from all Streams are loaded in RAM by a constructor of this class. A typical
* .xz file has only one Stream, and parsing its Index will need only three or
* four seeks.
* <p>
* To make random access possible, the data in a .xz file must be splitted
* into multiple Blocks of reasonable size. Decompression can only start at
* a Block boundary. When seeking to an uncompressed position that is not at
* a Block boundary, decompression starts at the beginning of the Block and
* throws away data until the target position is reached. Thus, smaller Blocks
* mean faster seeks to arbitrary uncompressed positions. On the other hand,
* smaller Blocks mean worse compression. So one has to make a compromise
* between random access speed and compression ratio.
* To make random access possible, the data in a .xz file must be split into
* multiple Blocks of reasonable size. Decompression can only start at a Block
* boundary. When seeking to an uncompressed position that is not at a Block
* boundary, decompression starts at the beginning of the Block and throws away
* data until the target position is reached. Thus, smaller Blocks mean faster
* seeks to arbitrary uncompressed positions. On the other hand, smaller Blocks
* mean worse compression. So one has to make a compromise between random access
* speed and compression ratio.
* <p>
* Implementation note: This class uses linear search to locate the correct
* Stream from the data structures in RAM. It was the simplest to implement
* and should be fine as long as there aren't too many Streams. The correct
* Block inside a Stream is located using binary search and thus is fast
* even with a huge number of Blocks.
* Stream from the data structures in RAM. It was the simplest to implement and
* should be fine as long as there aren't too many Streams. The correct Block
* inside a Stream is located using binary search and thus is fast even with a
* huge number of Blocks.
*
* <h4>Memory usage</h4>
* <p>
* The amount of memory needed for the Indexes is taken into account when
* checking the memory usage limit. Each Stream is calculated to need at
* least 1&nbsp;KiB of memory and each Block 16 bytes of memory, rounded up
* to the next kibibyte. So unless the file has a huge number of Streams or
* Blocks, these don't take significant amount of memory.
* checking the memory usage limit. Each Stream is calculated to need at least
* 1&nbsp;KiB of memory and each Block 16 bytes of memory, rounded up to the
* next kibibyte. So unless the file has a huge number of Streams or Blocks,
* these don't take significant amount of memory.
*
* <h4>Creating random-accessible .xz files</h4>
* <p>
* When using {@link XZOutputStream}, a new Block can be started by calling
* its {@link XZOutputStream#endBlock() endBlock} method. If you know
* that the decompressor will only need to seek to certain uncompressed
* positions, it can be a good idea to start a new Block at (some of) these
* positions (and only at these positions to get better compression ratio).
* When using {@link XZOutputStream}, a new Block can be started by calling its
* {@link XZOutputStream#endBlock() endBlock} method. If you know that the
* decompressor will only need to seek to certain uncompressed positions, it can
* be a good idea to start a new Block at (some of) these positions (and only at
* these positions to get better compression ratio).
* <p>
* liblzma in XZ Utils supports starting a new Block with
* <code>LZMA_FULL_FLUSH</code>. XZ Utils 5.1.1alpha added threaded
* compression which creates multi-Block .xz files. XZ Utils 5.1.1alpha
* also added the option <code>--block-size=SIZE</code> to the xz command
* line tool. XZ Utils 5.1.2alpha added a partial implementation of
* <code>--block-list=SIZES</code> which allows specifying sizes of
* individual Blocks.
* <code>LZMA_FULL_FLUSH</code>. XZ Utils 5.1.1alpha added threaded compression
* which creates multi-Block .xz files. XZ Utils 5.1.1alpha also added the
* option <code>--block-size=SIZE</code> to the xz command line tool. XZ Utils
* 5.1.2alpha added a partial implementation of <code>--block-list=SIZES</code>
* which allows specifying sizes of individual Blocks.
*
* @see SeekableFileInputStream
* @see XZInputStream
@@ -80,22 +79,21 @@ public class SeekableXZInputStream extends SeekableInputStream {
private SeekableInputStream in;
/**
* Memory usage limit after the memory usage of the IndexDecoders have
* been substracted.
* Memory usage limit after the memory usage of the IndexDecoders has been
* subtracted.
*/
private final int memoryLimit;
/**
* Memory usage of the IndexDecoders.
* <code>memoryLimit + indexMemoryUsage</code> equals the original
* memory usage limit that was passed to the constructor.
* <code>memoryLimit + indexMemoryUsage</code> equals the original memory
* usage limit that was passed to the constructor.
*/
private int indexMemoryUsage = 0;
/**
* List of IndexDecoders, one for each Stream in the file.
* The list is in reverse order: The first element is
* the last Stream in the file.
* List of IndexDecoders, one for each Stream in the file. The list is in
* reverse order: The first element is the last Stream in the file.
*/
private final ArrayList streams = new ArrayList();
@@ -120,20 +118,20 @@ public class SeekableXZInputStream extends SeekableInputStream {
private int blockCount = 0;
/**
* Size and position information about the current Block.
* If there are no Blocks, all values will be <code>-1</code>.
* Size and position information about the current Block. If there are no
* Blocks, all values will be <code>-1</code>.
*/
private final BlockInfo curBlockInfo;
/**
* Temporary (and cached) information about the Block whose information
* is queried via <code>getBlockPos</code> and related functions.
* Temporary (and cached) information about the Block whose information is
* queried via <code>getBlockPos</code> and related functions.
*/
private final BlockInfo queriedBlockInfo;
/**
* Integrity Check in the current XZ Stream. The constructor leaves
* this to point to the Check of the first Stream.
* Integrity Check in the current XZ Stream. The constructor leaves this to
* point to the Check of the first Stream.
*/
private Check check;
@@ -153,14 +151,14 @@ public class SeekableXZInputStream extends SeekableInputStream {
private long seekPos;
/**
* True when <code>seek(long)</code> has been called but the actual
* seeking hasn't been done yet.
* True when <code>seek(long)</code> has been called but the actual seeking
* hasn't been done yet.
*/
private boolean seekNeeded = false;
/**
* True when end of the file was reached. This can be cleared by
* calling <code>seek(long)</code>.
* True when end of the file was reached. This can be cleared by calling
* <code>seek(long)</code>.
*/
private boolean endReached = false;
@@ -170,75 +168,67 @@ public class SeekableXZInputStream extends SeekableInputStream {
private IOException exception = null;
/**
* Temporary buffer for read(). This avoids reallocating memory
* on every read() call.
* Temporary buffer for read(). This avoids reallocating memory on every
* read() call.
*/
private final byte[] tempBuf = new byte[1];
/**
* Creates a new seekable XZ decompressor without a memory usage limit.
*
* @param in seekable input stream containing one or more
* XZ Streams; the whole input stream is used
* @param in seekable input stream containing one or more XZ Streams; the
* whole input stream is used
*
* @throws XZFormatException
* input is not in the XZ format
* @throws XZFormatException input is not in the XZ format
*
* @throws CorruptedInputException
* XZ data is corrupt or truncated
* @throws CorruptedInputException XZ data is corrupt or truncated
*
* @throws UnsupportedOptionsException
* XZ headers seem valid but they specify
* options not supported by this implementation
* @throws UnsupportedOptionsException XZ headers seem valid but they
* specify options not supported by this implementation
*
* @throws EOFException
* less than 6 bytes of input was available
* from <code>in</code>, or (unlikely) the size
* of the underlying stream got smaller while
* this was reading from it
* @throws EOFException less than 6 bytes of input was
* available from
* <code>in</code>, or (unlikely) the size of the underlying stream got
* smaller while this was reading from it
*
* @throws IOException may be thrown by <code>in</code>
*/
public SeekableXZInputStream(SeekableInputStream in)
throws IOException {
throws IOException {
this(in, -1);
}
/**
* Creates a new seekable XZ decomporessor with an optional
* memory usage limit.
* Creates a new seekable XZ decompressor with an optional memory usage
* limit.
*
* @param in seekable input stream containing one or more
* XZ Streams; the whole input stream is used
* @param in seekable input stream containing one or more XZ
* Streams; the
* whole input stream is used
*
* @param memoryLimit memory usage limit in kibibytes (KiB)
* or <code>-1</code> to impose no
* memory usage limit
* @param memoryLimit memory usage limit in kibibytes (KiB) or
* <code>-1</code> to impose no memory usage limit
*
* @throws XZFormatException
* input is not in the XZ format
* @throws XZFormatException input is not in the XZ format
*
* @throws CorruptedInputException
* XZ data is corrupt or truncated
* @throws CorruptedInputException XZ data is corrupt or truncated
*
* @throws UnsupportedOptionsException
* XZ headers seem valid but they specify
* options not supported by this implementation
* @throws UnsupportedOptionsException XZ headers seem valid but they
* specify options not supported by this implementation
*
* @throws MemoryLimitException
* decoded XZ Indexes would need more memory
* @throws MemoryLimitException decoded XZ Indexes would need more
* memory
* than allowed by the memory usage limit
*
* @throws EOFException
* less than 6 bytes of input was available
* from <code>in</code>, or (unlikely) the size
* of the underlying stream got smaller while
* this was reading from it
* @throws EOFException less than 6 bytes of input was
* available from
* <code>in</code>, or (unlikely) the size of the underlying stream got
* smaller while this was reading from it
*
* @throws IOException may be thrown by <code>in</code>
*/
public SeekableXZInputStream(SeekableInputStream in, int memoryLimit)
throws IOException {
throws IOException {
this.in = in;
DataInputStream inData = new DataInputStream(in);
@@ -255,7 +245,7 @@ public class SeekableXZInputStream extends SeekableInputStream {
long pos = in.length();
if ((pos & 3) != 0)
throw new CorruptedInputException(
"XZ file size is not a multiple of 4 bytes");
"XZ file size is not a multiple of 4 bytes");
// Parse the headers starting from the end of the file.
byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
@@ -287,7 +277,7 @@ public class SeekableXZInputStream extends SeekableInputStream {
StreamFlags streamFooter = DecoderUtil.decodeStreamFooter(buf);
if (streamFooter.backwardSize >= pos)
throw new CorruptedInputException(
"Backward Size in XZ Stream Footer is too big");
"Backward Size in XZ Stream Footer is too big");
// Check that the Check ID is supported. Store it in case this
// is the first Stream in the file.
@@ -309,8 +299,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
// already needed so we need to recreate the exception.
assert memoryLimit >= 0;
throw new MemoryLimitException(
e.getMemoryNeeded() + indexMemoryUsage,
memoryLimit + indexMemoryUsage);
e.getMemoryNeeded() + indexMemoryUsage,
memoryLimit + indexMemoryUsage);
}
// Update the memory usage and limit counters.
@@ -342,7 +332,7 @@ public class SeekableXZInputStream extends SeekableInputStream {
// Verify that the Stream Header matches the Stream Footer.
if (!DecoderUtil.areStreamFlagsEqual(streamHeader, streamFooter))
throw new CorruptedInputException(
"XZ Stream Footer does not match Stream Header");
"XZ Stream Footer does not match Stream Header");
// Update the total uncompressed size of the file and check that
// it doesn't overflow.
@@ -354,7 +344,7 @@ public class SeekableXZInputStream extends SeekableInputStream {
blockCount += index.getRecordCount();
if (blockCount < 0)
throw new UnsupportedOptionsException(
"XZ file has over " + Integer.MAX_VALUE + " Blocks");
"XZ file has over " + Integer.MAX_VALUE + " Blocks");
// Add this Stream to the list of Streams.
streams.add(index);
@@ -391,13 +381,11 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Gets the types of integrity checks used in the .xz file.
* Multiple checks are possible only if there are multiple
* concatenated XZ Streams.
* Gets the types of integrity checks used in the .xz file. Multiple checks
* are possible only if there are multiple concatenated XZ Streams.
* <p>
* The returned value has a bit set for every check type that is present.
* For example, if CRC64 and SHA-256 were used, the return value is
* <code>(1&nbsp;&lt;&lt;&nbsp;XZ.CHECK_CRC64)
* For example, if CRC64 and SHA-256 were used, the return value is <code>(1&nbsp;&lt;&lt;&nbsp;XZ.CHECK_CRC64)
* | (1&nbsp;&lt;&lt;&nbsp;XZ.CHECK_SHA256)</code>.
*/
public int getCheckTypes() {
@@ -405,22 +393,21 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Gets the amount of memory in kibibytes (KiB) used by
* the data structures needed to locate the XZ Blocks.
* This is usually useless information but since it is calculated
* for memory usage limit anyway, it is nice to make it available to too.
* Gets the amount of memory in kibibytes (KiB) used by the data structures
* needed to locate the XZ Blocks. This is usually useless information but
* since it is calculated for memory usage limit anyway, it is nice to make
* it available too.
*/
public int getIndexMemoryUsage() {
return indexMemoryUsage;
}
/**
* Gets the uncompressed size of the largest XZ Block in bytes.
* This can be useful if you want to check that the file doesn't
* have huge XZ Blocks which could make seeking to arbitrary offsets
* very slow. Note that huge Blocks don't automatically mean that
* seeking would be slow, for example, seeking to the beginning of
* any Block is always fast.
* Gets the uncompressed size of the largest XZ Block in bytes. This can be
* useful if you want to check that the file doesn't have huge XZ Blocks
* which could make seeking to arbitrary offsets very slow. Note that huge
* Blocks don't automatically mean that seeking would be slow, for example,
* seeking to the beginning of any Block is always fast.
*/
public long getLargestBlockSize() {
return largestBlockSize;
@@ -473,9 +460,9 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Gets the position where the given compressed Block starts in
* the underlying .xz file.
* This information is rarely useful to the users of this class.
* Gets the position where the given compressed Block starts in the
* underlying .xz file. This information is rarely useful to the users of
* this class.
*
* @throws IndexOutOfBoundsException if
* <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
@@ -489,9 +476,9 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Gets the compressed size of the given Block.
* This together with the uncompressed size can be used to calculate
* the compression ratio of the specific Block.
* Gets the compressed size of the given Block. This together with the
* uncompressed size can be used to calculate the compression ratio of the
* specific Block.
*
* @throws IndexOutOfBoundsException if
* <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
@@ -524,8 +511,7 @@ public class SeekableXZInputStream extends SeekableInputStream {
* Gets the number of the Block that contains the byte at the given
* uncompressed position.
*
* @throws IndexOutOfBoundsException if
* <code>pos&nbsp;&lt;&nbsp;0</code> or
* @throws IndexOutOfBoundsException if <code>pos&nbsp;&lt;&nbsp;0</code> or
* <code>pos&nbsp;&gt;=&nbsp;length()</code>.
*
* @since 1.3
@@ -538,8 +524,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
/**
* Decompresses the next byte from this input stream.
*
* @return the next decompressed byte, or <code>-1</code>
* to indicate the end of the compressed stream
* @return the next decompressed byte, or <code>-1</code> to indicate the
* end of the compressed stream
*
* @throws CorruptedInputException
* @throws UnsupportedOptionsException
@@ -556,16 +542,16 @@ public class SeekableXZInputStream extends SeekableInputStream {
/**
* Decompresses into an array of bytes.
* <p>
* If <code>len</code> is zero, no bytes are read and <code>0</code>
* is returned. Otherwise this will try to decompress <code>len</code>
* bytes of uncompressed data. Less than <code>len</code> bytes may
* be read only in the following situations:
* If <code>len</code> is zero, no bytes are read and <code>0</code> is
* returned. Otherwise this will try to decompress <code>len</code> bytes of
* uncompressed data. Less than <code>len</code> bytes may be read only in
* the following situations:
* <ul>
* <li>The end of the compressed data was reached successfully.</li>
* <li>An error is detected after at least one but less than
* <code>len</code> bytes have already been successfully
* decompressed. The next call with non-zero <code>len</code>
* will immediately throw the pending exception.</li>
* <code>len</code> bytes have already been successfully decompressed. The
* next call with non-zero <code>len</code> will immediately throw the
* pending exception.</li>
* <li>An exception is thrown.</li>
* </ul>
*
@@ -573,8 +559,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
* @param off start offset in <code>buf</code>
* @param len maximum number of uncompressed bytes to read
*
* @return number of bytes read, or <code>-1</code> to indicate
* the end of the compressed stream
* @return number of bytes read, or <code>-1</code> to indicate the end of
* the compressed stream
*
* @throws CorruptedInputException
* @throws UnsupportedOptionsException
@@ -639,15 +625,14 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Returns the number of uncompressed bytes that can be read
* without blocking. The value is returned with an assumption
* that the compressed input data will be valid. If the compressed
* data is corrupt, <code>CorruptedInputException</code> may get
* thrown before the number of bytes claimed to be available have
* been read from this input stream.
* Returns the number of uncompressed bytes that can be read without
* blocking. The value is returned with an assumption that the compressed
* input data will be valid. If the compressed data is corrupt,
* <code>CorruptedInputException</code> may get thrown before the number of
* bytes claimed to be available have been read from this input stream.
*
* @return the number of uncompressed bytes that can be read
* without blocking
* @return the number of uncompressed bytes that can be read without
* blocking
*/
public int available() throws IOException {
if (in == null)
@@ -663,8 +648,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Closes the stream and calls <code>in.close()</code>.
* If the stream was already closed, this does nothing.
* Closes the stream and calls <code>in.close()</code>. If the stream was
* already closed, this does nothing.
*
* @throws IOException if thrown by <code>in.close()</code>
*/
@@ -678,8 +663,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Gets the uncompressed size of this input stream. If there are multiple
* XZ Streams, the total uncompressed size of all XZ Streams is returned.
* Gets the uncompressed size of this input stream. If there are multiple XZ
* Streams, the total uncompressed size of all XZ Streams is returned.
*/
public long length() {
return uncompressedSize;
@@ -698,20 +683,19 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Seeks to the specified absolute uncompressed position in the stream.
* This only stores the new position, so this function itself is always
* very fast. The actual seek is done when <code>read</code> is called
* to read at least one byte.
* Seeks to the specified absolute uncompressed position in the stream. This
* only stores the new position, so this function itself is always very
* fast. The actual seek is done when <code>read</code> is called to read at
* least one byte.
* <p>
* Seeking past the end of the stream is possible. In that case
* <code>read</code> will return <code>-1</code> to indicate
* the end of the stream.
* <code>read</code> will return <code>-1</code> to indicate the end of the
* stream.
*
* @param pos new uncompressed read position
*
* @throws XZIOException
* if <code>pos</code> is negative, or
* if stream has been closed
* @throws XZIOException if <code>pos</code> is negative, or if stream has
* been closed
*/
public void seek(long pos) throws IOException {
if (in == null)
@@ -727,10 +711,9 @@ public class SeekableXZInputStream extends SeekableInputStream {
/**
* Seeks to the beginning of the given XZ Block.
*
* @throws XZIOException
* if <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
* <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>,
* or if stream has been closed
* @throws XZIOException if <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
* <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>, or if stream
* has been closed
*
* @since 1.3
*/
@@ -749,8 +732,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Does the actual seeking. This is also called when <code>read</code>
* needs a new Block to decode.
* Does the actual seeking. This is also called when <code>read</code> needs
* a new Block to decode.
*/
private void seek() throws IOException {
// If seek(long) wasn't called, we simply need to get the next Block
@@ -824,7 +807,7 @@ public class SeekableXZInputStream extends SeekableInputStream {
private void locateBlockByPos(BlockInfo info, long pos) {
if (pos < 0 || pos >= uncompressedSize)
throw new IndexOutOfBoundsException(
"Invalid uncompressed position: " + pos);
"Invalid uncompressed position: " + pos);
// Locate the Stream that contains the target position.
IndexDecoder index;
@@ -844,14 +827,14 @@ public class SeekableXZInputStream extends SeekableInputStream {
}
/**
* Locates the given Block and stores information about it
* to <code>info</code>.
* Locates the given Block and stores information about it to
* <code>info</code>.
*/
private void locateBlockByNumber(BlockInfo info, int blockNumber) {
// Validate.
if (blockNumber < 0 || blockNumber >= blockCount)
throw new IndexOutOfBoundsException(
"Invalid XZ Block number: " + blockNumber);
"Invalid XZ Block number: " + blockNumber);
// Skip the search if info already points to the correct Block.
if (info.blockNumber == blockNumber)
@@ -884,8 +867,8 @@ public class SeekableXZInputStream extends SeekableInputStream {
// already needed so we need to recreate the exception.
assert memoryLimit >= 0;
throw new MemoryLimitException(
e.getMemoryNeeded() + indexMemoryUsage,
memoryLimit + indexMemoryUsage);
e.getMemoryNeeded() + indexMemoryUsage,
memoryLimit + indexMemoryUsage);
} catch (IndexIndicatorException e) {
// It cannot be Index so the file must be corrupt.
throw new CorruptedInputException();

View File

@@ -18,11 +18,11 @@ import org.tukaani.xz.index.IndexHash;
import org.tukaani.xz.check.Check;
/**
* Decompresses exactly one XZ Stream in streamed mode (no seeking).
* The decompression stops after the first XZ Stream has been decompressed,
* and the read position in the input stream is left at the first byte
* after the end of the XZ Stream. This can be useful when XZ data has
* been stored inside some other file format or protocol.
* Decompresses exactly one XZ Stream in streamed mode (no seeking). The
* decompression stops after the first XZ Stream has been decompressed, and the
* read position in the input stream is left at the first byte after the end of
* the XZ Stream. This can be useful when XZ data has been stored inside some
* other file format or protocol.
* <p>
* Unless you know what you are doing, don't use this class to decompress
* standalone .xz files. For that purpose, use <code>XZInputStream</code>.
@@ -30,11 +30,11 @@ import org.tukaani.xz.check.Check;
* <h4>When uncompressed size is known beforehand</h4>
* <p>
* If you are decompressing complete XZ streams and your application knows
* exactly how much uncompressed data there should be, it is good to try
* reading one more byte by calling <code>read()</code> and checking
* that it returns <code>-1</code>. This way the decompressor will parse the
* file footers and verify the integrity checks, giving the caller more
* confidence that the uncompressed data is valid.
* exactly how much uncompressed data there should be, it is good to try reading
* one more byte by calling <code>read()</code> and checking that it returns
* <code>-1</code>. This way the decompressor will parse the file footers and
* verify the integrity checks, giving the caller more confidence that the
* uncompressed data is valid.
*
* @see XZInputStream
*/
@@ -52,29 +52,25 @@ public class SingleXZInputStream extends InputStream {
private final byte[] tempBuf = new byte[1];
/**
* Creates a new XZ decompressor that decompresses exactly one
* XZ Stream from <code>in</code> without a memory usage limit.
* Creates a new XZ decompressor that decompresses exactly one XZ Stream
* from <code>in</code> without a memory usage limit.
* <p>
* This constructor reads and parses the XZ Stream Header (12 bytes)
* from <code>in</code>. The header of the first Block is not read
* until <code>read</code> is called.
* This constructor reads and parses the XZ Stream Header (12 bytes) from
* <code>in</code>. The header of the first Block is not read until
* <code>read</code> is called.
*
* @param in input stream from which XZ-compressed
* data is read
* @param in input stream from which XZ-compressed data is read
*
* @throws XZFormatException
* input is not in the XZ format
* @throws XZFormatException input is not in the XZ format
*
* @throws CorruptedInputException
* XZ header CRC32 doesn't match
* @throws CorruptedInputException XZ header CRC32 doesn't match
*
* @throws UnsupportedOptionsException
* XZ header is valid but specifies options
* not supported by this implementation
* @throws UnsupportedOptionsException XZ header is valid but specifies
* options not supported by this implementation
*
* @throws EOFException
* less than 12 bytes of input was available
* from <code>in</code>
* @throws EOFException less than 12 bytes of input was
* available from
* <code>in</code>
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -83,37 +79,32 @@ public class SingleXZInputStream extends InputStream {
}
/**
* Creates a new XZ decompressor that decompresses exactly one
* XZ Stream from <code>in</code> with an optional memory usage limit.
* Creates a new XZ decompressor that decompresses exactly one XZ Stream
* from <code>in</code> with an optional memory usage limit.
* <p>
* This is identical to <code>SingleXZInputStream(InputStream)</code>
* except that this takes also the <code>memoryLimit</code> argument.
* This is identical to <code>SingleXZInputStream(InputStream)</code> except
* that this takes also the <code>memoryLimit</code> argument.
*
* @param in input stream from which XZ-compressed
* data is read
* @param in input stream from which XZ-compressed data is read
*
* @param memoryLimit memory usage limit in kibibytes (KiB)
* or <code>-1</code> to impose no
* memory usage limit
* @param memoryLimit memory usage limit in kibibytes (KiB) or
* <code>-1</code> to impose no memory usage limit
*
* @throws XZFormatException
* input is not in the XZ format
* @throws XZFormatException input is not in the XZ format
*
* @throws CorruptedInputException
* XZ header CRC32 doesn't match
* @throws CorruptedInputException XZ header CRC32 doesn't match
*
* @throws UnsupportedOptionsException
* XZ header is valid but specifies options
* not supported by this implementation
* @throws UnsupportedOptionsException XZ header is valid but specifies
* options not supported by this implementation
*
* @throws EOFException
* less than 12 bytes of input was available
* from <code>in</code>
* @throws EOFException less than 12 bytes of input was
* available from
* <code>in</code>
*
* @throws IOException may be thrown by <code>in</code>
*/
public SingleXZInputStream(InputStream in, int memoryLimit)
throws IOException {
throws IOException {
initialize(in, memoryLimit);
}
@@ -123,7 +114,7 @@ public class SingleXZInputStream extends InputStream {
}
private void initialize(InputStream in, int memoryLimit)
throws IOException {
throws IOException {
byte[] streamHeader = new byte[DecoderUtil.STREAM_HEADER_SIZE];
new DataInputStream(in).readFully(streamHeader);
initialize(in, memoryLimit, streamHeader);
@@ -158,12 +149,12 @@ public class SingleXZInputStream extends InputStream {
/**
* Decompresses the next byte from this input stream.
* <p>
* Reading lots of data with <code>read()</code> from this input stream
* may be inefficient. Wrap it in {@link java.io.BufferedInputStream}
* if you need to read lots of data one byte at a time.
* Reading lots of data with <code>read()</code> from this input stream may
* be inefficient. Wrap it in {@link java.io.BufferedInputStream} if you
* need to read lots of data one byte at a time.
*
* @return the next decompressed byte, or <code>-1</code>
* to indicate the end of the compressed stream
* @return the next decompressed byte, or <code>-1</code> to indicate the
* end of the compressed stream
*
* @throws CorruptedInputException
* @throws UnsupportedOptionsException
@@ -171,8 +162,8 @@ public class SingleXZInputStream extends InputStream {
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or
* corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -183,16 +174,16 @@ public class SingleXZInputStream extends InputStream {
/**
* Decompresses into an array of bytes.
* <p>
* If <code>len</code> is zero, no bytes are read and <code>0</code>
* is returned. Otherwise this will try to decompress <code>len</code>
* bytes of uncompressed data. Less than <code>len</code> bytes may
* be read only in the following situations:
* If <code>len</code> is zero, no bytes are read and <code>0</code> is
* returned. Otherwise this will try to decompress <code>len</code> bytes of
* uncompressed data. Less than <code>len</code> bytes may be read only in
* the following situations:
* <ul>
* <li>The end of the compressed data was reached successfully.</li>
* <li>An error is detected after at least one but less <code>len</code>
* bytes have already been successfully decompressed.
* The next call with non-zero <code>len</code> will immediately
* throw the pending exception.</li>
* bytes have already been successfully decompressed. The next call with
* non-zero <code>len</code> will immediately throw the pending
* exception.</li>
* <li>An exception is thrown.</li>
* </ul>
*
@@ -200,8 +191,8 @@ public class SingleXZInputStream extends InputStream {
* @param off start offset in <code>buf</code>
* @param len maximum number of uncompressed bytes to read
*
* @return number of bytes read, or <code>-1</code> to indicate
* the end of the compressed stream
* @return number of bytes read, or <code>-1</code> to indicate the end of
* the compressed stream
*
* @throws CorruptedInputException
* @throws UnsupportedOptionsException
@@ -209,8 +200,8 @@ public class SingleXZInputStream extends InputStream {
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or
* corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -237,7 +228,7 @@ public class SingleXZInputStream extends InputStream {
if (blockDecoder == null)
try {
blockDecoder = new BlockInputStream(
in, check, memoryLimit, -1, -1);
in, check, memoryLimit, -1, -1);
} catch (IndexIndicatorException e) {
indexHash.validate(in);
validateStreamFooter();
@@ -275,19 +266,18 @@ public class SingleXZInputStream extends InputStream {
streamFooterFlags)
|| indexHash.getIndexSize() != streamFooterFlags.backwardSize)
throw new CorruptedInputException(
"XZ Stream Footer does not match Stream Header");
"XZ Stream Footer does not match Stream Header");
}
/**
* Returns the number of uncompressed bytes that can be read
* without blocking. The value is returned with an assumption
* that the compressed input data will be valid. If the compressed
* data is corrupt, <code>CorruptedInputException</code> may get
* thrown before the number of bytes claimed to be available have
* been read from this input stream.
* Returns the number of uncompressed bytes that can be read without
* blocking. The value is returned with an assumption that the compressed
* input data will be valid. If the compressed data is corrupt,
* <code>CorruptedInputException</code> may get thrown before the number of
* bytes claimed to be available have been read from this input stream.
*
* @return the number of uncompressed bytes that can be read
* without blocking
* @return the number of uncompressed bytes that can be read without
* blocking
*/
public int available() throws IOException {
if (in == null)
@@ -300,8 +290,8 @@ public class SingleXZInputStream extends InputStream {
}
/**
* Closes the stream and calls <code>in.close()</code>.
* If the stream was already closed, this does nothing.
* Closes the stream and calls <code>in.close()</code>. If the stream was
* already closed, this does nothing.
*
* @throws IOException if thrown by <code>in.close()</code>
*/

View File

@@ -9,23 +9,23 @@
package org.tukaani.xz;
/**
* Thrown when compression options not supported by this implementation
* are detected. Some other implementation might support those options.
* Thrown when compression options not supported by this implementation are
* detected. Some other implementation might support those options.
*/
public class UnsupportedOptionsException extends XZIOException {
private static final long serialVersionUID = 3L;
/**
* Creates a new UnsupportedOptionsException with null
* as its error detail message.
* Creates a new UnsupportedOptionsException with null as its error detail
* message.
*/
public UnsupportedOptionsException() {
}
/**
* Creates a new UnsupportedOptionsException with the given
* error detail message.
* Creates a new UnsupportedOptionsException with the given error detail
* message.
*
* @param s error detail message
*/

View File

@@ -14,23 +14,23 @@ package org.tukaani.xz;
public class XZ {
/**
* XZ Header Magic Bytes begin a XZ file.
* This can be useful to detect XZ compressed data.
* XZ Header Magic Bytes begin a XZ file. This can be useful to detect XZ
* compressed data.
*/
public static final byte[] HEADER_MAGIC = {
(byte) 0xFD, '7', 'z', 'X', 'Z', '\0'};
(byte) 0xFD, '7', 'z', 'X', 'Z', '\0' };
/**
* XZ Footer Magic Bytes are the last bytes of a XZ Stream.
*/
public static final byte[] FOOTER_MAGIC = {'Y', 'Z'};
public static final byte[] FOOTER_MAGIC = { 'Y', 'Z' };
/**
* Integrity check ID indicating that no integrity check is calculated.
* <p>
* Omitting the integrity check is strongly discouraged except when
* the integrity of the data will be verified by other means anyway,
* and calculating the check twice would be useless.
* Omitting the integrity check is strongly discouraged except when the
* integrity of the data will be verified by other means anyway, and
* calculating the check twice would be useless.
*/
public static final int CHECK_NONE = 0;

View File

@@ -9,9 +9,8 @@
package org.tukaani.xz;
/**
* Generic {@link java.io.IOException IOException} specific to this package.
* The other IOExceptions in this package extend
* from <code>XZIOException</code>.
* Generic {@link java.io.IOException IOException} specific to this package. The
* other IOExceptions in this package extend from <code>XZIOException</code>.
*/
public class XZIOException extends java.io.IOException {

View File

@@ -17,9 +17,9 @@ import org.tukaani.xz.common.DecoderUtil;
/**
* Decompresses a .xz file in streamed mode (no seeking).
* <p>
* Use this to decompress regular standalone .xz files. This reads from
* its input stream until the end of the input or until an error occurs.
* This supports decompressing concatenated .xz files.
* Use this to decompress regular standalone .xz files. This reads from its
* input stream until the end of the input or until an error occurs. This
* supports decompressing concatenated .xz files.
*
* <h4>Typical use cases</h4>
* <p>
@@ -30,18 +30,18 @@ import org.tukaani.xz.common.DecoderUtil;
* XZInputStream inxz = new XZInputStream(infile);
* </pre></blockquote>
* <p>
* It's important to keep in mind that decompressor memory usage depends
* on the settings used to compress the file. The worst-case memory usage
* of XZInputStream is currently 1.5&nbsp;GiB. Still, very few files will
* require more than about 65&nbsp;MiB because that's how much decompressing
* a file created with the highest preset level will need, and only a few
* people use settings other than the predefined presets.
* It's important to keep in mind that decompressor memory usage depends on the
* settings used to compress the file. The worst-case memory usage of
* XZInputStream is currently 1.5&nbsp;GiB. Still, very few files will require
* more than about 65&nbsp;MiB because that's how much decompressing a file
* created with the highest preset level will need, and only a few people use
* settings other than the predefined presets.
* <p>
* It is possible to specify a memory usage limit for
* <code>XZInputStream</code>. If decompression requires more memory than
* the specified limit, MemoryLimitException will be thrown when reading
* from the stream. For example, the following sets the memory usage limit
* to 100&nbsp;MiB:
* <code>XZInputStream</code>. If decompression requires more memory than the
* specified limit, MemoryLimitException will be thrown when reading from the
* stream. For example, the following sets the memory usage limit to
* 100&nbsp;MiB:
* <p>
* <blockquote><pre>
* InputStream infile = new FileInputStream("foo.xz");
@@ -50,13 +50,12 @@ import org.tukaani.xz.common.DecoderUtil;
*
* <h4>When uncompressed size is known beforehand</h4>
* <p>
* If you are decompressing complete files and your application knows
* exactly how much uncompressed data there should be, it is good to try
* reading one more byte by calling <code>read()</code> and checking
* that it returns <code>-1</code>. This way the decompressor will parse the
* file footers and verify the integrity checks, giving the caller more
* confidence that the uncompressed data is valid. (This advice seems to
* apply to
* If you are decompressing complete files and your application knows exactly
* how much uncompressed data there should be, it is good to try reading one
* more byte by calling <code>read()</code> and checking that it returns
* <code>-1</code>. This way the decompressor will parse the file footers and
* verify the integrity checks, giving the caller more confidence that the
* uncompressed data is valid. (This advice seems to apply to
* {@link java.util.zip.GZIPInputStream java.util.zip.GZIPInputStream} too.)
*
* @see SingleXZInputStream
@@ -74,26 +73,22 @@ public class XZInputStream extends InputStream {
/**
* Creates a new XZ decompressor without a memory usage limit.
* <p>
* This constructor reads and parses the XZ Stream Header (12 bytes)
* from <code>in</code>. The header of the first Block is not read
* until <code>read</code> is called.
* This constructor reads and parses the XZ Stream Header (12 bytes) from
* <code>in</code>. The header of the first Block is not read until
* <code>read</code> is called.
*
* @param in input stream from which XZ-compressed
* data is read
* @param in input stream from which XZ-compressed data is read
*
* @throws XZFormatException
* input is not in the XZ format
* @throws XZFormatException input is not in the XZ format
*
* @throws CorruptedInputException
* XZ header CRC32 doesn't match
* @throws CorruptedInputException XZ header CRC32 doesn't match
*
* @throws UnsupportedOptionsException
* XZ header is valid but specifies options
* not supported by this implementation
* @throws UnsupportedOptionsException XZ header is valid but specifies
* options not supported by this implementation
*
* @throws EOFException
* less than 12 bytes of input was available
* from <code>in</code>
* @throws EOFException less than 12 bytes of input was
* available from
* <code>in</code>
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -104,29 +99,24 @@ public class XZInputStream extends InputStream {
/**
* Creates a new XZ decompressor with an optional memory usage limit.
* <p>
* This is identical to <code>XZInputStream(InputStream)</code> except
* that this takes also the <code>memoryLimit</code> argument.
* This is identical to <code>XZInputStream(InputStream)</code> except that
* this takes also the <code>memoryLimit</code> argument.
*
* @param in input stream from which XZ-compressed
* data is read
* @param in input stream from which XZ-compressed data is read
*
* @param memoryLimit memory usage limit in kibibytes (KiB)
* or <code>-1</code> to impose no
* memory usage limit
* @param memoryLimit memory usage limit in kibibytes (KiB) or
* <code>-1</code> to impose no memory usage limit
*
* @throws XZFormatException
* input is not in the XZ format
* @throws XZFormatException input is not in the XZ format
*
* @throws CorruptedInputException
* XZ header CRC32 doesn't match
* @throws CorruptedInputException XZ header CRC32 doesn't match
*
* @throws UnsupportedOptionsException
* XZ header is valid but specifies options
* not supported by this implementation
* @throws UnsupportedOptionsException XZ header is valid but specifies
* options not supported by this implementation
*
* @throws EOFException
* less than 12 bytes of input was available
* from <code>in</code>
* @throws EOFException less than 12 bytes of input was
* available from
* <code>in</code>
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -139,12 +129,12 @@ public class XZInputStream extends InputStream {
/**
* Decompresses the next byte from this input stream.
* <p>
* Reading lots of data with <code>read()</code> from this input stream
* may be inefficient. Wrap it in {@link java.io.BufferedInputStream}
* if you need to read lots of data one byte at a time.
* Reading lots of data with <code>read()</code> from this input stream may
* be inefficient. Wrap it in {@link java.io.BufferedInputStream} if you
* need to read lots of data one byte at a time.
*
* @return the next decompressed byte, or <code>-1</code>
* to indicate the end of the compressed stream
* @return the next decompressed byte, or <code>-1</code> to indicate the
* end of the compressed stream
*
* @throws CorruptedInputException
* @throws UnsupportedOptionsException
@@ -152,8 +142,8 @@ public class XZInputStream extends InputStream {
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or
* corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -164,16 +154,16 @@ public class XZInputStream extends InputStream {
/**
* Decompresses into an array of bytes.
* <p>
* If <code>len</code> is zero, no bytes are read and <code>0</code>
* is returned. Otherwise this will try to decompress <code>len</code>
* bytes of uncompressed data. Less than <code>len</code> bytes may
* be read only in the following situations:
* If <code>len</code> is zero, no bytes are read and <code>0</code> is
* returned. Otherwise this will try to decompress <code>len</code> bytes of
* uncompressed data. Less than <code>len</code> bytes may be read only in
* the following situations:
* <ul>
* <li>The end of the compressed data was reached successfully.</li>
* <li>An error is detected after at least one but less <code>len</code>
* bytes have already been successfully decompressed.
* The next call with non-zero <code>len</code> will immediately
* throw the pending exception.</li>
* bytes have already been successfully decompressed. The next call with
* non-zero <code>len</code> will immediately throw the pending
* exception.</li>
* <li>An exception is thrown.</li>
* </ul>
*
@@ -181,8 +171,8 @@ public class XZInputStream extends InputStream {
* @param off start offset in <code>buf</code>
* @param len maximum number of uncompressed bytes to read
*
* @return number of bytes read, or <code>-1</code> to indicate
* the end of the compressed stream
* @return number of bytes read, or <code>-1</code> to indicate the end of
* the compressed stream
*
* @throws CorruptedInputException
* @throws UnsupportedOptionsException
@@ -190,8 +180,8 @@ public class XZInputStream extends InputStream {
*
* @throws XZIOException if the stream has been closed
*
* @throws EOFException
* compressed input is truncated or corrupt
* @throws EOFException compressed input is truncated or
* corrupt
*
* @throws IOException may be thrown by <code>in</code>
*/
@@ -271,20 +261,19 @@ public class XZInputStream extends InputStream {
// Since this isn't the first .xz Stream, it is more
// logical to tell that the data is corrupt.
throw new CorruptedInputException(
"Garbage after a valid XZ Stream");
"Garbage after a valid XZ Stream");
}
}
/**
* Returns the number of uncompressed bytes that can be read
* without blocking. The value is returned with an assumption
* that the compressed input data will be valid. If the compressed
* data is corrupt, <code>CorruptedInputException</code> may get
* thrown before the number of bytes claimed to be available have
* been read from this input stream.
* Returns the number of uncompressed bytes that can be read without
* blocking. The value is returned with an assumption that the compressed
* input data will be valid. If the compressed data is corrupt,
* <code>CorruptedInputException</code> may get thrown before the number of
* bytes claimed to be available have been read from this input stream.
*
* @return the number of uncompressed bytes that can be read
* without blocking
* @return the number of uncompressed bytes that can be read without
* blocking
*/
public int available() throws IOException {
if (in == null)
@@ -297,8 +286,8 @@ public class XZInputStream extends InputStream {
}
/**
* Closes the stream and calls <code>in.close()</code>.
* If the stream was already closed, this does nothing.
* Closes the stream and calls <code>in.close()</code>. If the stream was
* already closed, this does nothing.
*
* @throws IOException if thrown by <code>in.close()</code>
*/

View File

@@ -20,25 +20,24 @@ import org.tukaani.xz.index.IndexEncoder;
*
* <h4>Examples</h4>
* <p>
* Getting an output stream to compress with LZMA2 using the default
* settings and the default integrity check type (CRC64):
* Getting an output stream to compress with LZMA2 using the default settings
* and the default integrity check type (CRC64):
* <p>
* <blockquote><pre>
* FileOutputStream outfile = new FileOutputStream("foo.xz");
* XZOutputStream outxz = new XZOutputStream(outfile, new LZMA2Options());
* </pre></blockquote>
* <p>
* Using the preset level <code>8</code> for LZMA2 (the default
* is <code>6</code>) and SHA-256 instead of CRC64 for integrity checking:
* Using the preset level <code>8</code> for LZMA2 (the default is
* <code>6</code>) and SHA-256 instead of CRC64 for integrity checking:
* <p>
* <blockquote><pre>
* XZOutputStream outxz = new XZOutputStream(outfile, new LZMA2Options(8),
* XZ.CHECK_SHA256);
* </pre></blockquote>
* <p>
* Using the x86 BCJ filter together with LZMA2 to compress x86 executables
* and printing the memory usage information before creating the
* XZOutputStream:
* Using the x86 BCJ filter together with LZMA2 to compress x86 executables and
* printing the memory usage information before creating the XZOutputStream:
* <p>
* <blockquote><pre>
* X86Options x86 = new X86Options();
@@ -64,9 +63,9 @@ public class XZOutputStream extends FinishableOutputStream {
private FilterEncoder[] filters;
/**
* True if the current filter chain supports flushing.
* If it doesn't support flushing, <code>flush()</code>
* will use <code>endBlock()</code> as a fallback.
* True if the current filter chain supports flushing. If it doesn't support
* flushing, <code>flush()</code> will use <code>endBlock()</code> as a
* fallback.
*/
private boolean filtersSupportFlushing;
@@ -76,88 +75,80 @@ public class XZOutputStream extends FinishableOutputStream {
private final byte[] tempBuf = new byte[1];
/**
* Creates a new XZ compressor using one filter and CRC64 as
* the integrity check. This constructor is equivalent to passing
* a single-member FilterOptions array to
* Creates a new XZ compressor using one filter and CRC64 as the integrity
* check. This constructor is equivalent to passing a single-member
* FilterOptions array to
* <code>XZOutputStream(OutputStream, FilterOptions[])</code>.
*
* @param out output stream to which the compressed data
* will be written
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions
* filter options to use
* @param filterOptions filter options to use
*
* @throws UnsupportedOptionsException
* invalid filter chain
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions filterOptions)
throws IOException {
throws IOException {
this(out, filterOptions, XZ.CHECK_CRC64);
}
/**
* Creates a new XZ compressor using one filter and the specified
* integrity check type. This constructor is equivalent to
* passing a single-member FilterOptions array to
* Creates a new XZ compressor using one filter and the specified integrity
* check type. This constructor is equivalent to passing a single-member
* FilterOptions array to
* <code>XZOutputStream(OutputStream, FilterOptions[], int)</code>.
*
* @param out output stream to which the compressed data
* will be written
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions
* filter options to use
* @param filterOptions filter options to use
*
* @param checkType type of the integrity check,
* for example XZ.CHECK_CRC32
* @param checkType type of the integrity check, for example
* XZ.CHECK_CRC32
*
* @throws UnsupportedOptionsException
* invalid filter chain
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions filterOptions,
int checkType) throws IOException {
this(out, new FilterOptions[] {filterOptions}, checkType);
this(out, new FilterOptions[] { filterOptions }, checkType);
}
/**
* Creates a new XZ compressor using 1-4 filters and CRC64 as
* the integrity check. This constructor is equivalent
* Creates a new XZ compressor using 1-4 filters and CRC64 as the integrity
* check. This constructor is equivalent
* <code>XZOutputStream(out, filterOptions, XZ.CHECK_CRC64)</code>.
*
* @param out output stream to which the compressed data
* will be written
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions
* array of filter options to use
* @param filterOptions array of filter options to use
*
* @throws UnsupportedOptionsException
* invalid filter chain
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions[] filterOptions)
throws IOException {
throws IOException {
this(out, filterOptions, XZ.CHECK_CRC64);
}
/**
* Creates a new XZ compressor using 1-4 filters and the specified
* integrity check type.
* Creates a new XZ compressor using 1-4 filters and the specified integrity
* check type.
*
* @param out output stream to which the compressed data
* will be written
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions
* array of filter options to use
* @param filterOptions array of filter options to use
*
* @param checkType type of the integrity check,
* for example XZ.CHECK_CRC32
* @param checkType type of the integrity check, for example
* XZ.CHECK_CRC32
*
* @throws UnsupportedOptionsException
* invalid filter chain
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
@@ -173,19 +164,17 @@ public class XZOutputStream extends FinishableOutputStream {
}
/**
* Updates the filter chain with a single filter.
* This is equivalent to passing a single-member FilterOptions array
* to <code>updateFilters(FilterOptions[])</code>.
* Updates the filter chain with a single filter. This is equivalent to
* passing a single-member FilterOptions array to
* <code>updateFilters(FilterOptions[])</code>.
*
* @param filterOptions
* new filter to use
* @param filterOptions new filter to use
*
* @throws UnsupportedOptionsException
* unsupported filter chain, or trying to change
* the filter chain in the middle of a Block
* @throws UnsupportedOptionsException unsupported filter chain, or trying
* to change the filter chain in the middle of a Block
*/
public void updateFilters(FilterOptions filterOptions)
throws XZIOException {
throws XZIOException {
FilterOptions[] opts = new FilterOptions[1];
opts[0] = filterOptions;
updateFilters(opts);
@@ -194,27 +183,25 @@ public class XZOutputStream extends FinishableOutputStream {
/**
* Updates the filter chain with 1-4 filters.
* <p>
* Currently this cannot be used to update e.g. LZMA2 options in the
* middle of a XZ Block. Use <code>endBlock()</code> to finish the
* current XZ Block before calling this function. The new filter chain
* will then be used for the next XZ Block.
* Currently this cannot be used to update e.g. LZMA2 options in the middle
* of a XZ Block. Use <code>endBlock()</code> to finish the current XZ Block
* before calling this function. The new filter chain will then be used for
* the next XZ Block.
*
* @param filterOptions
* new filter chain to use
* @param filterOptions new filter chain to use
*
* @throws UnsupportedOptionsException
* unsupported filter chain, or trying to change
* the filter chain in the middle of a Block
* @throws UnsupportedOptionsException unsupported filter chain, or trying
* to change the filter chain in the middle of a Block
*/
public void updateFilters(FilterOptions[] filterOptions)
throws XZIOException {
throws XZIOException {
if (blockEncoder != null)
throw new UnsupportedOptionsException("Changing filter options "
+ "in the middle of a XZ Block not implemented");
if (filterOptions.length < 1 || filterOptions.length > 4)
throw new UnsupportedOptionsException(
"XZ filter chain must be 1-4 filters");
"XZ filter chain must be 1-4 filters");
filtersSupportFlushing = true;
FilterEncoder[] newFilters = new FilterEncoder[filterOptions.length];
@@ -230,12 +217,10 @@ public class XZOutputStream extends FinishableOutputStream {
/**
* Writes one byte to be compressed.
*
* @throws XZIOException
* XZ Stream has grown too big
* @throws XZIOException XZ Stream has grown too big
*
* @throws XZIOException
* <code>finish()</code> or <code>close()</code>
* was already called
* @throws XZIOException <code>finish()</code> or <code>close()</code> was
* already called
*
* @throws IOException may be thrown by the underlying output stream
*/
@@ -245,26 +230,22 @@ public class XZOutputStream extends FinishableOutputStream {
}
/**
* Writes an array of bytes to be compressed.
* The compressors tend to do internal buffering and thus the written
* data won't be readable from the compressed output immediately.
* Use <code>flush()</code> to force everything written so far to
* be written to the underlaying output stream, but be aware that
* flushing reduces compression ratio.
* Writes an array of bytes to be compressed. The compressors tend to do
* internal buffering and thus the written data won't be readable from the
* compressed output immediately. Use <code>flush()</code> to force
* everything written so far to be written to the underlaying output stream,
* but be aware that flushing reduces compression ratio.
*
* @param buf buffer of bytes to be written
* @param off start offset in <code>buf</code>
* @param len number of bytes to write
*
* @throws XZIOException
* XZ Stream has grown too big: total file size
* about 8&nbsp;EiB or the Index field exceeds
* 16&nbsp;GiB; you shouldn't reach these sizes
* in practice
* @throws XZIOException XZ Stream has grown too big: total file size about
* 8&nbsp;EiB or the Index field exceeds 16&nbsp;GiB; you shouldn't reach
* these sizes in practice
*
* @throws XZIOException
* <code>finish()</code> or <code>close()</code>
* was already called and len &gt; 0
* @throws XZIOException <code>finish()</code> or <code>close()</code> was
* already called and len &gt; 0
*
* @throws IOException may be thrown by the underlying output stream
*/
@@ -290,27 +271,25 @@ public class XZOutputStream extends FinishableOutputStream {
}
/**
* Finishes the current XZ Block (but not the whole XZ Stream).
* This doesn't flush the stream so it's possible that not all data will
* be decompressible from the output stream when this function returns.
* Call also <code>flush()</code> if flushing is wanted in addition to
* finishing the current XZ Block.
* Finishes the current XZ Block (but not the whole XZ Stream). This doesn't
* flush the stream so it's possible that not all data will be
* decompressible from the output stream when this function returns. Call
* also <code>flush()</code> if flushing is wanted in addition to finishing
* the current XZ Block.
* <p>
* If there is no unfinished Block open, this function will do nothing.
* (No empty XZ Block will be created.)
* If there is no unfinished Block open, this function will do nothing. (No
* empty XZ Block will be created.)
* <p>
* This function can be useful, for example, to create
* random-accessible .xz files.
* This function can be useful, for example, to create random-accessible .xz
* files.
* <p>
* Starting a new XZ Block means that the encoder state is reset.
* Doing this very often will increase the size of the compressed
* file a lot (more than plain <code>flush()</code> would do).
* Starting a new XZ Block means that the encoder state is reset. Doing this
* very often will increase the size of the compressed file a lot (more than
* plain <code>flush()</code> would do).
*
* @throws XZIOException
* XZ Stream has grown too big
* @throws XZIOException XZ Stream has grown too big
*
* @throws XZIOException
* stream finished or closed
* @throws XZIOException stream finished or closed
*
* @throws IOException may be thrown by the underlying output stream
*/
@@ -337,24 +316,21 @@ public class XZOutputStream extends FinishableOutputStream {
}
/**
* Flushes the encoder and calls <code>out.flush()</code>.
* All buffered pending data will then be decompressible from
* the output stream.
* Flushes the encoder and calls <code>out.flush()</code>. All buffered
* pending data will then be decompressible from the output stream.
* <p>
* Calling this function very often may increase the compressed
* file size a lot. The filter chain options may affect the size
* increase too. For example, with LZMA2 the HC4 match finder has
* smaller penalty with flushing than BT4.
* Calling this function very often may increase the compressed file size a
* lot. The filter chain options may affect the size increase too. For
* example, with LZMA2 the HC4 match finder has smaller penalty with
* flushing than BT4.
* <p>
* Some filters don't support flushing. If the filter chain has
* such a filter, <code>flush()</code> will call <code>endBlock()</code>
* before flushing.
* Some filters don't support flushing. If the filter chain has such a
* filter, <code>flush()</code> will call <code>endBlock()</code> before
* flushing.
*
* @throws XZIOException
* XZ Stream has grown too big
* @throws XZIOException XZ Stream has grown too big
*
* @throws XZIOException
* stream finished or closed
* @throws XZIOException stream finished or closed
*
* @throws IOException may be thrown by the underlying output stream
*/
@@ -384,20 +360,19 @@ public class XZOutputStream extends FinishableOutputStream {
}
/**
* Finishes compression without closing the underlying stream.
* No more data can be written to this stream after finishing
* (calling <code>write</code> with an empty buffer is OK).
* Finishes compression without closing the underlying stream. No more data
* can be written to this stream after finishing (calling <code>write</code>
* with an empty buffer is OK).
* <p>
* Repeated calls to <code>finish()</code> do nothing unless
* an exception was thrown by this stream earlier. In that case
* the same exception is thrown again.
* Repeated calls to <code>finish()</code> do nothing unless an exception
* was thrown by this stream earlier. In that case the same exception is
* thrown again.
* <p>
* After finishing, the stream may be closed normally with
* <code>close()</code>. If the stream will be closed anyway, there
* usually is no need to call <code>finish()</code> separately.
* <code>close()</code>. If the stream will be closed anyway, there usually
* is no need to call <code>finish()</code> separately.
*
* @throws XZIOException
* XZ Stream has grown too big
* @throws XZIOException XZ Stream has grown too big
*
* @throws IOException may be thrown by the underlying output stream
*/
@@ -423,14 +398,13 @@ public class XZOutputStream extends FinishableOutputStream {
}
/**
* Finishes compression and closes the underlying stream.
* The underlying stream <code>out</code> is closed even if finishing
* fails. If both finishing and closing fail, the exception thrown
* by <code>finish()</code> is thrown and the exception from the failed
* <code>out.close()</code> is lost.
* Finishes compression and closes the underlying stream. The underlying
* stream <code>out</code> is closed even if finishing fails. If both
* finishing and closing fail, the exception thrown by <code>finish()</code>
* is thrown and the exception from the failed <code>out.close()</code> is
* lost.
*
* @throws XZIOException
* XZ Stream has grown too big
* @throws XZIOException XZ Stream has grown too big
*
* @throws IOException may be thrown by the underlying output stream
*/

View File

@@ -23,10 +23,10 @@ public class CRC32 extends Check {
public byte[] finish() {
long value = state.getValue();
byte[] buf = {(byte) (value),
(byte) (value >>> 8),
(byte) (value >>> 16),
(byte) (value >>> 24)};
byte[] buf = { (byte) (value),
(byte) (value >>> 8),
(byte) (value >>> 16),
(byte) (value >>> 24) };
state.reset();
return buf;
}

View File

@@ -33,27 +33,27 @@ public abstract class Check {
}
public static Check getInstance(int checkType)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
switch (checkType) {
case XZ.CHECK_NONE:
return new None();
case XZ.CHECK_NONE:
return new None();
case XZ.CHECK_CRC32:
return new CRC32();
case XZ.CHECK_CRC32:
return new CRC32();
case XZ.CHECK_CRC64:
return new CRC64();
case XZ.CHECK_CRC64:
return new CRC64();
case XZ.CHECK_SHA256:
try {
return new SHA256();
} catch (java.security.NoSuchAlgorithmException e) {
}
case XZ.CHECK_SHA256:
try {
return new SHA256();
} catch (java.security.NoSuchAlgorithmException e) {
}
break;
break;
}
throw new UnsupportedOptionsException(
"Unsupported Check ID " + checkType);
"Unsupported Check ID " + checkType);
}
}

View File

@@ -33,7 +33,7 @@ public class DecoderUtil extends Util {
}
public static StreamFlags decodeStreamHeader(byte[] buf)
throws IOException {
throws IOException {
for (int i = 0; i < XZ.HEADER_MAGIC.length; ++i)
if (buf[i] != XZ.HEADER_MAGIC[i])
throw new XZFormatException();
@@ -46,12 +46,12 @@ public class DecoderUtil extends Util {
return decodeStreamFlags(buf, XZ.HEADER_MAGIC.length);
} catch (UnsupportedOptionsException e) {
throw new UnsupportedOptionsException(
"Unsupported options in XZ Stream Header");
"Unsupported options in XZ Stream Header");
}
}
public static StreamFlags decodeStreamFooter(byte[] buf)
throws IOException {
throws IOException {
if (buf[10] != XZ.FOOTER_MAGIC[0] || buf[11] != XZ.FOOTER_MAGIC[1])
// NOTE: The exception could be XZFormatException too.
// It depends on the situation which one is better.
@@ -65,7 +65,7 @@ public class DecoderUtil extends Util {
streamFlags = decodeStreamFlags(buf, 8);
} catch (UnsupportedOptionsException e) {
throw new UnsupportedOptionsException(
"Unsupported options in XZ Stream Footer");
"Unsupported options in XZ Stream Footer");
}
streamFlags.backwardSize = 0;
@@ -78,7 +78,7 @@ public class DecoderUtil extends Util {
}
private static StreamFlags decodeStreamFlags(byte[] buf, int off)
throws UnsupportedOptionsException {
throws UnsupportedOptionsException {
if (buf[off] != 0x00 || (buf[off + 1] & 0xFF) >= 0x10)
throw new UnsupportedOptionsException();

View File

@@ -15,7 +15,7 @@ import java.util.zip.CRC32;
public class EncoderUtil extends Util {
public static void writeCRC32(OutputStream out, byte[] buf)
throws IOException {
throws IOException {
CRC32 crc32 = new CRC32();
crc32.update(buf);
long value = crc32.getValue();
@@ -25,7 +25,7 @@ public class EncoderUtil extends Util {
}
public static void encodeVLI(OutputStream out, long num)
throws IOException {
throws IOException {
while (num >= 0x80) {
out.write((byte) (num | 0x80));
num >>>= 7;

View File

@@ -40,7 +40,7 @@ public class IndexDecoder extends IndexBase {
public IndexDecoder(SeekableInputStream in, StreamFlags streamFooterFlags,
long streamPadding, int memoryLimit)
throws IOException {
throws IOException {
super(new CorruptedInputException("XZ Index is corrupt"));
this.streamFlags = streamFooterFlags;
this.streamPadding = streamPadding;

View File

@@ -25,7 +25,7 @@ public class IndexEncoder extends IndexBase {
}
public void add(long unpaddedSize, long uncompressedSize)
throws XZIOException {
throws XZIOException {
super.add(unpaddedSize, uncompressedSize);
records.add(new IndexRecord(unpaddedSize, uncompressedSize));
}

View File

@@ -33,7 +33,7 @@ public class IndexHash extends IndexBase {
}
public void add(long unpaddedSize, long uncompressedSize)
throws XZIOException {
throws XZIOException {
super.add(unpaddedSize, uncompressedSize);
ByteBuffer buf = ByteBuffer.allocate(2 * 8);

View File

@@ -28,8 +28,8 @@ final class HC4 extends LZEncoder {
}
/**
* Creates a new LZEncoder with the HC4 match finder.
* See <code>LZEncoder.getInstance</code> for parameter descriptions.
* Creates a new LZEncoder with the HC4 match finder. See
* <code>LZEncoder.getInstance</code> for parameter descriptions.
*/
HC4(int dictSize, int beforeSizeMin, int readAheadMax,
int niceLen, int matchLenMax, int depthLimit) {
@@ -54,8 +54,8 @@ final class HC4 extends LZEncoder {
}
/**
* Moves to the next byte, checks that there is enough available space,
* and possibly normalizes the hash tables and the hash chain.
* Moves to the next byte, checks that there is enough available space, and
* possibly normalizes the hash tables and the hash chain.
*
* @return number of bytes available, including the current byte
*/

View File

@@ -104,7 +104,7 @@ public final class LZDecoder {
}
public void copyUncompressed(DataInputStream inData, int len)
throws IOException {
throws IOException {
int copySize = Math.min(buf.length - pos, len);
inData.readFully(buf, pos, copySize);
pos += copySize;

View File

@@ -18,16 +18,16 @@ public abstract class LZEncoder {
public static final int MF_BT4 = 0x14;
/**
* Number of bytes to keep available before the current byte
* when moving the LZ window.
* Number of bytes to keep available before the current byte when moving the
* LZ window.
*/
private final int keepSizeBefore;
/**
* Number of bytes that must be available, the current byte included,
* to make hasEnoughData return true. Flushing and finishing are
* naturally exceptions to this since there cannot be any data after
* the end of the uncompressed input.
* Number of bytes that must be available, the current byte included, to
* make hasEnoughData return true. Flushing and finishing are naturally
* exceptions to this since there cannot be any data after the end of the
* uncompressed input.
*/
private final int keepSizeAfter;
@@ -54,8 +54,8 @@ public abstract class LZEncoder {
* Gets the size of the LZ window buffer that needs to be allocated.
*/
private static int getBufSize(
int dictSize, int extraSizeBefore, int extraSizeAfter,
int matchLenMax) {
int dictSize, int extraSizeBefore, int extraSizeAfter,
int matchLenMax) {
int keepSizeBefore = extraSizeBefore + dictSize;
int keepSizeAfter = extraSizeAfter + matchLenMax;
int reserveSize = Math.min(dictSize / 2 + (256 << 10), 512 << 20);
@@ -63,27 +63,27 @@ public abstract class LZEncoder {
}
/**
* Gets approximate memory usage of the LZEncoder base structure and
* the match finder as kibibytes.
* Gets approximate memory usage of the LZEncoder base structure and the
* match finder as kibibytes.
*/
public static int getMemoryUsage(
int dictSize, int extraSizeBefore, int extraSizeAfter,
int matchLenMax, int mf) {
int dictSize, int extraSizeBefore, int extraSizeAfter,
int matchLenMax, int mf) {
// Buffer size + a little extra
int m = getBufSize(dictSize, extraSizeBefore, extraSizeAfter,
matchLenMax) / 1024 + 10;
switch (mf) {
case MF_HC4:
m += HC4.getMemoryUsage(dictSize);
break;
case MF_HC4:
m += HC4.getMemoryUsage(dictSize);
break;
case MF_BT4:
m += BT4.getMemoryUsage(dictSize);
break;
case MF_BT4:
m += BT4.getMemoryUsage(dictSize);
break;
default:
throw new IllegalArgumentException();
default:
throw new IllegalArgumentException();
}
return m;
@@ -94,17 +94,15 @@ public abstract class LZEncoder {
* <p>
* @param dictSize dictionary size
*
* @param extraSizeBefore
* number of bytes to keep available in the
* history in addition to dictSize
* @param extraSizeBefore number of bytes to keep available in the history
* in addition to dictSize
*
* @param extraSizeAfter
* number of bytes that must be available
* after current position + matchLenMax
* @param extraSizeAfter number of bytes that must be available after
* current position + matchLenMax
*
* @param niceLen if a match of at least <code>niceLen</code>
* bytes is found, be happy with it and don't
* stop looking for longer matches
* @param niceLen if a match of at least <code>niceLen</code> bytes
* is
* found, be happy with it and don't stop looking for longer matches
*
* @param matchLenMax don't test for matches longer than
* <code>matchLenMax</code> bytes
@@ -114,16 +112,16 @@ public abstract class LZEncoder {
* @param depthLimit match finder search depth limit
*/
public static LZEncoder getInstance(
int dictSize, int extraSizeBefore, int extraSizeAfter,
int niceLen, int matchLenMax, int mf, int depthLimit) {
int dictSize, int extraSizeBefore, int extraSizeAfter,
int niceLen, int matchLenMax, int mf, int depthLimit) {
switch (mf) {
case MF_HC4:
return new HC4(dictSize, extraSizeBefore, extraSizeAfter,
niceLen, matchLenMax, depthLimit);
case MF_HC4:
return new HC4(dictSize, extraSizeBefore, extraSizeAfter,
niceLen, matchLenMax, depthLimit);
case MF_BT4:
return new BT4(dictSize, extraSizeBefore, extraSizeAfter,
niceLen, matchLenMax, depthLimit);
case MF_BT4:
return new BT4(dictSize, extraSizeBefore, extraSizeAfter,
niceLen, matchLenMax, depthLimit);
}
throw new IllegalArgumentException();
@@ -145,9 +143,9 @@ public abstract class LZEncoder {
}
/**
* Sets a preset dictionary. If a preset dictionary is wanted, this
* function must be called immediately after creating the LZEncoder
* before any data has been encoded.
* Sets a preset dictionary. If a preset dictionary is wanted, this function
* must be called immediately after creating the LZEncoder before any data
* has been encoded.
*/
public void setPresetDict(int dictSize, byte[] presetDict) {
assert !isStarted();
@@ -165,8 +163,8 @@ public abstract class LZEncoder {
}
/**
* Moves data from the end of the buffer to the beginning, discarding
* old data and making space for new input.
* Moves data from the end of the buffer to the beginning, discarding old
* data and making space for new input.
*/
private void moveWindow() {
// Align the move to a multiple of 16 bytes. LZMA2 needs this
@@ -212,8 +210,8 @@ public abstract class LZEncoder {
}
/**
* Process pending bytes remaining from preset dictionary initialization
* or encoder flush operation.
* Process pending bytes remaining from preset dictionary initialization or
* encoder flush operation.
*/
private void processPendingBytes() {
// After flushing or setting a preset dictionary there will be
@@ -234,16 +232,16 @@ public abstract class LZEncoder {
}
/**
* Returns true if at least one byte has already been run through
* the match finder.
* Returns true if at least one byte has already been run through the match
* finder.
*/
public boolean isStarted() {
return readPos != -1;
}
/**
* Marks that all the input needs to be made available in
* the encoded output.
* Marks that all the input needs to be made available in the encoded
* output.
*/
public void setFlushing() {
readLimit = writePos - 1;
@@ -251,8 +249,8 @@ public abstract class LZEncoder {
}
/**
* Marks that there is no more input remaining. The read position
* can be advanced until the end of the data.
* Marks that there is no more input remaining. The read position can be
* advanced until the end of the data.
*/
public void setFinishing() {
readLimit = writePos - 1;
@@ -261,15 +259,15 @@ public abstract class LZEncoder {
}
/**
* Tests if there is enough input available to let the caller encode
* at least one more byte.
* Tests if there is enough input available to let the caller encode at
* least one more byte.
*/
public boolean hasEnoughData(int alreadyReadLen) {
return readPos - alreadyReadLen < readLimit;
}
public void copyUncompressed(OutputStream out, int backward, int len)
throws IOException {
throws IOException {
out.write(buf, readPos + 1 - backward, len);
}
@@ -277,8 +275,8 @@ public abstract class LZEncoder {
* Get the number of bytes available, including the current byte.
* <p>
* Note that the result is undefined if <code>getMatches</code> or
* <code>skip</code> hasn't been called yet and no preset dictionary
* is being used.
* <code>skip</code> hasn't been called yet and no preset dictionary is
* being used.
*/
public int getAvail() {
assert isStarted();
@@ -296,9 +294,8 @@ public abstract class LZEncoder {
/**
* Gets the byte from the given backward offset.
* <p>
* The current byte is at <code>0</code>, the previous byte
* at <code>1</code> etc. To get a byte at zero-based distance,
* use <code>getByte(dist + 1)<code>.
* The current byte is at <code>0</code>, the previous byte at
* <code>1</code> etc. To get a byte at zero-based distance, use <code>getByte(dist + 1)<code>.
* <p>
* This function is equivalent to <code>getByte(0, backward)</code>.
*/
@@ -307,9 +304,9 @@ public abstract class LZEncoder {
}
/**
* Gets the byte from the given forward minus backward offset.
* The forward offset is added to the current position. This lets
* one read bytes ahead of the current byte.
* Gets the byte from the given forward minus backward offset. The forward
* offset is added to the current position. This lets one read bytes ahead
* of the current byte.
*/
public int getByte(int forward, int backward) {
return buf[readPos + forward - backward] & 0xFF;
@@ -354,10 +351,10 @@ public abstract class LZEncoder {
}
/**
* Verifies that the matches returned by the match finder are valid.
* This is meant to be used in an assert statement. This is totally
* useless for actual encoding since match finder's results should
* naturally always be valid if it isn't broken.
* Verifies that the matches returned by the match finder are valid. This is
* meant to be used in an assert statement. This is totally useless for
* actual encoding since match finder's results should naturally always be
* valid if it isn't broken.
*
* @param matches return value from <code>getMatches</code>
*
@@ -374,21 +371,17 @@ public abstract class LZEncoder {
}
/**
* Moves to the next byte, checks if there is enough input available,
* and returns the amount of input available.
* Moves to the next byte, checks if there is enough input available, and
* returns the amount of input available.
*
* @param requiredForFlushing
* minimum number of available bytes when
* flushing; encoding may be continued with
* new input after flushing
* @param requiredForFinishing
* minimum number of available bytes when
* finishing; encoding must not be continued
* after finishing or the match finder state
* may be corrupt
* @param requiredForFlushing minimum number of available bytes when
* flushing; encoding may be continued with new input after flushing
* @param requiredForFinishing minimum number of available bytes when
* finishing; encoding must not be continued after finishing or the match
* finder state may be corrupt
*
* @return the number of bytes available or zero if there
* is not enough input available
* @return the number of bytes available or zero if there is not enough
* input available
*/
int movePos(int requiredForFlushing, int requiredForFinishing) {
assert requiredForFlushing >= requiredForFinishing;

View File

@@ -44,11 +44,11 @@ abstract class LZMACoder {
final short[] isRep2 = new short[State.STATES];
final short[][] isRep0Long = new short[State.STATES][POS_STATES_MAX];
final short[][] distSlots = new short[DIST_STATES][DIST_SLOTS];
final short[][] distSpecial = {new short[2], new short[2],
new short[4], new short[4],
new short[8], new short[8],
new short[16], new short[16],
new short[32], new short[32]};
final short[][] distSpecial = { new short[2], new short[2],
new short[4], new short[4],
new short[8], new short[8],
new short[16], new short[16],
new short[32], new short[32] };
final short[] distAlign = new short[ALIGN_SIZE];
static final int getDistState(int len) {

View File

@@ -37,10 +37,10 @@ public final class LZMADecoder extends LZMACoder {
}
/**
* Returns true if LZMA end marker was detected. It is encoded as
* the maximum match distance which with signed ints becomes -1. This
* function is needed only for LZMA1. LZMA2 doesn't use the end marker
* in the LZMA layer.
* Returns true if LZMA end marker was detected. It is encoded as the
* maximum match distance which with signed ints becomes -1. This function
* is needed only for LZMA1. LZMA2 doesn't use the end marker in the LZMA
* layer.
*/
public boolean endMarkerDetected() {
return reps[0] == -1;
@@ -87,7 +87,7 @@ public final class LZMADecoder extends LZMACoder {
if (distSlot < DIST_MODEL_END)
reps[0] |= rc.decodeReverseBitTree(
distSpecial[distSlot - DIST_MODEL_START]);
distSpecial[distSlot - DIST_MODEL_START]);
else {
reps[0] |= rc.decodeDirectBits(limit - ALIGN_BITS)
<< ALIGN_BITS;

View File

@@ -22,10 +22,10 @@ public abstract class LZMAEncoder extends LZMACoder {
* LZMA2 chunk is considered full when its uncompressed size exceeds
* <code>LZMA2_UNCOMPRESSED_LIMIT</code>.
* <p>
* A compressed LZMA2 chunk can hold 2 MiB of uncompressed data.
* A single LZMA symbol may indicate up to MATCH_LEN_MAX bytes
* of data, so the LZMA2 chunk is considered full when there is
* less space than MATCH_LEN_MAX bytes.
* A compressed LZMA2 chunk can hold 2 MiB of uncompressed data. A single
* LZMA symbol may indicate up to MATCH_LEN_MAX bytes of data, so the LZMA2
* chunk is considered full when there is less space than MATCH_LEN_MAX
* bytes.
*/
private static final int LZMA2_UNCOMPRESSED_LIMIT
= (2 << 20) - MATCH_LEN_MAX;
@@ -34,11 +34,11 @@ public abstract class LZMAEncoder extends LZMACoder {
* LZMA2 chunk is considered full when its compressed size exceeds
* <code>LZMA2_COMPRESSED_LIMIT</code>.
* <p>
* The maximum compressed size of a LZMA2 chunk is 64 KiB.
* A single LZMA symbol might use 20 bytes of space even though
* it usually takes just one byte or so. Two more bytes are needed
* for LZMA2 uncompressed chunks (see LZMA2OutputStream.writeChunk).
* Leave a little safety margin and use 26 bytes.
* The maximum compressed size of a LZMA2 chunk is 64 KiB. A single LZMA
* symbol might use 20 bytes of space even though it usually takes just one
* byte or so. Two more bytes are needed for LZMA2 uncompressed chunks (see
* LZMA2OutputStream.writeChunk). Leave a little safety margin and use 26
* bytes.
*/
private static final int LZMA2_COMPRESSED_LIMIT = (64 << 10) - 26;
@@ -70,46 +70,46 @@ public abstract class LZMAEncoder extends LZMACoder {
int m = 80;
switch (mode) {
case MODE_FAST:
m += LZMAEncoderFast.getMemoryUsage(
case MODE_FAST:
m += LZMAEncoderFast.getMemoryUsage(
dictSize, extraSizeBefore, mf);
break;
break;
case MODE_NORMAL:
m += LZMAEncoderNormal.getMemoryUsage(
case MODE_NORMAL:
m += LZMAEncoderNormal.getMemoryUsage(
dictSize, extraSizeBefore, mf);
break;
break;
default:
throw new IllegalArgumentException();
default:
throw new IllegalArgumentException();
}
return m;
}
public static LZMAEncoder getInstance(
RangeEncoder rc, int lc, int lp, int pb, int mode,
int dictSize, int extraSizeBefore,
int niceLen, int mf, int depthLimit) {
RangeEncoder rc, int lc, int lp, int pb, int mode,
int dictSize, int extraSizeBefore,
int niceLen, int mf, int depthLimit) {
switch (mode) {
case MODE_FAST:
return new LZMAEncoderFast(rc, lc, lp, pb,
dictSize, extraSizeBefore,
niceLen, mf, depthLimit);
case MODE_FAST:
return new LZMAEncoderFast(rc, lc, lp, pb,
dictSize, extraSizeBefore,
niceLen, mf, depthLimit);
case MODE_NORMAL:
return new LZMAEncoderNormal(rc, lc, lp, pb,
dictSize, extraSizeBefore,
niceLen, mf, depthLimit);
case MODE_NORMAL:
return new LZMAEncoderNormal(rc, lc, lp, pb,
dictSize, extraSizeBefore,
niceLen, mf, depthLimit);
}
throw new IllegalArgumentException();
}
/**
* Gets an integer [0, 63] matching the highest two bits of an integer.
* This is like bit scan reverse (BSR) on x86 except that this also
* cares about the second highest bit.
* Gets an integer [0, 63] matching the highest two bits of an integer. This
* is like bit scan reverse (BSR) on x86 except that this also cares about
* the second highest bit.
*/
public static int getDistSlot(int dist) {
if (dist <= DIST_MODEL_START)
@@ -147,19 +147,18 @@ public abstract class LZMAEncoder extends LZMACoder {
/**
* Gets the next LZMA symbol.
* <p>
* There are three types of symbols: literal (a single byte),
* repeated match, and normal match. The symbol is indicated
* by the return value and by the variable <code>back</code>.
* There are three types of symbols: literal (a single byte), repeated
* match, and normal match. The symbol is indicated by the return value and
* by the variable <code>back</code>.
* <p>
* Literal: <code>back == -1</code> and return value is <code>1</code>.
* The literal itself needs to be read from <code>lz</code> separately.
* Literal: <code>back == -1</code> and return value is <code>1</code>. The
* literal itself needs to be read from <code>lz</code> separately.
* <p>
* Repeated match: <code>back</code> is in the range [0, 3] and
* the return value is the length of the repeated match.
* Repeated match: <code>back</code> is in the range [0, 3] and the return
* value is the length of the repeated match.
* <p>
* Normal match: <code>back - REPS<code> (<code>back - 4</code>)
* is the distance of the match and the return value is the length
* of the match.
* Normal match: <code>back - REPS<code> (<code>back - 4</code>) is the
* distance of the match and the return value is the length of the match.
*/
abstract int getNextSymbol();
@@ -293,8 +292,8 @@ public abstract class LZMAEncoder extends LZMACoder {
if (distSlot < DIST_MODEL_END)
rc.encodeReverseBitTree(
distSpecial[distSlot - DIST_MODEL_START],
distReduced);
distSpecial[distSlot - DIST_MODEL_START],
distReduced);
else {
rc.encodeDirectBits(distReduced >>> ALIGN_BITS,
footerBits - ALIGN_BITS);
@@ -382,7 +381,7 @@ public abstract class LZMAEncoder extends LZMACoder {
if (rep == 0)
price += RangeEncoder.getBitPrice(isRep0[state.get()], 0)
+ RangeEncoder.getBitPrice(
isRep0Long[state.get()][posState], 1);
isRep0Long[state.get()][posState], 1);
else {
price += RangeEncoder.getBitPrice(isRep0[state.get()], 1);
@@ -430,7 +429,7 @@ public abstract class LZMAEncoder extends LZMACoder {
for (int distSlot = 0; distSlot < distSlotPricesSize; ++distSlot)
distSlotPrices[distState][distSlot]
= RangeEncoder.getBitTreePrice(
distSlots[distState], distSlot);
distSlots[distState], distSlot);
for (int distSlot = DIST_MODEL_END; distSlot < distSlotPricesSize;
++distSlot) {
@@ -454,8 +453,8 @@ public abstract class LZMAEncoder extends LZMACoder {
for (int i = 0; i < limit; ++i) {
int distReduced = dist - base;
int price = RangeEncoder.getReverseBitTreePrice(
distSpecial[distSlot - DIST_MODEL_START],
distReduced);
distSpecial[distSlot - DIST_MODEL_START],
distReduced);
for (int distState = 0; distState < DIST_STATES; ++distState)
fullDistPrices[distState][dist]
@@ -477,9 +476,9 @@ public abstract class LZMAEncoder extends LZMACoder {
}
/**
* Updates the lookup tables used for calculating match distance
* and length prices. The updating is skipped for performance reasons
* if the tables haven't changed much since the previous update.
* Updates the lookup tables used for calculating match distance and length
* prices. The updating is skipped for performance reasons if the tables
* haven't changed much since the previous update.
*/
void updatePrices() {
if (distPriceCount <= 0)
@@ -527,7 +526,7 @@ public abstract class LZMAEncoder extends LZMACoder {
int getPrice(int curByte, int matchByte,
int prevByte, int pos, State state) {
int price = RangeEncoder.getBitPrice(
isMatch[state.get()][pos & posMask], 0);
isMatch[state.get()][pos & posMask], 0);
int i = getSubcoderIndex(prevByte, pos);
price += state.isLiteral()
@@ -621,8 +620,8 @@ public abstract class LZMAEncoder extends LZMACoder {
/**
* The prices are updated after at least
* <code>PRICE_UPDATE_INTERVAL</code> many lengths
* have been encoded with the same posState.
* <code>PRICE_UPDATE_INTERVAL</code> many lengths have been encoded
* with the same posState.
*/
private static final int PRICE_UPDATE_INTERVAL = 32; // FIXME?

View File

@@ -22,8 +22,8 @@ final class LZMAEncoderFast extends LZMAEncoder {
static int getMemoryUsage(int dictSize, int extraSizeBefore, int mf) {
return LZEncoder.getMemoryUsage(
dictSize, Math.max(extraSizeBefore, EXTRA_SIZE_BEFORE),
EXTRA_SIZE_AFTER, MATCH_LEN_MAX, mf);
dictSize, Math.max(extraSizeBefore, EXTRA_SIZE_BEFORE),
EXTRA_SIZE_AFTER, MATCH_LEN_MAX, mf);
}
LZMAEncoderFast(RangeEncoder rc, int lc, int lp, int pb,

View File

@@ -60,9 +60,9 @@ final class LZMAEncoderNormal extends LZMAEncoder {
}
/**
* Converts the opts array from backward indexes to forward indexes.
* Then it will be simple to get the next symbol from the array
* in later calls to <code>getNextSymbol()</code>.
* Converts the opts array from backward indexes to forward indexes. Then it
* will be simple to get the next symbol from the array in later calls to
* <code>getNextSymbol()</code>.
*/
private int convertOpts() {
optEnd = optCur;

View File

@@ -58,8 +58,7 @@ final class Optimum {
}
/**
* Sets to indicate three LZMA symbols of which the second one
* is a literal.
* Sets to indicate three LZMA symbols of which the second one is a literal.
*/
void set3(int newPrice, int optCur, int back2, int len2, int back) {
price = newPrice;

View File

@@ -3,14 +3,14 @@
*
* <h4>Introduction</h4>
* <p>
* This aims to be a complete implementation of XZ data compression
* in pure Java. Features:
* This aims to be a complete implementation of XZ data compression in pure
* Java. Features:
* <ul>
* <li>Full support for the .xz file format specification version 1.0.4</li>
* <li>Single-threaded streamed compression and decompression</li>
* <li>Single-threaded decompression with limited random access support</li>
* <li>Raw streams (no .xz headers) for advanced users, including LZMA2
* with preset dictionary</li>
* <li>Raw streams (no .xz headers) for advanced users, including LZMA2 with
* preset dictionary</li>
* </ul>
* <p>
* Threading is planned but it is unknown when it will be implemented.
@@ -21,15 +21,14 @@
* <h4>Getting started</h4>
* <p>
* Start by reading the documentation of {@link org.tukaani.xz.XZOutputStream}
* and {@link org.tukaani.xz.XZInputStream}.
* If you use XZ inside another file format or protocol,
* see also {@link org.tukaani.xz.SingleXZInputStream}.
* and {@link org.tukaani.xz.XZInputStream}. If you use XZ inside another file
* format or protocol, see also {@link org.tukaani.xz.SingleXZInputStream}.
*
* <h4>Licensing</h4>
* <p>
* XZ for Java has been put into the public domain, thus you can do
* whatever you want with it. All the files in the package have been
* written by Lasse Collin and/or Igor Pavlov.
* XZ for Java has been put into the public domain, thus you can do whatever you
* want with it. All the files in the package have been written by Lasse Collin
* and/or Igor Pavlov.
* <p>
* This software is provided "as is", without any warranty.
*/

View File

@@ -26,7 +26,7 @@ public final class RangeDecoderFromBuffer extends RangeDecoder {
}
public void prepareInputBuffer(DataInputStream in, int len)
throws IOException {
throws IOException {
if (len < INIT_SIZE)
throw new CorruptedInputException();

View File

@@ -15,7 +15,7 @@ public final class IA64 implements SimpleFilter {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 4, 6, 6, 0, 0, 7, 7,
4, 4, 0, 0, 4, 4, 0, 0};
4, 4, 0, 0, 4, 4, 0, 0 };
private final boolean isEncoder;
private int pos;

View File

@@ -12,9 +12,9 @@ package org.tukaani.xz.simple;
public final class X86 implements SimpleFilter {
private static final boolean[] MASK_TO_ALLOWED_STATUS
= {true, true, true, false, true, false, false, false};
= { true, true, true, false, true, false, false, false };
private static final int[] MASK_TO_BIT_NUMBER = {0, 1, 2, 2, 3, 3, 3, 3};
private static final int[] MASK_TO_BIT_NUMBER = { 0, 1, 2, 2, 3, 3, 3, 3 };
private final boolean isEncoder;
private int pos;
@@ -46,7 +46,7 @@ public final class X86 implements SimpleFilter {
prevMask = (prevMask << (prevPos - 1)) & 7;
if (prevMask != 0)
if (!MASK_TO_ALLOWED_STATUS[prevMask] || test86MSByte(
buf[i + 4 - MASK_TO_BIT_NUMBER[prevMask]])) {
buf[i + 4 - MASK_TO_BIT_NUMBER[prevMask]])) {
prevPos = i;
prevMask = (prevMask << 1) | 1;
continue;