To reduce the size of the distribution, I removed the xz processor

This commit is contained in:
huangyuhui
2016-01-29 22:23:50 +08:00
parent f8eec1b7d5
commit ff0750f1f7
68 changed files with 0 additions and 6629 deletions

View File

@@ -1,36 +0,0 @@
/*
* BCJCoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
 * Common base class of the BCJ filter coders: declares the raw filter IDs
 * of the BCJ family and the chain-position properties they all share.
 */
abstract class BCJCoder implements FilterCoder {
    public static final long X86_FILTER_ID = 0x04;
    public static final long POWERPC_FILTER_ID = 0x05;
    public static final long IA64_FILTER_ID = 0x06;
    public static final long SPARC_FILTER_ID = 0x09;

    /** Returns true if the given raw filter ID belongs to the BCJ family. */
    public static boolean isBCJFilterID(long filterID) {
        return X86_FILTER_ID <= filterID && filterID <= SPARC_FILTER_ID;
    }

    /** BCJ filters never change the size of the data. */
    @Override
    public boolean changesSize() {
        return false;
    }

    /** A BCJ filter may be used anywhere except as the last filter. */
    @Override
    public boolean nonLastOK() {
        return true;
    }

    /** A BCJ filter cannot be the last filter in a chain. */
    @Override
    public boolean lastOK() {
        return false;
    }
}

View File

@@ -1,62 +0,0 @@
/*
* BCJDecoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import org.tukaani.xz.simple.*;
/**
 * Decoder for the BCJ family of filters. The Filter Properties field is
 * either empty or a four-byte little endian start offset for the
 * address conversions.
 */
class BCJDecoder extends BCJCoder implements FilterDecoder {
    private final long filterID;
    private final int startOffset;

    BCJDecoder(long filterID, byte[] props)
            throws UnsupportedOptionsException {
        assert isBCJFilterID(filterID);
        this.filterID = filterID;

        if (props.length == 0) {
            // Empty properties: conversions start at offset zero.
            startOffset = 0;
        } else if (props.length == 4) {
            // Four bytes: little endian unsigned start offset.
            int offset = 0;
            for (int i = 3; i >= 0; --i)
                offset = (offset << 8) | (props[i] & 0xFF);
            startOffset = offset;
        } else {
            throw new UnsupportedOptionsException(
                    "Unsupported BCJ filter properties");
        }
    }

    @Override
    public int getMemoryUsage() {
        return SimpleInputStream.getMemoryUsage();
    }

    @Override
    public InputStream getInputStream(InputStream in) {
        // Map the raw filter ID to the matching simple filter.
        SimpleFilter simpleFilter = null;

        if (filterID == X86_FILTER_ID)
            simpleFilter = new X86(false, startOffset);
        else if (filterID == POWERPC_FILTER_ID)
            simpleFilter = new PowerPC(false, startOffset);
        else if (filterID == IA64_FILTER_ID)
            simpleFilter = new IA64(false, startOffset);
        else if (filterID == SPARC_FILTER_ID)
            simpleFilter = new SPARC(false, startOffset);
        else
            assert false;

        return new SimpleInputStream(in, simpleFilter);
    }
}

View File

@@ -1,48 +0,0 @@
/*
* BCJEncoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
class BCJEncoder extends BCJCoder implements FilterEncoder {
private final BCJOptions options;
private final long filterID;
private final byte[] props;
BCJEncoder(BCJOptions options, long filterID) {
assert isBCJFilterID(filterID);
int startOffset = options.getStartOffset();
if (startOffset == 0)
props = new byte[0];
else {
props = new byte[4];
for (int i = 0; i < 4; ++i)
props[i] = (byte) (startOffset >>> (i * 8));
}
this.filterID = filterID;
this.options = (BCJOptions) options.clone();
}
public long getFilterID() {
return filterID;
}
public byte[] getFilterProps() {
return props;
}
public boolean supportsFlushing() {
return false;
}
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return options.getOutputStream(out);
}
}

View File

@@ -1,57 +0,0 @@
/*
* BCJOptions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
abstract class BCJOptions extends FilterOptions {
private final int alignment;
int startOffset = 0;
BCJOptions(int alignment) {
this.alignment = alignment;
}
/**
* Sets the start offset for the address conversions. Normally this is
* useless so you shouldn't use this function. The default value is
* <code>0</code>.
*/
public void setStartOffset(int startOffset)
throws UnsupportedOptionsException {
if ((startOffset & (alignment - 1)) != 0)
throw new UnsupportedOptionsException(
"Start offset must be a multiple of " + alignment);
this.startOffset = startOffset;
}
/**
* Gets the start offset.
*/
public int getStartOffset() {
return startOffset;
}
public int getEncoderMemoryUsage() {
return SimpleOutputStream.getMemoryUsage();
}
public int getDecoderMemoryUsage() {
return SimpleInputStream.getMemoryUsage();
}
public Object clone() {
try {
return super.clone();
} catch (CloneNotSupportedException e) {
assert false;
throw new RuntimeException();
}
}
}

View File

@@ -1,273 +0,0 @@
/*
* BlockInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import java.io.DataInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Arrays;
import org.tukaani.xz.common.DecoderUtil;
import org.tukaani.xz.check.Check;
/**
 * Reads and decompresses exactly one Block from an XZ Stream: parses and
 * validates the Block Header, runs the Compressed Data through the filter
 * chain, and verifies the sizes, the Block Padding, and the integrity check.
 */
class BlockInputStream extends InputStream {
    private final DataInputStream inData;
    // Counts the bytes of the Compressed Data field as they are read.
    private final CountingInputStream inCounted;
    private InputStream filterChain;
    private final Check check;

    // Sizes from the optional Block Header fields; -1 means the field
    // was not present in the header.
    private long uncompressedSizeInHeader = -1;
    private long compressedSizeInHeader = -1;

    // Maximum number of bytes the Compressed Data field may contain.
    private long compressedSizeLimit;

    // Size of the Block Header field in bytes.
    private final int headerSize;

    // Number of uncompressed bytes returned to the caller so far.
    private long uncompressedSize = 0;
    private boolean endReached = false;

    // Scratch buffer for the single-byte read().
    private final byte[] tempBuf = new byte[1];

    /**
     * Parses and validates the Block Header and builds the filter chain.
     *
     * @param memoryLimit memory usage limit in KiB, or negative for no limit
     * @param unpaddedSizeInIndex Unpadded Size from the Index, or -1 when
     *                            not doing random access reading
     * @param uncompressedSizeInIndex Uncompressed Size from the Index, or -1
     *                            when not doing random access reading
     * @throws IndexIndicatorException if the first byte is 0x00, which means
     *                            the Index field begins instead of a Block
     */
    public BlockInputStream(InputStream in, Check check, int memoryLimit,
                            long unpaddedSizeInIndex,
                            long uncompressedSizeInIndex)
            throws IOException, IndexIndicatorException {
        this.check = check;
        inData = new DataInputStream(in);
        byte[] buf = new byte[DecoderUtil.BLOCK_HEADER_SIZE_MAX];

        // Block Header Size or Index Indicator
        inData.readFully(buf, 0, 1);

        // See if this begins the Index field.
        if (buf[0] == 0x00)
            throw new IndexIndicatorException();

        // Read the rest of the Block Header.
        headerSize = 4 * ((buf[0] & 0xFF) + 1);
        inData.readFully(buf, 1, headerSize - 1);

        // Validate the CRC32.
        if (!DecoderUtil.isCRC32Valid(buf, 0, headerSize - 4, headerSize - 4))
            throw new CorruptedInputException("XZ Block Header is corrupt");

        // Check for reserved bits in Block Flags.
        if ((buf[1] & 0x3C) != 0)
            throw new UnsupportedOptionsException(
                    "Unsupported options in XZ Block Header");

        // Memory for the Filter Flags field
        int filterCount = (buf[1] & 0x03) + 1;
        long[] filterIDs = new long[filterCount];
        byte[][] filterProps = new byte[filterCount][];

        // Use a stream to parse the fields after the Block Flags field.
        // Exclude the CRC32 field at the end.
        ByteArrayInputStream bufStream = new ByteArrayInputStream(
                buf, 2, headerSize - 6);

        try {
            // Set the maximum valid compressed size. This is overridden
            // by the value from the Compressed Size field if it is present.
            compressedSizeLimit = (DecoderUtil.VLI_MAX & ~3)
                    - headerSize - check.getSize();

            // Decode and validate Compressed Size if the relevant flag
            // is set in Block Flags.
            if ((buf[1] & 0x40) != 0x00) {
                compressedSizeInHeader = DecoderUtil.decodeVLI(bufStream);

                if (compressedSizeInHeader == 0
                        || compressedSizeInHeader > compressedSizeLimit)
                    throw new CorruptedInputException();

                compressedSizeLimit = compressedSizeInHeader;
            }

            // Decode Uncompressed Size if the relevant flag is set
            // in Block Flags.
            if ((buf[1] & 0x80) != 0x00)
                uncompressedSizeInHeader = DecoderUtil.decodeVLI(bufStream);

            // Decode Filter Flags.
            for (int i = 0; i < filterCount; ++i) {
                filterIDs[i] = DecoderUtil.decodeVLI(bufStream);

                long filterPropsSize = DecoderUtil.decodeVLI(bufStream);
                if (filterPropsSize > bufStream.available())
                    throw new CorruptedInputException();

                filterProps[i] = new byte[(int) filterPropsSize];
                bufStream.read(filterProps[i]);
            }
        } catch (IOException e) {
            // decodeVLI hitting the end of bufStream means a truncated
            // or malformed header.
            throw new CorruptedInputException("XZ Block Header is corrupt");
        }

        // Check that the remaining bytes are zero.
        for (int i = bufStream.available(); i > 0; --i)
            if (bufStream.read() != 0x00)
                throw new UnsupportedOptionsException(
                        "Unsupported options in XZ Block Header");

        // Validate the Block Header against the Index when doing
        // random access reading.
        if (unpaddedSizeInIndex != -1) {
            // Compressed Data must be at least one byte, so if Block Header
            // and Check alone take as much or more space than the size
            // stored in the Index, the file is corrupt.
            int headerAndCheckSize = headerSize + check.getSize();
            if (headerAndCheckSize >= unpaddedSizeInIndex)
                throw new CorruptedInputException(
                        "XZ Index does not match a Block Header");

            // The compressed size calculated from Unpadded Size must
            // match the value stored in the Compressed Size field in
            // the Block Header.
            long compressedSizeFromIndex
                    = unpaddedSizeInIndex - headerAndCheckSize;
            if (compressedSizeFromIndex > compressedSizeLimit
                    || (compressedSizeInHeader != -1
                        && compressedSizeInHeader != compressedSizeFromIndex))
                throw new CorruptedInputException(
                        "XZ Index does not match a Block Header");

            // The uncompressed size stored in the Index must match
            // the value stored in the Uncompressed Size field in
            // the Block Header.
            if (uncompressedSizeInHeader != -1
                    && uncompressedSizeInHeader != uncompressedSizeInIndex)
                throw new CorruptedInputException(
                        "XZ Index does not match a Block Header");

            // For further validation, pretend that the values from the Index
            // were stored in the Block Header.
            compressedSizeLimit = compressedSizeFromIndex;
            compressedSizeInHeader = compressedSizeFromIndex;
            uncompressedSizeInHeader = uncompressedSizeInIndex;
        }

        // Check if the Filter IDs are supported, decode
        // the Filter Properties, and check that they are
        // supported by this decoder implementation.
        // NOTE(review): only BCJ filters are wired up here — presumably the
        // LZMA2 decoder was removed along with the rest of the xz support.
        FilterDecoder[] filters = new FilterDecoder[filterIDs.length];

        for (int i = 0; i < filters.length; ++i)
            if (BCJDecoder.isBCJFilterID(filterIDs[i]))
                filters[i] = new BCJDecoder(filterIDs[i], filterProps[i]);
            else
                throw new UnsupportedOptionsException(
                        "Unknown Filter ID " + filterIDs[i]);

        RawCoder.validate(filters);

        // Check the memory usage limit.
        if (memoryLimit >= 0) {
            int memoryNeeded = 0;
            for (int i = 0; i < filters.length; ++i)
                memoryNeeded += filters[i].getMemoryUsage();

            if (memoryNeeded > memoryLimit)
                throw new MemoryLimitException(memoryNeeded, memoryLimit);
        }

        // Use an input size counter to calculate
        // the size of the Compressed Data field.
        inCounted = new CountingInputStream(in);

        // Initialize the filter chain.
        filterChain = inCounted;
        for (int i = filters.length - 1; i >= 0; --i)
            filterChain = filters[i].getInputStream(filterChain);
    }

    @Override
    public int read() throws IOException {
        return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
    }

    @Override
    public int read(byte[] buf, int off, int len) throws IOException {
        if (endReached)
            return -1;

        int ret = filterChain.read(buf, off, len);

        if (ret > 0) {
            check.update(buf, off, ret);
            uncompressedSize += ret;

            // Catch invalid values.
            long compressedSize = inCounted.getSize();
            if (compressedSize < 0
                    || compressedSize > compressedSizeLimit
                    || uncompressedSize < 0
                    || (uncompressedSizeInHeader != -1
                        && uncompressedSize > uncompressedSizeInHeader))
                throw new CorruptedInputException();

            // Check the Block integrity as soon as possible:
            //   - The filter chain shouldn't return less than requested
            //     unless it hit the end of the input.
            //   - If the uncompressed size is known, we know when there
            //     shouldn't be more data coming. We still need to read
            //     one byte to let the filter chain catch errors and to
            //     let it read end of payload marker(s).
            if (ret < len || uncompressedSize == uncompressedSizeInHeader) {
                if (filterChain.read() != -1)
                    throw new CorruptedInputException();

                validate();
                endReached = true;
            }
        } else if (ret == -1) {
            validate();
            endReached = true;
        }

        return ret;
    }

    // Called once the end of the Compressed Data has been reached:
    // checks the header sizes, the Block Padding, and the integrity check.
    private void validate() throws IOException {
        long compressedSize = inCounted.getSize();

        // Validate Compressed Size and Uncompressed Size if they were
        // present in Block Header.
        if ((compressedSizeInHeader != -1
                && compressedSizeInHeader != compressedSize)
                || (uncompressedSizeInHeader != -1
                    && uncompressedSizeInHeader != uncompressedSize))
            throw new CorruptedInputException();

        // Block Padding bytes must be zeros.
        while ((compressedSize++ & 3) != 0)
            if (inData.readUnsignedByte() != 0x00)
                throw new CorruptedInputException();

        // Validate the integrity check.
        byte[] storedCheck = new byte[check.getSize()];
        inData.readFully(storedCheck);
        if (!Arrays.equals(check.finish(), storedCheck))
            throw new CorruptedInputException("Integrity check ("
                    + check.getName() + ") does not match");
    }

    public int available() throws IOException {
        return filterChain.available();
    }

    // Unpadded Size of this Block as defined for the XZ Index.
    public long getUnpaddedSize() {
        return headerSize + inCounted.getSize() + check.getSize();
    }

    public long getUncompressedSize() {
        return uncompressedSize;
    }
}

View File

@@ -1,134 +0,0 @@
/*
* BlockOutputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.OutputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.tukaani.xz.common.EncoderUtil;
import org.tukaani.xz.check.Check;
class BlockOutputStream extends FinishableOutputStream {
private final OutputStream out;
private final CountingOutputStream outCounted;
private FinishableOutputStream filterChain;
private final Check check;
private final int headerSize;
private final long compressedSizeLimit;
private long uncompressedSize = 0;
private final byte[] tempBuf = new byte[1];
public BlockOutputStream(OutputStream out, FilterEncoder[] filters,
Check check) throws IOException {
this.out = out;
this.check = check;
// Initialize the filter chain.
outCounted = new CountingOutputStream(out);
filterChain = outCounted;
for (int i = filters.length - 1; i >= 0; --i)
filterChain = filters[i].getOutputStream(filterChain);
// Prepare to encode the Block Header field.
ByteArrayOutputStream bufStream = new ByteArrayOutputStream();
// Write a dummy Block Header Size field. The real value is written
// once everything else except CRC32 has been written.
bufStream.write(0x00);
// Write Block Flags. Storing Compressed Size or Uncompressed Size
// isn't supported for now.
bufStream.write(filters.length - 1);
// List of Filter Flags
for (int i = 0; i < filters.length; ++i) {
EncoderUtil.encodeVLI(bufStream, filters[i].getFilterID());
byte[] filterProps = filters[i].getFilterProps();
EncoderUtil.encodeVLI(bufStream, filterProps.length);
bufStream.write(filterProps);
}
// Header Padding
while ((bufStream.size() & 3) != 0)
bufStream.write(0x00);
byte[] buf = bufStream.toByteArray();
// Total size of the Block Header: Take the size of the CRC32 field
// into account.
headerSize = buf.length + 4;
// This is just a sanity check.
if (headerSize > EncoderUtil.BLOCK_HEADER_SIZE_MAX)
throw new UnsupportedOptionsException();
// Block Header Size
buf[0] = (byte) (buf.length / 4);
// Write the Block Header field to the output stream.
out.write(buf);
EncoderUtil.writeCRC32(out, buf);
// Calculate the maximum allowed size of the Compressed Data field.
// It is hard to exceed it so this is mostly to be pedantic.
compressedSizeLimit = (EncoderUtil.VLI_MAX & ~3)
- headerSize - check.getSize();
}
public void write(int b) throws IOException {
tempBuf[0] = (byte) b;
write(tempBuf, 0, 1);
}
public void write(byte[] buf, int off, int len) throws IOException {
filterChain.write(buf, off, len);
check.update(buf, off, len);
uncompressedSize += len;
validate();
}
public void flush() throws IOException {
filterChain.flush();
validate();
}
public void finish() throws IOException {
// Finish the Compressed Data field.
filterChain.finish();
validate();
// Block Padding
for (long i = outCounted.getSize(); (i & 3) != 0; ++i)
out.write(0x00);
// Check
out.write(check.finish());
}
private void validate() throws IOException {
long compressedSize = outCounted.getSize();
// It is very hard to trigger this exception.
// This is just to be pedantic.
if (compressedSize < 0 || compressedSize > compressedSizeLimit
|| uncompressedSize < 0)
throw new XZIOException("XZ Stream has grown too big");
}
public long getUnpaddedSize() {
return headerSize + outCounted.getSize() + check.getSize();
}
public long getUncompressedSize() {
return uncompressedSize;
}
}

View File

@@ -1,37 +0,0 @@
/*
* CorruptedInputException
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
 * Thrown when the compressed input data is corrupt. However, it is possible
 * that some or all of the data already read from the input stream was corrupt
 * too.
 */
public class CorruptedInputException extends XZIOException {
    private static final long serialVersionUID = 3L;

    /**
     * Creates a new CorruptedInputException with the default error detail
     * message.
     */
    public CorruptedInputException() {
        // Delegate to the detail-message constructor with the default text.
        this("Compressed data is corrupt");
    }

    /**
     * Creates a new CorruptedInputException with the specified error detail
     * message.
     *
     * @param s error detail message
     */
    public CorruptedInputException(String s) {
        super(s);
    }
}

View File

@@ -1,45 +0,0 @@
/*
* CountingInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.FilterInputStream;
import java.io.InputStream;
import java.io.IOException;
/**
 * Counts the number of bytes read from an input stream.
 */
class CountingInputStream extends FilterInputStream {
    private long size = 0;

    public CountingInputStream(InputStream in) {
        super(in);
    }

    @Override
    public int read() throws IOException {
        int b = in.read();
        // Only count while the counter hasn't gone negative
        // (presumably an overflow guard — counting stops after wraparound).
        if (b != -1 && size >= 0)
            ++size;
        return b;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int count = in.read(b, off, len);
        if (count > 0 && size >= 0)
            size += count;
        return count;
    }

    /** Returns the number of bytes read through this stream so far. */
    public long getSize() {
        return size;
    }
}

View File

@@ -1,54 +0,0 @@
/*
* CountingOutputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.OutputStream;
import java.io.IOException;
/**
* Counts the number of bytes written to an output stream.
* <p>
* The <code>finish</code> method does nothing.
* This is <code>FinishableOutputStream</code> instead
* of <code>OutputStream</code> solely because it allows
* using this as the output stream for a chain of raw filters.
*/
class CountingOutputStream extends FinishableOutputStream {
private final OutputStream out;
private long size = 0;
public CountingOutputStream(OutputStream out) {
this.out = out;
}
public void write(int b) throws IOException {
out.write(b);
if (size >= 0)
++size;
}
public void write(byte[] b, int off, int len) throws IOException {
out.write(b, off, len);
if (size >= 0)
size += len;
}
public void flush() throws IOException {
out.flush();
}
public void close() throws IOException {
out.close();
}
public long getSize() {
return size;
}
}

View File

@@ -1,18 +0,0 @@
/*
* FilterCoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
 * Properties common to filter encoders and decoders; used by
 * RawCoder.validate to check that a filter chain is acceptable.
 */
interface FilterCoder {
    // True if this filter can change the size of the data.
    boolean changesSize();

    // True if this filter may appear in a non-last position of a chain.
    boolean nonLastOK();

    // True if this filter may be the last filter of a chain.
    boolean lastOK();
}

View File

@@ -1,18 +0,0 @@
/*
* FilterDecoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
/**
 * A filter coder that can decode: reports its memory usage and wraps an
 * input stream with its decoding stream.
 */
interface FilterDecoder extends FilterCoder {
    // Memory usage of the decoder in KiB (see MemoryLimitException usage).
    int getMemoryUsage();

    // Wraps in with a stream that decodes this filter's data.
    InputStream getInputStream(InputStream in);
}

View File

@@ -1,20 +0,0 @@
/*
* FilterEncoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
 * A filter coder that can encode: exposes the raw filter ID and
 * properties for the Block Header, and wraps an output stream with its
 * encoding stream.
 */
interface FilterEncoder extends FilterCoder {
    // Raw filter ID stored in the Filter Flags of the Block Header.
    long getFilterID();

    // Encoded Filter Properties stored in the Block Header.
    byte[] getFilterProps();

    // True if the encoder supports flushing.
    boolean supportsFlushing();

    // Wraps out with a stream that encodes this filter's data.
    FinishableOutputStream getOutputStream(FinishableOutputStream out);
}

View File

@@ -1,79 +0,0 @@
/*
* FilterOptions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import java.io.IOException;
/**
 * Base class for filter-specific options classes.
 */
public abstract class FilterOptions implements Cloneable {
    /**
     * Gets how much memory the encoder will need with the given filter chain.
     * This function simply calls <code>getEncoderMemoryUsage()</code> for every
     * filter in the array and returns the sum of the returned values.
     */
    public static int getEncoderMemoryUsage(FilterOptions[] options) {
        int total = 0;
        for (FilterOptions opt : options)
            total += opt.getEncoderMemoryUsage();
        return total;
    }

    /**
     * Gets how much memory the decoder will need with the given filter chain.
     * This function simply calls <code>getDecoderMemoryUsage()</code> for every
     * filter in the array and returns the sum of the returned values.
     */
    public static int getDecoderMemoryUsage(FilterOptions[] options) {
        int total = 0;
        for (FilterOptions opt : options)
            total += opt.getDecoderMemoryUsage();
        return total;
    }

    /**
     * Gets how much memory the encoder will need with these options.
     */
    public abstract int getEncoderMemoryUsage();

    /**
     * Gets a raw (no XZ headers) encoder output stream using these options. Raw
     * streams are an advanced feature. In most cases you want to store the
     * compressed data in the .xz container format instead of using a raw
     * stream. To use this filter in a .xz file, pass this object to
     * XZOutputStream.
     */
    public abstract FinishableOutputStream getOutputStream(
            FinishableOutputStream out);

    /**
     * Gets how much memory the decoder will need to decompress the data that
     * was encoded with these options.
     */
    public abstract int getDecoderMemoryUsage();

    /**
     * Gets a raw (no XZ headers) decoder input stream using these options.
     */
    public abstract InputStream getInputStream(InputStream in)
            throws IOException;

    // Package-private hook: returns the encoder matching these options.
    abstract FilterEncoder getFilterEncoder();

    // Package-private constructor keeps subclassing inside this package.
    FilterOptions() {
    }
}

View File

@@ -1,33 +0,0 @@
/*
* FinishableOutputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.OutputStream;
import java.io.IOException;
/**
 * Output stream that supports finishing without closing
 * the underlying stream.
 */
public abstract class FinishableOutputStream extends OutputStream {
    /**
     * Finish the stream without closing the underlying stream.
     * No more data may be written to the stream after finishing.
     * <p>
     * The <code>finish</code> method of <code>FinishableOutputStream</code>
     * does nothing. Subclasses should override it if they need finishing
     * support, which is the case, for example, with compressors.
     *
     * @throws IOException may be thrown by subclasses that override this
     */
    public void finish() throws IOException {
    }
}

View File

@@ -1,75 +0,0 @@
/*
* FinishableWrapperOutputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.OutputStream;
import java.io.IOException;
/**
 * Wraps an output stream to a finishable output stream for use with
 * raw encoders. This is not needed for XZ compression and thus most
 * people will never need this.
 */
public class FinishableWrapperOutputStream extends FinishableOutputStream {
    /**
     * The {@link java.io.OutputStream OutputStream} that has been
     * wrapped into a FinishableWrapperOutputStream.
     */
    protected OutputStream out;

    /**
     * Creates a new output stream which support finishing.
     * The <code>finish()</code> method will do nothing.
     */
    public FinishableWrapperOutputStream(OutputStream out) {
        this.out = out;
    }

    /** Delegates to {@link java.io.OutputStream#write(int) out.write(b)}. */
    @Override
    public void write(int b) throws IOException {
        out.write(b);
    }

    /** Delegates to {@link java.io.OutputStream#write(byte[]) out.write(buf)}. */
    @Override
    public void write(byte[] buf) throws IOException {
        out.write(buf);
    }

    /**
     * Delegates to {@link java.io.OutputStream#write(byte[],int,int)
     * out.write(buf, off, len)}.
     */
    @Override
    public void write(byte[] buf, int off, int len) throws IOException {
        out.write(buf, off, len);
    }

    /** Delegates to {@link java.io.OutputStream#flush() out.flush()}. */
    @Override
    public void flush() throws IOException {
        out.flush();
    }

    /** Delegates to {@link java.io.OutputStream#close() out.close()}. */
    @Override
    public void close() throws IOException {
        out.close();
    }
}

View File

@@ -1,36 +0,0 @@
/*
* IA64Options
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import org.tukaani.xz.simple.IA64;
/**
* BCJ filter for Itanium (IA-64) instructions.
*/
public class IA64Options extends BCJOptions {
private static final int ALIGNMENT = 16;
public IA64Options() {
super(ALIGNMENT);
}
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return new SimpleOutputStream(out, new IA64(true, startOffset));
}
public InputStream getInputStream(InputStream in) {
return new SimpleInputStream(in, new IA64(false, startOffset));
}
FilterEncoder getFilterEncoder() {
return new BCJEncoder(this, BCJCoder.IA64_FILTER_ID);
}
}

View File

@@ -1,14 +0,0 @@
/*
* IndexIndicatorException
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
// Internal control-flow signal: thrown by the BlockInputStream constructor
// when the first byte read is 0x00, meaning the Index field begins instead
// of another Block. Never propagated to library users.
class IndexIndicatorException extends Exception {
    private static final long serialVersionUID = 1L;
}

View File

@@ -1,60 +0,0 @@
/*
* MemoryLimitException
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
 * Thrown when the memory usage limit given to the XZ decompressor would be
 * exceeded.
 * <p>
 * The amount of memory required and the memory usage limit are included in the
 * error detail message in human readable format.
 */
public class MemoryLimitException extends XZIOException {
    private static final long serialVersionUID = 3L;

    private final int memoryNeeded;
    private final int memoryLimit;

    /**
     * Creates a new MemoryLimitException.
     * <p>
     * The amount of memory needed and the memory usage limit are included in
     * the error detail message.
     *
     * @param memoryNeeded amount of memory needed as kibibytes (KiB)
     * @param memoryLimit specified memory usage limit as kibibytes (KiB)
     */
    public MemoryLimitException(int memoryNeeded, int memoryLimit) {
        super(memoryNeeded + " KiB of memory would be needed; limit was "
              + memoryLimit + " KiB");
        this.memoryNeeded = memoryNeeded;
        this.memoryLimit = memoryLimit;
    }

    /**
     * Gets how much memory is required to decompress the data.
     *
     * @return amount of memory needed as kibibytes (KiB)
     */
    public int getMemoryNeeded() {
        return memoryNeeded;
    }

    /**
     * Gets what the memory usage limit was at the time the exception was
     * created.
     *
     * @return memory usage limit as kibibytes (KiB)
     */
    public int getMemoryLimit() {
        return memoryLimit;
    }
}

View File

@@ -1,36 +0,0 @@
/*
* PowerPCOptions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import org.tukaani.xz.simple.PowerPC;
/**
* BCJ filter for big endian PowerPC instructions.
*/
public class PowerPCOptions extends BCJOptions {
private static final int ALIGNMENT = 4;
public PowerPCOptions() {
super(ALIGNMENT);
}
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return new SimpleOutputStream(out, new PowerPC(true, startOffset));
}
public InputStream getInputStream(InputStream in) {
return new SimpleInputStream(in, new PowerPC(false, startOffset));
}
FilterEncoder getFilterEncoder() {
return new BCJEncoder(this, BCJCoder.POWERPC_FILTER_ID);
}
}

View File

@@ -1,33 +0,0 @@
/*
* RawCoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
 * Static helper for validating a raw filter chain before use.
 */
class RawCoder {
    /**
     * Checks that every filter is allowed in its position in the chain and
     * that not too many size-changing filters are present.
     *
     * @throws UnsupportedOptionsException if the chain is not supported
     */
    static void validate(FilterCoder[] filters)
            throws UnsupportedOptionsException {
        // Every filter except the last must be usable as a non-last filter.
        for (int i = 0; i + 1 < filters.length; ++i)
            if (!filters[i].nonLastOK())
                throw new UnsupportedOptionsException(
                        "Unsupported XZ filter chain");

        // The last filter must be usable as the last filter.
        if (!filters[filters.length - 1].lastOK())
            throw new UnsupportedOptionsException(
                    "Unsupported XZ filter chain");

        // At most three filters in the chain may change the data size.
        int changesSizeCount = 0;
        for (FilterCoder filter : filters)
            if (filter.changesSize())
                ++changesSizeCount;

        if (changesSizeCount > 3)
            throw new UnsupportedOptionsException(
                    "Unsupported XZ filter chain");
    }
}

View File

@@ -1,36 +0,0 @@
/*
* SPARCOptions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import org.tukaani.xz.simple.SPARC;
/**
* BCJ filter for SPARC.
*/
public class SPARCOptions extends BCJOptions {
private static final int ALIGNMENT = 4;
public SPARCOptions() {
super(ALIGNMENT);
}
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return new SimpleOutputStream(out, new SPARC(true, startOffset));
}
public InputStream getInputStream(InputStream in) {
return new SimpleInputStream(in, new SPARC(false, startOffset));
}
FilterEncoder getFilterEncoder() {
return new BCJEncoder(this, BCJCoder.SPARC_FILTER_ID);
}
}

View File

@@ -1,101 +0,0 @@
/*
* SeekableFileInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.File;
import java.io.RandomAccessFile;
import java.io.IOException;
import java.io.FileNotFoundException;
/**
 * Wraps a {@link java.io.RandomAccessFile RandomAccessFile} in a
 * SeekableInputStream.
 */
public class SeekableFileInputStream extends SeekableInputStream {
    /**
     * The RandomAccessFile that has been wrapped into a
     * SeekableFileInputStream.
     */
    protected RandomAccessFile randomAccessFile;

    /**
     * Creates a new seekable input stream that reads from the specified file.
     */
    public SeekableFileInputStream(File file) throws FileNotFoundException {
        randomAccessFile = new RandomAccessFile(file, "r");
    }

    /**
     * Creates a new seekable input stream that reads from a file with the
     * specified name.
     */
    public SeekableFileInputStream(String name) throws FileNotFoundException {
        randomAccessFile = new RandomAccessFile(name, "r");
    }

    /**
     * Creates a new seekable input stream from an existing
     * <code>RandomAccessFile</code> object.
     */
    public SeekableFileInputStream(RandomAccessFile randomAccessFile) {
        this.randomAccessFile = randomAccessFile;
    }

    /**
     * Calls {@link RandomAccessFile#read() randomAccessFile.read()}.
     */
    public int read() throws IOException {
        return randomAccessFile.read();
    }

    /**
     * Calls {@link RandomAccessFile#read(byte[]) randomAccessFile.read(buf)}.
     */
    public int read(byte[] buf) throws IOException {
        return randomAccessFile.read(buf);
    }

    /**
     * Calls {@link RandomAccessFile#read(byte[],int,int)
     * randomAccessFile.read(buf, off, len)}.
     */
    public int read(byte[] buf, int off, int len) throws IOException {
        return randomAccessFile.read(buf, off, len);
    }

    /**
     * Calls {@link RandomAccessFile#close() randomAccessFile.close()}.
     */
    public void close() throws IOException {
        randomAccessFile.close();
    }

    /**
     * Calls {@link RandomAccessFile#length() randomAccessFile.length()}.
     */
    public long length() throws IOException {
        return randomAccessFile.length();
    }

    /**
     * Calls {@link RandomAccessFile#getFilePointer()
     * randomAccessFile.getFilePointer()}.
     */
    public long position() throws IOException {
        return randomAccessFile.getFilePointer();
    }

    /**
     * Calls {@link RandomAccessFile#seek(long) randomAccessFile.seek(pos)}.
     */
    public void seek(long pos) throws IOException {
        randomAccessFile.seek(pos);
    }
}

View File

@@ -1,80 +0,0 @@
/*
* SeekableInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import java.io.IOException;
/**
 * Input stream with random access support.
 */
public abstract class SeekableInputStream extends InputStream {
    /**
     * Seeks <code>n</code> bytes forward in this stream.
     * <p>
     * This never seeks past the end of the file: if the current position
     * is already at or beyond the end, nothing happens and <code>0</code>
     * is returned. If fewer than <code>n</code> bytes remain, this seeks
     * to the end of the stream and returns the number of bytes actually
     * skipped.
     * <p>
     * A negative <code>n</code> leaves the position untouched and returns
     * <code>0</code>; seeking backward here would conflict with the
     * contract of {@link java.io.InputStream#skip(long) InputStream.skip}.
     *
     * @return <code>0</code> if <code>n</code> is negative or the position
     *         is already at the end of the stream; otherwise the number of
     *         bytes skipped, which is <code>n</code> unless fewer bytes
     *         remained before the end of the stream
     *
     * @throws IOException might be thrown by {@link #seek(long)}
     */
    public long skip(long n) throws IOException {
        if (n <= 0)
            return 0;

        final long total = length();
        final long current = position();

        // Nothing can be skipped once at or past the end of the stream.
        if (current >= total)
            return 0;

        // Clamp the skip amount so we never seek beyond the end.
        final long amount = Math.min(n, total - current);
        seek(current + amount);
        return amount;
    }
    /**
     * Gets the size of the stream.
     */
    public abstract long length() throws IOException;
    /**
     * Gets the current position in the stream.
     */
    public abstract long position() throws IOException;
    /**
     * Seeks to the specified absolute position in the stream.
     * <p>
     * Subclasses should allow seeking past the end of the file unless there
     * is a good reason not to. After such a seek, <code>read</code> returns
     * <code>-1</code> to indicate end of stream.
     *
     * @param pos new read position in the stream
     *
     * @throws IOException if <code>pos</code> is negative or if a
     *                     stream-specific I/O error occurs
     */
    public abstract void seek(long pos) throws IOException;
}

View File

@@ -1,877 +0,0 @@
/*
* SeekableXZInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.util.Arrays;
import java.util.ArrayList;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.EOFException;
import org.tukaani.xz.common.DecoderUtil;
import org.tukaani.xz.common.StreamFlags;
import org.tukaani.xz.check.Check;
import org.tukaani.xz.index.IndexDecoder;
import org.tukaani.xz.index.BlockInfo;
/**
 * Decompresses a .xz file in random access mode. This supports decompressing
 * concatenated .xz files.
 * <p>
 * Each .xz file consists of one or more Streams. Each Stream consists of zero
 * or more Blocks. Each Stream contains an Index of Streams' Blocks. The
 * Indexes from all Streams are loaded in RAM by a constructor of this class.
 * A typical .xz file has only one Stream, and parsing its Index will need
 * only three or four seeks.
 * <p>
 * To make random access possible, the data in a .xz file must be split into
 * multiple Blocks of reasonable size. Decompression can only start at a Block
 * boundary. When seeking to an uncompressed position that is not at a Block
 * boundary, decompression starts at the beginning of the Block and throws away
 * data until the target position is reached. Thus, smaller Blocks mean faster
 * seeks to arbitrary uncompressed positions. On the other hand, smaller Blocks
 * mean worse compression. So one has to make a compromise between random access
 * speed and compression ratio.
 * <p>
 * Implementation note: This class uses linear search to locate the correct
 * Stream from the data structures in RAM. It was the simplest to implement and
 * should be fine as long as there aren't too many Streams. The correct Block
 * inside a Stream is located using binary search and thus is fast even with a
 * huge number of Blocks.
 *
 * <h4>Memory usage</h4>
 * <p>
 * The amount of memory needed for the Indexes is taken into account when
 * checking the memory usage limit. Each Stream is calculated to need at least
 * 1&nbsp;KiB of memory and each Block 16 bytes of memory, rounded up to the
 * next kibibyte. So unless the file has a huge number of Streams or Blocks,
 * these don't take significant amount of memory.
 *
 * <h4>Creating random-accessible .xz files</h4>
 * <p>
 * When using {@link XZOutputStream}, a new Block can be started by calling its
 * {@link XZOutputStream#endBlock() endBlock} method. If you know that the
 * decompressor will only need to seek to certain uncompressed positions, it can
 * be a good idea to start a new Block at (some of) these positions (and only at
 * these positions to get better compression ratio).
 * <p>
 * liblzma in XZ Utils supports starting a new Block with
 * <code>LZMA_FULL_FLUSH</code>. XZ Utils 5.1.1alpha added threaded compression
 * which creates multi-Block .xz files. XZ Utils 5.1.1alpha also added the
 * option <code>--block-size=SIZE</code> to the xz command line tool. XZ Utils
 * 5.1.2alpha added a partial implementation of <code>--block-list=SIZES</code>
 * which allows specifying sizes of individual Blocks.
 *
 * @see SeekableFileInputStream
 * @see XZInputStream
 * @see XZOutputStream
 */
public class SeekableXZInputStream extends SeekableInputStream {
    /**
     * The input stream containing XZ compressed data.
     */
    private SeekableInputStream in;
    /**
     * Memory usage limit after the memory usage of the IndexDecoders have been
     * subtracted.
     */
    private final int memoryLimit;
    /**
     * Memory usage of the IndexDecoders.
     * <code>memoryLimit + indexMemoryUsage</code> equals the original memory
     * usage limit that was passed to the constructor.
     */
    private int indexMemoryUsage = 0;
    /**
     * List of IndexDecoders, one for each Stream in the file. The list is in
     * reverse order: The first element is the last Stream in the file.
     * NOTE(review): raw type; elements are always IndexDecoder (see the
     * casts below).
     */
    private final ArrayList streams = new ArrayList();
    /**
     * Bitmask of all Check IDs seen.
     */
    private int checkTypes = 0;
    /**
     * Uncompressed size of the file (all Streams).
     */
    private long uncompressedSize = 0;
    /**
     * Uncompressed size of the largest XZ Block in the file.
     */
    private long largestBlockSize = 0;
    /**
     * Number of XZ Blocks in the file.
     */
    private int blockCount = 0;
    /**
     * Size and position information about the current Block. If there are no
     * Blocks, all values will be <code>-1</code>.
     */
    private final BlockInfo curBlockInfo;
    /**
     * Temporary (and cached) information about the Block whose information is
     * queried via <code>getBlockPos</code> and related functions.
     */
    private final BlockInfo queriedBlockInfo;
    /**
     * Integrity Check in the current XZ Stream. The constructor leaves this to
     * point to the Check of the first Stream.
     */
    private Check check;
    /**
     * Decoder of the current XZ Block, if any.
     */
    private BlockInputStream blockDecoder = null;
    /**
     * Current uncompressed position.
     */
    private long curPos = 0;
    /**
     * Target position for seeking.
     */
    private long seekPos;
    /**
     * True when <code>seek(long)</code> has been called but the actual seeking
     * hasn't been done yet.
     */
    private boolean seekNeeded = false;
    /**
     * True when end of the file was reached. This can be cleared by calling
     * <code>seek(long)</code>.
     */
    private boolean endReached = false;
    /**
     * Pending exception from an earlier error.
     */
    private IOException exception = null;
    /**
     * Temporary buffer for read(). This avoids reallocating memory on every
     * read() call.
     */
    private final byte[] tempBuf = new byte[1];
    /**
     * Creates a new seekable XZ decompressor without a memory usage limit.
     *
     * @param in seekable input stream containing one or more XZ Streams; the
     * whole input stream is used
     *
     * @throws XZFormatException input is not in the XZ format
     *
     * @throws CorruptedInputException XZ data is corrupt or truncated
     *
     * @throws UnsupportedOptionsException XZ headers seem valid but they
     * specify options not supported by this implementation
     *
     * @throws EOFException less than 6 bytes of input was
     * available from
     * <code>in</code>, or (unlikely) the size of the underlying stream got
     * smaller while this was reading from it
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public SeekableXZInputStream(SeekableInputStream in)
            throws IOException {
        this(in, -1);
    }
    /**
     * Creates a new seekable XZ decompressor with an optional memory usage
     * limit.
     *
     * @param in seekable input stream containing one or more XZ
     * Streams; the
     * whole input stream is used
     *
     * @param memoryLimit memory usage limit in kibibytes (KiB) or
     * <code>-1</code> to impose no memory usage limit
     *
     * @throws XZFormatException input is not in the XZ format
     *
     * @throws CorruptedInputException XZ data is corrupt or truncated
     *
     * @throws UnsupportedOptionsException XZ headers seem valid but they
     * specify options not supported by this implementation
     *
     * @throws MemoryLimitException decoded XZ Indexes would need more
     * memory
     * than allowed by the memory usage limit
     *
     * @throws EOFException less than 6 bytes of input was
     * available from
     * <code>in</code>, or (unlikely) the size of the underlying stream got
     * smaller while this was reading from it
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public SeekableXZInputStream(SeekableInputStream in, int memoryLimit)
            throws IOException {
        this.in = in;
        DataInputStream inData = new DataInputStream(in);
        // Check the magic bytes in the beginning of the file.
        {
            in.seek(0);
            byte[] buf = new byte[XZ.HEADER_MAGIC.length];
            inData.readFully(buf);
            if (!Arrays.equals(buf, XZ.HEADER_MAGIC))
                throw new XZFormatException();
        }
        // Get the file size and verify that it is a multiple of 4 bytes.
        long pos = in.length();
        if ((pos & 3) != 0)
            throw new CorruptedInputException(
                    "XZ file size is not a multiple of 4 bytes");
        // Parse the headers starting from the end of the file.
        byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
        long streamPadding = 0;
        while (pos > 0) {
            if (pos < DecoderUtil.STREAM_HEADER_SIZE)
                throw new CorruptedInputException();
            // Read the potential Stream Footer.
            in.seek(pos - DecoderUtil.STREAM_HEADER_SIZE);
            inData.readFully(buf);
            // Skip Stream Padding four bytes at a time.
            // Skipping more at once would be faster,
            // but usually there isn't much Stream Padding.
            if (buf[8] == 0x00 && buf[9] == 0x00 && buf[10] == 0x00
                    && buf[11] == 0x00) {
                streamPadding += 4;
                pos -= 4;
                continue;
            }
            // It's not Stream Padding. Update pos.
            pos -= DecoderUtil.STREAM_HEADER_SIZE;
            // Decode the Stream Footer and check if Backward Size
            // looks reasonable.
            StreamFlags streamFooter = DecoderUtil.decodeStreamFooter(buf);
            if (streamFooter.backwardSize >= pos)
                throw new CorruptedInputException(
                        "Backward Size in XZ Stream Footer is too big");
            // Check that the Check ID is supported. Store it in case this
            // is the first Stream in the file.
            check = Check.getInstance(streamFooter.checkType);
            // Remember which Check IDs have been seen.
            checkTypes |= 1 << streamFooter.checkType;
            // Seek to the beginning of the Index.
            in.seek(pos - streamFooter.backwardSize);
            // Decode the Index field.
            IndexDecoder index;
            try {
                index = new IndexDecoder(in, streamFooter, streamPadding,
                        memoryLimit);
            } catch (MemoryLimitException e) {
                // IndexDecoder doesn't know how much memory we had
                // already needed so we need to recreate the exception.
                assert memoryLimit >= 0;
                throw new MemoryLimitException(
                        e.getMemoryNeeded() + indexMemoryUsage,
                        memoryLimit + indexMemoryUsage);
            }
            // Update the memory usage and limit counters.
            indexMemoryUsage += index.getMemoryUsage();
            if (memoryLimit >= 0) {
                memoryLimit -= index.getMemoryUsage();
                assert memoryLimit >= 0;
            }
            // Remember the uncompressed size of the largest Block.
            if (largestBlockSize < index.getLargestBlockSize())
                largestBlockSize = index.getLargestBlockSize();
            // Calculate the offset to the beginning of this XZ Stream and
            // check that it looks sane.
            long off = index.getStreamSize() - DecoderUtil.STREAM_HEADER_SIZE;
            if (pos < off)
                throw new CorruptedInputException("XZ Index indicates "
                        + "too big compressed size for the XZ Stream");
            // Seek to the beginning of this Stream.
            pos -= off;
            in.seek(pos);
            // Decode the Stream Header.
            inData.readFully(buf);
            StreamFlags streamHeader = DecoderUtil.decodeStreamHeader(buf);
            // Verify that the Stream Header matches the Stream Footer.
            if (!DecoderUtil.areStreamFlagsEqual(streamHeader, streamFooter))
                throw new CorruptedInputException(
                        "XZ Stream Footer does not match Stream Header");
            // Update the total uncompressed size of the file and check that
            // it doesn't overflow.
            uncompressedSize += index.getUncompressedSize();
            if (uncompressedSize < 0)
                throw new UnsupportedOptionsException("XZ file is too big");
            // Update the Block count and check that it fits into an int.
            blockCount += index.getRecordCount();
            if (blockCount < 0)
                throw new UnsupportedOptionsException(
                        "XZ file has over " + Integer.MAX_VALUE + " Blocks");
            // Add this Stream to the list of Streams.
            streams.add(index);
            // Reset to be ready to parse the next Stream.
            streamPadding = 0;
        }
        assert pos == 0;
        // Save it now that indexMemoryUsage has been subtracted from it.
        this.memoryLimit = memoryLimit;
        // Store the relative offsets of the Streams. This way we don't
        // need to recalculate them in this class when seeking; the
        // IndexDecoder instances will handle them.
        IndexDecoder prev = (IndexDecoder) streams.get(streams.size() - 1);
        for (int i = streams.size() - 2; i >= 0; --i) {
            IndexDecoder cur = (IndexDecoder) streams.get(i);
            cur.setOffsets(prev);
            prev = cur;
        }
        // Initialize curBlockInfo to point to the first Stream.
        // The blockNumber will be left to -1 so that .hasNext()
        // and .setNext() work to get the first Block when starting
        // to decompress from the beginning of the file.
        IndexDecoder first = (IndexDecoder) streams.get(streams.size() - 1);
        curBlockInfo = new BlockInfo(first);
        // queriedBlockInfo needs to be allocated too. The Stream used for
        // initialization doesn't matter though.
        queriedBlockInfo = new BlockInfo(first);
    }
    /**
     * Gets the types of integrity checks used in the .xz file. Multiple checks
     * are possible only if there are multiple concatenated XZ Streams.
     * <p>
     * The returned value has a bit set for every check type that is present.
     * For example, if CRC64 and SHA-256 were used, the return value is <code>(1&nbsp;&lt;&lt;&nbsp;XZ.CHECK_CRC64)
     * | (1&nbsp;&lt;&lt;&nbsp;XZ.CHECK_SHA256)</code>.
     */
    public int getCheckTypes() {
        return checkTypes;
    }
    /**
     * Gets the amount of memory in kibibytes (KiB) used by the data structures
     * needed to locate the XZ Blocks. This is usually useless information but
     * since it is calculated for memory usage limit anyway, it is nice to make
     * it available too.
     */
    public int getIndexMemoryUsage() {
        return indexMemoryUsage;
    }
    /**
     * Gets the uncompressed size of the largest XZ Block in bytes. This can be
     * useful if you want to check that the file doesn't have huge XZ Blocks
     * which could make seeking to arbitrary offsets very slow. Note that huge
     * Blocks don't automatically mean that seeking would be slow, for example,
     * seeking to the beginning of any Block is always fast.
     */
    public long getLargestBlockSize() {
        return largestBlockSize;
    }
    /**
     * Gets the number of Streams in the .xz file.
     *
     * @since 1.3
     */
    public int getStreamCount() {
        return streams.size();
    }
    /**
     * Gets the number of Blocks in the .xz file.
     *
     * @since 1.3
     */
    public int getBlockCount() {
        return blockCount;
    }
    /**
     * Gets the uncompressed start position of the given Block.
     *
     * @throws IndexOutOfBoundsException if
     * <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
     * <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>.
     *
     * @since 1.3
     */
    public long getBlockPos(int blockNumber) {
        locateBlockByNumber(queriedBlockInfo, blockNumber);
        return queriedBlockInfo.uncompressedOffset;
    }
    /**
     * Gets the uncompressed size of the given Block.
     *
     * @throws IndexOutOfBoundsException if
     * <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
     * <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>.
     *
     * @since 1.3
     */
    public long getBlockSize(int blockNumber) {
        locateBlockByNumber(queriedBlockInfo, blockNumber);
        return queriedBlockInfo.uncompressedSize;
    }
    /**
     * Gets the position where the given compressed Block starts in the
     * underlying .xz file. This information is rarely useful to the users of
     * this class.
     *
     * @throws IndexOutOfBoundsException if
     * <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
     * <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>.
     *
     * @since 1.3
     */
    public long getBlockCompPos(int blockNumber) {
        locateBlockByNumber(queriedBlockInfo, blockNumber);
        return queriedBlockInfo.compressedOffset;
    }
    /**
     * Gets the compressed size of the given Block. This together with the
     * uncompressed size can be used to calculate the compression ratio of the
     * specific Block.
     *
     * @throws IndexOutOfBoundsException if
     * <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
     * <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>.
     *
     * @since 1.3
     */
    public long getBlockCompSize(int blockNumber) {
        locateBlockByNumber(queriedBlockInfo, blockNumber);
        // Round the Unpadded Size up to the next multiple of four bytes
        // to include the Block Padding.
        return (queriedBlockInfo.unpaddedSize + 3) & ~3;
    }
    /**
     * Gets integrity check type (Check ID) of the given Block.
     *
     * @throws IndexOutOfBoundsException if
     * <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
     * <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>.
     *
     * @see #getCheckTypes()
     *
     * @since 1.3
     */
    public int getBlockCheckType(int blockNumber) {
        locateBlockByNumber(queriedBlockInfo, blockNumber);
        return queriedBlockInfo.getCheckType();
    }
    /**
     * Gets the number of the Block that contains the byte at the given
     * uncompressed position.
     *
     * @throws IndexOutOfBoundsException if <code>pos&nbsp;&lt;&nbsp;0</code> or
     * <code>pos&nbsp;&gt;=&nbsp;length()</code>.
     *
     * @since 1.3
     */
    public int getBlockNumber(long pos) {
        locateBlockByPos(queriedBlockInfo, pos);
        return queriedBlockInfo.blockNumber;
    }
    /**
     * Decompresses the next byte from this input stream.
     *
     * @return the next decompressed byte, or <code>-1</code> to indicate the
     * end of the compressed stream
     *
     * @throws CorruptedInputException
     * @throws UnsupportedOptionsException
     * @throws MemoryLimitException
     *
     * @throws XZIOException if the stream has been closed
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public int read() throws IOException {
        return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
    }
    /**
     * Decompresses into an array of bytes.
     * <p>
     * If <code>len</code> is zero, no bytes are read and <code>0</code> is
     * returned. Otherwise this will try to decompress <code>len</code> bytes of
     * uncompressed data. Less than <code>len</code> bytes may be read only in
     * the following situations:
     * <ul>
     * <li>The end of the compressed data was reached successfully.</li>
     * <li>An error is detected after at least one but less than
     * <code>len</code> bytes have already been successfully decompressed. The
     * next call with non-zero <code>len</code> will immediately throw the
     * pending exception.</li>
     * <li>An exception is thrown.</li>
     * </ul>
     *
     * @param buf target buffer for uncompressed data
     * @param off start offset in <code>buf</code>
     * @param len maximum number of uncompressed bytes to read
     *
     * @return number of bytes read, or <code>-1</code> to indicate the end of
     * the compressed stream
     *
     * @throws CorruptedInputException
     * @throws UnsupportedOptionsException
     * @throws MemoryLimitException
     *
     * @throws XZIOException if the stream has been closed
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public int read(byte[] buf, int off, int len) throws IOException {
        if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
            throw new IndexOutOfBoundsException();
        if (len == 0)
            return 0;
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        int size = 0;
        try {
            if (seekNeeded)
                seek();
            if (endReached)
                return -1;
            while (len > 0) {
                // No Block decoder yet: seek() locates the next Block
                // and creates a decoder for it (or sets endReached).
                if (blockDecoder == null) {
                    seek();
                    if (endReached)
                        break;
                }
                int ret = blockDecoder.read(buf, off, len);
                if (ret > 0) {
                    curPos += ret;
                    size += ret;
                    off += ret;
                    len -= ret;
                } else if (ret == -1)
                    blockDecoder = null;
            }
        } catch (IOException e) {
            // We know that the file isn't simply truncated because we could
            // parse the Indexes in the constructor. So convert EOFException
            // to CorruptedInputException.
            if (e instanceof EOFException)
                e = new CorruptedInputException();
            exception = e;
            if (size == 0)
                throw e;
        }
        return size;
    }
    /**
     * Returns the number of uncompressed bytes that can be read without
     * blocking. The value is returned with an assumption that the compressed
     * input data will be valid. If the compressed data is corrupt,
     * <code>CorruptedInputException</code> may get thrown before the number of
     * bytes claimed to be available have been read from this input stream.
     *
     * @return the number of uncompressed bytes that can be read without
     * blocking
     */
    public int available() throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        if (endReached || seekNeeded || blockDecoder == null)
            return 0;
        return blockDecoder.available();
    }
    /**
     * Closes the stream and calls <code>in.close()</code>. If the stream was
     * already closed, this does nothing.
     *
     * @throws IOException if thrown by <code>in.close()</code>
     */
    public void close() throws IOException {
        if (in != null)
            try {
                in.close();
            } finally {
                // Mark closed even if in.close() throws.
                in = null;
            }
    }
    /**
     * Gets the uncompressed size of this input stream. If there are multiple XZ
     * Streams, the total uncompressed size of all XZ Streams is returned.
     */
    public long length() {
        return uncompressedSize;
    }
    /**
     * Gets the current uncompressed position in this input stream.
     *
     * @throws XZIOException if the stream has been closed
     */
    public long position() throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        return seekNeeded ? seekPos : curPos;
    }
    /**
     * Seeks to the specified absolute uncompressed position in the stream. This
     * only stores the new position, so this function itself is always very
     * fast. The actual seek is done when <code>read</code> is called to read at
     * least one byte.
     * <p>
     * Seeking past the end of the stream is possible. In that case
     * <code>read</code> will return <code>-1</code> to indicate the end of the
     * stream.
     *
     * @param pos new uncompressed read position
     *
     * @throws XZIOException if <code>pos</code> is negative, or if stream has
     * been closed
     */
    public void seek(long pos) throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        if (pos < 0)
            throw new XZIOException("Negative seek position: " + pos);
        seekPos = pos;
        seekNeeded = true;
    }
    /**
     * Seeks to the beginning of the given XZ Block.
     *
     * @throws XZIOException if <code>blockNumber&nbsp;&lt;&nbsp;0</code> or
     * <code>blockNumber&nbsp;&gt;=&nbsp;getBlockCount()</code>, or if stream
     * has been closed
     *
     * @since 1.3
     */
    public void seekToBlock(int blockNumber) throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        if (blockNumber < 0 || blockNumber >= blockCount)
            throw new XZIOException("Invalid XZ Block number: " + blockNumber);
        // This is a bit silly implementation. Here we locate the uncompressed
        // offset of the specified Block, then when doing the actual seek in
        // seek(), we need to find the Block number based on seekPos.
        seekPos = getBlockPos(blockNumber);
        seekNeeded = true;
    }
    /**
     * Does the actual seeking. This is also called when <code>read</code> needs
     * a new Block to decode.
     */
    private void seek() throws IOException {
        // If seek(long) wasn't called, we simply need to get the next Block
        // from the same Stream. If there are no more Blocks in this Stream,
        // then we behave as if seek(long) had been called.
        if (!seekNeeded) {
            if (curBlockInfo.hasNext()) {
                curBlockInfo.setNext();
                initBlockDecoder();
                return;
            }
            seekPos = curPos;
        }
        seekNeeded = false;
        // Check if we are seeking to or past the end of the file.
        if (seekPos >= uncompressedSize) {
            curPos = seekPos;
            blockDecoder = null;
            endReached = true;
            return;
        }
        endReached = false;
        // Locate the Block that contains the uncompressed target position.
        locateBlockByPos(curBlockInfo, seekPos);
        // Seek in the underlying stream and create a new Block decoder
        // only if really needed. We can skip it if the current position
        // is already in the correct Block and the target position hasn't
        // been decompressed yet.
        //
        // NOTE: If curPos points to the beginning of this Block, it's
        // because it was left there after decompressing an earlier Block.
        // In that case, decoding of the current Block hasn't been started
        // yet. (Decoding of a Block won't be started until at least one
        // byte will also be read from it.)
        if (!(curPos > curBlockInfo.uncompressedOffset && curPos <= seekPos)) {
            // Seek to the beginning of the Block.
            in.seek(curBlockInfo.compressedOffset);
            // Since it is possible that this Block is from a different
            // Stream than the previous Block, initialize a new Check.
            check = Check.getInstance(curBlockInfo.getCheckType());
            // Create a new Block decoder.
            initBlockDecoder();
            curPos = curBlockInfo.uncompressedOffset;
        }
        // If the target wasn't at a Block boundary, decompress and throw
        // away data to reach the target position.
        if (seekPos > curPos) {
            // NOTE: The "if" below is there just in case. In this situation,
            // blockDecoder.skip will always skip the requested amount
            // or throw an exception.
            long skipAmount = seekPos - curPos;
            if (blockDecoder.skip(skipAmount) != skipAmount)
                throw new CorruptedInputException();
            curPos = seekPos;
        }
    }
    /**
     * Locates the Block that contains the given uncompressed position.
     */
    private void locateBlockByPos(BlockInfo info, long pos) {
        if (pos < 0 || pos >= uncompressedSize)
            throw new IndexOutOfBoundsException(
                    "Invalid uncompressed position: " + pos);
        // Locate the Stream that contains the target position.
        // (Linear search; see the implementation note in the class javadoc.)
        IndexDecoder index;
        for (int i = 0;; ++i) {
            index = (IndexDecoder) streams.get(i);
            if (index.hasUncompressedOffset(pos))
                break;
        }
        // Locate the Block from the Stream that contains the target position.
        index.locateBlock(info, pos);
        assert (info.compressedOffset & 3) == 0;
        assert info.uncompressedSize > 0;
        assert pos >= info.uncompressedOffset;
        assert pos < info.uncompressedOffset + info.uncompressedSize;
    }
    /**
     * Locates the given Block and stores information about it to
     * <code>info</code>.
     */
    private void locateBlockByNumber(BlockInfo info, int blockNumber) {
        // Validate.
        if (blockNumber < 0 || blockNumber >= blockCount)
            throw new IndexOutOfBoundsException(
                    "Invalid XZ Block number: " + blockNumber);
        // Skip the search if info already points to the correct Block.
        if (info.blockNumber == blockNumber)
            return;
        // Search the Stream that contains the given Block and then
        // search the Block from that Stream.
        for (int i = 0;; ++i) {
            IndexDecoder index = (IndexDecoder) streams.get(i);
            if (index.hasRecord(blockNumber)) {
                index.setBlockInfo(info, blockNumber);
                return;
            }
        }
    }
    /**
     * Initializes a new BlockInputStream. This is a helper function for
     * <code>seek()</code>.
     */
    private void initBlockDecoder() throws IOException {
        try {
            // Set it to null first so that GC can collect it if memory
            // runs tight when initializing a new BlockInputStream.
            blockDecoder = null;
            blockDecoder = new BlockInputStream(in, check, memoryLimit,
                    curBlockInfo.unpaddedSize, curBlockInfo.uncompressedSize);
        } catch (MemoryLimitException e) {
            // BlockInputStream doesn't know how much memory we had
            // already needed so we need to recreate the exception.
            assert memoryLimit >= 0;
            throw new MemoryLimitException(
                    e.getMemoryNeeded() + indexMemoryUsage,
                    memoryLimit + indexMemoryUsage);
        } catch (IndexIndicatorException e) {
            // It cannot be Index so the file must be corrupt.
            throw new CorruptedInputException();
        }
    }
}

View File

@@ -1,137 +0,0 @@
/*
* SimpleInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import java.io.IOException;
import org.tukaani.xz.simple.SimpleFilter;
/**
 * Filters the data read from another input stream with a
 * {@link org.tukaani.xz.simple.SimpleFilter SimpleFilter}, for example,
 * one of the BCJ filters.
 */
class SimpleInputStream extends InputStream {
    // Size of the buffer shared by filtered and still-unfiltered data.
    private static final int FILTER_BUF_SIZE = 4096;
    // The underlying stream; null once this stream has been closed.
    private InputStream in;
    // Filter applied to the data read from in.
    private final SimpleFilter simpleFilter;
    // Buffer layout: [pos, pos + filtered) holds filtered data that is
    // ready to be returned; [pos + filtered, pos + filtered + unfiltered)
    // holds data read from in but not filtered yet.
    private final byte[] filterBuf = new byte[FILTER_BUF_SIZE];
    // Start offset of the filtered data in filterBuf.
    private int pos = 0;
    // Number of filtered bytes available at filterBuf[pos].
    private int filtered = 0;
    // Number of not-yet-filtered bytes after the filtered bytes.
    private int unfiltered = 0;
    // True once in.read returned -1; the remaining unfiltered bytes are
    // then handed out without further filtering.
    private boolean endReached = false;
    // Pending exception from an earlier error; rethrown by later calls.
    private IOException exception = null;
    // Single-byte buffer so that read() doesn't allocate on every call.
    private final byte[] tempBuf = new byte[1];
    /**
     * Gets the memory usage of this stream in kibibytes (KiB).
     */
    static int getMemoryUsage() {
        return 1 + FILTER_BUF_SIZE / 1024;
    }
    SimpleInputStream(InputStream in, SimpleFilter simpleFilter) {
        // Check for null because otherwise null isn't detected
        // in this constructor.
        if (in == null)
            throw new NullPointerException();
        // The simpleFilter argument comes from this package
        // so it is known to be non-null already.
        assert simpleFilter != null;
        this.in = in;
        this.simpleFilter = simpleFilter;
    }
    /**
     * Reads the next filtered byte, or returns <code>-1</code> at end of
     * stream. Implemented via the array variant to keep the filtering
     * logic in one place.
     */
    public int read() throws IOException {
        return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
    }
    /**
     * Reads up to <code>len</code> filtered bytes into
     * <code>buf[off..off+len)</code>.
     *
     * @return number of bytes read, or <code>-1</code> at end of stream
     *
     * @throws XZIOException if the stream has been closed
     * @throws IOException may be thrown by <code>in</code>; the exception
     *                     is also saved and rethrown by later calls
     */
    public int read(byte[] buf, int off, int len) throws IOException {
        if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
            throw new IndexOutOfBoundsException();
        if (len == 0)
            return 0;
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        try {
            int size = 0;
            while (true) {
                // Copy filtered data into the caller-provided buffer.
                int copySize = Math.min(filtered, len);
                System.arraycopy(filterBuf, pos, buf, off, copySize);
                pos += copySize;
                filtered -= copySize;
                off += copySize;
                len -= copySize;
                size += copySize;
                // If end of filterBuf was reached, move the pending data to
                // the beginning of the buffer so that more data can be
                // copied into filterBuf on the next loop iteration.
                if (pos + filtered + unfiltered == FILTER_BUF_SIZE) {
                    System.arraycopy(filterBuf, pos, filterBuf, 0,
                            filtered + unfiltered);
                    pos = 0;
                }
                if (len == 0 || endReached)
                    return size > 0 ? size : -1;
                assert filtered == 0;
                // Get more data into the temporary buffer.
                int inSize = FILTER_BUF_SIZE - (pos + filtered + unfiltered);
                inSize = in.read(filterBuf, pos + filtered + unfiltered,
                        inSize);
                if (inSize == -1) {
                    // Mark the remaining unfiltered bytes to be ready
                    // to be copied out.
                    endReached = true;
                    filtered = unfiltered;
                    unfiltered = 0;
                } else {
                    // Filter the data in filterBuf.
                    unfiltered += inSize;
                    filtered = simpleFilter.code(filterBuf, pos, unfiltered);
                    assert filtered <= unfiltered;
                    unfiltered -= filtered;
                }
            }
        } catch (IOException e) {
            // Save the exception so it is rethrown by later calls too.
            exception = e;
            throw e;
        }
    }
    /**
     * Returns the number of already-filtered bytes that can be read
     * without reading from the underlying stream.
     */
    public int available() throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        return filtered;
    }
    /**
     * Closes this stream and the underlying stream. Does nothing if this
     * stream was already closed.
     */
    public void close() throws IOException {
        if (in != null)
            try {
                in.close();
            } finally {
                // Mark closed even if in.close() throws.
                in = null;
            }
    }
}

View File

@@ -1,151 +0,0 @@
/*
* SimpleOutputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.IOException;
import org.tukaani.xz.simple.SimpleFilter;
// Applies a BCJ "simple filter" to data on its way to the wrapped
// FinishableOutputStream. The filter may be unable to process the last
// few buffered bytes; those are written out unmodified by writePending().
class SimpleOutputStream extends FinishableOutputStream {
    private static final int FILTER_BUF_SIZE = 4096;

    // Downstream target; set to null once close() has been called.
    private FinishableOutputStream out;

    // The branch/call/jump filter applied to the data.
    private final SimpleFilter simpleFilter;

    // Work buffer. [pos, pos + unfiltered) holds data that the filter
    // has not yet accepted; bytes before pos have already been written.
    private final byte[] filterBuf = new byte[FILTER_BUF_SIZE];
    private int pos = 0;
    private int unfiltered = 0;

    // First write/finish failure; rethrown by later calls.
    private IOException exception = null;
    private boolean finished = false;

    // Scratch buffer for the single-byte write(int).
    private final byte[] tempBuf = new byte[1];

    // Memory usage of this stream in KiB (rounded up), excluding 'out'.
    static int getMemoryUsage() {
        return 1 + FILTER_BUF_SIZE / 1024;
    }

    SimpleOutputStream(FinishableOutputStream out,
                       SimpleFilter simpleFilter) {
        if (out == null)
            throw new NullPointerException();
        this.out = out;
        this.simpleFilter = simpleFilter;
    }

    public void write(int b) throws IOException {
        // Route the single byte through the array-based write().
        tempBuf[0] = (byte) b;
        write(tempBuf, 0, 1);
    }

    public void write(byte[] buf, int off, int len) throws IOException {
        // The "off + len < 0" term guards against int overflow of off + len.
        if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
            throw new IndexOutOfBoundsException();
        if (exception != null)
            throw exception;
        if (finished)
            throw new XZIOException("Stream finished or closed");
        while (len > 0) {
            // Copy more unfiltered data into filterBuf.
            int copySize = Math.min(len, FILTER_BUF_SIZE - (pos + unfiltered));
            System.arraycopy(buf, off, filterBuf, pos + unfiltered, copySize);
            off += copySize;
            len -= copySize;
            unfiltered += copySize;
            // Filter the data in filterBuf. The filter reports how many
            // bytes it consumed; the rest stay pending for the next round.
            int filtered = simpleFilter.code(filterBuf, pos, unfiltered);
            assert filtered <= unfiltered;
            unfiltered -= filtered;
            // Write out the filtered data.
            try {
                out.write(filterBuf, pos, filtered);
            } catch (IOException e) {
                // Remember the failure so later calls fail fast.
                exception = e;
                throw e;
            }
            pos += filtered;
            // If end of filterBuf was reached, move the pending unfiltered
            // data to the beginning of the buffer so that more data can
            // be copied into filterBuf on the next loop iteration.
            if (pos + unfiltered == FILTER_BUF_SIZE) {
                System.arraycopy(filterBuf, pos, filterBuf, 0, unfiltered);
                pos = 0;
            }
        }
    }

    // Writes the remaining buffered bytes out unmodified (the filter did
    // not accept them) and marks the stream finished.
    private void writePending() throws IOException {
        assert !finished;
        if (exception != null)
            throw exception;
        try {
            out.write(filterBuf, pos, unfiltered);
        } catch (IOException e) {
            exception = e;
            throw e;
        }
        finished = true;
    }

    public void flush() throws IOException {
        // Deliberately unsupported; callers must not rely on mid-stream
        // flushing through a BCJ filter.
        throw new UnsupportedOptionsException("Flushing is not supported");
    }

    public void finish() throws IOException {
        if (!finished) {
            // If it fails, don't call out.finish().
            writePending();
            try {
                out.finish();
            } catch (IOException e) {
                exception = e;
                throw e;
            }
        }
    }

    public void close() throws IOException {
        if (out != null) {
            if (!finished)
                // out.close() must be called even if writePending() fails.
                // writePending() saves the possible exception so we can
                // ignore exceptions here.
                try {
                    writePending();
                } catch (IOException e) {
                }
            try {
                out.close();
            } catch (IOException e) {
                // If there is an earlier exception, the exception
                // from out.close() is lost.
                if (exception == null)
                    exception = e;
            }
            out = null;
        }
        if (exception != null)
            throw exception;
    }
}

View File

@@ -1,306 +0,0 @@
/*
* SingleXZInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.EOFException;
import org.tukaani.xz.common.DecoderUtil;
import org.tukaani.xz.common.StreamFlags;
import org.tukaani.xz.index.IndexHash;
import org.tukaani.xz.check.Check;
/**
* Decompresses exactly one XZ Stream in streamed mode (no seeking). The
* decompression stops after the first XZ Stream has been decompressed, and the
* read position in the input stream is left at the first byte after the end of
* the XZ Stream. This can be useful when XZ data has been stored inside some
* other file format or protocol.
* <p>
* Unless you know what you are doing, don't use this class to decompress
* standalone .xz files. For that purpose, use <code>XZInputStream</code>.
*
* <h4>When uncompressed size is known beforehand</h4>
* <p>
* If you are decompressing complete XZ streams and your application knows
* exactly how much uncompressed data there should be, it is good to try reading
* one more byte by calling <code>read()</code> and checking that it returns
* <code>-1</code>. This way the decompressor will parse the file footers and
* verify the integrity checks, giving the caller more confidence that the
* uncompressed data is valid.
*
* @see XZInputStream
*/
public class SingleXZInputStream extends InputStream {
    // Wrapped source; set to null once close() has been called.
    private InputStream in;

    // Memory usage limit in KiB, or -1 for no limit.
    private int memoryLimit;

    // Stream Flags parsed from the 12-byte XZ Stream Header.
    private StreamFlags streamHeaderFlags;

    // Integrity check implementation selected by the Stream Header.
    private Check check;

    // Decoder of the current Block, or null between Blocks.
    private BlockInputStream blockDecoder = null;

    // Accumulates Block sizes so they can be verified against the Index
    // at the end of the Stream.
    private final IndexHash indexHash = new IndexHash();

    private boolean endReached = false;

    // First decoding/IO failure; rethrown by later read()/available().
    private IOException exception = null;

    // Scratch buffer for the single-byte read().
    private final byte[] tempBuf = new byte[1];

    /**
     * Creates a new XZ decompressor that decompresses exactly one XZ Stream
     * from <code>in</code> without a memory usage limit.
     * <p>
     * This constructor reads and parses the XZ Stream Header (12 bytes) from
     * <code>in</code>. The header of the first Block is not read until
     * <code>read</code> is called.
     *
     * @param in input stream from which XZ-compressed data is read
     *
     * @throws XZFormatException input is not in the XZ format
     *
     * @throws CorruptedInputException XZ header CRC32 doesn't match
     *
     * @throws UnsupportedOptionsException XZ header is valid but specifies
     * options not supported by this implementation
     *
     * @throws EOFException less than 12 bytes of input was
     * available from <code>in</code>
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public SingleXZInputStream(InputStream in) throws IOException {
        initialize(in, -1);
    }

    /**
     * Creates a new XZ decompressor that decompresses exactly one XZ Stream
     * from <code>in</code> with an optional memory usage limit.
     * <p>
     * This is identical to <code>SingleXZInputStream(InputStream)</code> except
     * that this takes also the <code>memoryLimit</code> argument.
     *
     * @param in input stream from which XZ-compressed data is read
     *
     * @param memoryLimit memory usage limit in kibibytes (KiB) or
     * <code>-1</code> to impose no memory usage limit
     *
     * @throws XZFormatException input is not in the XZ format
     *
     * @throws CorruptedInputException XZ header CRC32 doesn't match
     *
     * @throws UnsupportedOptionsException XZ header is valid but specifies
     * options not supported by this implementation
     *
     * @throws EOFException less than 12 bytes of input was
     * available from <code>in</code>
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public SingleXZInputStream(InputStream in, int memoryLimit)
            throws IOException {
        initialize(in, memoryLimit);
    }

    // Package-private variant for callers (e.g. XZInputStream) that have
    // already consumed the 12 Stream Header bytes from 'in'.
    SingleXZInputStream(InputStream in, int memoryLimit,
                        byte[] streamHeader) throws IOException {
        initialize(in, memoryLimit, streamHeader);
    }

    private void initialize(InputStream in, int memoryLimit)
            throws IOException {
        // Read the fixed-size Stream Header before any Block data.
        byte[] streamHeader = new byte[DecoderUtil.STREAM_HEADER_SIZE];
        new DataInputStream(in).readFully(streamHeader);
        initialize(in, memoryLimit, streamHeader);
    }

    private void initialize(InputStream in, int memoryLimit,
                            byte[] streamHeader) throws IOException {
        this.in = in;
        this.memoryLimit = memoryLimit;
        // Parsing validates the header magic and its CRC32.
        streamHeaderFlags = DecoderUtil.decodeStreamHeader(streamHeader);
        check = Check.getInstance(streamHeaderFlags.checkType);
    }

    /**
     * Gets the ID of the integrity check used in this XZ Stream.
     *
     * @return the Check ID specified in the XZ Stream Header
     */
    public int getCheckType() {
        return streamHeaderFlags.checkType;
    }

    /**
     * Gets the name of the integrity check used in this XZ Stream.
     *
     * @return the name of the check specified in the XZ Stream Header
     */
    public String getCheckName() {
        return check.getName();
    }

    /**
     * Decompresses the next byte from this input stream.
     * <p>
     * Reading lots of data with <code>read()</code> from this input stream may
     * be inefficient. Wrap it in {@link java.io.BufferedInputStream} if you
     * need to read lots of data one byte at a time.
     *
     * @return the next decompressed byte, or <code>-1</code> to indicate the
     * end of the compressed stream
     *
     * @throws CorruptedInputException
     * @throws UnsupportedOptionsException
     * @throws MemoryLimitException
     *
     * @throws XZIOException if the stream has been closed
     *
     * @throws EOFException compressed input is truncated or corrupt
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public int read() throws IOException {
        return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
    }

    /**
     * Decompresses into an array of bytes.
     * <p>
     * If <code>len</code> is zero, no bytes are read and <code>0</code> is
     * returned. Otherwise this will try to decompress <code>len</code> bytes of
     * uncompressed data. Less than <code>len</code> bytes may be read only in
     * the following situations:
     * <ul>
     * <li>The end of the compressed data was reached successfully.</li>
     * <li>An error is detected after at least one but less than
     * <code>len</code> bytes have already been successfully decompressed.
     * The next call with non-zero <code>len</code> will immediately throw
     * the pending exception.</li>
     * <li>An exception is thrown.</li>
     * </ul>
     *
     * @param buf target buffer for uncompressed data
     * @param off start offset in <code>buf</code>
     * @param len maximum number of uncompressed bytes to read
     *
     * @return number of bytes read, or <code>-1</code> to indicate the end of
     * the compressed stream
     *
     * @throws CorruptedInputException
     * @throws UnsupportedOptionsException
     * @throws MemoryLimitException
     *
     * @throws XZIOException if the stream has been closed
     *
     * @throws EOFException compressed input is truncated or corrupt
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public int read(byte[] buf, int off, int len) throws IOException {
        // The "off + len < 0" term guards against int overflow of off + len.
        if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
            throw new IndexOutOfBoundsException();
        if (len == 0)
            return 0;
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        if (endReached)
            return -1;
        int size = 0;
        try {
            while (len > 0) {
                if (blockDecoder == null)
                    try {
                        // Start the next Block. An Index Indicator in place
                        // of a Block Header means the Blocks have ended.
                        blockDecoder = new BlockInputStream(
                                in, check, memoryLimit, -1, -1);
                    } catch (IndexIndicatorException e) {
                        // Verify the Index and the Stream Footer, then stop.
                        indexHash.validate(in);
                        validateStreamFooter();
                        endReached = true;
                        return size > 0 ? size : -1;
                    }
                int ret = blockDecoder.read(buf, off, len);
                if (ret > 0) {
                    size += ret;
                    off += ret;
                    len -= ret;
                } else if (ret == -1) {
                    // Block finished; record its sizes for Index checking.
                    indexHash.add(blockDecoder.getUnpaddedSize(),
                                  blockDecoder.getUncompressedSize());
                    blockDecoder = null;
                }
            }
        } catch (IOException e) {
            // Save the error; throw now only if nothing was read yet,
            // otherwise return the partial count and rethrow next call.
            exception = e;
            if (size == 0)
                throw e;
        }
        return size;
    }

    // Reads the 12-byte Stream Footer and checks that it agrees with the
    // Stream Header and with the Index size accumulated in indexHash.
    private void validateStreamFooter() throws IOException {
        byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
        new DataInputStream(in).readFully(buf);
        StreamFlags streamFooterFlags = DecoderUtil.decodeStreamFooter(buf);
        if (!DecoderUtil.areStreamFlagsEqual(streamHeaderFlags,
                                             streamFooterFlags)
                || indexHash.getIndexSize() != streamFooterFlags.backwardSize)
            throw new CorruptedInputException(
                    "XZ Stream Footer does not match Stream Header");
    }

    /**
     * Returns the number of uncompressed bytes that can be read without
     * blocking. The value is returned with an assumption that the compressed
     * input data will be valid. If the compressed data is corrupt,
     * <code>CorruptedInputException</code> may get thrown before the number of
     * bytes claimed to be available have been read from this input stream.
     *
     * @return the number of uncompressed bytes that can be read without
     * blocking
     */
    public int available() throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        return blockDecoder == null ? 0 : blockDecoder.available();
    }

    /**
     * Closes the stream and calls <code>in.close()</code>. If the stream was
     * already closed, this does nothing.
     *
     * @throws IOException if thrown by <code>in.close()</code>
     */
    public void close() throws IOException {
        if (in != null)
            try {
                in.close();
            } finally {
                in = null;
            }
    }
}

View File

@@ -1,35 +0,0 @@
/*
* UnsupportedOptionsException
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
* Thrown when compression options not supported by this implementation are
* detected. Some other implementation might support those options.
*/
/**
 * Signals that the input or the requested compression settings use options
 * that this implementation does not support, even though some other XZ
 * implementation might support them.
 */
public class UnsupportedOptionsException extends XZIOException {
    private static final long serialVersionUID = 3L;

    /**
     * Constructs an instance with <code>null</code> as its error detail
     * message.
     */
    public UnsupportedOptionsException() {
    }

    /**
     * Constructs an instance with the given error detail message.
     *
     * @param s error detail message
     */
    public UnsupportedOptionsException(String s) {
        super(s);
    }
}

View File

@@ -1,36 +0,0 @@
/*
* X86Options
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import org.tukaani.xz.simple.X86;
/**
* BCJ filter for x86 (32-bit and 64-bit) instructions.
*/
public class X86Options extends BCJOptions {
private static final int ALIGNMENT = 1;
public X86Options() {
super(ALIGNMENT);
}
public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
return new SimpleOutputStream(out, new X86(true, startOffset));
}
public InputStream getInputStream(InputStream in) {
return new SimpleInputStream(in, new X86(false, startOffset));
}
FilterEncoder getFilterEncoder() {
return new BCJEncoder(this, BCJCoder.X86_FILTER_ID);
}
}

View File

@@ -1,54 +0,0 @@
/*
* XZ
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
* XZ constants.
*/
/**
 * Constants of the XZ container format.
 */
public class XZ {
    /**
     * Magic bytes that begin every XZ file. Useful for detecting
     * XZ-compressed data.
     */
    public static final byte[] HEADER_MAGIC = {
            (byte) 0xFD, '7', 'z', 'X', 'Z', '\0' };

    /**
     * Magic bytes that end every XZ Stream.
     */
    public static final byte[] FOOTER_MAGIC = { 'Y', 'Z' };

    /**
     * Integrity check ID meaning that no integrity check is calculated.
     * <p>
     * Omitting the check is strongly discouraged except when the integrity
     * of the data will be verified by other means anyway, in which case
     * calculating the check twice would be useless.
     */
    public static final int CHECK_NONE = 0;

    /** Integrity check ID for CRC32. */
    public static final int CHECK_CRC32 = 1;

    /** Integrity check ID for CRC64. */
    public static final int CHECK_CRC64 = 4;

    /** Integrity check ID for SHA-256. */
    public static final int CHECK_SHA256 = 10;

    /** Not instantiable: this class only holds constants. */
    private XZ() {
    }
}

View File

@@ -1,24 +0,0 @@
/*
* XZFormatException
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
* Thrown when the input data is not in the XZ format.
*/
/**
 * Signals that the input data does not begin like an XZ file, that is,
 * it is not in the XZ format at all.
 */
public class XZFormatException extends XZIOException {
    private static final long serialVersionUID = 3L;

    /**
     * Constructs the exception with its standard error detail message.
     */
    public XZFormatException() {
        // The condition has only one meaning, so the message is fixed.
        super("Input is not in the XZ format");
    }
}

View File

@@ -1,26 +0,0 @@
/*
* XZIOException
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
/**
* Generic {@link java.io.IOException IOException} specific to this package. The
* other IOExceptions in this package extend from <code>XZIOException</code>.
*/
/**
 * Generic {@link java.io.IOException IOException} specific to this package.
 * The other IOExceptions in this package extend from
 * <code>XZIOException</code>.
 */
public class XZIOException extends java.io.IOException {
    private static final long serialVersionUID = 3L;

    /**
     * Creates a new exception with <code>null</code> as its error detail
     * message.
     */
    public XZIOException() {
        super();
    }

    /**
     * Creates a new exception with the given error detail message.
     *
     * @param s error detail message
     */
    public XZIOException(String s) {
        super(s);
    }

    /**
     * Creates a new exception with the given error detail message and
     * cause. Added so that wrapping code can preserve the original
     * exception instead of dropping it.
     *
     * @param s     error detail message
     * @param cause the exception that caused this one
     */
    public XZIOException(String s, Throwable cause) {
        super(s);
        initCause(cause);
    }
}

View File

@@ -1,302 +0,0 @@
/*
* XZInputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.InputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.EOFException;
import org.tukaani.xz.common.DecoderUtil;
/**
* Decompresses a .xz file in streamed mode (no seeking).
* <p>
* Use this to decompress regular standalone .xz files. This reads from its
* input stream until the end of the input or until an error occurs. This
* supports decompressing concatenated .xz files.
*
* <h4>Typical use cases</h4>
* <p>
* Getting an input stream to decompress a .xz file:
* <p>
* <blockquote><pre>
* InputStream infile = new FileInputStream("foo.xz");
* XZInputStream inxz = new XZInputStream(infile);
* </pre></blockquote>
* <p>
* It's important to keep in mind that decompressor memory usage depends on the
* settings used to compress the file. The worst-case memory usage of
* XZInputStream is currently 1.5&nbsp;GiB. Still, very few files will require
* more than about 65&nbsp;MiB because that's how much decompressing a file
* created with the highest preset level will need, and only a few people use
* settings other than the predefined presets.
* <p>
* It is possible to specify a memory usage limit for
* <code>XZInputStream</code>. If decompression requires more memory than the
* specified limit, MemoryLimitException will be thrown when reading from the
* stream. For example, the following sets the memory usage limit to
* 100&nbsp;MiB:
* <p>
* <blockquote><pre>
* InputStream infile = new FileInputStream("foo.xz");
* XZInputStream inxz = new XZInputStream(infile, 100 * 1024);
* </pre></blockquote>
*
* <h4>When uncompressed size is known beforehand</h4>
* <p>
* If you are decompressing complete files and your application knows exactly
* how much uncompressed data there should be, it is good to try reading one
* more byte by calling <code>read()</code> and checking that it returns
* <code>-1</code>. This way the decompressor will parse the file footers and
* verify the integrity checks, giving the caller more confidence that the
* uncompressed data is valid. (This advice seems to apply to
* {@link java.util.zip.GZIPInputStream java.util.zip.GZIPInputStream} too.)
*
* @see SingleXZInputStream
*/
public class XZInputStream extends InputStream {
    // Memory usage limit in KiB (-1 = no limit); reused for every
    // concatenated Stream in the input.
    private final int memoryLimit;

    // Wrapped source; set to null once close() has been called.
    private InputStream in;

    // Decoder of the current XZ Stream, or null between Streams.
    private SingleXZInputStream xzIn;

    private boolean endReached = false;

    // First decoding/IO failure; rethrown by later read()/available().
    private IOException exception = null;

    // Scratch buffer for the single-byte read().
    private final byte[] tempBuf = new byte[1];

    /**
     * Creates a new XZ decompressor without a memory usage limit.
     * <p>
     * This constructor reads and parses the XZ Stream Header (12 bytes) from
     * <code>in</code>. The header of the first Block is not read until
     * <code>read</code> is called.
     *
     * @param in input stream from which XZ-compressed data is read
     *
     * @throws XZFormatException input is not in the XZ format
     *
     * @throws CorruptedInputException XZ header CRC32 doesn't match
     *
     * @throws UnsupportedOptionsException XZ header is valid but specifies
     * options not supported by this implementation
     *
     * @throws EOFException less than 12 bytes of input was
     * available from <code>in</code>
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public XZInputStream(InputStream in) throws IOException {
        this(in, -1);
    }

    /**
     * Creates a new XZ decompressor with an optional memory usage limit.
     * <p>
     * This is identical to <code>XZInputStream(InputStream)</code> except that
     * this takes also the <code>memoryLimit</code> argument.
     *
     * @param in input stream from which XZ-compressed data is read
     *
     * @param memoryLimit memory usage limit in kibibytes (KiB) or
     * <code>-1</code> to impose no memory usage limit
     *
     * @throws XZFormatException input is not in the XZ format
     *
     * @throws CorruptedInputException XZ header CRC32 doesn't match
     *
     * @throws UnsupportedOptionsException XZ header is valid but specifies
     * options not supported by this implementation
     *
     * @throws EOFException less than 12 bytes of input was
     * available from <code>in</code>
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public XZInputStream(InputStream in, int memoryLimit) throws IOException {
        this.in = in;
        this.memoryLimit = memoryLimit;
        // Parses the first Stream Header immediately; later Streams are
        // opened lazily by prepareNextStream().
        this.xzIn = new SingleXZInputStream(in, memoryLimit);
    }

    /**
     * Decompresses the next byte from this input stream.
     * <p>
     * Reading lots of data with <code>read()</code> from this input stream may
     * be inefficient. Wrap it in {@link java.io.BufferedInputStream} if you
     * need to read lots of data one byte at a time.
     *
     * @return the next decompressed byte, or <code>-1</code> to indicate the
     * end of the compressed stream
     *
     * @throws CorruptedInputException
     * @throws UnsupportedOptionsException
     * @throws MemoryLimitException
     *
     * @throws XZIOException if the stream has been closed
     *
     * @throws EOFException compressed input is truncated or corrupt
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public int read() throws IOException {
        return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
    }

    /**
     * Decompresses into an array of bytes.
     * <p>
     * If <code>len</code> is zero, no bytes are read and <code>0</code> is
     * returned. Otherwise this will try to decompress <code>len</code> bytes of
     * uncompressed data. Less than <code>len</code> bytes may be read only in
     * the following situations:
     * <ul>
     * <li>The end of the compressed data was reached successfully.</li>
     * <li>An error is detected after at least one but less than
     * <code>len</code> bytes have already been successfully decompressed.
     * The next call with non-zero <code>len</code> will immediately throw
     * the pending exception.</li>
     * <li>An exception is thrown.</li>
     * </ul>
     *
     * @param buf target buffer for uncompressed data
     * @param off start offset in <code>buf</code>
     * @param len maximum number of uncompressed bytes to read
     *
     * @return number of bytes read, or <code>-1</code> to indicate the end of
     * the compressed stream
     *
     * @throws CorruptedInputException
     * @throws UnsupportedOptionsException
     * @throws MemoryLimitException
     *
     * @throws XZIOException if the stream has been closed
     *
     * @throws EOFException compressed input is truncated or corrupt
     *
     * @throws IOException may be thrown by <code>in</code>
     */
    public int read(byte[] buf, int off, int len) throws IOException {
        // The "off + len < 0" term guards against int overflow of off + len.
        if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
            throw new IndexOutOfBoundsException();
        if (len == 0)
            return 0;
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        if (endReached)
            return -1;
        int size = 0;
        try {
            while (len > 0) {
                if (xzIn == null) {
                    // Previous Stream ended; look for a concatenated one.
                    prepareNextStream();
                    if (endReached)
                        return size == 0 ? -1 : size;
                }
                int ret = xzIn.read(buf, off, len);
                if (ret > 0) {
                    size += ret;
                    off += ret;
                    len -= ret;
                } else if (ret == -1)
                    xzIn = null;
            }
        } catch (IOException e) {
            // Save the error; throw now only if nothing was read yet,
            // otherwise return the partial count and rethrow next call.
            exception = e;
            if (size == 0)
                throw e;
        }
        return size;
    }

    // Skips Stream Padding and, if more data follows, starts decoding the
    // next concatenated XZ Stream; otherwise sets endReached.
    private void prepareNextStream() throws IOException {
        DataInputStream inData = new DataInputStream(in);
        byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
        // The size of Stream Padding must be a multiple of four bytes,
        // all bytes zero.
        do {
            // First try to read one byte to see if we have reached the end
            // of the file.
            int ret = inData.read(buf, 0, 1);
            if (ret == -1) {
                endReached = true;
                return;
            }
            // Since we got one byte of input, there must be at least
            // three more available in a valid file.
            inData.readFully(buf, 1, 3);
        } while (buf[0] == 0 && buf[1] == 0 && buf[2] == 0 && buf[3] == 0);
        // Not all bytes are zero. In a valid Stream it indicates the
        // beginning of the next Stream. Read the rest of the Stream Header
        // and initialize the XZ decoder.
        inData.readFully(buf, 4, DecoderUtil.STREAM_HEADER_SIZE - 4);
        try {
            xzIn = new SingleXZInputStream(in, memoryLimit, buf);
        } catch (XZFormatException e) {
            // Since this isn't the first .xz Stream, it is more
            // logical to tell that the data is corrupt.
            throw new CorruptedInputException(
                    "Garbage after a valid XZ Stream");
        }
    }

    /**
     * Returns the number of uncompressed bytes that can be read without
     * blocking. The value is returned with an assumption that the compressed
     * input data will be valid. If the compressed data is corrupt,
     * <code>CorruptedInputException</code> may get thrown before the number of
     * bytes claimed to be available have been read from this input stream.
     *
     * @return the number of uncompressed bytes that can be read without
     * blocking
     */
    public int available() throws IOException {
        if (in == null)
            throw new XZIOException("Stream closed");
        if (exception != null)
            throw exception;
        return xzIn == null ? 0 : xzIn.available();
    }

    /**
     * Closes the stream and calls <code>in.close()</code>. If the stream was
     * already closed, this does nothing.
     *
     * @throws IOException if thrown by <code>in.close()</code>
     */
    public void close() throws IOException {
        if (in != null)
            try {
                in.close();
            } finally {
                in = null;
            }
    }
}

View File

@@ -1,464 +0,0 @@
/*
* XZOutputStream
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz;
import java.io.OutputStream;
import java.io.IOException;
import org.tukaani.xz.common.EncoderUtil;
import org.tukaani.xz.common.StreamFlags;
import org.tukaani.xz.check.Check;
import org.tukaani.xz.index.IndexEncoder;
/**
* Compresses into the .xz file format.
*
* <h4>Examples</h4>
* <p>
* Getting an output stream to compress with LZMA2 using the default settings
* and the default integrity check type (CRC64):
* <p>
* <blockquote><pre>
* FileOutputStream outfile = new FileOutputStream("foo.xz");
* XZOutputStream outxz = new XZOutputStream(outfile, new LZMA2Options());
* </pre></blockquote>
* <p>
* Using the preset level <code>8</code> for LZMA2 (the default is
* <code>6</code>) and SHA-256 instead of CRC64 for integrity checking:
* <p>
* <blockquote><pre>
* XZOutputStream outxz = new XZOutputStream(outfile, new LZMA2Options(8),
* XZ.CHECK_SHA256);
* </pre></blockquote>
* <p>
* Using the x86 BCJ filter together with LZMA2 to compress x86 executables and
* printing the memory usage information before creating the XZOutputStream:
* <p>
* <blockquote><pre>
* X86Options x86 = new X86Options();
* LZMA2Options lzma2 = new LZMA2Options();
* FilterOptions[] options = { x86, lzma2 };
* System.out.println("Encoder memory usage: "
* + FilterOptions.getEncoderMemoryUsage(options)
* + " KiB");
* System.out.println("Decoder memory usage: "
* + FilterOptions.getDecoderMemoryUsage(options)
* + " KiB");
* XZOutputStream outxz = new XZOutputStream(outfile, options);
* </pre></blockquote>
*/
public class XZOutputStream extends FinishableOutputStream {
private OutputStream out;
private final StreamFlags streamFlags = new StreamFlags();
private final Check check;
private final IndexEncoder index = new IndexEncoder();
private BlockOutputStream blockEncoder = null;
private FilterEncoder[] filters;
/**
* True if the current filter chain supports flushing. If it doesn't support
* flushing, <code>flush()</code> will use <code>endBlock()</code> as a
* fallback.
*/
private boolean filtersSupportFlushing;
private IOException exception = null;
private boolean finished = false;
private final byte[] tempBuf = new byte[1];
/**
* Creates a new XZ compressor using one filter and CRC64 as the integrity
* check. This constructor is equivalent to passing a single-member
* FilterOptions array to
* <code>XZOutputStream(OutputStream, FilterOptions[])</code>.
*
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions filter options to use
*
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions filterOptions)
        throws IOException {
    // Single-filter convenience constructor; CRC64 is the default check.
    this(out, filterOptions, XZ.CHECK_CRC64);
}
/**
* Creates a new XZ compressor using one filter and the specified integrity
* check type. This constructor is equivalent to passing a single-member
* FilterOptions array to
* <code>XZOutputStream(OutputStream, FilterOptions[], int)</code>.
*
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions filter options to use
*
* @param checkType type of the integrity check, for example
* XZ.CHECK_CRC32
*
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions filterOptions,
                      int checkType) throws IOException {
    // Wrap the single filter into a one-element chain.
    this(out, new FilterOptions[] { filterOptions }, checkType);
}
/**
* Creates a new XZ compressor using 1-4 filters and CRC64 as the integrity
* check. This constructor is equivalent
* <code>XZOutputStream(out, filterOptions, XZ.CHECK_CRC64)</code>.
*
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions array of filter options to use
*
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions[] filterOptions)
        throws IOException {
    // Multi-filter chain with the default CRC64 integrity check.
    this(out, filterOptions, XZ.CHECK_CRC64);
}
/**
* Creates a new XZ compressor using 1-4 filters and the specified integrity
* check type.
*
* @param out output stream to which the compressed data will be
* written
*
* @param filterOptions array of filter options to use
*
* @param checkType type of the integrity check, for example
* XZ.CHECK_CRC32
*
* @throws UnsupportedOptionsException invalid filter chain
*
* @throws IOException may be thrown from <code>out</code>
*/
public XZOutputStream(OutputStream out, FilterOptions[] filterOptions,
                      int checkType) throws IOException {
    this.out = out;
    // Validate and install the filter chain before writing anything.
    updateFilters(filterOptions);
    streamFlags.checkType = checkType;
    check = Check.getInstance(checkType);
    // The XZ Stream Header is written out immediately.
    encodeStreamHeader();
}
/**
* Updates the filter chain with a single filter. This is equivalent to
* passing a single-member FilterOptions array to
* <code>updateFilters(FilterOptions[])</code>.
*
* @param filterOptions new filter to use
*
* @throws UnsupportedOptionsException unsupported filter chain, or trying
* to change the filter chain in the middle of a Block
*/
/**
 * Updates the filter chain with a single filter. Equivalent to passing a
 * one-element FilterOptions array to
 * <code>updateFilters(FilterOptions[])</code>.
 *
 * @param filterOptions new filter to use
 *
 * @throws UnsupportedOptionsException unsupported filter chain, or trying
 *                                     to change the filter chain in the
 *                                     middle of a Block
 */
public void updateFilters(FilterOptions filterOptions)
        throws XZIOException {
    // Delegate to the array variant with a one-element chain.
    updateFilters(new FilterOptions[] { filterOptions });
}
/**
* Updates the filter chain with 1-4 filters.
* <p>
* Currently this cannot be used to update e.g. LZMA2 options in the middle
* of a XZ Block. Use <code>endBlock()</code> to finish the current XZ Block
* before calling this function. The new filter chain will then be used for
* the next XZ Block.
*
* @param filterOptions new filter chain to use
*
* @throws UnsupportedOptionsException unsupported filter chain, or trying
* to change the filter chain in the middle of a Block
*/
public void updateFilters(FilterOptions[] filterOptions)
        throws XZIOException {
    // Changing filters mid-Block is unsupported; finish the Block first.
    if (blockEncoder != null)
        throw new UnsupportedOptionsException("Changing filter options "
                + "in the middle of a XZ Block not implemented");
    // A chain must contain between one and four filters.
    if (filterOptions.length < 1 || filterOptions.length > 4)
        throw new UnsupportedOptionsException(
                "XZ filter chain must be 1-4 filters");
    // The chain supports flushing only if every filter in it does.
    filtersSupportFlushing = true;
    FilterEncoder[] newFilters = new FilterEncoder[filterOptions.length];
    for (int i = 0; i < filterOptions.length; ++i) {
        newFilters[i] = filterOptions[i].getFilterEncoder();
        filtersSupportFlushing &= newFilters[i].supportsFlushing();
    }
    // Reject invalid chains before replacing the current one, so a
    // failed update leaves the previous chain intact.
    RawCoder.validate(newFilters);
    filters = newFilters;
}
/**
* Writes one byte to be compressed.
*
* @throws XZIOException XZ Stream has grown too big
*
* @throws XZIOException <code>finish()</code> or <code>close()</code> was
* already called
*
* @throws IOException may be thrown by the underlying output stream
*/
public void write(int b) throws IOException {
    // Route the single byte through the array-based write().
    tempBuf[0] = (byte) b;
    write(tempBuf, 0, 1);
}
/**
* Writes an array of bytes to be compressed. The compressors tend to do
* internal buffering and thus the written data won't be readable from the
* compressed output immediately. Use <code>flush()</code> to force
* everything written so far to be written to the underlaying output stream,
* but be aware that flushing reduces compression ratio.
*
* @param buf buffer of bytes to be written
* @param off start offset in <code>buf</code>
* @param len number of bytes to write
*
* @throws XZIOException XZ Stream has grown too big: total file size about
* 8&nbsp;EiB or the Index field exceeds 16&nbsp;GiB; you shouldn't reach
* these sizes in practice
*
* @throws XZIOException <code>finish()</code> or <code>close()</code> was
* already called and len &gt; 0
*
* @throws IOException may be thrown by the underlying output stream
*/
public void write(byte[] buf, int off, int len) throws IOException {
if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
throw new IndexOutOfBoundsException();
if (exception != null)
throw exception;
if (finished)
throw new XZIOException("Stream finished or closed");
try {
if (blockEncoder == null)
blockEncoder = new BlockOutputStream(out, filters, check);
blockEncoder.write(buf, off, len);
} catch (IOException e) {
exception = e;
throw e;
}
}
/**
* Finishes the current XZ Block (but not the whole XZ Stream). This doesn't
* flush the stream so it's possible that not all data will be
* decompressible from the output stream when this function returns. Call
* also <code>flush()</code> if flushing is wanted in addition to finishing
* the current XZ Block.
* <p>
* If there is no unfinished Block open, this function will do nothing. (No
* empty XZ Block will be created.)
* <p>
* This function can be useful, for example, to create random-accessible .xz
* files.
* <p>
* Starting a new XZ Block means that the encoder state is reset. Doing this
* very often will increase the size of the compressed file a lot (more than
* plain <code>flush()</code> would do).
*
* @throws XZIOException XZ Stream has grown too big
*
* @throws XZIOException stream finished or closed
*
* @throws IOException may be thrown by the underlying output stream
*/
public void endBlock() throws IOException {
if (exception != null)
throw exception;
if (finished)
throw new XZIOException("Stream finished or closed");
// NOTE: Once there is threading with multiple Blocks, it's possible
// that this function will be more like a barrier that returns
// before the last Block has been finished.
if (blockEncoder != null)
try {
blockEncoder.finish();
index.add(blockEncoder.getUnpaddedSize(),
blockEncoder.getUncompressedSize());
blockEncoder = null;
} catch (IOException e) {
exception = e;
throw e;
}
}
/**
* Flushes the encoder and calls <code>out.flush()</code>. All buffered
* pending data will then be decompressible from the output stream.
* <p>
* Calling this function very often may increase the compressed file size a
* lot. The filter chain options may affect the size increase too. For
* example, with LZMA2 the HC4 match finder has smaller penalty with
* flushing than BT4.
* <p>
* Some filters don't support flushing. If the filter chain has such a
* filter, <code>flush()</code> will call <code>endBlock()</code> before
* flushing.
*
* @throws XZIOException XZ Stream has grown too big
*
* @throws XZIOException stream finished or closed
*
* @throws IOException may be thrown by the underlying output stream
*/
public void flush() throws IOException {
if (exception != null)
throw exception;
if (finished)
throw new XZIOException("Stream finished or closed");
try {
if (blockEncoder != null)
if (filtersSupportFlushing)
// This will eventually call out.flush() so
// no need to do it here again.
blockEncoder.flush();
else {
endBlock();
out.flush();
}
else
out.flush();
} catch (IOException e) {
exception = e;
throw e;
}
}
/**
* Finishes compression without closing the underlying stream. No more data
* can be written to this stream after finishing (calling <code>write</code>
* with an empty buffer is OK).
* <p>
* Repeated calls to <code>finish()</code> do nothing unless an exception
* was thrown by this stream earlier. In that case the same exception is
* thrown again.
* <p>
* After finishing, the stream may be closed normally with
* <code>close()</code>. If the stream will be closed anyway, there usually
* is no need to call <code>finish()</code> separately.
*
* @throws XZIOException XZ Stream has grown too big
*
* @throws IOException may be thrown by the underlying output stream
*/
public void finish() throws IOException {
if (!finished) {
// This checks for pending exceptions so we don't need to
// worry about it here.
endBlock();
try {
index.encode(out);
encodeStreamFooter();
} catch (IOException e) {
exception = e;
throw e;
}
// Set it to true only if everything goes fine. Setting it earlier
// would cause repeated calls to finish() do nothing instead of
// throwing an exception to indicate an earlier error.
finished = true;
}
}
/**
* Finishes compression and closes the underlying stream. The underlying
* stream <code>out</code> is closed even if finishing fails. If both
* finishing and closing fail, the exception thrown by <code>finish()</code>
* is thrown and the exception from the failed <code>out.close()</code> is
* lost.
*
* @throws XZIOException XZ Stream has grown too big
*
* @throws IOException may be thrown by the underlying output stream
*/
public void close() throws IOException {
if (out != null) {
// If finish() throws an exception, it stores the exception to
// the variable "exception". So we can ignore the possible
// exception here.
try {
finish();
} catch (IOException e) {
}
try {
out.close();
} catch (IOException e) {
// Remember the exception but only if there is no previous
// pending exception.
if (exception == null)
exception = e;
}
out = null;
}
if (exception != null)
throw exception;
}
    // Encodes the two-byte Stream Flags field into buf at off: the first
    // byte is always 0x00 and the second holds the Check ID.
    private void encodeStreamFlags(byte[] buf, int off) {
        buf[off] = 0x00;
        buf[off + 1] = (byte) streamFlags.checkType;
    }
private void encodeStreamHeader() throws IOException {
out.write(XZ.HEADER_MAGIC);
byte[] buf = new byte[2];
encodeStreamFlags(buf, 0);
out.write(buf);
EncoderUtil.writeCRC32(out, buf);
}
private void encodeStreamFooter() throws IOException {
byte[] buf = new byte[6];
long backwardSize = index.getIndexSize() / 4 - 1;
for (int i = 0; i < 4; ++i)
buf[i] = (byte) (backwardSize >>> (i * 8));
encodeStreamFlags(buf, 4);
EncoderUtil.writeCRC32(out, buf);
out.write(buf);
out.write(XZ.FOOTER_MAGIC);
}
}

View File

@@ -1,33 +0,0 @@
/*
* CRC32
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.check;
public class CRC32 extends Check {
    // Delegate the actual computation to the JRE's built-in CRC32.
    private final java.util.zip.CRC32 state = new java.util.zip.CRC32();

    public CRC32() {
        size = 4;
        name = "CRC32";
    }

    public void update(byte[] buf, int off, int len) {
        state.update(buf, off, len);
    }

    public byte[] finish() {
        long value = state.getValue();
        state.reset();

        // Store the 32-bit value as four little endian bytes.
        byte[] result = new byte[4];
        for (int i = 0; i < result.length; ++i)
            result[i] = (byte) (value >>> (8 * i));

        return result;
    }
}

View File

@@ -1,53 +0,0 @@
/*
* CRC64
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.check;
public class CRC64 extends Check {
    // CRC64-XZ: bit-reflected polynomial, computed one byte at a time
    // with a precomputed 256-entry lookup table.
    private static final long POLY = 0xC96C5795D7870F42L;
    private static final long[] TABLE = new long[256];

    static {
        for (int b = 0; b < TABLE.length; ++b) {
            long r = b;
            for (int bit = 0; bit < 8; ++bit)
                r = (r & 1) == 1 ? (r >>> 1) ^ POLY : r >>> 1;

            TABLE[b] = r;
        }
    }

    // Running CRC register; starts and is reset to all ones (-1).
    private long crc = -1;

    public CRC64() {
        size = 8;
        name = "CRC64";
    }

    public void update(byte[] buf, int off, int len) {
        for (int i = off; i < off + len; ++i)
            crc = TABLE[(buf[i] ^ (int) crc) & 0xFF] ^ (crc >>> 8);
    }

    public byte[] finish() {
        // Final XOR, then reinitialize for the next use.
        long value = ~crc;
        crc = -1;

        // Store the 64-bit value as eight little endian bytes.
        byte[] result = new byte[8];
        for (int i = 0; i < result.length; ++i)
            result[i] = (byte) (value >>> (i * 8));

        return result;
    }
}

View File

@@ -1,59 +0,0 @@
/*
* Check
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.check;
import org.tukaani.xz.XZ;
import org.tukaani.xz.UnsupportedOptionsException;
public abstract class Check {
    // Set by the subclass constructors.
    int size;
    String name;

    public abstract void update(byte[] buf, int off, int len);

    public abstract byte[] finish();

    /** Convenience overload that hashes the whole array. */
    public void update(byte[] buf) {
        update(buf, 0, buf.length);
    }

    /** Returns the size of the check value in bytes. */
    public int getSize() {
        return size;
    }

    /** Returns the human-readable name of this check. */
    public String getName() {
        return name;
    }

    /**
     * Creates a Check instance for the given Check ID.
     *
     * @throws UnsupportedOptionsException if the ID is unknown or the
     *         required algorithm isn't available in this JRE
     */
    public static Check getInstance(int checkType)
            throws UnsupportedOptionsException {
        if (checkType == XZ.CHECK_NONE)
            return new None();

        if (checkType == XZ.CHECK_CRC32)
            return new CRC32();

        if (checkType == XZ.CHECK_CRC64)
            return new CRC64();

        if (checkType == XZ.CHECK_SHA256) {
            try {
                return new SHA256();
            } catch (java.security.NoSuchAlgorithmException e) {
                // A missing SHA-256 provider is reported the same way
                // as an unknown Check ID.
            }
        }

        throw new UnsupportedOptionsException(
                "Unsupported Check ID " + checkType);
    }
}

View File

@@ -1,25 +0,0 @@
/*
* None
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.check;
public class None extends Check {
    public None() {
        size = 0;
        name = "None";
    }

    public void update(byte[] buf, int off, int len) {
        // Nothing to do: the "None" check ignores all input.
    }

    public byte[] finish() {
        // The check value of "None" is zero bytes long.
        return new byte[0];
    }
}

View File

@@ -1,30 +0,0 @@
/*
* SHA256
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.check;
public class SHA256 extends Check {
    // Delegate hashing to the JRE's MessageDigest implementation.
    private final java.security.MessageDigest sha256;

    public SHA256() throws java.security.NoSuchAlgorithmException {
        size = 32;
        name = "SHA-256";
        sha256 = java.security.MessageDigest.getInstance("SHA-256");
    }

    public void update(byte[] buf, int off, int len) {
        sha256.update(buf, off, len);
    }

    public byte[] finish() {
        byte[] digest = sha256.digest();
        // digest() already resets the MessageDigest; the explicit
        // reset() mirrors the behavior of the other Check classes.
        sha256.reset();
        return digest;
    }
}

View File

@@ -1,120 +0,0 @@
/*
* DecoderUtil
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.common;
import java.io.InputStream;
import java.io.IOException;
import java.io.EOFException;
import java.util.zip.CRC32;
import org.tukaani.xz.XZ;
import org.tukaani.xz.XZFormatException;
import org.tukaani.xz.CorruptedInputException;
import org.tukaani.xz.UnsupportedOptionsException;
public class DecoderUtil extends Util {
    /**
     * Returns true if the little endian CRC32 stored at
     * <code>buf[ref_off]</code> matches the CRC32 calculated over
     * <code>len</code> bytes starting at <code>buf[off]</code>.
     */
    public static boolean isCRC32Valid(byte[] buf, int off, int len,
                                       int ref_off) {
        CRC32 crc32 = new CRC32();
        crc32.update(buf, off, len);
        long value = crc32.getValue();

        for (int i = 0; i < 4; ++i)
            if ((byte) (value >>> (i * 8)) != buf[ref_off + i])
                return false;

        return true;
    }

    /**
     * Decodes and validates an XZ Stream Header stored in <code>buf</code>.
     *
     * @throws XZFormatException if the magic bytes don't match
     * @throws CorruptedInputException if the header CRC32 doesn't match
     * @throws UnsupportedOptionsException if the Stream Flags are invalid
     */
    public static StreamFlags decodeStreamHeader(byte[] buf)
            throws IOException {
        for (int i = 0; i < XZ.HEADER_MAGIC.length; ++i)
            if (buf[i] != XZ.HEADER_MAGIC[i])
                throw new XZFormatException();

        // The CRC32 covers the two Stream Flags bytes and is stored
        // right after them.
        if (!isCRC32Valid(buf, XZ.HEADER_MAGIC.length, 2,
                          XZ.HEADER_MAGIC.length + 2))
            throw new CorruptedInputException("XZ Stream Header is corrupt");

        try {
            return decodeStreamFlags(buf, XZ.HEADER_MAGIC.length);
        } catch (UnsupportedOptionsException e) {
            // Rethrow with a message that tells the location of the error.
            throw new UnsupportedOptionsException(
                    "Unsupported options in XZ Stream Header");
        }
    }

    /**
     * Decodes and validates an XZ Stream Footer stored in <code>buf</code>,
     * including the Backward Size field.
     *
     * @throws CorruptedInputException if the magic bytes or the CRC32
     *                                 don't match
     * @throws UnsupportedOptionsException if the Stream Flags are invalid
     */
    public static StreamFlags decodeStreamFooter(byte[] buf)
            throws IOException {
        if (buf[10] != XZ.FOOTER_MAGIC[0] || buf[11] != XZ.FOOTER_MAGIC[1])
            // NOTE: The exception could be XZFormatException too.
            // It depends on the situation which one is better.
            throw new CorruptedInputException("XZ Stream Footer is corrupt");

        if (!isCRC32Valid(buf, 4, 6, 0))
            throw new CorruptedInputException("XZ Stream Footer is corrupt");

        StreamFlags streamFlags;
        try {
            streamFlags = decodeStreamFlags(buf, 8);
        } catch (UnsupportedOptionsException e) {
            throw new UnsupportedOptionsException(
                    "Unsupported options in XZ Stream Footer");
        }

        // Backward Size is stored as (realSize / 4) - 1 in four little
        // endian bytes. The cast to long before shifting is required:
        // without it, (buf[7] & 0xFF) << 24 is evaluated as an int and
        // sign-extends when the top bit is set, corrupting stored values
        // of 0x80000000 and above (the full 32-bit field range is legal
        // because Backward Size may be up to 2^34 bytes).
        streamFlags.backwardSize = 0;
        for (int i = 0; i < 4; ++i)
            streamFlags.backwardSize |= (long) (buf[i + 4] & 0xFF) << (i * 8);

        streamFlags.backwardSize = (streamFlags.backwardSize + 1) * 4;

        return streamFlags;
    }

    // Decodes the two-byte Stream Flags field that is common to the
    // Stream Header and the Stream Footer.
    private static StreamFlags decodeStreamFlags(byte[] buf, int off)
            throws UnsupportedOptionsException {
        if (buf[off] != 0x00 || (buf[off + 1] & 0xFF) >= 0x10)
            throw new UnsupportedOptionsException();

        StreamFlags streamFlags = new StreamFlags();
        streamFlags.checkType = buf[off + 1];

        return streamFlags;
    }

    /**
     * Returns true if the given Stream Flags are equal.
     * backwardSize is intentionally not compared.
     */
    public static boolean areStreamFlagsEqual(StreamFlags a, StreamFlags b) {
        return a.checkType == b.checkType;
    }

    /**
     * Decodes a variable-length integer (1-9 bytes; seven payload bits per
     * byte, least significant group first, high bit marks continuation).
     *
     * @throws EOFException if the stream ends in the middle of the VLI
     * @throws CorruptedInputException if the encoding is too long or has
     *         a redundant trailing zero byte
     */
    public static long decodeVLI(InputStream in) throws IOException {
        int b = in.read();
        if (b == -1)
            throw new EOFException();

        long num = b & 0x7F;
        int i = 0;

        while ((b & 0x80) != 0x00) {
            if (++i >= VLI_SIZE_MAX)
                throw new CorruptedInputException();

            b = in.read();
            if (b == -1)
                throw new EOFException();

            if (b == 0x00)
                throw new CorruptedInputException();

            num |= (long) (b & 0x7F) << (i * 7);
        }

        return num;
    }
}

View File

@@ -1,36 +0,0 @@
/*
* EncoderUtil
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.common;
import java.io.OutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
public class EncoderUtil extends Util {
    /**
     * Calculates the CRC32 of <code>buf</code> and writes it to
     * <code>out</code> as four little endian bytes.
     */
    public static void writeCRC32(OutputStream out, byte[] buf)
            throws IOException {
        CRC32 crc32 = new CRC32();
        crc32.update(buf);
        long value = crc32.getValue();

        for (int shift = 0; shift < 32; shift += 8)
            out.write((byte) (value >>> shift));
    }

    /**
     * Encodes <code>num</code> as a variable-length integer: seven
     * payload bits per byte, least significant group first, with the
     * high bit marking a continuation byte.
     */
    public static void encodeVLI(OutputStream out, long num)
            throws IOException {
        while (num >= 0x80) {
            out.write((byte) (num | 0x80));
            num >>>= 7;
        }

        out.write((byte) num);
    }
}

View File

@@ -1,15 +0,0 @@
/*
* StreamFlags
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.common;
public class StreamFlags {
    // Integrity Check type (one of the XZ.CHECK_* IDs);
    // -1 when not set yet.
    public int checkType = -1;

    // Real size of the Index field in bytes, as decoded from the
    // Stream Footer; -1 when not known.
    public long backwardSize = -1;
}

View File

@@ -1,28 +0,0 @@
/*
* Util
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.common;
public class Util {
    // Fixed sizes and limits from the .xz file format.
    public static final int STREAM_HEADER_SIZE = 12;
    public static final long BACKWARD_SIZE_MAX = 1L << 34;
    public static final int BLOCK_HEADER_SIZE_MAX = 1024;
    public static final long VLI_MAX = Long.MAX_VALUE;
    public static final int VLI_SIZE_MAX = 9;

    /**
     * Returns the number of bytes needed to store <code>num</code> as a
     * variable-length integer (seven payload bits per encoded byte).
     */
    public static int getVLISize(long num) {
        int size = 1;
        while ((num >>= 7) != 0)
            ++size;

        return size;
    }
}

View File

@@ -1,38 +0,0 @@
/*
* BlockInfo
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.index;
import org.tukaani.xz.common.StreamFlags;
public class BlockInfo {
    // Block number within the whole .xz file; -1 until the first
    // setNext()/setBlockInfo() call fills this object in.
    public int blockNumber = -1;

    // Offsets (relative to the beginning of the .xz file) and sizes of
    // the current Block; -1 until filled in by IndexDecoder.setBlockInfo().
    public long compressedOffset = -1;
    public long uncompressedOffset = -1;
    public long unpaddedSize = -1;
    public long uncompressedSize = -1;

    // Index of the Stream that contains the current Block.
    IndexDecoder index;

    public BlockInfo(IndexDecoder indexOfFirstStream) {
        index = indexOfFirstStream;
    }

    // Returns the Check ID of the Stream that contains this Block.
    public int getCheckType() {
        return index.getStreamFlags().checkType;
    }

    // True if the Index currently referenced has a Record for the
    // Block after this one.
    public boolean hasNext() {
        return index.hasRecord(blockNumber + 1);
    }

    // Advances this object to describe the next Block.
    public void setNext() {
        index.setBlockInfo(this, blockNumber + 1);
    }
}

View File

@@ -1,56 +0,0 @@
/*
* IndexBase
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.index;
import org.tukaani.xz.common.Util;
import org.tukaani.xz.XZIOException;
abstract class IndexBase {
    // Exception thrown when the accumulated sizes become invalid;
    // the subclass chooses the concrete type (corrupt input vs.
    // output grown too big).
    private final XZIOException invalidIndexException;

    // Sum of the Blocks' sizes in bytes, each rounded up to a
    // multiple of four (Block Padding).
    long blocksSum = 0;

    // Sum of the Uncompressed Size fields of all Records.
    long uncompressedSum = 0;

    // Size of the List of Records in bytes.
    long indexListSize = 0;

    // Number of Records added so far.
    long recordCount = 0;

    IndexBase(XZIOException invalidIndexException) {
        this.invalidIndexException = invalidIndexException;
    }

    private long getUnpaddedIndexSize() {
        // Index Indicator + Number of Records + List of Records + CRC32
        return 1 + Util.getVLISize(recordCount) + indexListSize + 4;
    }

    // Size of the whole Index field, rounded up to a multiple of
    // four bytes (Index Padding included).
    public long getIndexSize() {
        return (getUnpaddedIndexSize() + 3) & ~3;
    }

    // Total size of the Stream: Header + Blocks + Index + Footer.
    public long getStreamSize() {
        return Util.STREAM_HEADER_SIZE + blocksSum + getIndexSize()
               + Util.STREAM_HEADER_SIZE;
    }

    int getIndexPaddingSize() {
        return (int) ((4 - getUnpaddedIndexSize()) & 3);
    }

    // Accounts for one Block. Throws invalidIndexException if the sums
    // overflow or any size limit of the .xz format would be exceeded.
    void add(long unpaddedSize, long uncompressedSize) throws XZIOException {
        blocksSum += (unpaddedSize + 3) & ~3;
        uncompressedSum += uncompressedSize;
        indexListSize += Util.getVLISize(unpaddedSize)
                         + Util.getVLISize(uncompressedSize);
        ++recordCount;

        if (blocksSum < 0 || uncompressedSum < 0
                || getIndexSize() > Util.BACKWARD_SIZE_MAX
                || getStreamSize() < 0)
            throw invalidIndexException;
    }
}

View File

@@ -1,223 +0,0 @@
/*
* IndexDecoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.index;
import java.io.IOException;
import java.io.EOFException;
import java.util.zip.CheckedInputStream;
import org.tukaani.xz.common.DecoderUtil;
import org.tukaani.xz.common.StreamFlags;
import org.tukaani.xz.SeekableInputStream;
import org.tukaani.xz.CorruptedInputException;
import org.tukaani.xz.MemoryLimitException;
import org.tukaani.xz.UnsupportedOptionsException;
public class IndexDecoder extends IndexBase {
    private final StreamFlags streamFlags;
    private final long streamPadding;
    private final int memoryUsage;

    // Unpadded Size and Uncompressed Size fields, stored as cumulative
    // sums so that binary search can be used in locateBlock().
    private final long[] unpadded;
    private final long[] uncompressed;

    // Uncompressed size of the largest Block. It is used by
    // SeekableXZInputStream to find out the largest Block of the .xz file.
    private long largestBlockSize = 0;

    // Offsets relative to the beginning of the .xz file. These are all zero
    // for the first Stream in the file.
    private int recordOffset = 0;
    private long compressedOffset = 0;
    private long uncompressedOffset = 0;

    /**
     * Decodes the Index field from <code>in</code>, which must be
     * positioned at the beginning of the Index. The CRC32 of the Index
     * is verified while reading.
     *
     * @throws CorruptedInputException if the Index is invalid
     * @throws MemoryLimitException if decoding would need more than
     *         <code>memoryLimit</code> KiB (limit disabled if negative)
     */
    public IndexDecoder(SeekableInputStream in, StreamFlags streamFooterFlags,
                        long streamPadding, int memoryLimit)
            throws IOException {
        super(new CorruptedInputException("XZ Index is corrupt"));
        this.streamFlags = streamFooterFlags;
        this.streamPadding = streamPadding;

        // If endPos is exceeded before the CRC32 field has been decoded,
        // the Index is corrupt.
        long endPos = in.position() + streamFooterFlags.backwardSize - 4;

        java.util.zip.CRC32 crc32 = new java.util.zip.CRC32();
        CheckedInputStream inChecked = new CheckedInputStream(in, crc32);

        // Index Indicator
        if (inChecked.read() != 0x00)
            throw new CorruptedInputException("XZ Index is corrupt");

        try {
            // Number of Records
            long count = DecoderUtil.decodeVLI(inChecked);

            // Catch Record counts that obviously too high to be valid.
            // This test isn't exact because it ignores Index Indicator,
            // Number of Records, and CRC32 fields, but this is good enough
            // to catch the most obvious problems.
            if (count >= streamFooterFlags.backwardSize / 2)
                throw new CorruptedInputException("XZ Index is corrupt");

            // If the Record count doesn't fit into an int, we cannot
            // allocate the arrays to hold the Records.
            if (count > Integer.MAX_VALUE)
                throw new UnsupportedOptionsException("XZ Index has over "
                        + Integer.MAX_VALUE + " Records");

            // Calculate approximate memory requirements and check the
            // memory usage limit.
            memoryUsage = 1 + (int) ((16L * count + 1023) / 1024);
            if (memoryLimit >= 0 && memoryUsage > memoryLimit)
                throw new MemoryLimitException(memoryUsage, memoryLimit);

            // Allocate the arrays for the Records.
            unpadded = new long[(int) count];
            uncompressed = new long[(int) count];
            int record = 0;

            // Decode the Records.
            for (int i = (int) count; i > 0; --i) {
                // Get the next Record.
                long unpaddedSize = DecoderUtil.decodeVLI(inChecked);
                long uncompressedSize = DecoderUtil.decodeVLI(inChecked);

                // Check that the input position stays sane. Since this is
                // checked only once per loop iteration instead of for
                // every input byte read, it's still possible that
                // EOFException gets thrown with corrupt input.
                if (in.position() > endPos)
                    throw new CorruptedInputException("XZ Index is corrupt");

                // Add the new Record as cumulative sums.
                unpadded[record] = blocksSum + unpaddedSize;
                uncompressed[record] = uncompressedSum + uncompressedSize;
                ++record;
                super.add(unpaddedSize, uncompressedSize);
                assert record == recordCount;

                // Remember the uncompressed size of the largest Block.
                if (largestBlockSize < uncompressedSize)
                    largestBlockSize = uncompressedSize;
            }
        } catch (EOFException e) {
            // EOFException is caught just in case a corrupt input causes
            // DecoderUtil.decodeVLI to read too much at once.
            throw new CorruptedInputException("XZ Index is corrupt");
        }

        // Validate that the size of the Index field matches
        // Backward Size.
        int indexPaddingSize = getIndexPaddingSize();
        if (in.position() + indexPaddingSize != endPos)
            throw new CorruptedInputException("XZ Index is corrupt");

        // Index Padding
        while (indexPaddingSize-- > 0)
            if (inChecked.read() != 0x00)
                throw new CorruptedInputException("XZ Index is corrupt");

        // CRC32 (read directly from "in" because the CRC32 field itself
        // is not covered by the checksum; stored little endian)
        long value = crc32.getValue();
        for (int i = 0; i < 4; ++i)
            if (((value >>> (i * 8)) & 0xFF) != in.read())
                throw new CorruptedInputException("XZ Index is corrupt");
    }

    // Sets the file-absolute offsets of this Stream based on the Stream
    // that precedes it in a concatenated .xz file.
    public void setOffsets(IndexDecoder prev) {
        // NOTE: SeekableXZInputStream checks that the total number of Blocks
        // in concatenated Streams fits into an int.
        recordOffset = prev.recordOffset + (int) prev.recordCount;
        compressedOffset = prev.compressedOffset
                           + prev.getStreamSize() + prev.streamPadding;
        assert (compressedOffset & 3) == 0;
        uncompressedOffset = prev.uncompressedOffset + prev.uncompressedSum;
    }

    // Approximate memory usage of this decoder in KiB.
    public int getMemoryUsage() {
        return memoryUsage;
    }

    public StreamFlags getStreamFlags() {
        return streamFlags;
    }

    public int getRecordCount() {
        // It was already checked in the constructor that it fits into an int.
        // Otherwise we couldn't have allocated the arrays.
        return (int) recordCount;
    }

    public long getUncompressedSize() {
        return uncompressedSum;
    }

    public long getLargestBlockSize() {
        return largestBlockSize;
    }

    // True if the given uncompressed file offset falls inside this Stream.
    public boolean hasUncompressedOffset(long pos) {
        return pos >= uncompressedOffset
               && pos < uncompressedOffset + uncompressedSum;
    }

    // True if the given Block number (relative to the whole file)
    // belongs to this Stream.
    public boolean hasRecord(int blockNumber) {
        return blockNumber >= recordOffset
               && blockNumber < recordOffset + recordCount;
    }

    // Finds the Block containing the given uncompressed target offset
    // with a binary search over the cumulative sizes, and fills "info"
    // with that Block's information.
    public void locateBlock(BlockInfo info, long target) {
        assert target >= uncompressedOffset;
        target -= uncompressedOffset;
        assert target < uncompressedSum;

        int left = 0;
        int right = unpadded.length - 1;

        // Find the first Record whose cumulative uncompressed size
        // exceeds the target.
        while (left < right) {
            int i = left + (right - left) / 2;

            if (uncompressed[i] <= target)
                left = i + 1;
            else
                right = i;
        }

        setBlockInfo(info, recordOffset + left);
    }

    // Fills "info" with the offsets and sizes of the given Block.
    public void setBlockInfo(BlockInfo info, int blockNumber) {
        // The caller has checked that the given Block number is inside
        // this Index.
        assert blockNumber >= recordOffset;
        assert blockNumber - recordOffset < recordCount;

        info.index = this;
        info.blockNumber = blockNumber;

        int pos = blockNumber - recordOffset;

        // Stream-relative offsets are differences of the cumulative
        // sums stored in the arrays.
        if (pos == 0) {
            info.compressedOffset = 0;
            info.uncompressedOffset = 0;
        } else {
            info.compressedOffset = (unpadded[pos - 1] + 3) & ~3;
            info.uncompressedOffset = uncompressed[pos - 1];
        }

        info.unpaddedSize = unpadded[pos] - info.compressedOffset;
        info.uncompressedSize = uncompressed[pos] - info.uncompressedOffset;

        // Convert Stream-relative offsets to file-absolute ones.
        info.compressedOffset += compressedOffset
                                 + DecoderUtil.STREAM_HEADER_SIZE;
        info.uncompressedOffset += uncompressedOffset;
    }
}

View File

@@ -1,59 +0,0 @@
/*
* IndexEncoder
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.index;
import java.io.OutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.zip.CheckedOutputStream;
import org.tukaani.xz.common.EncoderUtil;
import org.tukaani.xz.XZIOException;
public class IndexEncoder extends IndexBase {
    // Records in the order they will be written to the Index.
    // Parameterized type replaces the raw ArrayList + Iterator cast.
    private final ArrayList<IndexRecord> records
            = new ArrayList<IndexRecord>();

    public IndexEncoder() {
        super(new XZIOException("XZ Stream or its Index has grown too big"));
    }

    /**
     * Adds a new Record for one Block.
     *
     * @throws XZIOException if the Stream or its Index would grow too big
     */
    public void add(long unpaddedSize, long uncompressedSize)
            throws XZIOException {
        super.add(unpaddedSize, uncompressedSize);
        records.add(new IndexRecord(unpaddedSize, uncompressedSize));
    }

    /**
     * Encodes the whole Index field (Indicator, Number of Records,
     * List of Records, Index Padding, and CRC32) to <code>out</code>.
     */
    public void encode(OutputStream out) throws IOException {
        java.util.zip.CRC32 crc32 = new java.util.zip.CRC32();
        CheckedOutputStream outChecked = new CheckedOutputStream(out, crc32);

        // Index Indicator
        outChecked.write(0x00);

        // Number of Records
        EncoderUtil.encodeVLI(outChecked, recordCount);

        // List of Records
        for (IndexRecord record : records) {
            EncoderUtil.encodeVLI(outChecked, record.unpadded);
            EncoderUtil.encodeVLI(outChecked, record.uncompressed);
        }

        // Index Padding
        for (int i = getIndexPaddingSize(); i > 0; --i)
            outChecked.write(0x00);

        // CRC32 (little endian; written past the CheckedOutputStream
        // because the CRC32 field doesn't cover itself)
        long value = crc32.getValue();
        for (int i = 0; i < 4; ++i)
            out.write((byte) (value >>> (i * 8)));
    }
}

View File

@@ -1,94 +0,0 @@
/*
* IndexHash
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.index;
import java.io.InputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.zip.CheckedInputStream;
import org.tukaani.xz.common.DecoderUtil;
import org.tukaani.xz.XZIOException;
import org.tukaani.xz.CorruptedInputException;
public class IndexHash extends IndexBase {
    // Running hash over the (unpaddedSize, uncompressedSize) pairs of
    // the decoded Blocks; SHA-256 when available, CRC32 otherwise.
    private org.tukaani.xz.check.Check hash;

    public IndexHash() {
        super(new CorruptedInputException());

        try {
            hash = new org.tukaani.xz.check.SHA256();
        } catch (java.security.NoSuchAlgorithmException e) {
            hash = new org.tukaani.xz.check.CRC32();
        }
    }

    // Accounts for one Block and mixes its sizes into the running hash.
    public void add(long unpaddedSize, long uncompressedSize)
            throws XZIOException {
        super.add(unpaddedSize, uncompressedSize);

        ByteBuffer buf = ByteBuffer.allocate(2 * 8);
        buf.putLong(unpaddedSize);
        buf.putLong(uncompressedSize);
        hash.update(buf.array());
    }

    /**
     * Reads the Index field from <code>in</code> and verifies that it
     * matches the Blocks that were actually decoded: the Record count,
     * the size sums, the per-Record hash, and the CRC32 of the Index.
     *
     * @throws CorruptedInputException if any mismatch is found
     */
    public void validate(InputStream in) throws IOException {
        // Index Indicator (0x00) has already been read by BlockInputStream
        // so add 0x00 to the CRC32 here.
        java.util.zip.CRC32 crc32 = new java.util.zip.CRC32();
        crc32.update('\0');
        CheckedInputStream inChecked = new CheckedInputStream(in, crc32);

        // Get and validate the Number of Records field.
        long storedRecordCount = DecoderUtil.decodeVLI(inChecked);
        if (storedRecordCount != recordCount)
            throw new CorruptedInputException("XZ Index is corrupt");

        // Decode and hash the Index field and compare it to
        // the hash value calculated from the decoded Blocks.
        IndexHash stored = new IndexHash();
        for (long i = 0; i < recordCount; ++i) {
            long unpaddedSize = DecoderUtil.decodeVLI(inChecked);
            long uncompressedSize = DecoderUtil.decodeVLI(inChecked);

            try {
                stored.add(unpaddedSize, uncompressedSize);
            } catch (XZIOException e) {
                throw new CorruptedInputException("XZ Index is corrupt");
            }

            // Fail early if the stored sums already exceed the decoded
            // ones; the exact comparison happens after the loop.
            if (stored.blocksSum > blocksSum
                    || stored.uncompressedSum > uncompressedSum
                    || stored.indexListSize > indexListSize)
                throw new CorruptedInputException("XZ Index is corrupt");
        }

        if (stored.blocksSum != blocksSum
                || stored.uncompressedSum != uncompressedSum
                || stored.indexListSize != indexListSize
                || !Arrays.equals(stored.hash.finish(), hash.finish()))
            throw new CorruptedInputException("XZ Index is corrupt");

        // Index Padding
        DataInputStream inData = new DataInputStream(inChecked);
        for (int i = getIndexPaddingSize(); i > 0; --i)
            if (inData.readUnsignedByte() != 0x00)
                throw new CorruptedInputException("XZ Index is corrupt");

        // CRC32 (stored little endian)
        long value = crc32.getValue();
        for (int i = 0; i < 4; ++i)
            if (((value >>> (i * 8)) & 0xFF) != inData.readUnsignedByte())
                throw new CorruptedInputException("XZ Index is corrupt");
    }
}

View File

@@ -1,20 +0,0 @@
/*
* IndexRecord
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.index;
class IndexRecord {
    // One Index Record: the Unpadded Size and Uncompressed Size
    // fields of a single Block.
    final long unpadded;
    final long uncompressed;

    IndexRecord(long unpaddedSize, long uncompressedSize) {
        unpadded = unpaddedSize;
        uncompressed = uncompressedSize;
    }
}

View File

@@ -1,254 +0,0 @@
/*
* Binary Tree match finder with 2-, 3-, and 4-byte hashing
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
final class BT4 extends LZEncoder {
private final Hash234 hash;
private final int[] tree;
private final Matches matches;
private final int depthLimit;
private final int cyclicSize;
private int cyclicPos = -1;
private int lzPos;
static int getMemoryUsage(int dictSize) {
return Hash234.getMemoryUsage(dictSize) + dictSize / (1024 / 8) + 10;
}
BT4(int dictSize, int beforeSizeMin, int readAheadMax,
int niceLen, int matchLenMax, int depthLimit) {
super(dictSize, beforeSizeMin, readAheadMax, niceLen, matchLenMax);
cyclicSize = dictSize + 1;
lzPos = cyclicSize;
hash = new Hash234(dictSize);
tree = new int[cyclicSize * 2];
// Substracting 1 because the shortest match that this match
// finder can find is 2 bytes, so there's no need to reserve
// space for one-byte matches.
matches = new Matches(niceLen - 1);
this.depthLimit = depthLimit > 0 ? depthLimit : 16 + niceLen / 2;
}
private int movePos() {
int avail = movePos(niceLen, 4);
if (avail != 0) {
if (++lzPos == Integer.MAX_VALUE) {
int normalizationOffset = Integer.MAX_VALUE - cyclicSize;
hash.normalize(normalizationOffset);
normalize(tree, normalizationOffset);
lzPos -= normalizationOffset;
}
if (++cyclicPos == cyclicSize)
cyclicPos = 0;
}
return avail;
}
public Matches getMatches() {
matches.count = 0;
int matchLenLimit = matchLenMax;
int niceLenLimit = niceLen;
int avail = movePos();
if (avail < matchLenLimit) {
if (avail == 0)
return matches;
matchLenLimit = avail;
if (niceLenLimit > avail)
niceLenLimit = avail;
}
hash.calcHashes(buf, readPos);
int delta2 = lzPos - hash.getHash2Pos();
int delta3 = lzPos - hash.getHash3Pos();
int currentMatch = hash.getHash4Pos();
hash.updateTables(lzPos);
int lenBest = 0;
// See if the hash from the first two bytes found a match.
// The hashing algorithm guarantees that if the first byte
// matches, also the second byte does, so there's no need to
// test the second byte.
if (delta2 < cyclicSize && buf[readPos - delta2] == buf[readPos]) {
lenBest = 2;
matches.len[0] = 2;
matches.dist[0] = delta2 - 1;
matches.count = 1;
}
// See if the hash from the first three bytes found a match that
// is different from the match possibly found by the two-byte hash.
// Also here the hashing algorithm guarantees that if the first byte
// matches, also the next two bytes do.
if (delta2 != delta3 && delta3 < cyclicSize
&& buf[readPos - delta3] == buf[readPos]) {
lenBest = 3;
matches.dist[matches.count++] = delta3 - 1;
delta2 = delta3;
}
// If a match was found, see how long it is.
if (matches.count > 0) {
while (lenBest < matchLenLimit && buf[readPos + lenBest - delta2]
== buf[readPos + lenBest])
++lenBest;
matches.len[matches.count - 1] = lenBest;
// Return if it is long enough (niceLen or reached the end of
// the dictionary).
if (lenBest >= niceLenLimit) {
skip(niceLenLimit, currentMatch);
return matches;
}
}
// Long enough match wasn't found so easily. Look for better matches
// from the binary tree.
if (lenBest < 3)
lenBest = 3;
int depth = depthLimit;
int ptr0 = (cyclicPos << 1) + 1;
int ptr1 = cyclicPos << 1;
int len0 = 0;
int len1 = 0;
while (true) {
int delta = lzPos - currentMatch;
// Return if the search depth limit has been reached or
// if the distance of the potential match exceeds the
// dictionary size.
if (depth-- == 0 || delta >= cyclicSize) {
tree[ptr0] = 0;
tree[ptr1] = 0;
return matches;
}
int pair = (cyclicPos - delta
+ (delta > cyclicPos ? cyclicSize : 0)) << 1;
int len = Math.min(len0, len1);
if (buf[readPos + len - delta] == buf[readPos + len]) {
while (++len < matchLenLimit)
if (buf[readPos + len - delta] != buf[readPos + len])
break;
if (len > lenBest) {
lenBest = len;
matches.len[matches.count] = len;
matches.dist[matches.count] = delta - 1;
++matches.count;
if (len >= niceLenLimit) {
tree[ptr1] = tree[pair];
tree[ptr0] = tree[pair + 1];
return matches;
}
}
}
if ((buf[readPos + len - delta] & 0xFF)
< (buf[readPos + len] & 0xFF)) {
tree[ptr1] = currentMatch;
ptr1 = pair + 1;
currentMatch = tree[ptr1];
len1 = len;
} else {
tree[ptr0] = currentMatch;
ptr0 = pair;
currentMatch = tree[ptr0];
len0 = len;
}
}
}
/**
 * Advances the binary tree match finder by one byte without collecting
 * matches: descends the tree for the current position and re-links the
 * nodes exactly as the match search does, but discards the lengths found.
 * Keeping the tree up to date while skipping is required so that later
 * searches still see every earlier position as a candidate.
 *
 * @param niceLenLimit  stop extending a comparison once it reaches this length
 * @param currentMatch  head of the hash-4 chain for the current position
 */
private void skip(int niceLenLimit, int currentMatch) {
    int depth = depthLimit;
    // ptr0/ptr1 are the tree slots that must receive the next
    // right/left child link of the node being inserted.
    int ptr0 = (cyclicPos << 1) + 1;
    int ptr1 = cyclicPos << 1;
    int len0 = 0;
    int len1 = 0;
    while (true) {
        int delta = lzPos - currentMatch;
        // Stop when the search depth is exhausted or the candidate lies
        // outside the dictionary window; terminate both subtrees.
        if (depth-- == 0 || delta >= cyclicSize) {
            tree[ptr0] = 0;
            tree[ptr1] = 0;
            return;
        }
        int pair = (cyclicPos - delta
                + (delta > cyclicPos ? cyclicSize : 0)) << 1;
        // Both subtrees are known to share at least min(len0, len1)
        // leading bytes with the current position, so the byte
        // comparison can start from there.
        int len = Math.min(len0, len1);
        if (buf[readPos + len - delta] == buf[readPos + len])
            // No need to look for longer matches than niceLenLimit
            // because we only are updating the tree, not returning
            // matches found to the caller.
            do
                if (++len == niceLenLimit) {
                    tree[ptr1] = tree[pair];
                    tree[ptr0] = tree[pair + 1];
                    return;
                }
            while (buf[readPos + len - delta] == buf[readPos + len]);
        // Descend to the left or right subtree depending on the first
        // differing byte, compared as unsigned values.
        if ((buf[readPos + len - delta] & 0xFF)
                < (buf[readPos + len] & 0xFF)) {
            tree[ptr1] = currentMatch;
            ptr1 = pair + 1;
            currentMatch = tree[ptr1];
            len1 = len;
        } else {
            tree[ptr0] = currentMatch;
            ptr0 = pair;
            currentMatch = tree[ptr0];
            len0 = len;
        }
    }
}
/**
 * Skips the given number of bytes, feeding each skipped position into
 * the hash tables and the binary tree so the match finder state stays
 * consistent.
 *
 * @param len number of bytes to skip
 */
public void skip(int len) {
    while (len-- > 0) {
        int niceLenLimit = niceLen;
        int avail = movePos();
        if (avail < niceLenLimit) {
            // Not enough input available yet; avail == 0 means the byte
            // became pending and will be re-run once more data arrives.
            if (avail == 0)
                continue;
            niceLenLimit = avail;
        }
        hash.calcHashes(buf, readPos);
        int currentMatch = hash.getHash4Pos();
        hash.updateTables(lzPos);
        skip(niceLenLimit, currentMatch);
    }
}
}

View File

@@ -1,34 +0,0 @@
/*
* CRC32Hash
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
/**
 * Builds the reflected CRC32 lookup table (polynomial 0xEDB88320, the
 * bit-reversed form of the IEEE 802.3 polynomial). The table is used as
 * a byte-mixing table by the hash-based match finders.
 */
class CRC32Hash {
    private static final int CRC32_POLY = 0xEDB88320;

    /** crcTable[b] is the CRC32 state after processing the single byte b. */
    static final int[] crcTable = new int[256];

    static {
        for (int b = 0; b < 256; ++b) {
            int value = b;
            for (int bit = 0; bit < 8; ++bit)
                value = ((value & 1) != 0)
                        ? (value >>> 1) ^ CRC32_POLY
                        : value >>> 1;
            crcTable[b] = value;
        }
    }
}

View File

@@ -1,199 +0,0 @@
/*
* Hash Chain match finder with 2-, 3-, and 4-byte hashing
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
/**
 * Hash chain match finder: finds LZ77-style matches using 2-, 3-, and
 * 4-byte hash tables plus a singly-linked chain of earlier positions
 * that share the same 4-byte hash.
 */
final class HC4 extends LZEncoder {
    // Shared 2-/3-/4-byte hash tables.
    private final Hash234 hash;
    // Hash chain: for each slot in the cyclic window, the previous
    // position that had the same 4-byte hash.
    private final int[] chain;
    // Reusable container for the matches returned by getMatches().
    private final Matches matches;
    // Maximum number of chain links to follow per position.
    private final int depthLimit;
    // Size of the cyclic buffer (dictSize + 1).
    private final int cyclicSize;
    // Current index into the cyclic chain buffer.
    private int cyclicPos = -1;
    // Monotonic position counter; the value stored in the hash tables.
    private int lzPos;
    /**
     * Gets approximate memory usage of the match finder as kibibytes.
     */
    static int getMemoryUsage(int dictSize) {
        return Hash234.getMemoryUsage(dictSize) + dictSize / (1024 / 4) + 10;
    }
    /**
     * Creates a new LZEncoder with the HC4 match finder. See
     * <code>LZEncoder.getInstance</code> for parameter descriptions.
     */
    HC4(int dictSize, int beforeSizeMin, int readAheadMax,
            int niceLen, int matchLenMax, int depthLimit) {
        super(dictSize, beforeSizeMin, readAheadMax, niceLen, matchLenMax);
        hash = new Hash234(dictSize);
        // +1 because we need dictSize bytes of history + the current byte.
        cyclicSize = dictSize + 1;
        chain = new int[cyclicSize];
        lzPos = cyclicSize;
        // Subtracting 1 because the shortest match that this match
        // finder can find is 2 bytes, so there's no need to reserve
        // space for one-byte matches.
        matches = new Matches(niceLen - 1);
        // Use a default depth limit if no other value was specified.
        // The default is just something based on experimentation;
        // it's nothing magic.
        this.depthLimit = (depthLimit > 0) ? depthLimit : 4 + niceLen / 4;
    }
    /**
     * Moves to the next byte, checks that there is enough available space, and
     * possibly normalizes the hash tables and the hash chain.
     *
     * @return number of bytes available, including the current byte
     */
    private int movePos() {
        int avail = movePos(4, 4);
        if (avail != 0) {
            if (++lzPos == Integer.MAX_VALUE) {
                // lzPos is about to overflow: rebase every stored
                // position so the counters fit into an int again.
                int normalizationOffset = Integer.MAX_VALUE - cyclicSize;
                hash.normalize(normalizationOffset);
                normalize(chain, normalizationOffset);
                lzPos -= normalizationOffset;
            }
            if (++cyclicPos == cyclicSize)
                cyclicPos = 0;
        }
        return avail;
    }
    /**
     * Finds matches for the current byte and advances one position.
     * The returned Matches instance is reused between calls.
     */
    public Matches getMatches() {
        matches.count = 0;
        int matchLenLimit = matchLenMax;
        int niceLenLimit = niceLen;
        int avail = movePos();
        // Near the end of the input fewer than matchLenMax bytes may
        // remain; clamp the limits accordingly.
        if (avail < matchLenLimit) {
            if (avail == 0)
                return matches;
            matchLenLimit = avail;
            if (niceLenLimit > avail)
                niceLenLimit = avail;
        }
        hash.calcHashes(buf, readPos);
        int delta2 = lzPos - hash.getHash2Pos();
        int delta3 = lzPos - hash.getHash3Pos();
        int currentMatch = hash.getHash4Pos();
        hash.updateTables(lzPos);
        chain[cyclicPos] = currentMatch;
        int lenBest = 0;
        // See if the hash from the first two bytes found a match.
        // The hashing algorithm guarantees that if the first byte
        // matches, also the second byte does, so there's no need to
        // test the second byte.
        if (delta2 < cyclicSize && buf[readPos - delta2] == buf[readPos]) {
            lenBest = 2;
            matches.len[0] = 2;
            matches.dist[0] = delta2 - 1;
            matches.count = 1;
        }
        // See if the hash from the first three bytes found a match that
        // is different from the match possibly found by the two-byte hash.
        // Also here the hashing algorithm guarantees that if the first byte
        // matches, also the next two bytes do.
        if (delta2 != delta3 && delta3 < cyclicSize
                && buf[readPos - delta3] == buf[readPos]) {
            lenBest = 3;
            matches.dist[matches.count++] = delta3 - 1;
            delta2 = delta3;
        }
        // If a match was found, see how long it is.
        if (matches.count > 0) {
            while (lenBest < matchLenLimit && buf[readPos + lenBest - delta2]
                    == buf[readPos + lenBest])
                ++lenBest;
            matches.len[matches.count - 1] = lenBest;
            // Return if it is long enough (niceLen or reached the end of
            // the dictionary).
            if (lenBest >= niceLenLimit)
                return matches;
        }
        // Long enough match wasn't found so easily. Look for better matches
        // from the hash chain.
        if (lenBest < 3)
            lenBest = 3;
        int depth = depthLimit;
        while (true) {
            int delta = lzPos - currentMatch;
            // Return if the search depth limit has been reached or
            // if the distance of the potential match exceeds the
            // dictionary size.
            if (depth-- == 0 || delta >= cyclicSize)
                return matches;
            currentMatch = chain[cyclicPos - delta
                    + (delta > cyclicPos ? cyclicSize : 0)];
            // Test the first byte and the first new byte that would give us
            // a match that is at least one byte longer than lenBest. This
            // way too-short matches get quickly skipped.
            if (buf[readPos + lenBest - delta] == buf[readPos + lenBest]
                    && buf[readPos - delta] == buf[readPos]) {
                // Calculate the length of the match.
                int len = 0;
                while (++len < matchLenLimit)
                    if (buf[readPos + len - delta] != buf[readPos + len])
                        break;
                // Use the match if and only if it is better than the longest
                // match found so far.
                if (len > lenBest) {
                    lenBest = len;
                    matches.len[matches.count] = len;
                    matches.dist[matches.count] = delta - 1;
                    ++matches.count;
                    // Return if it is long enough (niceLen or reached the
                    // end of the dictionary).
                    if (len >= niceLenLimit)
                        return matches;
                }
            }
        }
    }
    /**
     * Skips the given number of bytes while keeping the hash chain and
     * hash tables up to date.
     */
    public void skip(int len) {
        assert len >= 0;
        while (len-- > 0)
            if (movePos() != 0) {
                // Update the hash chain and hash tables.
                hash.calcHashes(buf, readPos);
                chain[cyclicPos] = hash.getHash4Pos();
                hash.updateTables(lzPos);
            }
    }
}

View File

@@ -1,89 +0,0 @@
/*
* 2-, 3-, and 4-byte hashing
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
/**
 * Computes 2-, 3-, and 4-byte hash values for the current input position
 * and remembers the most recent position seen for each hash value. The
 * CRC32 table inherited from the superclass is used for byte mixing.
 */
final class Hash234 extends CRC32Hash {
    private static final int HASH_2_SIZE = 1 << 10;
    private static final int HASH_2_MASK = HASH_2_SIZE - 1;
    private static final int HASH_3_SIZE = 1 << 16;
    private static final int HASH_3_MASK = HASH_3_SIZE - 1;

    private final int hash4Mask;

    private final int[] hash2Table = new int[HASH_2_SIZE];
    private final int[] hash3Table = new int[HASH_3_SIZE];
    private final int[] hash4Table;

    // Hash values of the bytes given to the latest calcHashes() call.
    private int hash2Value = 0;
    private int hash3Value = 0;
    private int hash4Value = 0;

    /**
     * Picks the 4-byte hash table size for the given dictionary size:
     * a power of two and at least 2^16 entries.
     */
    static int getHash4Size(int dictSize) {
        // Smear the highest set bit of (dictSize - 1) downwards; the
        // shifts by 1+2+4+8 together with the 0xFFFF fill below produce
        // an all-ones value for any int-sized dictionary.
        int size = dictSize - 1;
        size |= size >>> 1;
        size |= size >>> 2;
        size |= size >>> 4;
        size |= size >>> 8;
        size >>>= 1;
        size |= 0xFFFF;          // guarantee at least 2^16 entries
        if (size > (1 << 24))
            size >>>= 1;         // halve once more for huge dictionaries
        return size + 1;
    }

    /**
     * Gets approximate memory usage of the hash tables as kibibytes.
     */
    static int getMemoryUsage(int dictSize) {
        // Sizes of the hash arrays + a little extra
        return (HASH_2_SIZE + HASH_3_SIZE + getHash4Size(dictSize))
               / (1024 / 4) + 4;
    }

    Hash234(int dictSize) {
        hash4Table = new int[getHash4Size(dictSize)];
        hash4Mask = hash4Table.length - 1;
    }

    /** Computes all three hash values for the four bytes at buf[off]. */
    void calcHashes(byte[] buf, int off) {
        int crc = crcTable[buf[off] & 0xFF] ^ (buf[off + 1] & 0xFF);
        hash2Value = crc & HASH_2_MASK;

        crc ^= (buf[off + 2] & 0xFF) << 8;
        hash3Value = crc & HASH_3_MASK;

        crc ^= crcTable[buf[off + 3] & 0xFF] << 5;
        hash4Value = crc & hash4Mask;
    }

    /** Latest position recorded for the current 2-byte hash. */
    int getHash2Pos() {
        return hash2Table[hash2Value];
    }

    /** Latest position recorded for the current 3-byte hash. */
    int getHash3Pos() {
        return hash3Table[hash3Value];
    }

    /** Latest position recorded for the current 4-byte hash. */
    int getHash4Pos() {
        return hash4Table[hash4Value];
    }

    /** Records pos as the latest position for all three current hashes. */
    void updateTables(int pos) {
        hash2Table[hash2Value] = pos;
        hash3Table[hash3Value] = pos;
        hash4Table[hash4Value] = pos;
    }

    /** Rebases all stored positions; see LZEncoder.normalize. */
    void normalize(int normalizeOffset) {
        LZEncoder.normalize(hash2Table, normalizeOffset);
        LZEncoder.normalize(hash3Table, normalizeOffset);
        LZEncoder.normalize(hash4Table, normalizeOffset);
    }
}

View File

@@ -1,126 +0,0 @@
/*
* LZDecoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
import java.io.DataInputStream;
import java.io.IOException;
import org.tukaani.xz.CorruptedInputException;
/**
 * Sliding-window dictionary buffer for LZ decompression. Newly decoded
 * bytes are appended with putByte()/repeat()/copyUncompressed() and the
 * finished output is drained with flush().
 */
public final class LZDecoder {
    // Circular dictionary buffer.
    private final byte[] buf;
    // Position of the first byte that flush() has not yet returned.
    private int start = 0;
    // Current write position in buf.
    private int pos = 0;
    // Number of bytes in buf that contain valid history (never shrinks
    // except on reset()).
    private int full = 0;
    // Write limit for the current decoding call; set by setLimit().
    private int limit = 0;
    // Remainder of a repeat() that hit the write limit; replayed later
    // by repeatPending().
    private int pendingLen = 0;
    private int pendingDist = 0;
    /**
     * Creates a dictionary of the given size, optionally preloaded with
     * (the tail of) a preset dictionary.
     */
    public LZDecoder(int dictSize, byte[] presetDict) {
        buf = new byte[dictSize];
        if (presetDict != null) {
            // If the preset dictionary is bigger than dictSize, only its
            // tail is kept.
            pos = Math.min(presetDict.length, dictSize);
            full = pos;
            start = pos;
            System.arraycopy(presetDict, presetDict.length - pos, buf, 0, pos);
        }
    }
    /** Resets the dictionary to its initial empty state. */
    public void reset() {
        start = 0;
        pos = 0;
        full = 0;
        limit = 0;
        // This is the byte getByte(0) maps to while pos == 0; clear it
        // so stale data cannot leak into the output.
        buf[buf.length - 1] = 0x00;
    }
    /**
     * Sets how many more bytes may be written during the current
     * decoding call (clamped to the end of the buffer).
     */
    public void setLimit(int outMax) {
        if (buf.length - pos <= outMax)
            limit = buf.length;
        else
            limit = pos + outMax;
    }
    /** Tests if at least one more byte can be written. */
    public boolean hasSpace() {
        return pos < limit;
    }
    /** Tests if a limited repeat() left bytes still to be copied. */
    public boolean hasPending() {
        return pendingLen > 0;
    }
    /** Gets the current write position in the dictionary buffer. */
    public int getPos() {
        return pos;
    }
    /**
     * Gets the byte from the given zero-based distance behind the
     * current position, wrapping around the circular buffer if needed.
     */
    public int getByte(int dist) {
        int offset = pos - dist - 1;
        if (dist >= pos)
            offset += buf.length;
        return buf[offset] & 0xFF;
    }
    /** Appends a single literal byte. */
    public void putByte(byte b) {
        buf[pos++] = b;
        if (full < pos)
            full = pos;
    }
    /**
     * Copies len bytes starting dist + 1 bytes behind the current
     * position. If the write limit is reached first, the remainder is
     * recorded so that repeatPending() can finish the copy later.
     *
     * @param dist zero-based distance of the match
     * @param len  number of bytes to copy
     * @throws CorruptedInputException if dist points outside the
     *         available history
     */
    public void repeat(int dist, int len) throws IOException {
        if (dist < 0 || dist >= full)
            throw new CorruptedInputException();
        int left = Math.min(limit - pos, len);
        pendingLen = len - left;
        pendingDist = dist;
        int back = pos - dist - 1;
        if (dist >= pos)
            back += buf.length;
        // Byte-by-byte copy: the source may overlap the destination
        // (dist smaller than len), which is the RLE-like LZ case.
        do {
            buf[pos++] = buf[back++];
            if (back == buf.length)
                back = 0;
        } while (--left > 0);
        if (full < pos)
            full = pos;
    }
    /** Finishes a repeat() that was interrupted by the write limit. */
    public void repeatPending() throws IOException {
        if (pendingLen > 0)
            repeat(pendingDist, pendingLen);
    }
    /**
     * Reads uncompressed data directly into the dictionary. At most
     * buf.length - pos bytes are read per call; presumably the caller
     * loops to handle wrap-around — TODO confirm against the caller.
     */
    public void copyUncompressed(DataInputStream inData, int len)
            throws IOException {
        int copySize = Math.min(buf.length - pos, len);
        inData.readFully(buf, pos, copySize);
        pos += copySize;
        if (full < pos)
            full = pos;
    }
    /**
     * Copies the bytes decoded since the previous flush into out and
     * returns the number of bytes copied. Wraps pos back to the start
     * of the buffer once the end has been reached.
     */
    public int flush(byte[] out, int outOff) {
        int copySize = pos - start;
        if (pos == buf.length)
            pos = 0;
        System.arraycopy(buf, start, out, outOff, copySize);
        start = pos;
        return copySize;
    }
}

View File

@@ -1,410 +0,0 @@
/*
* LZEncoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
import java.io.OutputStream;
import java.io.IOException;
/**
 * Base class for the LZ77 sliding-window buffer used when encoding.
 * Buffers the input, provides byte and match-length lookups relative to
 * the current read position, and defines the match finder interface
 * (getMatches/skip) implemented by HC4 and BT4.
 */
public abstract class LZEncoder {
    /** Match finder ID: hash chain with 2-, 3-, and 4-byte hashing. */
    public static final int MF_HC4 = 0x04;
    /** Match finder ID: binary tree with 2-, 3-, and 4-byte hashing. */
    public static final int MF_BT4 = 0x14;
    /**
     * Number of bytes to keep available before the current byte when moving the
     * LZ window.
     */
    private final int keepSizeBefore;
    /**
     * Number of bytes that must be available, the current byte included, to
     * make hasEnoughData return true. Flushing and finishing are naturally
     * exceptions to this since there cannot be any data after the end of the
     * uncompressed input.
     */
    private final int keepSizeAfter;
    /** Don't test for matches longer than this. */
    final int matchLenMax;
    /** Stop looking for better matches once one of this length is found. */
    final int niceLen;
    /** LZ window buffer. */
    final byte[] buf;
    /** Position of the current byte in buf; -1 until the first movePos(). */
    int readPos = -1;
    /** Encoding may continue while readPos < readLimit. */
    private int readLimit = -1;
    /** True once setFinishing() has been called. */
    private boolean finishing = false;
    /** Position where the next input byte will be stored in buf. */
    private int writePos = 0;
    /** Number of bytes movePos() rejected; re-run by processPendingBytes(). */
    private int pendingSize = 0;
    /**
     * Rebases an array of stored positions after the position counters
     * have been reduced by normalizationOffset. Entries that would
     * become non-positive are cleared to 0 ("no position stored").
     */
    static void normalize(int[] positions, int normalizationOffset) {
        for (int i = 0; i < positions.length; ++i)
            if (positions[i] <= normalizationOffset)
                positions[i] = 0;
            else
                positions[i] -= normalizationOffset;
    }
    /**
     * Gets the size of the LZ window buffer that needs to be allocated.
     */
    private static int getBufSize(
            int dictSize, int extraSizeBefore, int extraSizeAfter,
            int matchLenMax) {
        int keepSizeBefore = extraSizeBefore + dictSize;
        int keepSizeAfter = extraSizeAfter + matchLenMax;
        // Extra slack so that moveWindow() doesn't have to run after
        // every fillWindow() call.
        int reserveSize = Math.min(dictSize / 2 + (256 << 10), 512 << 20);
        return keepSizeBefore + keepSizeAfter + reserveSize;
    }
    /**
     * Gets approximate memory usage of the LZEncoder base structure and the
     * match finder as kibibytes.
     */
    public static int getMemoryUsage(
            int dictSize, int extraSizeBefore, int extraSizeAfter,
            int matchLenMax, int mf) {
        // Buffer size + a little extra
        int m = getBufSize(dictSize, extraSizeBefore, extraSizeAfter,
                matchLenMax) / 1024 + 10;
        switch (mf) {
            case MF_HC4:
                m += HC4.getMemoryUsage(dictSize);
                break;
            case MF_BT4:
                m += BT4.getMemoryUsage(dictSize);
                break;
            default:
                throw new IllegalArgumentException();
        }
        return m;
    }
    /**
     * Creates a new LZEncoder.
     * <p>
     * @param dictSize dictionary size
     *
     * @param extraSizeBefore number of bytes to keep available in the history
     * in addition to dictSize
     *
     * @param extraSizeAfter number of bytes that must be available after
     * current position + matchLenMax
     *
     * @param niceLen if a match of at least <code>niceLen</code> bytes is
     * found, be happy with it and don't stop looking for longer matches
     *
     * @param matchLenMax don't test for matches longer than
     * <code>matchLenMax</code> bytes
     *
     * @param mf match finder ID
     *
     * @param depthLimit match finder search depth limit
     */
    public static LZEncoder getInstance(
            int dictSize, int extraSizeBefore, int extraSizeAfter,
            int niceLen, int matchLenMax, int mf, int depthLimit) {
        switch (mf) {
            case MF_HC4:
                return new HC4(dictSize, extraSizeBefore, extraSizeAfter,
                        niceLen, matchLenMax, depthLimit);
            case MF_BT4:
                return new BT4(dictSize, extraSizeBefore, extraSizeAfter,
                        niceLen, matchLenMax, depthLimit);
        }
        throw new IllegalArgumentException();
    }
    /**
     * Creates a new LZEncoder. See <code>getInstance</code>.
     */
    LZEncoder(int dictSize, int extraSizeBefore, int extraSizeAfter,
            int niceLen, int matchLenMax) {
        buf = new byte[getBufSize(dictSize, extraSizeBefore, extraSizeAfter,
                matchLenMax)];
        keepSizeBefore = extraSizeBefore + dictSize;
        keepSizeAfter = extraSizeAfter + matchLenMax;
        this.matchLenMax = matchLenMax;
        this.niceLen = niceLen;
    }
    /**
     * Sets a preset dictionary. If a preset dictionary is wanted, this function
     * must be called immediately after creating the LZEncoder before any data
     * has been encoded.
     */
    public void setPresetDict(int dictSize, byte[] presetDict) {
        assert !isStarted();
        assert writePos == 0;
        if (presetDict != null) {
            // If the preset dictionary buffer is bigger than the dictionary
            // size, copy only the tail of the preset dictionary.
            int copySize = Math.min(presetDict.length, dictSize);
            int offset = presetDict.length - copySize;
            System.arraycopy(presetDict, offset, buf, 0, copySize);
            writePos += copySize;
            skip(copySize);
        }
    }
    /**
     * Moves data from the end of the buffer to the beginning, discarding old
     * data and making space for new input.
     */
    private void moveWindow() {
        // Align the move to a multiple of 16 bytes. LZMA2 needs this
        // because it uses the lowest bits from readPos to get the
        // alignment of the uncompressed data.
        int moveOffset = (readPos + 1 - keepSizeBefore) & ~15;
        int moveSize = writePos - moveOffset;
        System.arraycopy(buf, moveOffset, buf, 0, moveSize);
        readPos -= moveOffset;
        readLimit -= moveOffset;
        writePos -= moveOffset;
    }
    /**
     * Copies new data into the LZEncoder's buffer.
     *
     * @return the number of bytes actually copied
     */
    public int fillWindow(byte[] in, int off, int len) {
        assert !finishing;
        // Move the sliding window if needed.
        if (readPos >= buf.length - keepSizeAfter)
            moveWindow();
        // Try to fill the dictionary buffer. If it becomes full,
        // some of the input bytes may be left unused.
        if (len > buf.length - writePos)
            len = buf.length - writePos;
        System.arraycopy(in, off, buf, writePos, len);
        writePos += len;
        // Set the new readLimit but only if there's enough data to allow
        // encoding of at least one more byte.
        if (writePos >= keepSizeAfter)
            readLimit = writePos - keepSizeAfter;
        processPendingBytes();
        // Tell the caller how much input we actually copied into
        // the dictionary.
        return len;
    }
    /**
     * Process pending bytes remaining from preset dictionary initialization or
     * encoder flush operation.
     */
    private void processPendingBytes() {
        // After flushing or setting a preset dictionary there will be
        // pending data that hasn't been run through the match finder yet.
        // Run it through the match finder now if there is enough new data
        // available (readPos < readLimit) that the encoder may encode at
        // least one more input byte. This way we don't waste any time
        // looping in the match finder (and marking the same bytes as
        // pending again) if the application provides very little new data
        // per write call.
        if (pendingSize > 0 && readPos < readLimit) {
            readPos -= pendingSize;
            int oldPendingSize = pendingSize;
            pendingSize = 0;
            skip(oldPendingSize);
            assert pendingSize < oldPendingSize;
        }
    }
    /**
     * Returns true if at least one byte has already been run through the match
     * finder.
     */
    public boolean isStarted() {
        return readPos != -1;
    }
    /**
     * Marks that all the input needs to be made available in the encoded
     * output.
     */
    public void setFlushing() {
        readLimit = writePos - 1;
        processPendingBytes();
    }
    /**
     * Marks that there is no more input remaining. The read position can be
     * advanced until the end of the data.
     */
    public void setFinishing() {
        readLimit = writePos - 1;
        finishing = true;
        processPendingBytes();
    }
    /**
     * Tests if there is enough input available to let the caller encode at
     * least one more byte.
     */
    public boolean hasEnoughData(int alreadyReadLen) {
        return readPos - alreadyReadLen < readLimit;
    }
    /**
     * Writes len bytes of already-buffered data, starting backward bytes
     * before the position following the current byte, to the stream.
     */
    public void copyUncompressed(OutputStream out, int backward, int len)
            throws IOException {
        out.write(buf, readPos + 1 - backward, len);
    }
    /**
     * Get the number of bytes available, including the current byte.
     * <p>
     * Note that the result is undefined if <code>getMatches</code> or
     * <code>skip</code> hasn't been called yet and no preset dictionary is
     * being used.
     */
    public int getAvail() {
        assert isStarted();
        return writePos - readPos;
    }
    /**
     * Gets the lowest four bits of the absolute offset of the current byte.
     * Bits other than the lowest four are undefined.
     */
    public int getPos() {
        return readPos;
    }
    /**
     * Gets the byte from the given backward offset.
     * <p>
     * The current byte is at <code>0</code>, the previous byte at
     * <code>1</code> etc. To get a byte at zero-based distance, use
     * <code>getByte(dist + 1)</code>.
     * <p>
     * This function is equivalent to <code>getByte(0, backward)</code>.
     */
    public int getByte(int backward) {
        return buf[readPos - backward] & 0xFF;
    }
    /**
     * Gets the byte from the given forward minus backward offset. The forward
     * offset is added to the current position. This lets one read bytes ahead
     * of the current byte.
     */
    public int getByte(int forward, int backward) {
        return buf[readPos + forward - backward] & 0xFF;
    }
    /**
     * Get the length of a match at the given distance.
     *
     * @param dist zero-based distance of the match to test
     * @param lenLimit don't test for a match longer than this
     *
     * @return length of the match; it is in the range [0, lenLimit]
     */
    public int getMatchLen(int dist, int lenLimit) {
        int backPos = readPos - dist - 1;
        int len = 0;
        while (len < lenLimit && buf[readPos + len] == buf[backPos + len])
            ++len;
        return len;
    }
    /**
     * Get the length of a match at the given distance and forward offset.
     *
     * @param forward forward offset
     * @param dist zero-based distance of the match to test
     * @param lenLimit don't test for a match longer than this
     *
     * @return length of the match; it is in the range [0, lenLimit]
     */
    public int getMatchLen(int forward, int dist, int lenLimit) {
        int curPos = readPos + forward;
        int backPos = curPos - dist - 1;
        int len = 0;
        while (len < lenLimit && buf[curPos + len] == buf[backPos + len])
            ++len;
        return len;
    }
    /**
     * Verifies that the matches returned by the match finder are valid. This is
     * meant to be used in an assert statement. This is totally useless for
     * actual encoding since match finder's results should naturally always be
     * valid if it isn't broken.
     *
     * @param matches return value from <code>getMatches</code>
     *
     * @return true if matches are valid, false if match finder is broken
     */
    public boolean verifyMatches(Matches matches) {
        int lenLimit = Math.min(getAvail(), matchLenMax);
        for (int i = 0; i < matches.count; ++i)
            if (getMatchLen(matches.dist[i], lenLimit) != matches.len[i])
                return false;
        return true;
    }
    /**
     * Moves to the next byte, checks if there is enough input available, and
     * returns the amount of input available.
     *
     * @param requiredForFlushing minimum number of available bytes when
     * flushing; encoding may be continued with new input after flushing
     * @param requiredForFinishing minimum number of available bytes when
     * finishing; encoding must not be continued after finishing or the match
     * finder state may be corrupt
     *
     * @return the number of bytes available or zero if there is not enough
     * input available
     */
    int movePos(int requiredForFlushing, int requiredForFinishing) {
        assert requiredForFlushing >= requiredForFinishing;
        ++readPos;
        int avail = writePos - readPos;
        if (avail < requiredForFlushing)
            if (avail < requiredForFinishing || !finishing) {
                // Too little data: mark the byte as pending so it is
                // re-run through the match finder when more data arrives.
                ++pendingSize;
                avail = 0;
            }
        return avail;
    }
    /**
     * Runs match finder for the next byte and returns the matches found.
     */
    public abstract Matches getMatches();
    /**
     * Skips the given number of bytes in the match finder.
     */
    public abstract void skip(int len);
}

View File

@@ -1,22 +0,0 @@
/*
* Matches
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.lz;
/**
 * Container for the matches found by a match finder. Entry i describes
 * a match of {@code len[i]} bytes at the zero-based distance
 * {@code dist[i]}.
 */
public final class Matches {
    /** Match lengths; only the first {@code count} entries are valid. */
    public final int[] len;

    /** Zero-based match distances; only the first {@code count} entries are valid. */
    public final int[] dist;

    /** Number of matches currently stored. */
    public int count = 0;

    Matches(int countMax) {
        this.len = new int[countMax];
        this.dist = new int[countMax];
    }
}

View File

@@ -1,35 +0,0 @@
/**
* XZ data compression support.
*
* <h4>Introduction</h4>
* <p>
* This aims to be a complete implementation of XZ data compression in pure
* Java. Features:
* <ul>
* <li>Full support for the .xz file format specification version 1.0.4</li>
* <li>Single-threaded streamed compression and decompression</li>
* <li>Single-threaded decompression with limited random access support</li>
* <li>Raw streams (no .xz headers) for advanced users, including LZMA2 with
* preset dictionary</li>
* </ul>
* <p>
* Multithreaded processing is planned, but it is not yet known when it will be implemented.
* <p>
* For the latest source code, see the
* <a href="http://tukaani.org/xz/java.html">home page of XZ for Java</a>.
*
* <h4>Getting started</h4>
* <p>
* Start by reading the documentation of {@link org.tukaani.xz.XZOutputStream}
* and {@link org.tukaani.xz.XZInputStream}. If you use XZ inside another file
* format or protocol, see also {@link org.tukaani.xz.SingleXZInputStream}.
*
* <h4>Licensing</h4>
* <p>
* XZ for Java has been put into the public domain, thus you can do whatever you
* want with it. All the files in the package have been written by Lasse Collin
* and/or Igor Pavlov.
* <p>
* This software is provided "as is", without any warranty.
*/
package org.tukaani.xz;

View File

@@ -1,26 +0,0 @@
/*
* RangeCoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.rangecoder;
import java.util.Arrays;
/**
 * Shared constants and helpers for the range encoder and decoders.
 */
public abstract class RangeCoder {
    /** Number of bits shifted in/out of the coder state at a time. */
    static final int SHIFT_BITS = 8;

    /** Mask of the top byte; when clear, the state must be normalized. */
    static final int TOP_MASK = 0xFF000000;

    /** Probabilities are fixed-point values with this many bits. */
    static final int BIT_MODEL_TOTAL_BITS = 11;
    static final int BIT_MODEL_TOTAL = 1 << BIT_MODEL_TOTAL_BITS;

    /** Initial probability: one half, i.e. both bit values equally likely. */
    static final short PROB_INIT = (short) (BIT_MODEL_TOTAL / 2);

    /** Probability adaptation rate used by the encoder and decoders. */
    static final int MOVE_BITS = 5;

    /**
     * Resets every probability in the given array to the neutral
     * fifty-percent state.
     */
    public static final void initProbs(short[] probs) {
        Arrays.fill(probs, PROB_INIT);
    }
}

View File

@@ -1,82 +0,0 @@
/*
* RangeDecoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.rangecoder;
import java.io.DataInputStream;
import java.io.IOException;
/**
 * Base class for range decoders. Holds the shared range/code registers;
 * subclasses implement {@link #normalize()} to refill the code register
 * from their particular input source.
 */
public abstract class RangeDecoder extends RangeCoder {
    int range = 0;
    int code = 0;
    /**
     * Shifts one more input byte into the code register when the range
     * has shrunk so far that its top byte is zero.
     */
    public abstract void normalize() throws IOException;
    /**
     * Decodes one bit using the adaptive probability probs[index] and
     * updates that probability toward the decoded value.
     *
     * @return the decoded bit, 0 or 1
     */
    public int decodeBit(short[] probs, int index) throws IOException {
        normalize();
        int prob = probs[index];
        int bound = (range >>> BIT_MODEL_TOTAL_BITS) * prob;
        int bit;
        // Compare code and bound as if they were unsigned 32-bit integers.
        if ((code ^ 0x80000000) < (bound ^ 0x80000000)) {
            range = bound;
            probs[index] = (short) (prob + ((BIT_MODEL_TOTAL - prob) >>> MOVE_BITS));
            bit = 0;
        } else {
            range -= bound;
            code -= bound;
            probs[index] = (short) (prob - (prob >>> MOVE_BITS));
            bit = 1;
        }
        return bit;
    }
    /**
     * Decodes a symbol from a probability tree, most significant bit
     * first. The symbol width is determined by probs.length.
     */
    public int decodeBitTree(short[] probs) throws IOException {
        int symbol = 1;
        do
            symbol = (symbol << 1) | decodeBit(probs, symbol);
        while (symbol < probs.length);
        return symbol - probs.length;
    }
    /**
     * Decodes a symbol from a probability tree, least significant bit
     * first.
     */
    public int decodeReverseBitTree(short[] probs) throws IOException {
        int symbol = 1;
        int i = 0;
        int result = 0;
        do {
            int bit = decodeBit(probs, symbol);
            symbol = (symbol << 1) | bit;
            result |= bit << i++;
        } while (symbol < probs.length);
        return result;
    }
    /**
     * Decodes the given number of bits with fixed fifty-percent
     * probabilities (no adaptive model).
     */
    public int decodeDirectBits(int count) throws IOException {
        int result = 0;
        do {
            normalize();
            range >>>= 1;
            // t is 0 if code >= range (bit 1), otherwise 1 (bit 0).
            int t = (code - range) >>> 31;
            code -= range & (t - 1);
            result = (result << 1) | (1 - t);
        } while (--count != 0);
        return result;
    }
}

View File

@@ -1,63 +0,0 @@
/*
* RangeDecoderFromBuffer
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.rangecoder;
import java.io.DataInputStream;
import java.io.IOException;
import org.tukaani.xz.CorruptedInputException;
/**
 * Range decoder that consumes a fully buffered compressed chunk. The
 * whole chunk is loaded up front by prepareInputBuffer(); normalize()
 * then takes bytes from the buffer one at a time.
 */
public final class RangeDecoderFromBuffer extends RangeDecoder {
    // The first five bytes of a chunk (one zero byte plus the four
    // initial code bytes) are read directly from the stream and never
    // stored in buf.
    private static final int INIT_SIZE = 5;

    private final byte[] buf;
    private int bufPos = 0;
    private int bufEnd = 0;

    public RangeDecoderFromBuffer(int inputSizeMax) {
        buf = new byte[inputSizeMax - INIT_SIZE];
    }

    /**
     * Loads one compressed chunk of the given length and initializes the
     * decoder state from its first five bytes.
     *
     * @throws CorruptedInputException if the chunk is shorter than five
     *         bytes or does not begin with a zero byte
     */
    public void prepareInputBuffer(DataInputStream in, int len)
            throws IOException {
        if (len < INIT_SIZE)
            throw new CorruptedInputException();

        if (in.readUnsignedByte() != 0x00)
            throw new CorruptedInputException();

        code = in.readInt();
        range = 0xFFFFFFFF;

        bufPos = 0;
        bufEnd = len - INIT_SIZE;
        in.readFully(buf, 0, bufEnd);
    }

    /** False if normalization has read past the end of the chunk. */
    public boolean isInBufferOK() {
        return bufPos <= bufEnd;
    }

    /** True when the chunk is fully consumed and decoding ended cleanly. */
    public boolean isFinished() {
        return bufPos == bufEnd && code == 0;
    }

    public void normalize() throws IOException {
        if ((range & TOP_MASK) != 0)
            return;

        try {
            // If the input is corrupt, this might throw
            // ArrayIndexOutOfBoundsException.
            code = (code << SHIFT_BITS) | (buf[bufPos++] & 0xFF);
            range <<= SHIFT_BITS;
        } catch (ArrayIndexOutOfBoundsException e) {
            throw new CorruptedInputException();
        }
    }
}

View File

@@ -1,41 +0,0 @@
/*
* RangeDecoderFromStream
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.rangecoder;
import java.io.InputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.tukaani.xz.CorruptedInputException;
/**
 * Range decoder that reads its input directly from a stream.
 */
public final class RangeDecoderFromStream extends RangeDecoder {
    private final DataInputStream inData;

    public RangeDecoderFromStream(InputStream in) throws IOException {
        inData = new DataInputStream(in);

        // A valid range-coded stream starts with a zero byte followed
        // by the four initial code bytes.
        if (inData.readUnsignedByte() != 0x00)
            throw new CorruptedInputException();

        code = inData.readInt();
        range = 0xFFFFFFFF;
    }

    /** True when decoding has consumed the stream's final code state. */
    public boolean isFinished() {
        return code == 0;
    }

    public void normalize() throws IOException {
        if ((range & TOP_MASK) != 0)
            return;

        code = (code << SHIFT_BITS) | inData.readUnsignedByte();
        range <<= SHIFT_BITS;
    }
}

View File

@@ -1,202 +0,0 @@
/*
* RangeEncoder
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.rangecoder;
import java.io.OutputStream;
import java.io.IOException;
public final class RangeEncoder extends RangeCoder {
private static final int MOVE_REDUCING_BITS = 4;
private static final int BIT_PRICE_SHIFT_BITS = 4;
private static final int[] prices
= new int[BIT_MODEL_TOTAL >>> MOVE_REDUCING_BITS];
private long low;
private int range;
// NOTE: int is OK for LZMA2 because a compressed chunk
// is not more than 64 KiB, but with LZMA1 there is no chunking
// so in theory cacheSize can grow very big. To be very safe,
// use long instead of int if you adapt this code for LZMA1.
private int cacheSize;
private byte cache;
private final byte[] buf;
private int bufPos;
static {
for (int i = (1 << MOVE_REDUCING_BITS) / 2; i < BIT_MODEL_TOTAL;
i += (1 << MOVE_REDUCING_BITS)) {
int w = i;
int bitCount = 0;
for (int j = 0; j < BIT_PRICE_SHIFT_BITS; ++j) {
w *= w;
bitCount <<= 1;
while ((w & 0xFFFF0000) != 0) {
w >>>= 1;
++bitCount;
}
}
prices[i >> MOVE_REDUCING_BITS]
= (BIT_MODEL_TOTAL_BITS << BIT_PRICE_SHIFT_BITS)
- 15 - bitCount;
}
}
public RangeEncoder(int bufSize) {
buf = new byte[bufSize];
reset();
}
public void reset() {
low = 0;
range = 0xFFFFFFFF;
cache = 0x00;
cacheSize = 1;
bufPos = 0;
}
public int getPendingSize() {
return bufPos + cacheSize + 5 - 1;
}
public int finish() {
for (int i = 0; i < 5; ++i)
shiftLow();
return bufPos;
}
public void write(OutputStream out) throws IOException {
out.write(buf, 0, bufPos);
}
private void shiftLow() {
int lowHi = (int) (low >>> 32);
if (lowHi != 0 || low < 0xFF000000L) {
int temp = cache;
do {
buf[bufPos++] = (byte) (temp + lowHi);
temp = 0xFF;
} while (--cacheSize != 0);
cache = (byte) (low >>> 24);
}
++cacheSize;
low = (low & 0x00FFFFFF) << 8;
}
public void encodeBit(short[] probs, int index, int bit) {
int prob = probs[index];
int bound = (range >>> BIT_MODEL_TOTAL_BITS) * prob;
// NOTE: Any non-zero value for bit is taken as 1.
if (bit == 0) {
range = bound;
probs[index] = (short) (prob + ((BIT_MODEL_TOTAL - prob) >>> MOVE_BITS));
} else {
low += bound & 0xFFFFFFFFL;
range -= bound;
probs[index] = (short) (prob - (prob >>> MOVE_BITS));
}
if ((range & TOP_MASK) == 0) {
range <<= SHIFT_BITS;
shiftLow();
}
}
public static int getBitPrice(int prob, int bit) {
// NOTE: Unlike in encodeBit(), here bit must be 0 or 1.
assert bit == 0 || bit == 1;
return prices[(prob ^ ((-bit) & (BIT_MODEL_TOTAL - 1)))
>>> MOVE_REDUCING_BITS];
}
public void encodeBitTree(short[] probs, int symbol) {
int index = 1;
int mask = probs.length;
do {
mask >>>= 1;
int bit = symbol & mask;
encodeBit(probs, index, bit);
index <<= 1;
if (bit != 0)
index |= 1;
} while (mask != 1);
}
public static int getBitTreePrice(short[] probs, int symbol) {
int price = 0;
symbol |= probs.length;
do {
int bit = symbol & 1;
symbol >>>= 1;
price += getBitPrice(probs[symbol], bit);
} while (symbol != 1);
return price;
}
public void encodeReverseBitTree(short[] probs, int symbol) {
int index = 1;
symbol |= probs.length;
do {
int bit = symbol & 1;
symbol >>>= 1;
encodeBit(probs, index, bit);
index = (index << 1) | bit;
} while (symbol != 1);
}
/**
 * Returns the total price of encoding "symbol" through the bit-tree
 * model in "probs", least significant bit first.
 */
public static int getReverseBitTreePrice(short[] probs, int symbol) {
    int total = 0;
    int node = 1;          // current tree node index
    // OR in the tree size as a sentinel bit marking the symbol's end.
    int sym = symbol | probs.length;
    do {
        int bit = sym & 1;
        sym >>>= 1;
        total += getBitPrice(probs[node], bit);
        node = (node << 1) | bit;
    } while (sym != 1);
    return total;
}
// Encodes "count" bits of "value" (most significant first) with a fixed
// 50/50 probability, i.e. without an adaptive model.
public void encodeDirectBits(int value, int count) {
    do {
        range >>>= 1;
        // Branch-free: (0 - bit) is 0x00000000 or 0xFFFFFFFF, so "range"
        // is added to "low" only when the current bit is 1.
        low += range & (0 - ((value >>> --count) & 1));
        // Renormalize when the range gets too small to keep precision.
        if ((range & TOP_MASK) == 0) {
            range <<= SHIFT_BITS;
            shiftLow();
        }
    } while (count != 0);
}
/**
 * Returns the price of encoding "count" direct (fixed-probability)
 * bits: exactly one bit each, scaled to price units.
 */
public static int getDirectBitsPrice(int count) {
    // Equivalent to count << BIT_PRICE_SHIFT_BITS.
    return count * (1 << BIT_PRICE_SHIFT_BITS);
}
}

View File

@@ -1,81 +0,0 @@
/*
* BCJ filter for Itanium (IA-64) instructions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.simple;
// BCJ filter for IA-64 (Itanium): rewrites the 21-bit branch
// displacements inside 16-byte instruction bundles between relative and
// absolute form so the following compression stage finds more matches.
public final class IA64 implements SimpleFilter {
    // For each of the 32 possible bundle template codes (low 5 bits of
    // the bundle's first byte), a 3-bit mask of which of the three
    // 41-bit instruction slots may hold a branch.
    private static final int[] BRANCH_TABLE = {
            0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0,
            4, 4, 6, 6, 0, 0, 7, 7,
            4, 4, 0, 0, 4, 4, 0, 0 };

    // true = convert relative->position-dependent form (encoding);
    // false = the inverse transform (decoding).
    private final boolean isEncoder;

    // Running count of bytes filtered by earlier code() calls, so the
    // conversion stays consistent across buffer boundaries.
    private int pos;

    public IA64(boolean isEncoder, int startPos) {
        this.isEncoder = isEncoder;
        pos = startPos;
    }

    // Filters buf[off .. off+len) in place; returns how many bytes were
    // processed (only whole 16-byte bundles are touched).
    public int code(byte[] buf, int off, int len) {
        int end = off + len - 16;
        int i;

        for (i = off; i <= end; i += 16) {
            // Low 5 bits of the first byte select the bundle template.
            int instrTemplate = buf[i] & 0x1F;
            int mask = BRANCH_TABLE[instrTemplate];

            // Slots start at bit 5 and are 41 bits wide.
            for (int slot = 0, bitPos = 5; slot < 3; ++slot, bitPos += 41) {
                if (((mask >>> slot) & 1) == 0)
                    continue;

                int bytePos = bitPos >>> 3;
                int bitRes = bitPos & 7;

                // Load six bytes (little endian) covering the slot.
                long instr = 0;
                for (int j = 0; j < 6; ++j)
                    instr |= (buf[i + bytePos + j] & 0xFFL) << (8 * j);

                // Align the slot's bits to bit 0.
                long instrNorm = instr >>> bitRes;

                // Only rewrite instructions whose bits 37-40 are 0x05
                // and bits 9-11 are zero (the branch form this filter
                // targets); skip everything else.
                if (((instrNorm >>> 37) & 0x0F) != 0x05
                        || ((instrNorm >>> 9) & 0x07) != 0x00)
                    continue;

                // Extract the 21-bit displacement: 20 bits at bit 13
                // plus the sign/top bit at bit 36, in 16-byte units.
                int src = (int) ((instrNorm >>> 13) & 0x0FFFFF);
                src |= ((int) (instrNorm >>> 36) & 1) << 20;
                src <<= 4;

                int dest;
                if (isEncoder)
                    dest = src + (pos + i - off);
                else
                    dest = src - (pos + i - off);

                dest >>>= 4;

                // Clear the old displacement bits and insert the new
                // value back into the same fields.
                instrNorm &= ~(0x8FFFFFL << 13);
                instrNorm |= (dest & 0x0FFFFFL) << 13;
                instrNorm |= (dest & 0x100000L) << (36 - 20);

                // Merge back with the bits below the slot and store the
                // six bytes (little endian).
                instr &= (1 << bitRes) - 1;
                instr |= instrNorm << bitRes;

                for (int j = 0; j < 6; ++j)
                    buf[i + bytePos + j] = (byte) (instr >>> (8 * j));
            }
        }

        i -= off;
        pos += i;
        return i;
    }
}

View File

@@ -1,49 +0,0 @@
/*
* BCJ filter for big endian PowerPC instructions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.simple;
/**
 * BCJ filter for big-endian PowerPC: converts the 24-bit branch-and-link
 * displacements of matching instructions between relative and absolute
 * form to improve compressibility.
 */
public final class PowerPC implements SimpleFilter {
    /** true = add the position (encoding); false = subtract (decoding). */
    private final boolean isEncoder;

    /** Bytes filtered by earlier code() calls. */
    private int pos;

    public PowerPC(boolean isEncoder, int startPos) {
        this.isEncoder = isEncoder;
        pos = startPos;
    }

    /**
     * Filters buf[off .. off+len) in place, touching only whole 4-byte
     * words; returns the number of bytes processed.
     */
    public int code(byte[] buf, int off, int len) {
        int last = off + len - 4;
        int i = off;

        while (i <= last) {
            // Match instructions whose top six bits are 0b010010 and
            // whose low two bits are 0b01.
            boolean isBranch = (buf[i] & 0xFC) == 0x48
                    && (buf[i + 3] & 0x03) == 0x01;

            if (isBranch) {
                // 24-bit displacement field, big endian, low 2 bits
                // of the last byte excluded.
                int addr = ((buf[i] & 0x03) << 24)
                        | ((buf[i + 1] & 0xFF) << 16)
                        | ((buf[i + 2] & 0xFF) << 8)
                        | (buf[i + 3] & 0xFC);

                int delta = pos + i - off;
                int conv = isEncoder ? addr + delta : addr - delta;

                // Write the converted displacement back, preserving the
                // opcode bits and the low 2 bits of the last byte.
                buf[i] = (byte) (0x48 | ((conv >>> 24) & 0x03));
                buf[i + 1] = (byte) (conv >>> 16);
                buf[i + 2] = (byte) (conv >>> 8);
                buf[i + 3] = (byte) ((buf[i + 3] & 0x03) | conv);
            }

            i += 4;
        }

        int filtered = i - off;
        pos += filtered;
        return filtered;
    }
}

View File

@@ -1,55 +0,0 @@
/*
* BCJ filter for SPARC instructions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.simple;
/**
 * BCJ filter for SPARC: converts the 22-bit displacements of matching
 * CALL-style instructions between relative and absolute form to improve
 * compressibility.
 */
public final class SPARC implements SimpleFilter {
    /** true = add the position (encoding); false = subtract (decoding). */
    private final boolean isEncoder;

    /** Bytes filtered by earlier code() calls. */
    private int pos;

    public SPARC(boolean isEncoder, int startPos) {
        this.isEncoder = isEncoder;
        pos = startPos;
    }

    /**
     * Filters buf[off .. off+len) in place, touching only whole 4-byte
     * words; returns the number of bytes processed.
     */
    public int code(byte[] buf, int off, int len) {
        int last = off + len - 4;
        int i = off;

        while (i <= last) {
            // Match the two byte patterns this filter rewrites:
            // 0x40 with top bits of the next byte clear, or
            // 0x7F with top bits of the next byte set.
            boolean matches = (buf[i] == 0x40 && (buf[i + 1] & 0xC0) == 0x00)
                    || (buf[i] == 0x7F && (buf[i + 1] & 0xC0) == 0xC0);

            if (matches) {
                // Whole 32-bit big-endian word; displacement is in
                // 4-byte units, hence the << 2.
                int word = ((buf[i] & 0xFF) << 24)
                        | ((buf[i + 1] & 0xFF) << 16)
                        | ((buf[i + 2] & 0xFF) << 8)
                        | (buf[i + 3] & 0xFF);
                int src = word << 2;

                int delta = pos + i - off;
                int dest = isEncoder ? src + delta : src - delta;
                dest >>>= 2;

                // Sign-extend bit 22 over bits 22-29 and restore the
                // fixed opcode bit 0x40000000.
                dest = (((0 - ((dest >>> 22) & 1)) << 22) & 0x3FFFFFFF)
                        | (dest & 0x3FFFFF) | 0x40000000;

                buf[i] = (byte) (dest >>> 24);
                buf[i + 1] = (byte) (dest >>> 16);
                buf[i + 2] = (byte) (dest >>> 8);
                buf[i + 3] = (byte) dest;
            }

            i += 4;
        }

        int filtered = i - off;
        pos += filtered;
        return filtered;
    }
}

View File

@@ -1,14 +0,0 @@
/*
* BCJ filter for little endian ARM instructions
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.simple;
// Common interface implemented by the BCJ branch-conversion filters
// (X86, PowerPC, SPARC, IA64, ...).
public interface SimpleFilter {
    // Filters buf[off .. off+len) in place and returns the number of
    // bytes actually processed; bytes beyond the returned count were
    // left unmodified (the implementations stop short of a partial
    // instruction at the end of the buffer).
    int code(byte[] buf, int off, int len);
}

View File

@@ -1,96 +0,0 @@
/*
* BCJ filter for x86 instructions
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <http://7-zip.org/>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
package org.tukaani.xz.simple;
// BCJ filter for x86: converts the 32-bit relative operands of CALL
// (0xE8) and JMP (0xE9) instructions between relative and absolute
// form so the following compression stage finds more matches.
public final class X86 implements SimpleFilter {
    // Indexed by the 3-bit "prevMask" history of recently seen 0xE8/0xE9
    // bytes: whether a conversion is allowed in that situation.
    private static final boolean[] MASK_TO_ALLOWED_STATUS
            = { true, true, true, false, true, false, false, false };

    // For each prevMask value, which of the operand bytes to probe with
    // test86MSByte() before allowing a conversion.
    private static final int[] MASK_TO_BIT_NUMBER = { 0, 1, 2, 2, 3, 3, 3, 3 };

    // true = relative->absolute (encoding); false = the inverse.
    private final boolean isEncoder;

    // Running byte position; startPos + 5 because the operand is
    // interpreted relative to the end of the 5-byte instruction.
    private int pos;

    // 3-bit history of whether recent bytes looked like 0xE8/0xE9;
    // carried across code() calls.
    private int prevMask = 0;

    // True for the byte values 0x00 and 0xFF, i.e. the most significant
    // operand byte of a "small" forward or backward displacement.
    private static boolean test86MSByte(byte b) {
        int i = b & 0xFF;
        return i == 0x00 || i == 0xFF;
    }

    public X86(boolean isEncoder, int startPos) {
        this.isEncoder = isEncoder;
        pos = startPos + 5;
    }

    // Filters buf[off .. off+len) in place; returns how many bytes were
    // processed (the last 4 bytes are never converted, since a full
    // 5-byte instruction must fit).
    public int code(byte[] buf, int off, int len) {
        // Position of the previous CALL/JMP opcode byte (virtual -1
        // before the buffer start).
        int prevPos = off - 1;
        int end = off + len - 5;
        int i;

        for (i = off; i <= end; ++i) {
            // Only 0xE8 (CALL) and 0xE9 (JMP) opcodes are considered.
            if ((buf[i] & 0xFE) != 0xE8)
                continue;

            // Distance since the previous candidate opcode.
            prevPos = i - prevPos;
            if ((prevPos & ~3) != 0) // (unsigned)prevPos > 3
                prevMask = 0;
            else {
                // Shift the history by the gap and keep 3 bits.
                prevMask = (prevMask << (prevPos - 1)) & 7;
                if (prevMask != 0)
                    // Heuristic: skip conversions that a nearby earlier
                    // opcode makes ambiguous for the decoder
                    // (NOTE(review): mirrors the reference x86 BCJ
                    // heuristic; exact rationale is not visible here).
                    if (!MASK_TO_ALLOWED_STATUS[prevMask] || test86MSByte(
                            buf[i + 4 - MASK_TO_BIT_NUMBER[prevMask]])) {
                        prevPos = i;
                        prevMask = (prevMask << 1) | 1;
                        continue;
                    }
            }

            prevPos = i;

            // Convert only when the operand's top byte is 0x00 or 0xFF.
            if (test86MSByte(buf[i + 4])) {
                // 32-bit little-endian operand.
                int src = (buf[i + 1] & 0xFF)
                          | ((buf[i + 2] & 0xFF) << 8)
                          | ((buf[i + 3] & 0xFF) << 16)
                          | ((buf[i + 4] & 0xFF) << 24);
                int dest;
                while (true) {
                    if (isEncoder)
                        dest = src + (pos + i - off);
                    else
                        dest = src - (pos + i - off);

                    // With no history the first conversion is final.
                    if (prevMask == 0)
                        break;

                    // Otherwise keep adjusting until the byte probed by
                    // the history check no longer looks convertible,
                    // so encoding and decoding stay symmetric.
                    int index = MASK_TO_BIT_NUMBER[prevMask] * 8;
                    if (!test86MSByte((byte) (dest >>> (24 - index))))
                        break;
                    src = dest ^ ((1 << (32 - index)) - 1);
                }

                // Store the result little endian; the top byte is
                // normalized to 0x00/0xFF from bit 24.
                buf[i + 1] = (byte) dest;
                buf[i + 2] = (byte) (dest >>> 8);
                buf[i + 3] = (byte) (dest >>> 16);
                buf[i + 4] = (byte) (~(((dest >>> 24) & 1) - 1));
                i += 4;
            } else
                prevMask = (prevMask << 1) | 1;
        }

        // Carry the opcode history across to the next call.
        prevPos = i - prevPos;
        prevMask = ((prevPos & ~3) != 0) ? 0 : prevMask << (prevPos - 1);
        i -= off;
        pos += i;
        return i;
    }
}