SPVBlockStore: support 32 byte chain work
Existing V1 files are automatically converted to V2 format.

TODO test migration
TODO test pre-made block store
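
The migration needs no explicit API call: opening an existing V1 file with the new code rewrites it in place. A minimal usage sketch (the file path is illustrative; `MainNetParams`, the `SPVBlockStore(NetworkParameters, File)` constructor, `getChainHead()` and `close()` are existing bitcoinj API):

    import java.io.File;
    import org.bitcoinj.core.NetworkParameters;
    import org.bitcoinj.params.MainNetParams;
    import org.bitcoinj.store.BlockStoreException;
    import org.bitcoinj.store.SPVBlockStore;

    public class SpvMigrationSketch {
        public static void main(String[] args) throws BlockStoreException {
            NetworkParameters params = MainNetParams.get();
            File file = new File("mainnet.spvchain"); // illustrative path
            // If the file is in V1 format, the constructor rewrites it to V2 in place;
            // a missing file is created directly in V2 format.
            SPVBlockStore store = new SPVBlockStore(params, file);
            System.out.println("Chain head after open/migration: " + store.getChainHead());
            store.close();
        }
    }

Opening the same file a second time is then a no-op with respect to format.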
schildbach committed Jul 12, 2024
1 parent 147e479 commit 3057cde
Showing 1 changed file with 103 additions and 35 deletions.
core/src/main/java/org/bitcoinj/store/SPVBlockStore.java
@@ -57,7 +57,12 @@ public class SPVBlockStore implements BlockStore {

/** The default number of headers that will be stored in the ring buffer. */
public static final int DEFAULT_CAPACITY = 10000;
@Deprecated
public static final String HEADER_MAGIC = "SPVB";
// Magic header for the V1 format.
private static final byte[] HEADER_MAGIC_V1 = HEADER_MAGIC.getBytes(StandardCharsets.US_ASCII);
// Magic header for the V2 format.
private static final byte[] HEADER_MAGIC_V2 = "SPV2".getBytes(StandardCharsets.US_ASCII);

protected volatile MappedByteBuffer buffer;
protected final NetworkParameters params;
@@ -115,8 +120,10 @@ public SPVBlockStore(NetworkParameters params, File file, int capacity, boolean
Objects.requireNonNull(file);
this.params = Objects.requireNonNull(params);
checkArgument(capacity > 0);

try {
boolean exists = file.exists();

// Set up the backing file, empty if it doesn't exist.
randomAccessFile = new RandomAccessFile(file, "rw");
FileChannel channel = randomAccessFile.getChannel();
@@ -127,43 +134,91 @@
throw new ChainFileLockedException("Store file is already locked by another process");

// Ensure expected file size, grow if desired.
fileLength = getFileSize(capacity);
if (!exists) {
log.info("Creating new SPV block chain file " + file);
randomAccessFile.setLength(fileLength);
this.fileLength = getFileSize(capacity);
byte[] currentHeader = new byte[4];
if (exists) {
log.info("Using existing SPV block chain file: " + file);
// Map it into memory read/write. The kernel will take care of flushing writes to disk at the most
// efficient times, which may mean that until the map is deallocated the data on disk is randomly
// inconsistent. However the only process accessing it is us, via this mapping, so our own view will
// always be correct. Once we establish the mmap the underlying file and channel can go away. Note that
// the details of mmapping vary between platforms.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, randomAccessFile.length());
buffer.get(currentHeader);
} else {
log.info("Creating new SPV block chain file: " + file);
randomAccessFile.setLength(fileLength);
// Map it into memory read/write. See above comment.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
initNewStore(params.getGenesisBlock());
}

// Maybe migrate V1 to V2 format.
if (Arrays.equals(HEADER_MAGIC_V1, currentHeader)) {
long currentLength = randomAccessFile.length();
long currentBlocksLength = currentLength - FILE_PROLOGUE_BYTES;
if (currentBlocksLength % RECORD_SIZE_V1 != 0)
throw new BlockStoreException(
"File size on disk indicates this is not a V1 block store: " + currentLength);
int currentCapacity = (int) (currentBlocksLength / RECORD_SIZE_V1);
log.info("Migrating SPV block chain file containing " + currentCapacity + " blocks from V1 to V2 " +
"format: " + file);

randomAccessFile.setLength(fileLength);
// Map it into memory again because of the length change.
buffer.force();
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);

// migrate magic header
((Buffer) buffer).rewind();
buffer.put(HEADER_MAGIC_V2);

// migrate headers
final byte[] zeroPadding = new byte[20]; // 32 (V2 work) - 12 (V1 work)
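// Chain work is stored big-endian, so left-padding the 12-byte V1 value with
// 20 zero bytes preserves its numeric value in the 32-byte V2 field.
// The loop below walks backwards because V2 records are wider than V1 records:
// copying front-to-back would overwrite V1 records that have not been moved yet.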
for (int i = currentCapacity - 1; i >= 0; i--) {
byte[] record = new byte[RECORD_SIZE_V1];
buffer.position(FILE_PROLOGUE_BYTES + i * RECORD_SIZE_V1);
buffer.get(record);
buffer.position(FILE_PROLOGUE_BYTES + i * RECORD_SIZE_V2);
buffer.put(record, 0, 32); // hash
buffer.put(zeroPadding);
buffer.put(record, 32, RECORD_SIZE_V1 - 32); // work, height, block header
}

// migrate cursor: translate the V1 byte offset into a record index, then scale it back up to a V2 byte offset
int cursorRecord = (getRingCursor(buffer) - FILE_PROLOGUE_BYTES) / RECORD_SIZE_V1;
setRingCursor(buffer, cursorRecord * RECORD_SIZE_V2 + FILE_PROLOGUE_BYTES);
}

// Maybe grow.
if (exists) {
final long currentLength = randomAccessFile.length();
if (currentLength != fileLength) {
if ((currentLength - FILE_PROLOGUE_BYTES) % RECORD_SIZE != 0)
if ((currentLength - FILE_PROLOGUE_BYTES) % RECORD_SIZE_V2 != 0) {
throw new BlockStoreException(
"File size on disk indicates this is not a block store: " + currentLength);
else if (!grow)
"File size on disk indicates this is not a V2 block store: " + currentLength);
} else if (!grow) {
throw new BlockStoreException("File size on disk does not match expected size: " + currentLength
+ " vs " + fileLength);
else if (fileLength < randomAccessFile.length())
} else if (fileLength < randomAccessFile.length()) {
throw new BlockStoreException(
"Shrinking is unsupported: " + currentLength + " vs " + fileLength);
else
} else {
randomAccessFile.setLength(fileLength);
// Map it into memory again because of the length change.
buffer.force();
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
}
}
}

// Map it into memory read/write. The kernel will take care of flushing writes to disk at the most
// efficient times, which may mean that until the map is deallocated the data on disk is randomly
// inconsistent. However the only process accessing it is us, via this mapping, so our own view will
// always be correct. Once we establish the mmap the underlying file and channel can go away. Note that
// the details of mmapping vary between platforms.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);

// Check or initialize the header bytes to ensure we don't try to open some random file.
if (exists) {
byte[] header = new byte[4];
buffer.get(header);
if (!new String(header, StandardCharsets.US_ASCII).equals(HEADER_MAGIC))
throw new BlockStoreException("Header bytes do not equal " + HEADER_MAGIC);
} else {
initNewStore(params.getGenesisBlock());
}
((Buffer) buffer).rewind();
buffer.get(currentHeader);
if (!Arrays.equals(currentHeader, HEADER_MAGIC_V2))
throw new BlockStoreException("Magic header V2 expected: " + new String(currentHeader,
StandardCharsets.US_ASCII));
} catch (Exception e) {
try {
if (randomAccessFile != null) randomAccessFile.close();
@@ -175,9 +230,8 @@ else if (fileLength < randomAccessFile.length())
}

private void initNewStore(Block genesisBlock) throws Exception {
byte[] header;
header = HEADER_MAGIC.getBytes(StandardCharsets.US_ASCII);
buffer.put(header);
((Buffer) buffer).rewind();
buffer.put(HEADER_MAGIC_V2);
// Insert the genesis block.
lock.lock();
try {
@@ -192,7 +246,7 @@ private void initNewStore(Block genesisBlock) throws Exception {

/** Returns the size in bytes of the file that is used to store the chain with the current parameters. */
public static int getFileSize(int capacity) {
return RECORD_SIZE * capacity + FILE_PROLOGUE_BYTES /* extra kilobyte for stuff */;
return RECORD_SIZE_V2 * capacity + FILE_PROLOGUE_BYTES /* extra kilobyte for stuff */;
}

@Override
@@ -211,7 +265,7 @@ public void put(StoredBlock block) throws BlockStoreException {
Sha256Hash hash = block.getHeader().getHash();
notFoundCache.remove(hash);
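// A record is the 32-byte block hash followed by the compact V2 serialization
// (32 bytes chain work, 4 bytes height, 80 bytes header), 148 bytes in total.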
buffer.put(hash.getBytes());
block.serializeCompact(buffer);
block.serializeCompactV2(buffer);
setRingCursor(buffer, buffer.position());
blockCache.put(hash, block);
} finally { lock.unlock(); }
@@ -238,17 +292,17 @@ public StoredBlock get(Sha256Hash hash) throws BlockStoreException {
final byte[] targetHashBytes = hash.getBytes();
byte[] scratch = new byte[32];
do {
cursor -= RECORD_SIZE;
cursor -= RECORD_SIZE_V2;
if (cursor < FILE_PROLOGUE_BYTES) {
// We hit the start, so wrap around.
cursor = fileLength - RECORD_SIZE;
cursor = fileLength - RECORD_SIZE_V2;
}
// Cursor is now at the start of the next record to check, so read the hash and compare it.
((Buffer) buffer).position(cursor);
buffer.get(scratch);
if (Arrays.equals(scratch, targetHashBytes)) {
// Found the target.
StoredBlock storedBlock = StoredBlock.deserializeCompact(buffer);
StoredBlock storedBlock = StoredBlock.deserializeCompactV2(buffer);
blockCache.put(hash, storedBlock);
return storedBlock;
}
Expand Down Expand Up @@ -311,10 +365,10 @@ public void close() throws BlockStoreException {
}
}

protected static final int RECORD_SIZE = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE;
private static final int RECORD_SIZE_V1 = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE;

// File format:
// 4 header bytes = "SPVB"
// V1 file format:
// 4 magic header bytes = "SPVB"
// 4 cursor bytes, which indicate the offset from the first kb where the next block header should be written.
// 32 bytes for the hash of the chain head
//
@@ -323,6 +377,20 @@ public void close() throws BlockStoreException {
// 12 bytes of chain work
// 4 bytes of height
// 80 bytes of block header data

private static final int RECORD_SIZE_V2 = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE_V2;

// V2 file format:
// 4 magic header bytes = "SPV2"
// 4 cursor bytes, which indicate the offset from the first kb where the next block header should be written.
// 32 bytes for the hash of the chain head
//
// For each header (148 bytes)
// 32 bytes hash of the header
// 32 bytes of chain work
// 4 bytes of height
// 80 bytes of block header data

protected static final int FILE_PROLOGUE_BYTES = 1024;

/** Returns the offset from the file start where the latest block should be written (end of prev block). */
