Skip to content

Commit

Permalink
SPVBlockStore: support 32 byte chain work
Browse files Browse the repository at this point in the history
Existing V1 files are automatically migrated to V2 format.

Includes a test for migration from V1 to V2 format. This requires
`getRingCursor()` to be changed from private to package-private.
  • Loading branch information
schildbach committed Jul 25, 2024
1 parent effe62b commit 7860555
Show file tree
Hide file tree
Showing 2 changed files with 137 additions and 38 deletions.
141 changes: 103 additions & 38 deletions core/src/main/java/org/bitcoinj/store/SPVBlockStore.java
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,10 @@ public class SPVBlockStore implements BlockStore {
public static final int DEFAULT_CAPACITY = 10000;
@Deprecated
public static final String HEADER_MAGIC = "SPVB";
// Magic header.
private static final byte[] MAGIC_HEADER = HEADER_MAGIC.getBytes(StandardCharsets.US_ASCII);
// Magic header for the V1 format.
static final byte[] HEADER_MAGIC_V1 = HEADER_MAGIC.getBytes(StandardCharsets.US_ASCII);
// Magic header for the V2 format.
static final byte[] HEADER_MAGIC_V2 = "SPV2".getBytes(StandardCharsets.US_ASCII);

protected volatile MappedByteBuffer buffer;
protected final NetworkParameters params;
Expand Down Expand Up @@ -119,8 +121,10 @@ public SPVBlockStore(NetworkParameters params, File file, int capacity, boolean
Objects.requireNonNull(file);
this.params = Objects.requireNonNull(params);
checkArgument(capacity > 0);

try {
boolean exists = file.exists();

// Set up the backing file, empty if it doesn't exist.
randomAccessFile = new RandomAccessFile(file, "rw");
FileChannel channel = randomAccessFile.getChannel();
Expand All @@ -131,44 +135,91 @@ public SPVBlockStore(NetworkParameters params, File file, int capacity, boolean
throw new ChainFileLockedException("Store file is already locked by another process");

// Ensure expected file size, grow if desired.
fileLength = getFileSize(capacity);
if (!exists) {
log.info("Creating new SPV block chain file " + file);
randomAccessFile.setLength(fileLength);
this.fileLength = getFileSize(capacity);
byte[] currentHeader = new byte[4];
if (exists) {
log.info("Using existing SPV block chain file: " + file);
// Map it into memory read/write. The kernel will take care of flushing writes to disk at the most
// efficient times, which may mean that until the map is deallocated the data on disk is randomly
// inconsistent. However the only process accessing it is us, via this mapping, so our own view will
// always be correct. Once we establish the mmap the underlying file and channel can go away. Note that
// the details of mmapping vary between platforms.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, randomAccessFile.length());
buffer.get(currentHeader);
} else {
log.info("Creating new SPV block chain file: " + file);
randomAccessFile.setLength(fileLength);
// Map it into memory read/write. See above comment.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
initNewStore(params.getGenesisBlock());
}

// Maybe migrate V1 to V2 format.
if (Arrays.equals(HEADER_MAGIC_V1, currentHeader)) {
long currentLength = randomAccessFile.length();
long currentBlocksLength = currentLength - FILE_PROLOGUE_BYTES;
if (currentBlocksLength % RECORD_SIZE_V1 != 0)
throw new BlockStoreException(
"File size on disk indicates this is not a V1 block store: " + currentLength);
int currentCapacity = (int) (currentBlocksLength / RECORD_SIZE_V1);
log.info("Migrating SPV block chain file containing " + currentCapacity + " blocks from V1 to V2 " +
"format: " + file);

randomAccessFile.setLength(fileLength);
// Map it into memory again because of the length change.
buffer.force();
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);

// migrate magic header
((Buffer) buffer).rewind();
buffer.put(HEADER_MAGIC_V2);

// migrate headers
final byte[] zeroPadding = new byte[20]; // 32 (V2 work) - 12 (V1 work)
for (int i = currentCapacity - 1; i >= 0; i--) {
byte[] record = new byte[RECORD_SIZE_V1];
buffer.position(FILE_PROLOGUE_BYTES + i * RECORD_SIZE_V1);
buffer.get(record);
buffer.position(FILE_PROLOGUE_BYTES + i * RECORD_SIZE_V2);
buffer.put(record, 0, 32); // hash
buffer.put(zeroPadding);
buffer.put(record, 32, RECORD_SIZE_V1 - 32); // work, height, block header
}

// migrate cursor
int cursorRecord = (getRingCursor() - FILE_PROLOGUE_BYTES) / RECORD_SIZE_V1;
setRingCursor(FILE_PROLOGUE_BYTES + cursorRecord * RECORD_SIZE_V2);
}

// Maybe grow.
if (exists) {
final long currentLength = randomAccessFile.length();
if (currentLength != fileLength) {
if ((currentLength - FILE_PROLOGUE_BYTES) % RECORD_SIZE != 0)
if ((currentLength - FILE_PROLOGUE_BYTES) % RECORD_SIZE_V2 != 0) {
throw new BlockStoreException(
"File size on disk indicates this is not a block store: " + currentLength);
else if (!grow)
"File size on disk indicates this is not a V2 block store: " + currentLength);
} else if (!grow) {
throw new BlockStoreException("File size on disk does not match expected size: " + currentLength
+ " vs " + fileLength);
else if (fileLength < randomAccessFile.length())
} else if (fileLength < randomAccessFile.length()) {
throw new BlockStoreException(
"Shrinking is unsupported: " + currentLength + " vs " + fileLength);
else
} else {
randomAccessFile.setLength(fileLength);
// Map it into memory again because of the length change.
buffer.force();
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);
}
}
}

// Map it into memory read/write. The kernel will take care of flushing writes to disk at the most
// efficient times, which may mean that until the map is deallocated the data on disk is randomly
// inconsistent. However the only process accessing it is us, via this mapping, so our own view will
// always be correct. Once we establish the mmap the underlying file and channel can go away. Note that
// the details of mmapping vary between platforms.
buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0, fileLength);

// Check or initialize the header bytes to ensure we don't try to open some random file.
if (exists) {
byte[] currentHeader = new byte[4];
((Buffer) buffer).rewind();
buffer.get(currentHeader);
if (!Arrays.equals(currentHeader, MAGIC_HEADER))
throw new BlockStoreException("Magic header expected, got: " + ByteUtils.formatHex(currentHeader));
} else {
initNewStore(params.getGenesisBlock());
}
// Check the header bytes to ensure we don't try to open some random file.
byte[] header = new byte[4];
((Buffer) buffer).rewind();
buffer.get(currentHeader);
if (!Arrays.equals(currentHeader, HEADER_MAGIC_V2))
throw new BlockStoreException("Magic header V2 expected: " + new String(currentHeader,
StandardCharsets.US_ASCII));
} catch (Exception e) {
try {
if (randomAccessFile != null) randomAccessFile.close();
Expand All @@ -181,7 +232,7 @@ else if (fileLength < randomAccessFile.length())

private void initNewStore(Block genesisBlock) throws Exception {
((Buffer) buffer).rewind();
buffer.put(MAGIC_HEADER);
buffer.put(HEADER_MAGIC_V2);
// Insert the genesis block.
lock.lock();
try {
Expand All @@ -196,7 +247,7 @@ private void initNewStore(Block genesisBlock) throws Exception {

/**
 * Returns the size in bytes of the file that is used to store the chain with the current parameters.
 *
 * @param capacity number of block records the store should be able to hold
 * @return total file size: one fixed-size prologue plus {@code capacity} V2-sized records
 */
public static int getFileSize(int capacity) {
    // Defect fixed: a stale pre-migration line (`return RECORD_SIZE * capacity + ...`) was left
    // in front of this one, making the second return unreachable (a Java compile error).
    // Only the V2 record size is correct here, since V1 stores are migrated to V2 on open.
    return RECORD_SIZE_V2 * capacity + FILE_PROLOGUE_BYTES /* extra kilobyte for stuff */;
}

@Override
Expand All @@ -215,7 +266,7 @@ public void put(StoredBlock block) throws BlockStoreException {
Sha256Hash hash = block.getHeader().getHash();
notFoundCache.remove(hash);
buffer.put(hash.getBytes());
block.serializeCompact(buffer);
block.serializeCompactV2(buffer);
setRingCursor(buffer.position());
blockCache.put(hash, block);
} finally { lock.unlock(); }
Expand All @@ -242,17 +293,17 @@ public StoredBlock get(Sha256Hash hash) throws BlockStoreException {
final byte[] targetHashBytes = hash.getBytes();
byte[] scratch = new byte[32];
do {
cursor -= RECORD_SIZE;
cursor -= RECORD_SIZE_V2;
if (cursor < FILE_PROLOGUE_BYTES) {
// We hit the start, so wrap around.
cursor = fileLength - RECORD_SIZE;
cursor = fileLength - RECORD_SIZE_V2;
}
// Cursor is now at the start of the next record to check, so read the hash and compare it.
((Buffer) buffer).position(cursor);
buffer.get(scratch);
if (Arrays.equals(scratch, targetHashBytes)) {
// Found the target.
StoredBlock storedBlock = StoredBlock.deserializeCompact(buffer);
StoredBlock storedBlock = StoredBlock.deserializeCompactV2(buffer);
blockCache.put(hash, storedBlock);
return storedBlock;
}
Expand Down Expand Up @@ -315,10 +366,10 @@ public void close() throws BlockStoreException {
}
}

protected static final int RECORD_SIZE = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE;
static final int RECORD_SIZE_V1 = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE;

// File format:
// 4 header bytes = "SPVB"
// V1 file format:
// 4 magic header bytes = "SPVB"
// 4 cursor bytes, which indicate the offset from the first kb where the next block header should be written.
// 32 bytes for the hash of the chain head
//
Expand All @@ -327,10 +378,24 @@ public void close() throws BlockStoreException {
// 12 bytes of chain work
// 4 bytes of height
// 80 bytes of block header data

static final int RECORD_SIZE_V2 = 32 /* hash */ + StoredBlock.COMPACT_SERIALIZED_SIZE_V2;

// V2 file format:
// 4 magic header bytes = "SPV2"
// 4 cursor bytes, which indicate the offset from the first kb where the next block header should be written.
// 32 bytes for the hash of the chain head
//
// For each header (148 bytes)
// 32 bytes hash of the header
// 32 bytes of chain work
// 4 bytes of height
// 80 bytes of block header data

protected static final int FILE_PROLOGUE_BYTES = 1024;

/** Returns the offset from the file start where the latest block should be written (end of prev block). */
private int getRingCursor() {
int getRingCursor() {
int c = buffer.getInt(4);
checkState(c >= FILE_PROLOGUE_BYTES, () ->
"integer overflow");
Expand Down
34 changes: 34 additions & 0 deletions core/src/test/java/org/bitcoinj/store/SPVBlockStoreTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,11 @@
import org.junit.Test;

import java.io.File;
import java.io.RandomAccessFile;
import java.math.BigInteger;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.time.Duration;
import java.time.Instant;
import java.util.Collections;
Expand Down Expand Up @@ -190,4 +194,34 @@ public void oneStoreDelete() throws Exception {
assertTrue(deleted);
}
}

@Test
public void migrateV1toV2() throws Exception {
    // Hand-craft a V1-format store file on disk, bypassing SPVBlockStore entirely, so we can
    // verify that opening it triggers the automatic V1 -> V2 migration.
    // V1 layout (per the comments in SPVBlockStore): 4 magic bytes "SPVB", 4 cursor bytes,
    // 32 chain-head hash bytes, then RECORD_SIZE_V1 records after the 1 KB prologue.
    // create V1 format
    RandomAccessFile raf = new RandomAccessFile(blockStoreFile, "rw");
    FileChannel channel = raf.getChannel();
    // Room for 3 V1 records after the prologue; only the first record is actually written.
    ByteBuffer buffer = channel.map(FileChannel.MapMode.READ_WRITE, 0,
            SPVBlockStore.FILE_PROLOGUE_BYTES + SPVBlockStore.RECORD_SIZE_V1 * 3);
    buffer.put(SPVBlockStore.HEADER_MAGIC_V1); // header magic
    Block genesisBlock = TESTNET.getGenesisBlock();
    StoredBlock storedGenesisBlock = new StoredBlock(genesisBlock.cloneAsHeader(), genesisBlock.getWork(), 0);
    Sha256Hash genesisHash = storedGenesisBlock.getHeader().getHash();
    // Write the single record at the start of the ring: 32-byte hash followed by the
    // V1 compact serialization (12-byte work, height, 80-byte header).
    buffer.position(SPVBlockStore.FILE_PROLOGUE_BYTES);
    buffer.put(genesisHash.getBytes());
    storedGenesisBlock.serializeCompact(buffer);
    // Cursor (absolute offset 4) points just past the record we wrote, i.e. where the
    // next block would go.
    buffer.putInt(4, buffer.position()); // ring cursor
    // Chain-head hash lives at absolute offset 8 in the prologue.
    ((Buffer) buffer).position(8);
    buffer.put(genesisHash.getBytes()); // chain head
    // NOTE(review): the mapped buffer is not explicitly force()d before close; presumably the
    // OS flushes the mapping when the store is reopened below — seems to hold in practice.
    raf.close();

    // Opening the file should detect the V1 magic and migrate the store in place.
    // migrate to V2 format
    SPVBlockStore store = new SPVBlockStore(TESTNET, blockStoreFile);

    // The migrated record must still resolve to the same chain head.
    // check block is the same
    assertEquals(genesisHash, store.getChainHead().getHeader().getHash());
    // The cursor must have been rescaled from V1 to V2 record size (1 record past the prologue).
    // check ring cursor
    assertEquals(SPVBlockStore.FILE_PROLOGUE_BYTES + SPVBlockStore.RECORD_SIZE_V2 * 1,
            store.getRingCursor());
    store.close();
}
}

0 comments on commit 7860555

Please sign in to comment.