From abeb9de302a1ee74f6ba81eda9adb69356b6f00e Mon Sep 17 00:00:00 2001
From: Josef Widder
Date: Fri, 19 Apr 2024 15:38:10 +0200
Subject: [PATCH] fixed a bug in the heighest_staged

---
 specs/quint/specs/reset.qnt     | 248 ++++++++++++++++++--------------
 specs/quint/specs/resetTest.qnt |  17 +--
 2 files changed, 146 insertions(+), 119 deletions(-)

diff --git a/specs/quint/specs/reset.qnt b/specs/quint/specs/reset.qnt
index ff2568f01..3b11adcdc 100644
--- a/specs/quint/specs/reset.qnt
+++ b/specs/quint/specs/reset.qnt
@@ -21,22 +21,17 @@
 type Height = int
 type Time = int
 type ForkID = int
-// val L2EpochSize = 5
-// val L1EpochSize = 3
-// val maxReg = 3
-// val minReg = -3
-// val ValAddresses = Set("Huey", "Dewey", "Louie")
-
-
 const L2EpochSize : Height
 const L1EpochSize : Time
 const EthStartTime : Time
+const MaxEthBlockTime: Time
 const maxReg : int
 const minReg : int
 const ValAddresses : Set[Address]
 
 type Registration = {
-    eth_epoch: int, // Time/Time gives unit 1
+    eth_epoch: int, // Computed by eth.now / EVE_duration. Should never be 0 as this creates a corner case with the
+                    // initial forkID also being 0.
     seq_num: int,
     starknet_addr: Address,
     amount: int
@@ -52,21 +47,24 @@ type L2BlockProof = {
 
 type L2Proof =
     | Proof (L2BlockProof)
-    | Invalid // for modeling ZK checks. to allow executions where invalid proofs are submitted
-    | None
+    | Invalid // for modeling ZK checks. to allow executions where invalid proofs are submitted to L1
+    | None    // if no proof is submitted
 
 type L1Block = {
     // meta data
     time : Time,
     // transactions
-    newRegistrations: Set[Registration], // MAtan: next_update_id used to make reg.index unique
+    newRegistrations: Set[Registration], // Matan: next_update_id used to make reg.index unique
+                                         // We don't capture the staking logic in this model
+                                         // For reset we only care about registrations and the absence etc.
+                                         // This logic is encoded in the environment now
     newProof: L2Proof,
     // state
-    pendingRegistrations: Set[Registration], // TODO: Matan: unfulfilled_updates
+    unfulfilled_updates: Set[Registration],
     provenHeight: Height,
     l2forkID: int,
-    latestProvenL2BlockHash: L2Block, // What happens is that the actuall proof submitted to L1 is computed from L2 off-chain
+    latestProvenL2BlockHash: L2Block, // What happens is that the actual proof submitted to L1 is computed from L2 off-chain
     // history variable for analysis. Not sure we actually need it
     cntRegistrations: int,
 }
@@ -81,9 +79,7 @@ type L2Block = {
     valset: Address -> int, // stakers
     stagedUpdates: Set[Registration],
     unstagedUpdates: Set[Registration],
-    highest_applied_seq_num: int,
-    // history of received registrations needed for proof (for analysis. history variable)
-    // receivedRegistrations: Set[Registration],
+    heighest_staged_seq_num: int, // we need this explicitly in case the stagedUpdates becomes empty
     // proofs not modelled here
 }
 
@@ -101,12 +97,11 @@ pure def toSet (l) =
 // Functions for L1 chain
 //
-
 pure def L1Epoch(b: L1Block) : int = b.time / L1EpochSize
 
 pure def staleRegs (b: L1Block) : Set[Registration] =
-    b.pendingRegistrations.filter(r => r.eth_epoch + 2 <= L1Epoch(b))
+    b.unfulfilled_updates.filter(r => r.eth_epoch + 2 <= L1Epoch(b))
 
 pure def existsStale (b: L1Block) : bool = staleRegs(b).size() > 0
 
@@ -125,42 +120,54 @@ pure def confirmedRegistrations (vp: L2Proof) : Set[Registration] =
     | Proof(p) => p.confirmedRegs
   }
 
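// Editorial sketch (standalone, not part of reset.qnt; names are illustrative): the staleness
// rule used by staleRegs above, reduced to plain integers. With L1EpochSize = 4, a block at
// time 42 sits in epoch 10, so a registration from eth_epoch 8 is already stale (8 + 2 <= 10)
// while one from epoch 9 is not.
module stalenessSketch {
  pure def epochOf(time: int, epochSize: int) : int = time / epochSize

  pure def isStale(regEpoch: int, blockTime: int, epochSize: int) : bool =
    regEpoch + 2 <= epochOf(blockTime, epochSize)

  // expected to evaluate to true in the quint repl
  val staleExample = isStale(8, 42, 4) and not(isStale(9, 42, 4))
}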
+// TODO: I am basing staleness on the previous block. We should check whether this is OK
+// In principle it can always be the case that we accept a fork at the end of an EVE, and
+// in the next block new registrations become stale, and we need to fork again after one block
+// TODO: make a test for that
+pure def proofOK (prev: L1Block, p: L2BlockProof) : bool = all {
+    p.from_height == prev.provenHeight + 1,
+    p.forkID == prev.expectedForkID(), // the forkID is right, and
+    prev.staleRegs().exclude(p.confirmedRegs) == Set() // there are no stale registrations left
-// If no registrations are stale, I accept everything from past epochs
-// otherwise, for heights greater they need to be from the current epoch
-pure def proofOK (prev: L1Block, p: L2BlockProof) : bool =
-    if (prev.provenHeight + 1 != p.from_height) false
-    else
-        p.forkID == prev.expectedForkID() // if you "apply proof", as a resykt there cannot be stale registrations left
+}
 
-pure def newL1Block (prev: L1Block, regs: Set[Registration], proof: L2Proof) : L1Block =
+// We accept a proof if all the registrations that were stale in the previous block are confirmed
+// This allows the scenario where
+// - we accept a proof
+// - immediately have new stale registrations (if the proof came towards the end of an EVE)
+// - and we immediately need a reset
+// I think we cannot escape such a scenario. The only question is whether we want an invariant that says
+// "an L1 Block that contains a valid proof does not contain stale registrations"
+pure def newL1Block (prev: L1Block, regs: Set[Registration], proof: L2Proof, delay: Time) : L1Block =
     val accepted = match proof {
-        | None => None
+        | Proof(p) => if (proofOK(prev, p)) proof
+                      else None
+        | None => None
         | Invalid => None
-        | Proof(p) =>
-            if (proofOK(prev, p)) proof
-            else None
+
     }
-    val confirmed = confirmedRegistrations(accepted)
     val newProvenHeight = match accepted {
+        | Proof(p) => p.to_height
         | None => prev.provenHeight
         | Invalid => prev.provenHeight
-        | Proof(p) => p.to_height
     }
     val newBlockHash = match accepted {
+        | Proof(p) => p.resultingBlock
         | None => prev.latestProvenL2BlockHash
         | Invalid => prev.latestProvenL2BlockHash
-        | Proof(p) => p.resultingBlock
     }
-    { time : prev.time + 1, // TODO: right now time increments. We should pass in the time as a parameter to this function
+    val auxBlock = {
+        time : prev.time + delay,
         newRegistrations: regs,
         newProof: proof,
         provenHeight: newProvenHeight,
         latestProvenL2BlockHash: newBlockHash,
         l2forkID: prev.expectedForkID(), // off by one perhaps.
-        pendingRegistrations: prev.pendingRegistrations.union(regs).exclude(confirmed),
+        unfulfilled_updates: prev.unfulfilled_updates.union(regs).exclude(accepted.confirmedRegistrations()),
         cntRegistrations: prev.cntRegistrations + regs.size()
     }
+    { ...auxBlock, l2forkID: auxBlock.expectedForkID()}
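// Editorial sketch (standalone, names illustrative): the bookkeeping newL1Block does for
// unfulfilled_updates, reduced to integer seq_nums -- pending registrations grow by the
// newly posted ones and shrink by the ones confirmed in an accepted proof.
module pendingSketch {
  pure def nextPending(pending: Set[int], posted: Set[int], confirmed: Set[int]) : Set[int] =
    pending.union(posted).exclude(confirmed)

  // expected to evaluate to true in the quint repl
  val pendingExample = nextPending(Set(1, 2), Set(3), Set(1, 3)) == Set(2)
}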
+
 
 //
 // Functions for L2 chain
 //
@@ -177,60 +184,61 @@ pure def addOneReg (vals: Address -> int, reg: Registration) : Address -> int =
 pure def nextValSet (vals: Address -> int, regs: Set[Registration]) : Address -> int =
     regs.fold(vals, (v, r) => addOneReg(v,r))
 
-pure def completePrefixIdx(r: Set[Registration]) : int =
+pure def completePrefixIdx(r: Set[Registration], heighest: int) : int =
     // smallest unproven block - 1
-    val existingIdx = Set(0).union(r.fold(Set(), (s, x) => s.union(Set(x.seq_num))))
+    val existingIdx = 0.to(heighest).union(r.fold(Set(), (s, x) => s.union(Set(x.seq_num))))
     // if we have {0,1,2,4,5,8}, complete is {0,1,2}; TODO test
     val complete = existingIdx.filter(i => 0.to(i).subseteq(existingIdx))
     complete.fold(0, (max, i) => if (i > max) i else max)
 
+pure def maxSeqNum (regs: Set[Registration]) : int =
+    regs.fold(-1, (s, x) => if (x.seq_num > s) x.seq_num else s)
+
-type DenseResult = {
-    idx: int,
-    denseRegs: Set[Registration]
-}
-
-pure def denseSubset (regs: Set[Registration]) : DenseResult =
-    val c = regs.completePrefixIdx()
-    { idx: c,
-      denseRegs: regs.filter(r => r.seq_num <= c)
-    }
-
+pure def denseSubset (regs: Set[Registration], heighest: int) : Set[Registration] =
+    val c = regs.completePrefixIdx(heighest)
+    regs.filter(r => r.seq_num <= c)
+
+// Compute an L2 block in the normal case (no reset)
+// block is computed on top of the L2 chain, with new registrations
 pure def newL2Block (chain: List[L2Block], regs: Set[Registration]) : L2Block =
     val prev = last(chain)
     if (chain.length() % L2EpochSize == 0) // new epoch
-        val toStaged = prev.unstagedUpdates.denseSubset().denseRegs
-        val toUnstaged = prev.unstagedUpdates.exclude(toStaged).union(regs)
+        val toStaged = prev.unstagedUpdates.denseSubset(prev.heighest_staged_seq_num)
+        val heighest = if (toStaged == Set()) prev.heighest_staged_seq_num else toStaged.maxSeqNum()
+        val toUnstaged = prev.unstagedUpdates
+                            .exclude(toStaged)
+                            .union(regs)
+                            .filter(x => x.seq_num > heighest)
        { valset: nextValSet(prev.valset, prev.stagedUpdates),
-         stagedUpdates: toStaged, // TODO: "dense subset" and remainder stays in unstaged
+         stagedUpdates: toStaged,
         unstagedUpdates: toUnstaged,
         height: chain.length(),
         forkID: prev.forkID,
         registrations: regs,
-         highest_applied_seq_num: prev.unstagedUpdates.denseSubset().idx,
-         //receivedRegistrations: prev.receivedRegistrations.union(regs),
+         heighest_staged_seq_num: heighest,
       }
     else
       { valset: prev.valset,
        stagedUpdates: prev.stagedUpdates,
-       unstagedUpdates: prev.unstagedUpdates.union(regs),
+       unstagedUpdates: prev.unstagedUpdates
+                           .union(regs)
+                           .filter(x => x.seq_num > prev.heighest_staged_seq_num),
       height: chain.length(),
      forkID: prev.forkID,
     registrations: regs,
-    highest_applied_seq_num: prev.highest_applied_seq_num,
-    //receivedRegistrations: prev.receivedRegistrations.union(regs),
+    heighest_staged_seq_num: prev.heighest_staged_seq_num,
   }
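// Editorial sketch (standalone, simplified to integer seq_nums; names illustrative): the
// dense-prefix rule that completePrefixIdx/denseSubset implement above. Seq_nums up to
// `heighest` count as already staged, so with heighest = 2 and pending seq_nums {3, 4, 6}
// the dense prefix reaches 4 and only 6 stays unstaged; a gap right above `heighest`
// blocks everything.
module densePrefixSketch {
  pure def completePrefix(seqNums: Set[int], heighest: int) : int =
    val existingIdx = 0.to(heighest).union(seqNums)
    val complete = existingIdx.filter(i => 0.to(i).subseteq(existingIdx))
    complete.fold(0, (max, i) => if (i > max) i else max)

  // expected to evaluate to true in the quint repl
  val denseExample = completePrefix(Set(3, 4, 6), 2) == 4 and completePrefix(Set(4, 6), 2) == 2
}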
-
-// special function to compute a first block for a new fork
+// Function to compute a first block for a new fork
 // TODO: test with holes in registrations
 // TODO: test with forkBlock that misses stale registrations
 pure def forkBlock (prev: L2Block, regs: Set[Registration], h: Height, fID: ForkID) : L2Block =
     val allRegs = prev.unstagedUpdates.union(prev.stagedUpdates).union(regs)
-    val toApplyNow = allRegs.denseSubset().denseRegs
+    val toApplyNow = allRegs.denseSubset(prev.heighest_staged_seq_num)
+    val heighest = if (toApplyNow == Set()) prev.heighest_staged_seq_num else toApplyNow.maxSeqNum()
     val toUnstaged = allRegs.exclude(toApplyNow)
     val newValSet = nextValSet(prev.valset, toApplyNow)
     { height: h, // lastL1.provenHeight + 1,
@@ -239,7 +247,7 @@ pure def forkBlock (prev: L2Block, regs: Set[Registration], h: Height, fID: Fork
       valset: newValSet,
       stagedUpdates: Set(),
       unstagedUpdates: toUnstaged,
-      highest_applied_seq_num: allRegs.denseSubset().idx,
+      heighest_staged_seq_num: heighest,
       //receivedRegistrations: prev.receivedRegistrations.union(regs),
     }
 
@@ -270,12 +278,15 @@ pure def makeProof (l2: List[L2Block], to_height: Height, l1: List[L1Block]) : L
 //
 //
-var L1: List[L1Block]
+var L1: List[L1Block] // this is a super simplification and doesn't consider non-finalized Eth blocks
 var L2: List[L2Block]
 var envRegs: Set[Registration]
 
+
+//
 // Invariants
-
+//
+
 def staleInv = not(L1.last().existsStale())
 
 // chainID of last element is 0
@@ -292,10 +303,17 @@ def growingForkID = (
 
 // is removed from pending
 def noConfirmed =
     ( L1.length() > 2) implies
-        L1[L1.length() - 2].pendingRegistrations.forall(
-            r => L1[L1.length() - 1].pendingRegistrations.contains(r)
+        L1[L1.length() - 2].unfulfilled_updates.forall(
+            r => L1[L1.length() - 1].unfulfilled_updates.contains(r)
         )
 
+// the first conjunct is important: unstaged registrations all have a seq_num above the highest staged one.
+// this ensures that we don't apply a registration twice
+// TODO test
+def stagedInv = all {
+    L2.last().unstagedUpdates.forall(r => r.seq_num > L2.last().heighest_staged_seq_num),
+    L2.last().stagedUpdates.forall(r => r.seq_num <= L2.last().heighest_staged_seq_num)
+}
 
@@ -309,8 +327,9 @@ def oneForkIDperProof = (
 )
 
-
+//
 // ACTIONS
+//
 
 action init =
     val initialL2Block = {
@@ -320,14 +339,14 @@ action init =
         valset: Map(),
         stagedUpdates: Set(),
         unstagedUpdates: Set(),
-        highest_applied_seq_num: -1,
+        heighest_staged_seq_num: 0,
         //receivedRegistrations: Set(),
     }
     all {
         L1' = List({time : EthStartTime,
                     newRegistrations: Set(),
                     newProof: None,
-                    pendingRegistrations: Set(),
+                    unfulfilled_updates: Set(),
                     provenHeight: 0, // initial state proven?
                     l2forkID: 0,
                     latestProvenL2BlockHash: initialL2Block,
@@ -352,33 +371,72 @@ action addRegistration = all {
     L2' = L2,
 }
 
+action addL1Block = all {
+    nondet someHeight = (-1).to(L2.length() - 1).oneOf()
+    nondet blocktime = 1.to(MaxEthBlockTime).oneOf()
+    val newproof = makeProof(L2, someHeight, L1) // if someHeight < provenHeight this will give Invalid proof
+    val submProof = Set(newproof, None).oneOf()
+    L1' = L1.append(newL1Block(L1.last(), envRegs, submProof, blocktime)),
+    L2' = L2,
+    envRegs' = Set(),
 }
 
+action addL2Block = all {
+    nondet regs = L1.last().unfulfilled_updates.powerset().oneOf() // This doesn't replay confirmed registrations
+                                                                   // And it doesn't ensure at-most-once semantics
+                                                                   // for transferring registrations
+    L2' = L2.append(newL2Block(L2, regs)),
+    L1' = L1,
+    envRegs' = envRegs,
+}
-
+action reset = all {
+    if (existsStale(L1.last()) and L2.last().forkID != L1.last().l2forkID)
+        // TODO debug to make sure L1 last forkID and stale are handled on the right L1 block
+        // there is an off by one issue.
+        // AND last block on L2 must have old fork id TODO: important
+        L2' = L2.slice(0, L1.last().provenHeight + 1)
+                .append(forkBlock(
+                    L2[L1.last().provenHeight],
+                    L1.last().staleRegs(),
+                    L1.last().provenHeight + 1,
+                    L1.last().expectedForkID()))
+        // TODO: write more involved test that fails old_reset
+    else
+        L2' = L2,
+    L1' = L1,
+    envRegs' = envRegs,
+}
+
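// Editorial sketch (standalone, names illustrative): the rollback arithmetic used in reset.
// slice(0, provenHeight + 1) keeps the blocks at heights 0..provenHeight, and the fork block
// is then appended at height provenHeight + 1.
module rollbackSketch {
  pure def rollback(heights: List[int], provenHeight: int) : List[int] =
    heights.slice(0, provenHeight + 1).append(provenHeight + 1)

  // expected to evaluate to true in the quint repl
  val rollbackExample = rollback(List(0, 1, 2, 3, 4), 2) == List(0, 1, 2, 3)
}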
+action step = any {
+    addRegistration, // adds a registration to the environment. A bit like submitting a transaction
+    addL1Block, // adds a new L1 block, and takes all registrations from the environment
+    addL2Block, // adds a normal L2 block / no reset
+    reset // this is asynchronous now. We should link it to L1 block generation
+}
+
+
+
+//
+// Extra actions to drive tests in a fine-grained way
+//
 
 // for repl and writing tests
-action addL1BlockNoProof = all {
-    L1' = L1.append(newL1Block(L1.last(), envRegs, None)),
+action addL1BlockNoProof (blocktime) = all {
+    L1' = L1.append(newL1Block(L1.last(), envRegs, None, blocktime)),
     L2' = L2,
     envRegs' = Set(),
 }
 
 // for repl and writing tests
-action addL1BlockProofWithHeight(h) = all {
+action addL1BlockProofWithHeight(h, blocktime) = all {
     val newproof = makeProof(L2, h, L1)
-    L1' = L1.append(newL1Block(L1.last(), envRegs, newproof)),
+    L1' = L1.append(newL1Block(L1.last(), envRegs, newproof, blocktime)),
     L2' = L2,
     envRegs' = Set(),
 }
 
-// TODO: rewrite action to take heights
-action addL1Block = all {
-    nondet someHeight = (-1).to(L2.length() - 1).oneOf() // TODO: instead of -1 take lastProvenHeight
-    val newproof = makeProof(L2, someHeight, L1)
-    L1' = L1.append(newL1Block(L1.last(), envRegs, newproof)),
-    L2' = L2,
-    envRegs' = Set(),
-}
+
 
 // for repl and writing tests
 action addL2BlockRegs (regs: Set[Registration]) : bool = all {
@@ -387,13 +445,6 @@ action addL2BlockRegs (regs: Set[Registration]) : bool = all {
     L2' = L2.append(newL2Block(L2, regs)),
     envRegs' = envRegs,
 }
 
-action addL2Block = all {
-    nondet regs = L1.last().pendingRegistrations.powerset().oneOf()
-    // TODO: Question: at most once?
-    L2' = L2.append(newL2Block(L2, regs)),
-    L1' = L1,
-    envRegs' = envRegs,
-}
 
 // this was the first version of reset that contains two bugs
 // in the if conditions, multiple resets can happen for the same forkID (L2 progress is over-written)
@@ -413,32 +464,7 @@ action old_reset = all {
     envRegs' = envRegs,
 }
 
-action reset = all {
-    if (existsStale(L1.last()) and L2.last().forkID != L1.last().l2forkID)
-        // TODO debug to make sure L1 last forkID and stale are handled on the right L1 block
-        // there is an off by one issue.
-        // AND last block on L2 must have old fork id TODO: important
-        L2' = L2
-            .slice(0, L1.last().provenHeight + 1)
-            .append(forkBlock(
-                L2[L1.last().provenHeight],
-                L1.last().staleRegs(),
-                L1.last().provenHeight + 1,
-                L1.last().expectedForkID()))
-        // TODO: write more involved test that fails old_reset
-    else
-        L2' = L2,
-    L1' = L1,
-    envRegs' = envRegs,
-}
 
-action step = any {
-    addRegistration, // adds a registration to the environment. A bit like submitting a transaction
-    addL1Block, // adds a new L1 block, and takes all registrations from the environment
-    addL2Block, // adds a normal L2 block / no reset
-    reset // this is asynchronous now. We should link it to L1 block generation
-}
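// Editorial sketch (not part of the patch): the kind of run the new blocktime parameter
// enables in resetTest.qnt. With the instantiation below (EthStartTime = 42, MaxEthBlockTime = 3),
// a single L1 block added with blocktime 3 lands at time 45, per newL1Block's time: prev.time + delay.
// The run name is illustrative and assumes it were added next to the existing tests.
run blocktimeSketchTest =
    init
    .then(addL1BlockNoProof(3))
    .expect(L1[L1.length()-1].time == 45) // 42 + 3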
diff --git a/specs/quint/specs/resetTest.qnt b/specs/quint/specs/resetTest.qnt
index e923bc435..a392c8d14 100644
--- a/specs/quint/specs/resetTest.qnt
+++ b/specs/quint/specs/resetTest.qnt
@@ -6,6 +6,7 @@ import reset (
     L2EpochSize = 5,
     L1EpochSize = 4,
     EthStartTime = 42, // This enforces that the first registrations have EVE greater than the inital forkID
+    MaxEthBlockTime = 3,
     maxReg = 3,
     minReg = -3,
     ValAddresses = Set("Huey", "Dewey", "Louie")
@@ -15,35 +16,35 @@ run FailedResetTest =
     init
     .then(addRegistration)
-    .then(addL1BlockNoProof)
-    .then((3 * L1EpochSize).reps(_ => addL1BlockNoProof))
+    .then(addL1BlockNoProof(1))
+    .then((3 * L1EpochSize).reps(_ => addL1BlockNoProof(1)))
     .then(reset)
     .expect(L2[L2.length()-1].forkID == L1[L1.length()-1].l2forkID and L2.length() == 2) // first reset happened
-    .then((L1EpochSize).reps(_ => addL1BlockNoProof))
+    .then((L1EpochSize).reps(_ => addL1BlockNoProof(1)))
     .then(reset) // TODO: this should do nothing as we don't have new registrations between two resets
                  // TODO: we need a rule that resets at most once for every forkID
     .expect(L2[L2.length()-1].forkID == L1[L1.length()-1].l2forkID and L2.length() == 2) // second reset happened
-    .then(addL1BlockProofWithHeight(1)) // now proof gets into L1
+    .then(addL1BlockProofWithHeight(1,1)) // now proof gets into L1
     .expect(not(existsStale(L1[L1.length()-1]))) // no more stale registrations
 
 run ResetwithProofonL1Test =
     init
     .then(addRegistration)
-    .then((3*L1EpochSize).reps(_=>addL1BlockNoProof))
+    .then((3*L1EpochSize).reps(_=>addL1BlockNoProof(1)))
     .then(reset)
-    .then(addL1BlockProofWithHeight(1))
+    .then(addL1BlockProofWithHeight(1,1))
     .expect(L2[L2.length()-1] == L1[L1.length()-1].latestProvenL2BlockHash)
 
 run moveOneRegToL1ToL2 =
     addRegistration
-    .then(addL1BlockNoProof)
+    .then(addL1BlockNoProof(1))
     .then(addL2BlockRegs(L1[L1.length()-1].newRegistrations))
 
 run ProofRegsTest =
     init
     .then(5.reps(_=>moveOneRegToL1ToL2))
-    .then(addL1BlockProofWithHeight(3))
+    .then(addL1BlockProofWithHeight(3,1))
 
 run simpleTest = init