From 02d856990b16b738ba2074e8cf9698fcce4bf981 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Wed, 26 Feb 2025 13:49:09 +0800 Subject: [PATCH 01/26] Start using Nimbus EVM. --- fluffy/evm/portal_evm.nim | 68 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 fluffy/evm/portal_evm.nim diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim new file mode 100644 index 000000000..779b34983 --- /dev/null +++ b/fluffy/evm/portal_evm.nim @@ -0,0 +1,68 @@ +# Fluffy +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +import + # std/[tables, sets], + chronos, + # chronicles, + stint, + results, + eth/common/[hashes, accounts, addresses, headers, transactions], + web3/[primitives, eth_api_types, eth_api], + ../../execution_chain/beacon/web3_eth_conv, + ../../execution_chain/common/common, + ../../execution_chain/db/ledger, + ../../execution_chain/transaction/call_evm, + ../../execution_chain/[evm/types, evm/state, evm/evm_errors], + ../network/history/history_network, + ../network/state/[state_endpoints, state_network] + +from eth/common/eth_types_rlp import rlpHash + +export evmc, addresses, stint, headers, state_network + +#{.push raises: [].} + +type + PortalEvm* = ref object + historyNetwork: HistoryNetwork + stateNetwork: StateNetwork + +proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork) = + PortalEvm(historyNetwork: hn, stateNetwork: sn) + +proc call*(evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32): EvmResult[CallResult] = + let + header = (waitFor 
evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: + raise newException(ValueError, "Could not find header with requested block number") + parent = (waitFor evm.historyNetwork.getVerifiedBlockHeader(header.parentHash)).valueOr: + raise newException(ValueError, "Could not find header with requested block number") + com = CommonRef.new(newCoreDbRef DefaultDbMemory, nil) + fork = com.toEVMFork(header) + vmState = BaseVMState() + + vmState.init(parent, header, com, com.db.baseTxFrame()) + + vmState.mutateLedger: + db.setBalance(default(Address), 0.u256()) + # for accessPair in accessList: + # let + # accountAddr = accessPair.address + # acc = await lcProxy.getAccount(accountAddr, quantityTag) + # accCode = await lcProxy.getCode(accountAddr, quantityTag) + + # db.setNonce(accountAddr, acc.nonce) + # db.setBalance(accountAddr, acc.balance) + # db.setCode(accountAddr, accCode) + + # for slot in accessPair.storageKeys: + # let slotInt = UInt256.fromHex(toHex(slot)) + # let slotValue = await lcProxy.getStorageAt(accountAddr, slotInt, quantityTag) + # db.setStorage(accountAddr, slotInt, slotValue) + db.persist(clearEmptyAccount = false) # settle accounts storage + + rpcCallEvm(tx, header, vmState) From 43472d518f8a1391206cefd8502879610f408859 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Thu, 27 Feb 2025 13:16:23 +0800 Subject: [PATCH 02/26] Implement proof of concept implementation using Nimbus EVM. 
--- execution_chain/db/ledger.nim | 1 - fluffy/evm/portal_evm.nim | 132 ++++++++++++++++----- fluffy/fluffy.nim | 6 +- fluffy/network/beacon/beacon_network.nim | 4 +- fluffy/network/history/history_network.nim | 4 +- fluffy/network/state/state_endpoints.nim | 2 +- fluffy/network/state/state_network.nim | 4 +- fluffy/portal_node.nim | 15 ++- fluffy/rpc/rpc_eth_api.nim | 37 ++++++ 9 files changed, 158 insertions(+), 47 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index e0725a26a..d58d036fa 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -790,7 +790,6 @@ proc update(wd: var WitnessData, acc: AccountRef) = if not acc.originalStorage.isNil: for k, v in acc.originalStorage: - if v.isZero: continue wd.storageKeys.incl k for k, v in acc.overlayStorage: diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 779b34983..cdd787f68 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -8,16 +8,18 @@ import # std/[tables, sets], chronos, + # taskpools, # chronicles, + stew/byteutils, stint, results, eth/common/[hashes, accounts, addresses, headers, transactions], web3/[primitives, eth_api_types, eth_api], ../../execution_chain/beacon/web3_eth_conv, - ../../execution_chain/common/common, ../../execution_chain/db/ledger, + ../../execution_chain/common/common, ../../execution_chain/transaction/call_evm, - ../../execution_chain/[evm/types, evm/state, evm/evm_errors], + ../../execution_chain/evm/[types, state, evm_errors], ../network/history/history_network, ../network/state/[state_endpoints, state_network] @@ -27,42 +29,108 @@ export evmc, addresses, stint, headers, state_network #{.push raises: [].} -type - PortalEvm* = ref object - historyNetwork: HistoryNetwork - stateNetwork: StateNetwork +type PortalEvm* = ref object + historyNetwork: HistoryNetwork + stateNetwork: StateNetwork -proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork) = +proc init*(T: 
type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = PortalEvm(historyNetwork: hn, stateNetwork: sn) -proc call*(evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32): EvmResult[CallResult] = +proc call*( + evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32 +): Future[EvmResult[CallResult]] {.async.} = #{.async: (raises: [CancelledError, ValueError]).} = + let - header = (waitFor evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: - raise newException(ValueError, "Could not find header with requested block number") - parent = (waitFor evm.historyNetwork.getVerifiedBlockHeader(header.parentHash)).valueOr: - raise newException(ValueError, "Could not find header with requested block number") + to = tx.to.valueOr: + raise newException(ValueError, "to address missing in transaction") + header = (await evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: + raise + newException(ValueError, "Could not find header with requested block number") + # do we need to get the parent? 
+ parent = (await evm.historyNetwork.getVerifiedBlockHeader(header.parentHash)).valueOr: + raise + newException(ValueError, "Could not find parent header with requested block number") + # update the get account call + acc = (await evm.stateNetwork.getAccount(header.stateRoot, to, Opt.none(Hash32))).valueOr: + raise + newException(ValueError, "Unable to get account") + code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to)).valueOr: + raise + newException(ValueError, "Unable to get code") + + # slot1Key = UInt256.fromBytesBE(hexToSeqByte("0x0000000000000000000000000000000000000000000000000000000000000000")) + # slot2Key = UInt256.fromBytesBE(hexToSeqByte("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e572")) + # slot3Key = UInt256.fromBytesBE(hexToSeqByte("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e574")) + # slot4Key = UInt256.fromBytesBE(hexToSeqByte("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e573")) + # slot5Key = UInt256.fromBytesBE(hexToSeqByte("0xff48e101e1045535d929d495692c383c0f1b7e861d5176a028cb8373d1179af2")) + + # slot1 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot1Key)).valueOr: + # raise newException(ValueError, "Unable to get slot1") + # slot2 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot2Key)).valueOr: + # raise newException(ValueError, "Unable to get slot2") + # slot3 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot3Key)).valueOr: + # raise newException(ValueError, "Unable to get slot3") + # slot4 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot4Key)).valueOr: + # raise newException(ValueError, "Unable to get slot4") + # slot5 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot5Key)).valueOr: + # raise newException(ValueError, "Unable to get slot5") + com = CommonRef.new(newCoreDbRef DefaultDbMemory, nil) - fork = com.toEVMFork(header) + 
# fork = com.toEVMFork(header) vmState = BaseVMState() vmState.init(parent, header, com, com.db.baseTxFrame()) - vmState.mutateLedger: - db.setBalance(default(Address), 0.u256()) - # for accessPair in accessList: - # let - # accountAddr = accessPair.address - # acc = await lcProxy.getAccount(accountAddr, quantityTag) - # accCode = await lcProxy.getCode(accountAddr, quantityTag) - - # db.setNonce(accountAddr, acc.nonce) - # db.setBalance(accountAddr, acc.balance) - # db.setCode(accountAddr, accCode) - - # for slot in accessPair.storageKeys: - # let slotInt = UInt256.fromHex(toHex(slot)) - # let slotValue = await lcProxy.getStorageAt(accountAddr, slotInt, quantityTag) - # db.setStorage(accountAddr, slotInt, slotValue) - db.persist(clearEmptyAccount = false) # settle accounts storage - - rpcCallEvm(tx, header, vmState) + vmState.ledger.setBalance(to, acc.balance) + vmState.ledger.setNonce(to, acc.nonce) + vmState.ledger.setCode(to, code.asSeq()) + # vmState.ledger.setStorage(to, slot1Key, slot1) + # vmState.ledger.setStorage(to, slot2Key, slot2) + # vmState.ledger.setStorage(to, slot3Key, slot3) + # vmState.ledger.setStorage(to, slot4Key, slot4) + # vmState.ledger.setStorage(to, slot5Key, slot5) + vmState.ledger.persist(clearEmptyAccount = false) + + var + lastMultiKeysCount = -1 + multiKeys = vmState.ledger.makeMultiKeys() + callResult: EvmResult[CallResult] + i = 0 + while i < 10: #multiKeys.keys.len() > lastMultiKeysCount: + inc i + + lastMultiKeysCount = multiKeys.keys.len() + + callResult = rpcCallEvm(tx, header, vmState) + echo "callResult: ", callResult + + vmState.ledger.collectWitnessData() + multiKeys = vmState.ledger.makeMultiKeys() + + for k in multiKeys.keys: + echo "k.storageMode: ", k.storageMode + echo "k.address: ", k.address + echo "k.codeTouched: ", k.codeTouched + + if not k.storageMode and k.address != default(Address): + let account = (await evm.stateNetwork.getAccount(header.stateRoot, k.address, Opt.none(Hash32))).valueOr: + raise 
newException(ValueError, "Unable to get account") + vmState.ledger.setBalance(k.address, account.balance) + vmState.ledger.setNonce(k.address, account.nonce) + + if k.codeTouched: + let code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address)).valueOr: + raise newException(ValueError, "Unable to get code") + vmState.ledger.setCode(k.address, code.asSeq()) + + if not k.storageKeys.isNil(): + for sk in k.storageKeys.keys: + let slotKey = UInt256.fromBytesBE(sk.storageSlot) + echo "sk.storageSlot: ", slotKey + let slotValue = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, k.address, slotKey)).valueOr: + raise newException(ValueError, "Unable to get slot") + vmState.ledger.setStorage(k.address, slotKey, slotValue) + + vmState.ledger.persist(clearEmptyAccount = false) + + return callResult diff --git a/fluffy/fluffy.nim b/fluffy/fluffy.nim index f64aae46a..9b6276ed5 100644 --- a/fluffy/fluffy.nim +++ b/fluffy/fluffy.nim @@ -254,7 +254,7 @@ proc run( case flag of RpcFlag.eth: rpcServer.installEthApiHandlers( - node.historyNetwork, node.beaconLightClient, node.stateNetwork + node.historyNetwork, node.beaconLightClient, node.stateNetwork, node.portalEvm ) of RpcFlag.debug: rpcServer.installDebugApiHandlers(node.stateNetwork) @@ -344,14 +344,14 @@ when isMainModule: raiseAssert exc.msg # shouldn't happen notice "Shutting down after having received SIGINT" - node.state = PortalNodeState.Stopping + node.status = PortalNodeStatus.Stopping try: setControlCHook(controlCHandler) except Exception as exc: # TODO Exception warn "Cannot set ctrl-c handler", msg = exc.msg - while node.state == PortalNodeState.Running: + while node.status == PortalNodeStatus.Running: try: poll() except CatchableError as e: diff --git a/fluffy/network/beacon/beacon_network.nim b/fluffy/network/beacon/beacon_network.nim index 4191293f5..8f1908eae 100644 --- a/fluffy/network/beacon/beacon_network.nim +++ b/fluffy/network/beacon/beacon_network.nim @@ -448,10 +448,10 
@@ proc processContentLoop(n: BeaconNetwork) {.async: (raises: []).} = proc statusLogLoop(n: BeaconNetwork) {.async: (raises: []).} = try: while true: + await sleepAsync(60.seconds) + info "Beacon network status", routingTableNodes = n.portalProtocol.routingTable.len() - - await sleepAsync(60.seconds) except CancelledError: trace "statusLogLoop canceled" diff --git a/fluffy/network/history/history_network.nim b/fluffy/network/history/history_network.nim index 236de27dd..1b7aafca5 100644 --- a/fluffy/network/history/history_network.nim +++ b/fluffy/network/history/history_network.nim @@ -422,10 +422,10 @@ proc processContentLoop(n: HistoryNetwork) {.async: (raises: []).} = proc statusLogLoop(n: HistoryNetwork) {.async: (raises: []).} = try: while true: + await sleepAsync(60.seconds) + info "History network status", routingTableNodes = n.portalProtocol.routingTable.len() - - await sleepAsync(60.seconds) except CancelledError: trace "statusLogLoop canceled" diff --git a/fluffy/network/state/state_endpoints.nim b/fluffy/network/state/state_endpoints.nim index 211c4bd88..aadffad97 100644 --- a/fluffy/network/state/state_endpoints.nim +++ b/fluffy/network/state/state_endpoints.nim @@ -161,7 +161,7 @@ proc getStorageProof( doAssert(nibblesIdx <= nibbles.len()) ok((proof, nibblesIdx == nibbles.len())) -proc getAccount( +proc getAccount*( n: StateNetwork, stateRoot: Hash32, address: Address, maybeBlockHash: Opt[Hash32] ): Future[Opt[Account]] {.async: (raises: [CancelledError]).} = let (accountProof, exists) = ( diff --git a/fluffy/network/state/state_network.nim b/fluffy/network/state/state_network.nim index 4906fcf6c..806241c28 100644 --- a/fluffy/network/state/state_network.nim +++ b/fluffy/network/state/state_network.nim @@ -262,10 +262,10 @@ proc processContentLoop(n: StateNetwork) {.async: (raises: []).} = proc statusLogLoop(n: StateNetwork) {.async: (raises: []).} = try: while true: + await sleepAsync(60.seconds) + info "State network status", routingTableNodes = 
n.portalProtocol.routingTable.len() - - await sleepAsync(60.seconds) except CancelledError: trace "statusLogLoop canceled" diff --git a/fluffy/portal_node.nim b/fluffy/portal_node.nim index b9c50b574..56650986e 100644 --- a/fluffy/portal_node.nim +++ b/fluffy/portal_node.nim @@ -19,13 +19,14 @@ import ./network/wire/[portal_stream, portal_protocol_config], ./network/beacon/[beacon_init_loader, beacon_light_client], ./network/history/[history_network, history_content], - ./network/state/[state_network, state_content] + ./network/state/[state_network, state_content], + ./evm/portal_evm export beacon_light_client, history_network, state_network, portal_protocol_config, forks type - PortalNodeState* = enum + PortalNodeStatus* = enum Starting Running Stopping @@ -40,7 +41,7 @@ type contentRequestRetries*: int PortalNode* = ref object - state*: PortalNodeState + status*: PortalNodeStatus discovery: protocol.Protocol contentDB: ContentDB streamManager: StreamManager @@ -48,6 +49,7 @@ type historyNetwork*: Opt[HistoryNetwork] stateNetwork*: Opt[StateNetwork] beaconLightClient*: Opt[LightClient] + portalEvm*: Opt[PortalEvm] statusLogLoop: Future[void] # Beacon light client application callbacks triggered when new finalized header @@ -190,6 +192,11 @@ proc new*( historyNetwork: historyNetwork, stateNetwork: stateNetwork, beaconLightClient: beaconLightClient, + portalEvm: + if historyNetwork.isSome() and stateNetwork.isSome(): + Opt.some(PortalEvm.init(historyNetwork.get(), stateNetwork.get())) + else: + Opt.none(PortalEvm), ) proc statusLogLoop(n: PortalNode) {.async: (raises: []).} = @@ -227,7 +234,7 @@ proc start*(n: PortalNode) = n.statusLogLoop = statusLogLoop(n) - n.state = PortalNodeState.Running + n.status = PortalNodeStatus.Running proc stop*(n: PortalNode) {.async: (raises: []).} = debug "Stopping Portal node" diff --git a/fluffy/rpc/rpc_eth_api.nim b/fluffy/rpc/rpc_eth_api.nim index d3a45182c..35d7dbc44 100644 --- a/fluffy/rpc/rpc_eth_api.nim +++ 
b/fluffy/rpc/rpc_eth_api.nim @@ -17,6 +17,7 @@ import ../network/history/[history_network, history_content], ../network/state/[state_network, state_content, state_endpoints], ../network/beacon/beacon_light_client, + ../evm/portal_evm, ../version from ../../execution_chain/errors import ValidationError @@ -125,11 +126,17 @@ template getOrRaise(stateNetwork: Opt[StateNetwork]): StateNetwork = raise newException(ValueError, "state sub-network not enabled") sn +template getOrRaise(portalEvm: Opt[PortalEvm]): PortalEvm = + let evm = portalEvm.valueOr: + raise newException(ValueError, "portal evm not enabled") + evm + proc installEthApiHandlers*( rpcServer: RpcServer, historyNetwork: Opt[HistoryNetwork], beaconLightClient: Opt[LightClient], stateNetwork: Opt[StateNetwork], + portalEvm: Opt[PortalEvm], ) = rpcServer.rpc("web3_clientVersion") do() -> string: return clientVersion @@ -418,3 +425,33 @@ proc installEthApiHandlers*( storageHash: proofs.account.storageRoot, storageProof: storageProof, ) + + # TransactionArgs* = object + # `from`*: Opt[Address] # (optional) The address the transaction is sent from. + # to*: Opt[Address] # The address the transaction is directed to. + # gas*: Opt[Quantity] # (optional) Integer of the gas provided for the transaction execution. eth_call consumes zero gas, but this parameter may be needed by some executions. + # gasPrice*: Opt[Quantity] # (optional) Integer of the gasPrice used for each paid gas. + # maxFeePerGas*: Opt[Quantity] # (optional) MaxFeePerGas is the maximum fee per gas offered, in wei. + # maxPriorityFeePerGas*: Opt[Quantity] # (optional) MaxPriorityFeePerGas is the maximum miner tip per gas offered, in wei. + # value*: Opt[UInt256] # (optional) Integer of the value sent with this transaction. + # nonce*: Opt[Quantity] # (optional) integer of a nonce. 
This allows to overwrite your own pending transactions that use the same nonce + rpcServer.rpc("eth_call") do( + tx: TransactionArgs, quantityTag: RtBlockIdentifier + ) -> seq[byte]: + # TODO: add documentation + + if tx.to.isNone(): + raise newException(ValueError, "to address is required") + + if quantityTag.kind == bidAlias: + raise newException(ValueError, "tag not yet implemented") + + let + hn = historyNetwork.getOrRaise() + sn = stateNetwork.getOrRaise() + evm = portalEvm.getOrRaise() + + let callResult = (await evm.call(tx, quantityTag.number.uint64)).valueOr: + raise newException(ValueError, "Unable to call contract") + + callResult.output From 1164a11cb51d12bdefb8f757b1fa932903bb5cc5 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Thu, 27 Feb 2025 13:25:40 +0800 Subject: [PATCH 03/26] Cleanup. --- fluffy/evm/portal_evm.nim | 64 ++++++++++++++------------------------- 1 file changed, 22 insertions(+), 42 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index cdd787f68..7738d7217 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -38,8 +38,8 @@ proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = proc call*( evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32 -): Future[EvmResult[CallResult]] {.async.} = #{.async: (raises: [CancelledError, ValueError]).} = - +): Future[EvmResult[CallResult]] {.async.} = + #{.async: (raises: [CancelledError, ValueError]).} = let to = tx.to.valueOr: raise newException(ValueError, "to address missing in transaction") @@ -48,32 +48,14 @@ proc call*( newException(ValueError, "Could not find header with requested block number") # do we need to get the parent? 
parent = (await evm.historyNetwork.getVerifiedBlockHeader(header.parentHash)).valueOr: - raise - newException(ValueError, "Could not find parent header with requested block number") + raise newException( + ValueError, "Could not find parent header with requested block number" + ) # update the get account call acc = (await evm.stateNetwork.getAccount(header.stateRoot, to, Opt.none(Hash32))).valueOr: - raise - newException(ValueError, "Unable to get account") + raise newException(ValueError, "Unable to get account") code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to)).valueOr: - raise - newException(ValueError, "Unable to get code") - - # slot1Key = UInt256.fromBytesBE(hexToSeqByte("0x0000000000000000000000000000000000000000000000000000000000000000")) - # slot2Key = UInt256.fromBytesBE(hexToSeqByte("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e572")) - # slot3Key = UInt256.fromBytesBE(hexToSeqByte("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e574")) - # slot4Key = UInt256.fromBytesBE(hexToSeqByte("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e573")) - # slot5Key = UInt256.fromBytesBE(hexToSeqByte("0xff48e101e1045535d929d495692c383c0f1b7e861d5176a028cb8373d1179af2")) - - # slot1 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot1Key)).valueOr: - # raise newException(ValueError, "Unable to get slot1") - # slot2 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot2Key)).valueOr: - # raise newException(ValueError, "Unable to get slot2") - # slot3 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot3Key)).valueOr: - # raise newException(ValueError, "Unable to get slot3") - # slot4 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot4Key)).valueOr: - # raise newException(ValueError, "Unable to get slot4") - # slot5 = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, to, slot5Key)).valueOr: 
- # raise newException(ValueError, "Unable to get slot5") + raise newException(ValueError, "Unable to get code") com = CommonRef.new(newCoreDbRef DefaultDbMemory, nil) # fork = com.toEVMFork(header) @@ -84,12 +66,7 @@ proc call*( vmState.ledger.setBalance(to, acc.balance) vmState.ledger.setNonce(to, acc.nonce) vmState.ledger.setCode(to, code.asSeq()) - # vmState.ledger.setStorage(to, slot1Key, slot1) - # vmState.ledger.setStorage(to, slot2Key, slot2) - # vmState.ledger.setStorage(to, slot3Key, slot3) - # vmState.ledger.setStorage(to, slot4Key, slot4) - # vmState.ledger.setStorage(to, slot5Key, slot5) - vmState.ledger.persist(clearEmptyAccount = false) + # vmState.ledger.persist(clearEmptyAccount = false) var lastMultiKeysCount = -1 @@ -98,39 +75,42 @@ proc call*( i = 0 while i < 10: #multiKeys.keys.len() > lastMultiKeysCount: inc i - lastMultiKeysCount = multiKeys.keys.len() callResult = rpcCallEvm(tx, header, vmState) - echo "callResult: ", callResult vmState.ledger.collectWitnessData() multiKeys = vmState.ledger.makeMultiKeys() for k in multiKeys.keys: - echo "k.storageMode: ", k.storageMode - echo "k.address: ", k.address - echo "k.codeTouched: ", k.codeTouched - if not k.storageMode and k.address != default(Address): - let account = (await evm.stateNetwork.getAccount(header.stateRoot, k.address, Opt.none(Hash32))).valueOr: + let account = ( + await evm.stateNetwork.getAccount( + header.stateRoot, k.address, Opt.none(Hash32) + ) + ).valueOr: raise newException(ValueError, "Unable to get account") vmState.ledger.setBalance(k.address, account.balance) vmState.ledger.setNonce(k.address, account.nonce) if k.codeTouched: - let code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address)).valueOr: + let code = ( + await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) + ).valueOr: raise newException(ValueError, "Unable to get code") vmState.ledger.setCode(k.address, code.asSeq()) if not k.storageKeys.isNil(): for sk in 
k.storageKeys.keys: let slotKey = UInt256.fromBytesBE(sk.storageSlot) - echo "sk.storageSlot: ", slotKey - let slotValue = (await evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, k.address, slotKey)).valueOr: + let slotValue = ( + await evm.stateNetwork.getStorageAtByStateRoot( + header.stateRoot, k.address, slotKey + ) + ).valueOr: raise newException(ValueError, "Unable to get slot") vmState.ledger.setStorage(k.address, slotKey, slotValue) - vmState.ledger.persist(clearEmptyAccount = false) + # vmState.ledger.persist(clearEmptyAccount = false) return callResult From 86d0d82601ef9b5829386aae681f4e549cbf6e43 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Fri, 28 Feb 2025 16:21:38 +0800 Subject: [PATCH 04/26] Make CommonRef db initialization configurable. --- execution_chain/common/common.nim | 32 ++++++++++++++---------- fluffy/evm/portal_evm.nim | 19 ++++++-------- fluffy/network/state/state_endpoints.nim | 5 +++- 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/execution_chain/common/common.nim b/execution_chain/common/common.nim index 4bcb4064f..4124b75cd 100644 --- a/execution_chain/common/common.nim +++ b/execution_chain/common/common.nim @@ -176,20 +176,21 @@ proc init(com : CommonRef, networkId : NetworkId, config : ChainConfig, genesis : Genesis, - pruneHistory: bool) = + pruneHistory: bool, + initializeDb: bool) = config.daoCheck() - com.db = db - com.config = config + com.db = db + com.config = config com.forkTransitionTable = config.toForkTransitionTable() - com.networkId = networkId - com.syncProgress= SyncProgress() - com.syncState = Waiting - com.pruneHistory= pruneHistory - com.extraData = ShortClientId - com.taskpool = taskpool - com.gasLimit = DEFAULT_GAS_LIMIT + com.networkId = networkId + com.syncProgress = SyncProgress() + com.syncState = Waiting + com.pruneHistory = pruneHistory + com.extraData = ShortClientId + com.taskpool = taskpool + com.gasLimit = DEFAULT_GAS_LIMIT # 
com.forkIdCalculator and com.genesisHash are set # by setForkId @@ -213,7 +214,8 @@ proc init(com : CommonRef, # By default, history begins at genesis. com.startOfHistory = GENESIS_PARENT_HASH - com.initializeDb() + if initializeDb: + com.initializeDb() proc isBlockAfterTtd(com: CommonRef, header: Header, txFrame: CoreDbTxRef): bool = if com.config.terminalTotalDifficulty.isNone: @@ -237,6 +239,7 @@ proc new*( networkId: NetworkId = MainNet; params = networkParams(MainNet); pruneHistory = false; + initializeDb = true; ): CommonRef = ## If genesis data is present, the forkIds will be initialized @@ -248,7 +251,8 @@ proc new*( networkId, params.config, params.genesis, - pruneHistory) + pruneHistory, + initializeDb) proc new*( _: type CommonRef; @@ -257,6 +261,7 @@ proc new*( config: ChainConfig; networkId: NetworkId = MainNet; pruneHistory = false; + initializeDb = true; ): CommonRef = ## There is no genesis data present @@ -268,7 +273,8 @@ proc new*( networkId, config, nil, - pruneHistory) + pruneHistory, + initializeDb) func clone*(com: CommonRef, db: CoreDbRef): CommonRef = ## clone but replace the db diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 7738d7217..134aef475 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -46,23 +46,20 @@ proc call*( header = (await evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: raise newException(ValueError, "Could not find header with requested block number") - # do we need to get the parent? 
- parent = (await evm.historyNetwork.getVerifiedBlockHeader(header.parentHash)).valueOr: - raise newException( - ValueError, "Could not find parent header with requested block number" - ) - # update the get account call - acc = (await evm.stateNetwork.getAccount(header.stateRoot, to, Opt.none(Hash32))).valueOr: + acc = (await evm.stateNetwork.getAccount(header.stateRoot, to)).valueOr: raise newException(ValueError, "Unable to get account") code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to)).valueOr: raise newException(ValueError, "Unable to get code") - com = CommonRef.new(newCoreDbRef DefaultDbMemory, nil) - # fork = com.toEVMFork(header) + com = CommonRef.new( + DefaultDbMemory.newCoreDbRef(), + taskpool = nil, + config = networkParams(MainNet).config, + initializeDb = false + ) vmState = BaseVMState() - vmState.init(parent, header, com, com.db.baseTxFrame()) - + vmState.init(header, header, com, com.db.baseTxFrame()) vmState.ledger.setBalance(to, acc.balance) vmState.ledger.setNonce(to, acc.nonce) vmState.ledger.setCode(to, code.asSeq()) diff --git a/fluffy/network/state/state_endpoints.nim b/fluffy/network/state/state_endpoints.nim index aadffad97..d05791628 100644 --- a/fluffy/network/state/state_endpoints.nim +++ b/fluffy/network/state/state_endpoints.nim @@ -162,7 +162,10 @@ proc getStorageProof( ok((proof, nibblesIdx == nibbles.len())) proc getAccount*( - n: StateNetwork, stateRoot: Hash32, address: Address, maybeBlockHash: Opt[Hash32] + n: StateNetwork, + stateRoot: Hash32, + address: Address, + maybeBlockHash = Opt.none(Hash32), ): Future[Opt[Account]] {.async: (raises: [CancelledError]).} = let (accountProof, exists) = ( await n.getAccountProof(stateRoot, address, maybeBlockHash) From 4d2f21be50ea8d7fbc00dc8e86309ad443b90091 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Fri, 28 Feb 2025 22:01:09 +0800 Subject: [PATCH 05/26] Return error string. Use txFrame dispose. 
--- fluffy/evm/portal_evm.nim | 59 ++++++++++++++++++++++---------------- fluffy/rpc/rpc_eth_api.nim | 5 +++- 2 files changed, 39 insertions(+), 25 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 134aef475..f3398c663 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -27,43 +27,53 @@ from eth/common/eth_types_rlp import rlpHash export evmc, addresses, stint, headers, state_network -#{.push raises: [].} +{.push raises: [].} type PortalEvm* = ref object historyNetwork: HistoryNetwork stateNetwork: StateNetwork + com: CommonRef proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = - PortalEvm(historyNetwork: hn, stateNetwork: sn) + let config = + try: + networkParams(MainNet).config + except ValueError as e: + raiseAssert(e.msg) # Should not fail + except RlpError as e: + raiseAssert(e.msg) # Should not fail + + let com = CommonRef.new( + DefaultDbMemory.newCoreDbRef(), + taskpool = nil, + config = config, + initializeDb = false, + ) + + PortalEvm(historyNetwork: hn, stateNetwork: sn, com: com) proc call*( evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32 -): Future[EvmResult[CallResult]] {.async.} = - #{.async: (raises: [CancelledError, ValueError]).} = +): Future[Result[CallResult, string]] {.async: (raises: [CancelledError]).} = let to = tx.to.valueOr: - raise newException(ValueError, "to address missing in transaction") + return err("to address is required") header = (await evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: - raise - newException(ValueError, "Could not find header with requested block number") + return err("Unable to get block header") acc = (await evm.stateNetwork.getAccount(header.stateRoot, to)).valueOr: - raise newException(ValueError, "Unable to get account") + return err("Unable to get account") code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to)).valueOr: - raise newException(ValueError, "Unable to get 
code") + return err("Unable to get code") - com = CommonRef.new( - DefaultDbMemory.newCoreDbRef(), - taskpool = nil, - config = networkParams(MainNet).config, - initializeDb = false - ) - vmState = BaseVMState() + let txFrame = evm.com.db.baseTxFrame().txFrameBegin() + defer: + txFrame.dispose() # always dispose state changes - vmState.init(header, header, com, com.db.baseTxFrame()) + # TODO: review what child header to use here (second parameter) + let vmState = BaseVMState.new(header, header, evm.com, txFrame) vmState.ledger.setBalance(to, acc.balance) vmState.ledger.setNonce(to, acc.nonce) vmState.ledger.setCode(to, code.asSeq()) - # vmState.ledger.persist(clearEmptyAccount = false) var lastMultiKeysCount = -1 @@ -86,7 +96,7 @@ proc call*( header.stateRoot, k.address, Opt.none(Hash32) ) ).valueOr: - raise newException(ValueError, "Unable to get account") + return err("Unable to get account") vmState.ledger.setBalance(k.address, account.balance) vmState.ledger.setNonce(k.address, account.nonce) @@ -94,7 +104,7 @@ proc call*( let code = ( await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) ).valueOr: - raise newException(ValueError, "Unable to get code") + return err("Unable to get code") vmState.ledger.setCode(k.address, code.asSeq()) if not k.storageKeys.isNil(): @@ -105,9 +115,10 @@ proc call*( header.stateRoot, k.address, slotKey ) ).valueOr: - raise newException(ValueError, "Unable to get slot") + return err("Unable to get slot") vmState.ledger.setStorage(k.address, slotKey, slotValue) - # vmState.ledger.persist(clearEmptyAccount = false) - - return callResult + callResult.mapErr( + proc(e: EvmErrorObj): string = + "EVM execution failed: " & $e.code + ) diff --git a/fluffy/rpc/rpc_eth_api.nim b/fluffy/rpc/rpc_eth_api.nim index 35d7dbc44..7b6f1c863 100644 --- a/fluffy/rpc/rpc_eth_api.nim +++ b/fluffy/rpc/rpc_eth_api.nim @@ -452,6 +452,9 @@ proc installEthApiHandlers*( evm = portalEvm.getOrRaise() let callResult = (await evm.call(tx, 
quantityTag.number.uint64)).valueOr: - raise newException(ValueError, "Unable to call contract") + raise newException(ValueError, error) + + if callResult.error.len() > 0: + raise newException(ValueError, callResult.error) callResult.output From 0cc05d83dc566306d3e5c0fa713bb46410af84e7 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Mon, 3 Mar 2025 20:40:20 +0800 Subject: [PATCH 06/26] Make state lookups concurrent. --- execution_chain/db/ledger.nim | 2 +- fluffy/evm/portal_evm.nim | 148 +++++++++++++++++++++++++--------- 2 files changed, 112 insertions(+), 38 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index d58d036fa..04090717b 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -26,7 +26,7 @@ import ./aristo/aristo_blobify export - code_bytes + code_bytes, multi_keys const debugLedgerRef = false diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index f3398c663..dae839f63 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -8,7 +8,6 @@ import # std/[tables, sets], chronos, - # taskpools, # chronicles, stew/byteutils, stint, @@ -21,7 +20,7 @@ import ../../execution_chain/transaction/call_evm, ../../execution_chain/evm/[types, state, evm_errors], ../network/history/history_network, - ../network/state/[state_endpoints, state_network] + ../network/state/[state_endpoints, state_network, state_content] from eth/common/eth_types_rlp import rlpHash @@ -29,10 +28,35 @@ export evmc, addresses, stint, headers, state_network {.push raises: [].} -type PortalEvm* = ref object - historyNetwork: HistoryNetwork - stateNetwork: StateNetwork - com: CommonRef +const evmCallLimit = 1024 + +type + AccountQuery = object + address: Address + accFut: Future[Opt[Account]] + + StorageQuery = object + address: Address + slotKey: UInt256 + storageFut: Future[Opt[UInt256]] + + CodeQuery = object + address: Address + codeFut: 
Future[Opt[Bytecode]] + + PortalEvm* = ref object + historyNetwork: HistoryNetwork + stateNetwork: StateNetwork + com: CommonRef + +template init(T: type AccountQuery, adr: Address, fut: Future[Opt[Account]]): T = + T(address: adr, accFut: fut) + +template init(T: type StorageQuery, adr: Address, slotKey: UInt256, fut: Future[Opt[UInt256]]): T = + T(address: adr, slotKey: slotKey, storageFut: fut) + +template init(T: type CodeQuery, adr: Address, fut: Future[Opt[Bytecode]]): T = + T(address: adr, codeFut: fut) proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = let config = @@ -52,18 +76,49 @@ proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = PortalEvm(historyNetwork: hn, stateNetwork: sn, com: com) +func equals(mkeys1: MultiKeysRef, mkeys2: MultiKeysRef): bool = + doAssert(not mkeys1.isNil()) + doAssert(not mkeys2.isNil()) + + let + keys1 = mkeys1.keys + keys2 = mkeys2.keys + + if keys1.len() != keys2.len(): + return false + + for i in 0..keys1.high: + let + k1 = keys1[i] + k2 = keys2[i] + + if k1.hash != k2.hash or k1.address != k2.address or k1.codeTouched != k2.codeTouched: + return false + + if k1.storageKeys.isNil() or k2.storageKeys.isNil(): + if k1.storageKeys != k2.storageKeys: + return false + else: + if k1.storageKeys.keys.len() != k2.storageKeys.keys.len(): + return false + + for j in 0..k1.storageKeys.keys.high: + if k1.storageKeys.keys[j].storageSlot != k2.storageKeys.keys[j].storageSlot: + return false + + return true + proc call*( evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32 -): Future[Result[CallResult, string]] {.async: (raises: [CancelledError]).} = +): Future[Result[CallResult, string]] {.async: (raises: [CancelledError, CatchableError]).} = let to = tx.to.valueOr: return err("to address is required") header = (await evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: return err("Unable to get block header") - acc = (await 
evm.stateNetwork.getAccount(header.stateRoot, to)).valueOr: - return err("Unable to get account") - code = (await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to)).valueOr: - return err("Unable to get code") + # Fetch account and code concurrently + accFut = evm.stateNetwork.getAccount(header.stateRoot, to) + codeFut = evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to) let txFrame = evm.com.db.baseTxFrame().txFrameBegin() defer: @@ -71,52 +126,71 @@ proc call*( # TODO: review what child header to use here (second parameter) let vmState = BaseVMState.new(header, header, evm.com, txFrame) + + let acc = (await accFut).valueOr: + return err("Unable to get account") vmState.ledger.setBalance(to, acc.balance) vmState.ledger.setNonce(to, acc.nonce) + + let code = (await codeFut).valueOr: + return err("Unable to get code") vmState.ledger.setCode(to, code.asSeq()) + vmState.ledger.collectWitnessData() + var - lastMultiKeysCount = -1 + lastMultiKeys = new MultiKeysRef multiKeys = vmState.ledger.makeMultiKeys() callResult: EvmResult[CallResult] - i = 0 - while i < 10: #multiKeys.keys.len() > lastMultiKeysCount: - inc i - lastMultiKeysCount = multiKeys.keys.len() + evmCallCount = 0 + while evmCallCount < evmCallLimit and not lastMultiKeys.equals(multiKeys): + let sp = vmState.ledger.beginSavepoint() callResult = rpcCallEvm(tx, header, vmState) + inc evmCallCount + vmState.ledger.rollback(sp) + lastMultiKeys = multiKeys vmState.ledger.collectWitnessData() multiKeys = vmState.ledger.makeMultiKeys() + + var + accountQueries = newSeq[AccountQuery]() + storageQueries = newSeq[StorageQuery]() + codeQueries = newSeq[CodeQuery]() + for k in multiKeys.keys: if not k.storageMode and k.address != default(Address): - let account = ( - await evm.stateNetwork.getAccount( - header.stateRoot, k.address, Opt.none(Hash32) - ) - ).valueOr: - return err("Unable to get account") - vmState.ledger.setBalance(k.address, account.balance) - vmState.ledger.setNonce(k.address, 
account.nonce) + let accFut = evm.stateNetwork.getAccount(header.stateRoot, k.address) + accountQueries.add(AccountQuery.init(k.address, accFut)) if k.codeTouched: - let code = ( - await evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) - ).valueOr: - return err("Unable to get code") - vmState.ledger.setCode(k.address, code.asSeq()) + let codeFut = evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) + codeQueries.add(CodeQuery.init(k.address, codeFut)) if not k.storageKeys.isNil(): for sk in k.storageKeys.keys: - let slotKey = UInt256.fromBytesBE(sk.storageSlot) - let slotValue = ( - await evm.stateNetwork.getStorageAtByStateRoot( - header.stateRoot, k.address, slotKey - ) - ).valueOr: - return err("Unable to get slot") - vmState.ledger.setStorage(k.address, slotKey, slotValue) + let + slotKey = UInt256.fromBytesBE(sk.storageSlot) + storageFut = evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, k.address, slotKey) + storageQueries.add(StorageQuery.init(k.address, slotKey, storageFut)) + + for q in accountQueries: + let acc = (await q.accFut).valueOr: + return err("Unable to get account") + vmState.ledger.setBalance(q.address, acc.balance) + vmState.ledger.setNonce(q.address, acc.nonce) + + for q in storageQueries: + let slotValue = (await q.storageFut).valueOr: + return err("Unable to get slot") + vmState.ledger.setStorage(q.address, q.slotKey, slotValue) + + for q in codeQueries: + let code = (await q.codeFut).valueOr: + return err("Unable to get code") + vmState.ledger.setCode(q.address, code.asSeq()) callResult.mapErr( proc(e: EvmErrorObj): string = From 602c8158b150b09b25493efd33dee0ada42e251d Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 4 Mar 2025 11:50:12 +0800 Subject: [PATCH 07/26] Move equals into multi_keys.nim. 
--- execution_chain/stateless/multi_keys.nim | 32 ++++++ fluffy/evm/portal_evm.nim | 136 ++++++++++------------- 2 files changed, 93 insertions(+), 75 deletions(-) diff --git a/execution_chain/stateless/multi_keys.nim b/execution_chain/stateless/multi_keys.nim index eaf85dfad..ea6ce92d2 100644 --- a/execution_chain/stateless/multi_keys.nim +++ b/execution_chain/stateless/multi_keys.nim @@ -186,3 +186,35 @@ proc visitMatch*(m: var MultiKeysRef, mg: MatchGroup, depth: int): KeyData = doAssert(mg.isValidMatch, "Multiple identical keys are not allowed") m.keys[mg.group.first].visited = true result = m.keys[mg.group.first] + +func equals*(mkeys1: MultiKeysRef, mkeys2: MultiKeysRef): bool = + doAssert(not mkeys1.isNil()) + doAssert(not mkeys2.isNil()) + + let + keys1 = mkeys1.keys + keys2 = mkeys2.keys + + if keys1.len() != keys2.len(): + return false + + for i in 0..keys1.high: + let + k1 = keys1[i] + k2 = keys2[i] + + if k1.hash != k2.hash or k1.address != k2.address or k1.codeTouched != k2.codeTouched: + return false + + if k1.storageKeys.isNil() or k2.storageKeys.isNil(): + if k1.storageKeys != k2.storageKeys: + return false + else: + if k1.storageKeys.keys.len() != k2.storageKeys.keys.len(): + return false + + for j in 0..k1.storageKeys.keys.high: + if k1.storageKeys.keys[j].storageSlot != k2.storageKeys.keys[j].storageSlot: + return false + + return true diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index dae839f63..931964d01 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
import - # std/[tables, sets], + std/[tables, sets], chronos, # chronicles, stew/byteutils, @@ -28,7 +28,7 @@ export evmc, addresses, stint, headers, state_network {.push raises: [].} -const evmCallLimit = 1024 +const evmCallLimit = 10000 type AccountQuery = object @@ -49,13 +49,15 @@ type stateNetwork: StateNetwork com: CommonRef -template init(T: type AccountQuery, adr: Address, fut: Future[Opt[Account]]): T = +func init(T: type AccountQuery, adr: Address, fut: Future[Opt[Account]]): T = T(address: adr, accFut: fut) -template init(T: type StorageQuery, adr: Address, slotKey: UInt256, fut: Future[Opt[UInt256]]): T = +func init( + T: type StorageQuery, adr: Address, slotKey: UInt256, fut: Future[Opt[UInt256]] +): T = T(address: adr, slotKey: slotKey, storageFut: fut) -template init(T: type CodeQuery, adr: Address, fut: Future[Opt[Bytecode]]): T = +func init(T: type CodeQuery, adr: Address, fut: Future[Opt[Bytecode]]): T = T(address: adr, codeFut: fut) proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = @@ -76,41 +78,9 @@ proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = PortalEvm(historyNetwork: hn, stateNetwork: sn, com: com) -func equals(mkeys1: MultiKeysRef, mkeys2: MultiKeysRef): bool = - doAssert(not mkeys1.isNil()) - doAssert(not mkeys2.isNil()) - - let - keys1 = mkeys1.keys - keys2 = mkeys2.keys - - if keys1.len() != keys2.len(): - return false - - for i in 0..keys1.high: - let - k1 = keys1[i] - k2 = keys2[i] - - if k1.hash != k2.hash or k1.address != k2.address or k1.codeTouched != k2.codeTouched: - return false - - if k1.storageKeys.isNil() or k2.storageKeys.isNil(): - if k1.storageKeys != k2.storageKeys: - return false - else: - if k1.storageKeys.keys.len() != k2.storageKeys.keys.len(): - return false - - for j in 0..k1.storageKeys.keys.high: - if k1.storageKeys.keys[j].storageSlot != k2.storageKeys.keys[j].storageSlot: - return false - - return true - proc call*( evm: PortalEvm, tx: TransactionArgs, 
blockNumOrHash: uint64 | Hash32 -): Future[Result[CallResult, string]] {.async: (raises: [CancelledError, CatchableError]).} = +): Future[Result[CallResult, string]] {.async: (raises: [CancelledError]).} = let to = tx.to.valueOr: return err("to address is required") @@ -144,6 +114,10 @@ proc call*( callResult: EvmResult[CallResult] evmCallCount = 0 + fetchedAccounts = initHashSet[Address]() + fetchedStorage = initHashSet[(Address, UInt256)]() + fetchedCode = initHashSet[Address]() + while evmCallCount < evmCallLimit and not lastMultiKeys.equals(multiKeys): let sp = vmState.ledger.beginSavepoint() callResult = rpcCallEvm(tx, header, vmState) @@ -154,43 +128,55 @@ proc call*( vmState.ledger.collectWitnessData() multiKeys = vmState.ledger.makeMultiKeys() - - var - accountQueries = newSeq[AccountQuery]() - storageQueries = newSeq[StorageQuery]() - codeQueries = newSeq[CodeQuery]() - - for k in multiKeys.keys: - if not k.storageMode and k.address != default(Address): - let accFut = evm.stateNetwork.getAccount(header.stateRoot, k.address) - accountQueries.add(AccountQuery.init(k.address, accFut)) - - if k.codeTouched: - let codeFut = evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) - codeQueries.add(CodeQuery.init(k.address, codeFut)) - - if not k.storageKeys.isNil(): - for sk in k.storageKeys.keys: - let - slotKey = UInt256.fromBytesBE(sk.storageSlot) - storageFut = evm.stateNetwork.getStorageAtByStateRoot(header.stateRoot, k.address, slotKey) - storageQueries.add(StorageQuery.init(k.address, slotKey, storageFut)) - - for q in accountQueries: - let acc = (await q.accFut).valueOr: - return err("Unable to get account") - vmState.ledger.setBalance(q.address, acc.balance) - vmState.ledger.setNonce(q.address, acc.nonce) - - for q in storageQueries: - let slotValue = (await q.storageFut).valueOr: - return err("Unable to get slot") - vmState.ledger.setStorage(q.address, q.slotKey, slotValue) - - for q in codeQueries: - let code = (await q.codeFut).valueOr: - 
return err("Unable to get code") - vmState.ledger.setCode(q.address, code.asSeq()) + try: + var + accountQueries = newSeq[AccountQuery]() + storageQueries = newSeq[StorageQuery]() + codeQueries = newSeq[CodeQuery]() + + for k in multiKeys.keys: + if not k.storageMode and k.address != default(Address): + if k.address notin fetchedAccounts: + let accFut = evm.stateNetwork.getAccount(header.stateRoot, k.address) + accountQueries.add(AccountQuery.init(k.address, accFut)) + + if k.codeTouched and k.address notin fetchedCode: + let codeFut = + evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) + codeQueries.add(CodeQuery.init(k.address, codeFut)) + + if not k.storageKeys.isNil(): + for sk in k.storageKeys.keys: + let + slotKey = UInt256.fromBytesBE(sk.storageSlot) + slotIdx = (k.address, slotKey) + if slotIdx notin fetchedStorage: + let storageFut = evm.stateNetwork.getStorageAtByStateRoot( + header.stateRoot, k.address, slotKey + ) + storageQueries.add(StorageQuery.init(k.address, slotKey, storageFut)) + + for q in accountQueries: + let acc = (await q.accFut).valueOr: + return err("Unable to get account") + vmState.ledger.setBalance(q.address, acc.balance) + vmState.ledger.setNonce(q.address, acc.nonce) + fetchedAccounts.incl(q.address) + + for q in storageQueries: + let slotValue = (await q.storageFut).valueOr: + return err("Unable to get slot") + vmState.ledger.setStorage(q.address, q.slotKey, slotValue) + fetchedStorage.incl((q.address, q.slotKey)) + + for q in codeQueries: + let code = (await q.codeFut).valueOr: + return err("Unable to get code") + vmState.ledger.setCode(q.address, code.asSeq()) + fetchedCode.incl(q.address) + except CatchableError as e: + # TODO: why do the above futures throw a CatchableError and not CancelledError? 
+ raiseAssert(e.msg) callResult.mapErr( proc(e: EvmErrorObj): string = From 64a69bbd6539d67c24f6f7baddbce5bcff6f4641 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 4 Mar 2025 15:22:18 +0800 Subject: [PATCH 08/26] Add some documentation and comments. Tests for multikeys equals. --- execution_chain/stateless/multi_keys.nim | 60 +++++++++++++++++------- fluffy/evm/portal_evm.nim | 42 ++++++++++++++++- tests/test_multi_keys.nim | 54 +++++++++++++++++++++ 3 files changed, 136 insertions(+), 20 deletions(-) diff --git a/execution_chain/stateless/multi_keys.nim b/execution_chain/stateless/multi_keys.nim index ea6ce92d2..5ef8ed6c8 100644 --- a/execution_chain/stateless/multi_keys.nim +++ b/execution_chain/stateless/multi_keys.nim @@ -187,34 +187,58 @@ proc visitMatch*(m: var MultiKeysRef, mg: MatchGroup, depth: int): KeyData = m.keys[mg.group.first].visited = true result = m.keys[mg.group.first] -func equals*(mkeys1: MultiKeysRef, mkeys2: MultiKeysRef): bool = - doAssert(not mkeys1.isNil()) - doAssert(not mkeys2.isNil()) +func equalsStorageMode(m1: MultiKeysRef, m2: MultiKeysRef): bool = + doAssert(not m1.isNil()) + doAssert(not m2.isNil()) - let - keys1 = mkeys1.keys - keys2 = mkeys2.keys + if m1.keys.len() != m2.keys.len(): + return false + + for i in 0..m1.keys.high: + let + kd1 = m1.keys[i] + kd2 = m2.keys[i] + doAssert(kd1.storageMode) + doAssert(kd2.storageMode) + + if kd1.hash != kd2.hash or kd1.storageSlot != kd2.storageSlot: + return false + + return true + +func equals*(m1: MultiKeysRef, m2: MultiKeysRef): bool = + doAssert(not m1.isNil()) + doAssert(not m2.isNil()) - if keys1.len() != keys2.len(): + if m1.keys.len() != m2.keys.len(): return false - for i in 0..keys1.high: + for i in 0..m1.keys.high: let - k1 = keys1[i] - k2 = keys2[i] + kd1 = m1.keys[i] + kd2 = m2.keys[i] - if k1.hash != k2.hash or k1.address != k2.address or k1.codeTouched != k2.codeTouched: + if kd1.storageMode != kd2.storageMode: return false 
- if k1.storageKeys.isNil() or k2.storageKeys.isNil(): - if k1.storageKeys != k2.storageKeys: + if kd1.storageMode: + if kd1.hash != kd2.hash or kd1.storageSlot != kd2.storageSlot: return false - else: - if k1.storageKeys.keys.len() != k2.storageKeys.keys.len(): + else: + continue + + # Not storageMode + if kd1.hash != kd2.hash or kd1.address != kd2.address or kd1.codeTouched != kd2.codeTouched: + return false + + if kd1.storageKeys.isNil() or kd2.storageKeys.isNil(): + if kd1.storageKeys != kd2.storageKeys: return false + else: + continue - for j in 0..k1.storageKeys.keys.high: - if k1.storageKeys.keys[j].storageSlot != k2.storageKeys.keys[j].storageSlot: - return false + # storageKeys not nil + if not equalsStorageMode(kd1.storageKeys, kd2.storageKeys): + return false return true diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 931964d01..af9b1885d 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -28,6 +28,35 @@ export evmc, addresses, stint, headers, state_network {.push raises: [].} +# The Portal EVM uses the Nimbus in-memory EVM to execute transactions using the +# portal state network state data. Currently only call is supported. +# +# Rather than wire in the portal state lookups into the EVM directly, the approach +# taken here is to optimistically execute the transaction multiple times with the +# goal of building the correct access list so that we can then lookup the accessed +# state from the portal network, store the state in the in-memory EVM and then +# finally execute the transaction using the correct state. The Portal EVM makes +# use of data in memory during the call and therefore each piece of state is never +# fetched more than once. 
+#
+# The assumption here is that network lookups for state data are generally much
+# slower than the time it takes to execute a transaction in the EVM and therefore
+# executing the transaction multiple times should not significantly slow down the
+# call given that we gain the ability to fetch the state concurrently.
+#
+# There are multiple reasons for choosing this approach:
+# - Firstly updating the existing Nimbus EVM to support using a different state
+# backend (portal state in this case) is difficult and would require making
+# non-trivial changes to the EVM.
+# - This new approach allows us to look up the state concurrently in the event that
+# multiple new state keys are discovered after executing the transaction. This
+# should in theory result in improved performance for certain scenarios. The
+# default approach where the state lookups are wired directly into the EVM gives
+# the worst case performance because all state accesses inside the EVM are
+# completely sequential. 
+ +# Limit the max number of calls to prevent infinite loops and/or DOS in the event +# of a bug in the implementation const evmCallLimit = 10000 type @@ -97,6 +126,7 @@ proc call*( # TODO: review what child header to use here (second parameter) let vmState = BaseVMState.new(header, header, evm.com, txFrame) + # Fetch account and code of the 'to' address so that we can execute the transaction let acc = (await accFut).valueOr: return err("Unable to get account") vmState.ledger.setBalance(to, acc.balance) @@ -106,6 +136,7 @@ proc call*( return err("Unable to get code") vmState.ledger.setCode(to, code.asSeq()) + # Collects the keys of read or modified accounts, code and storage slots vmState.ledger.collectWitnessData() var @@ -113,7 +144,8 @@ proc call*( multiKeys = vmState.ledger.makeMultiKeys() callResult: EvmResult[CallResult] evmCallCount = 0 - + # Record the keys of fetched accounts, storage and code so that we don't + # bother to fetch them multiple times fetchedAccounts = initHashSet[Address]() fetchedStorage = initHashSet[(Address, UInt256)]() fetchedCode = initHashSet[Address]() @@ -122,8 +154,9 @@ proc call*( let sp = vmState.ledger.beginSavepoint() callResult = rpcCallEvm(tx, header, vmState) inc evmCallCount - vmState.ledger.rollback(sp) + vmState.ledger.rollback(sp) # all state changes from the call are reverted + # Collect the keys after executing the transaction lastMultiKeys = multiKeys vmState.ledger.collectWitnessData() multiKeys = vmState.ledger.makeMultiKeys() @@ -134,6 +167,8 @@ proc call*( storageQueries = newSeq[StorageQuery]() codeQueries = newSeq[CodeQuery]() + # Loop through the collected keys and fetch all state concurrently + for k in multiKeys.keys: if not k.storageMode and k.address != default(Address): if k.address notin fetchedAccounts: @@ -156,6 +191,8 @@ proc call*( ) storageQueries.add(StorageQuery.init(k.address, slotKey, storageFut)) + # Store fetched state in the in-memory EVM + for q in accountQueries: let acc = (await 
q.accFut).valueOr: return err("Unable to get account") @@ -174,6 +211,7 @@ proc call*( return err("Unable to get code") vmState.ledger.setCode(q.address, code.asSeq()) fetchedCode.incl(q.address) + except CatchableError as e: # TODO: why do the above futures throw a CatchableError and not CancelledError? raiseAssert(e.msg) diff --git a/tests/test_multi_keys.nim b/tests/test_multi_keys.nim index ccc5a4f99..3d00a74d5 100644 --- a/tests/test_multi_keys.nim +++ b/tests/test_multi_keys.nim @@ -115,4 +115,58 @@ proc multiKeysMain*() = mg.group.first == 2 mg.group.last == 3 + test "Compare multikeys using equals": + let + keys1 = [ + "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", + "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606" + ] + keys2 = [ + "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", + "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606", + "0abc6a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b" + ] + storageKeys = [ + "0abc8a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b", + "0abc7a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b" + ] + + let + m1 = initMultiKeys(keys1, storageMode = false) + m2 = initMultiKeys(keys1, storageMode = false) + m3 = initMultiKeys(keys2, storageMode = false) + + m1.keys[0].storageKeys = initMultiKeys(storageKeys, storageMode = true) + m2.keys[0].storageKeys = initMultiKeys(storageKeys, storageMode = true) + m3.keys[0].storageKeys = initMultiKeys(storageKeys, storageMode = true) + + check: + m1.equals(m2) + m2.equals(m1) + not m2.equals(m3) + not m3.equals(m2) + + test "Compare multikeys using equals - storageMode": + let + keys1 = [ + "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", + "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606" + ] + keys2 = [ + "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", + "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606", + 
"0abc6a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b" + ] + + let + m1 = initMultiKeys(keys1, storageMode = true) + m2 = initMultiKeys(keys1, storageMode = true) + m3 = initMultiKeys(keys2, storageMode = true) + + check: + m1.equals(m2) + m2.equals(m1) + not m2.equals(m3) + not m3.equals(m2) + multiKeysMain() From 3102ebec657ef9f25ec496c1ace37a9c4583959a Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 4 Mar 2025 16:18:10 +0800 Subject: [PATCH 09/26] Add logging. --- fluffy/evm/portal_evm.nim | 43 ++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index af9b1885d..4ee7e72cb 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -5,28 +5,30 @@ # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. 
+{.push raises: [].} + import - std/[tables, sets], - chronos, - # chronicles, + std/sets, stew/byteutils, + chronos, + chronicles, stint, results, - eth/common/[hashes, accounts, addresses, headers, transactions], - web3/[primitives, eth_api_types, eth_api], - ../../execution_chain/beacon/web3_eth_conv, + eth/common/[hashes, addresses, accounts, headers], ../../execution_chain/db/ledger, ../../execution_chain/common/common, ../../execution_chain/transaction/call_evm, ../../execution_chain/evm/[types, state, evm_errors], ../network/history/history_network, - ../network/state/[state_endpoints, state_network, state_content] + ../network/state/[state_endpoints, state_network] -from eth/common/eth_types_rlp import rlpHash +from web3/eth_api_types import TransactionArgs -export evmc, addresses, stint, headers, state_network +export + results, chronos, hashes, history_network, state_network, TransactionArgs, CallResult -{.push raises: [].} +logScope: + topics = "portal_evm" # The Portal EVM uses the Nimbus in-memory EVM to execute transactions using the # portal state network state data. Currently only call is supported. 
@@ -115,10 +117,11 @@ proc call*( return err("to address is required") header = (await evm.historyNetwork.getVerifiedBlockHeader(blockNumOrHash)).valueOr: return err("Unable to get block header") - # Fetch account and code concurrently - accFut = evm.stateNetwork.getAccount(header.stateRoot, to) + # Start fetching code in the background while setting up the EVM codeFut = evm.stateNetwork.getCodeByStateRoot(header.stateRoot, to) + debug "Executing call", to, blockNumOrHash + let txFrame = evm.com.db.baseTxFrame().txFrameBegin() defer: txFrame.dispose() # always dispose state changes @@ -126,15 +129,11 @@ proc call*( # TODO: review what child header to use here (second parameter) let vmState = BaseVMState.new(header, header, evm.com, txFrame) - # Fetch account and code of the 'to' address so that we can execute the transaction - let acc = (await accFut).valueOr: - return err("Unable to get account") - vmState.ledger.setBalance(to, acc.balance) - vmState.ledger.setNonce(to, acc.nonce) - + # Set code of the 'to' address in the EVM so that we can execute the transaction let code = (await codeFut).valueOr: return err("Unable to get code") vmState.ledger.setCode(to, code.asSeq()) + debug "Code to be executed", code = code.asSeq().to0xHex() # Collects the keys of read or modified accounts, code and storage slots vmState.ledger.collectWitnessData() @@ -151,6 +150,8 @@ proc call*( fetchedCode = initHashSet[Address]() while evmCallCount < evmCallLimit and not lastMultiKeys.equals(multiKeys): + debug "Starting PortalEvm execution", evmCallCount + let sp = vmState.ledger.beginSavepoint() callResult = rpcCallEvm(tx, header, vmState) inc evmCallCount @@ -168,14 +169,15 @@ proc call*( codeQueries = newSeq[CodeQuery]() # Loop through the collected keys and fetch all state concurrently - for k in multiKeys.keys: if not k.storageMode and k.address != default(Address): if k.address notin fetchedAccounts: + debug "Fetching account", address = k.address let accFut = 
evm.stateNetwork.getAccount(header.stateRoot, k.address) accountQueries.add(AccountQuery.init(k.address, accFut)) if k.codeTouched and k.address notin fetchedCode: + debug "Fetching code", address = k.address let codeFut = evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) codeQueries.add(CodeQuery.init(k.address, codeFut)) @@ -186,13 +188,13 @@ proc call*( slotKey = UInt256.fromBytesBE(sk.storageSlot) slotIdx = (k.address, slotKey) if slotIdx notin fetchedStorage: + debug "Fetching storage slot", address = k.address, slotKey let storageFut = evm.stateNetwork.getStorageAtByStateRoot( header.stateRoot, k.address, slotKey ) storageQueries.add(StorageQuery.init(k.address, slotKey, storageFut)) # Store fetched state in the in-memory EVM - for q in accountQueries: let acc = (await q.accFut).valueOr: return err("Unable to get account") @@ -211,7 +213,6 @@ proc call*( return err("Unable to get code") vmState.ledger.setCode(q.address, code.asSeq()) fetchedCode.incl(q.address) - except CatchableError as e: # TODO: why do the above futures throw a CatchableError and not CancelledError? raiseAssert(e.msg) From 55fb6347c70cbafbfb285c0c4957fbb804799b30 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 4 Mar 2025 23:05:52 +0800 Subject: [PATCH 10/26] Disable linking to RocksDb in Fluffy. --- fluffy/evm/portal_evm.nim | 5 ++++- fluffy/fluffy.nim.cfg | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 4ee7e72cb..d6fdb9126 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -39,7 +39,8 @@ logScope: # state from the portal network, store the state in the in-memory EVM and then # finally execute the transaction using the correct state. The Portal EVM makes # use of data in memory during the call and therefore each piece of state is never -# fetched more than once. +# fetched more than once. 
We know we have found the correct access list if it +# doesn't change after another execution of the transaction. # # The assumption here is that network lookups for state data are generally much # slower than the time it takes to execute a transaction in the EVM and therefore @@ -149,6 +150,8 @@ proc call*( fetchedStorage = initHashSet[(Address, UInt256)]() fetchedCode = initHashSet[Address]() + # If the multikeys did not change after the last execution then we can stop + # because we have already executed the transaction with the correct state while evmCallCount < evmCallLimit and not lastMultiKeys.equals(multiKeys): debug "Starting PortalEvm execution", evmCallCount diff --git a/fluffy/fluffy.nim.cfg b/fluffy/fluffy.nim.cfg index 4c0d44247..f6264499e 100644 --- a/fluffy/fluffy.nim.cfg +++ b/fluffy/fluffy.nim.cfg @@ -5,3 +5,5 @@ @if release: -d:"chronicles_line_numbers:0" @end + +-d:"rocksdb_dynamic_linking" From 76e0ed296ca70fb73c5b84e9e28f83ebd2a7aac1 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Thu, 6 Mar 2025 23:06:24 +0800 Subject: [PATCH 11/26] Fix issue discovered when calling another contract from within an existing call. 
--- execution_chain/db/ledger.nim | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index 9a617c3ea..54dc9d11e 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -453,6 +453,10 @@ proc getCode*(ac: LedgerRef, returnHash: static[bool] = false): auto = let acc = ac.getAccount(address, false) if acc.isNil: + # We need to record that the code was read even if the account doesn't exist + # so that the returned multikeys correctly show that the code lookup occurred + ac.witnessCache[address] = WitnessData(codeTouched: true) + when returnHash: return (EMPTY_CODE_HASH, CodeBytesRef()) else: From d3b27b2bf71b20853fbb2de93adadbde13c7aa5a Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Fri, 7 Mar 2025 09:43:04 +0800 Subject: [PATCH 12/26] Fix copyright. --- execution_chain/stateless/multi_keys.nim | 2 +- fluffy/evm/portal_evm.nim | 4 ++-- fluffy/network/state/state_endpoints.nim | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/execution_chain/stateless/multi_keys.nim b/execution_chain/stateless/multi_keys.nim index 5ef8ed6c8..82ee48796 100644 --- a/execution_chain/stateless/multi_keys.nim +++ b/execution_chain/stateless/multi_keys.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2020-2024 Status Research & Development GmbH +# Copyright (c) 2020-2025 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index d6fdb9126..b54d72bb2 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -60,7 +60,7 @@ logScope: # Limit the max number of calls to prevent infinite loops and/or DOS in the event # of a bug in the implementation -const evmCallLimit = 10000 +const EVM_CALL_LIMIT = 10000 type AccountQuery = object @@ -152,7 
+152,7 @@ proc call*( # If the multikeys did not change after the last execution then we can stop # because we have already executed the transaction with the correct state - while evmCallCount < evmCallLimit and not lastMultiKeys.equals(multiKeys): + while evmCallCount < EVM_CALL_LIMIT and not lastMultiKeys.equals(multiKeys): debug "Starting PortalEvm execution", evmCallCount let sp = vmState.ledger.beginSavepoint() diff --git a/fluffy/network/state/state_endpoints.nim b/fluffy/network/state/state_endpoints.nim index d05791628..41671f97d 100644 --- a/fluffy/network/state/state_endpoints.nim +++ b/fluffy/network/state/state_endpoints.nim @@ -1,5 +1,5 @@ # Fluffy -# Copyright (c) 2021-2024 Status Research & Development GmbH +# Copyright (c) 2021-2025 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). From 52ed10275451c288b1001af436efcbdb8fa7d1ac Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Fri, 7 Mar 2025 13:59:13 +0800 Subject: [PATCH 13/26] Add to address to fetched code. 
--- fluffy/evm/portal_evm.nim | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index b54d72bb2..6e5d4abc4 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -130,29 +130,33 @@ proc call*( # TODO: review what child header to use here (second parameter) let vmState = BaseVMState.new(header, header, evm.com, txFrame) + var + # Record the keys of fetched accounts, storage and code so that we don't + # bother to fetch them multiple times + fetchedAccounts = initHashSet[Address]() + fetchedStorage = initHashSet[(Address, UInt256)]() + fetchedCode = initHashSet[Address]() + # Set code of the 'to' address in the EVM so that we can execute the transaction let code = (await codeFut).valueOr: return err("Unable to get code") vmState.ledger.setCode(to, code.asSeq()) + fetchedCode.incl(to) debug "Code to be executed", code = code.asSeq().to0xHex() # Collects the keys of read or modified accounts, code and storage slots vmState.ledger.collectWitnessData() var - lastMultiKeys = new MultiKeysRef + lastMultiKeys: MultiKeysRef multiKeys = vmState.ledger.makeMultiKeys() callResult: EvmResult[CallResult] evmCallCount = 0 - # Record the keys of fetched accounts, storage and code so that we don't - # bother to fetch them multiple times - fetchedAccounts = initHashSet[Address]() - fetchedStorage = initHashSet[(Address, UInt256)]() - fetchedCode = initHashSet[Address]() # If the multikeys did not change after the last execution then we can stop # because we have already executed the transaction with the correct state - while evmCallCount < EVM_CALL_LIMIT and not lastMultiKeys.equals(multiKeys): + while evmCallCount < EVM_CALL_LIMIT and lastMultiKeys.isNil() or + not lastMultiKeys.equals(multiKeys): debug "Starting PortalEvm execution", evmCallCount let sp = vmState.ledger.beginSavepoint() From 03fa696c47c64cc70404c6587abd564e2a8fe04d Mon Sep 17 00:00:00 2001 From: bhartnett 
<51288821+bhartnett@users.noreply.github.com> Date: Fri, 14 Mar 2025 15:11:13 +0800 Subject: [PATCH 14/26] Move PortalEvm into eth rpc api. --- fluffy/fluffy.nim | 2 +- fluffy/portal_node.nim | 9 +-------- fluffy/rpc/rpc_eth_api.nim | 16 ++++++---------- 3 files changed, 8 insertions(+), 19 deletions(-) diff --git a/fluffy/fluffy.nim b/fluffy/fluffy.nim index 9b6276ed5..509cfd67c 100644 --- a/fluffy/fluffy.nim +++ b/fluffy/fluffy.nim @@ -254,7 +254,7 @@ proc run( case flag of RpcFlag.eth: rpcServer.installEthApiHandlers( - node.historyNetwork, node.beaconLightClient, node.stateNetwork, node.portalEvm + node.historyNetwork, node.beaconLightClient, node.stateNetwork ) of RpcFlag.debug: rpcServer.installDebugApiHandlers(node.stateNetwork) diff --git a/fluffy/portal_node.nim b/fluffy/portal_node.nim index 43b0e2ff5..84e8fbfcb 100644 --- a/fluffy/portal_node.nim +++ b/fluffy/portal_node.nim @@ -19,8 +19,7 @@ import ./network/wire/[portal_stream, portal_protocol_config], ./network/beacon/[beacon_init_loader, beacon_light_client], ./network/history/[history_network, history_content], - ./network/state/[state_network, state_content], - ./evm/portal_evm + ./network/state/[state_network, state_content] export beacon_light_client, history_network, state_network, portal_protocol_config, forks @@ -49,7 +48,6 @@ type historyNetwork*: Opt[HistoryNetwork] stateNetwork*: Opt[StateNetwork] beaconLightClient*: Opt[LightClient] - portalEvm*: Opt[PortalEvm] statusLogLoop: Future[void] # Beacon light client application callbacks triggered when new finalized header @@ -193,11 +191,6 @@ proc new*( historyNetwork: historyNetwork, stateNetwork: stateNetwork, beaconLightClient: beaconLightClient, - portalEvm: - if historyNetwork.isSome() and stateNetwork.isSome(): - Opt.some(PortalEvm.init(historyNetwork.get(), stateNetwork.get())) - else: - Opt.none(PortalEvm), ) proc statusLogLoop(n: PortalNode) {.async: (raises: []).} = diff --git a/fluffy/rpc/rpc_eth_api.nim b/fluffy/rpc/rpc_eth_api.nim 
index e70a5a96b..5b215e593 100644 --- a/fluffy/rpc/rpc_eth_api.nim +++ b/fluffy/rpc/rpc_eth_api.nim @@ -136,8 +136,13 @@ proc installEthApiHandlers*( historyNetwork: Opt[HistoryNetwork], beaconLightClient: Opt[LightClient], stateNetwork: Opt[StateNetwork], - portalEvm: Opt[PortalEvm], ) = + let portalEvm = + if historyNetwork.isSome() and stateNetwork.isSome(): + Opt.some(PortalEvm.init(historyNetwork.get(), stateNetwork.get())) + else: + Opt.none(PortalEvm) + rpcServer.rpc("web3_clientVersion") do() -> string: return clientVersion @@ -426,15 +431,6 @@ proc installEthApiHandlers*( storageProof: storageProof, ) - # TransactionArgs* = object - # `from`*: Opt[Address] # (optional) The address the transaction is sent from. - # to*: Opt[Address] # The address the transaction is directed to. - # gas*: Opt[Quantity] # (optional) Integer of the gas provided for the transaction execution. eth_call consumes zero gas, but this parameter may be needed by some executions. - # gasPrice*: Opt[Quantity] # (optional) Integer of the gasPrice used for each paid gas. - # maxFeePerGas*: Opt[Quantity] # (optional) MaxFeePerGas is the maximum fee per gas offered, in wei. - # maxPriorityFeePerGas*: Opt[Quantity] # (optional) MaxPriorityFeePerGas is the maximum miner tip per gas offered, in wei. - # value*: Opt[UInt256] # (optional) Integer of the value sent with this transaction. - # nonce*: Opt[Quantity] # (optional) integer of a nonce. This allows to overwrite your own pending transactions that use the same nonce rpcServer.rpc("eth_call") do( tx: TransactionArgs, quantityTag: RtBlockIdentifier ) -> seq[byte]: From cd73192a7c88ed29c31463bb50005554d0d6ac39 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Mon, 17 Mar 2025 20:13:21 +0800 Subject: [PATCH 15/26] Remove existing witness code. 
--- .../core/executor/process_block.nim | 2 - .../core/executor/process_transaction.nim | 3 - execution_chain/db/ledger.nim | 46 ----------- execution_chain/evm/state.nim | 7 -- execution_chain/evm/types.nim | 1 - tests/test_ledger.nim | 77 ++++++++++--------- 6 files changed, 39 insertions(+), 97 deletions(-) diff --git a/execution_chain/core/executor/process_block.nim b/execution_chain/core/executor/process_block.nim index 46155a284..ad71e5e60 100644 --- a/execution_chain/core/executor/process_block.nim +++ b/execution_chain/core/executor/process_block.nim @@ -191,8 +191,6 @@ proc procBlkEpilogue( # Reward beneficiary vmState.mutateLedger: - if vmState.collectWitnessData: - db.collectWitnessData() # Clearing the account cache here helps manage its size when replaying # large ranges of blocks, implicitly limiting its size using the gas limit diff --git a/execution_chain/core/executor/process_transaction.nim b/execution_chain/core/executor/process_transaction.nim index baa2875c5..b11a3ca2d 100644 --- a/execution_chain/core/executor/process_transaction.nim +++ b/execution_chain/core/executor/process_transaction.nim @@ -120,9 +120,6 @@ proc processTransactionImpl( else: err(txRes.error) - if vmState.collectWitnessData: - vmState.ledger.collectWitnessData() - vmState.ledger.persist(clearEmptyAccount = fork >= FkSpurious) res diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index 9e11a0b18..c604d64d1 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -59,14 +59,9 @@ type originalStorage: TableRef[UInt256, UInt256] overlayStorage: Table[UInt256, UInt256] - WitnessData* = object - storageKeys*: HashSet[UInt256] - codeTouched*: bool - LedgerRef* = ref object txFrame*: CoreDbTxRef savePoint: LedgerSpRef - witnessCache: Table[Address, WitnessData] isDirty: bool ripemdSpecial: bool storeSlotHash*: bool @@ -360,7 +355,6 @@ proc makeDirty(ac: LedgerRef, address: Address, cloneStorage = true): AccountRef proc init*(x: 
typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): LedgerRef = new result result.txFrame = db - result.witnessCache = Table[Address, WitnessData]() result.storeSlotHash = storeSlotHash result.code = typeof(result.code).init(codeLruSize) result.slots = typeof(result.slots).init(slotsLruSize) @@ -783,46 +777,6 @@ proc getStorageRoot*(ac: LedgerRef, address: Address): Hash32 = if acc.isNil: EMPTY_ROOT_HASH else: ac.txFrame.slotStorageRoot(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH -proc update(wd: var WitnessData, acc: AccountRef) = - # once the code is touched make sure it doesn't get reset back to false in another update - if not wd.codeTouched: - wd.codeTouched = CodeChanged in acc.flags or acc.code != nil - - if not acc.originalStorage.isNil: - for k in acc.originalStorage.keys(): - wd.storageKeys.incl k - - for k, v in acc.overlayStorage: - wd.storageKeys.incl k - -proc witnessData(acc: AccountRef): WitnessData = - result.storageKeys = HashSet[UInt256]() - update(result, acc) - -proc collectWitnessData*(ac: LedgerRef) = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - # usually witness data is collected before we call persist() - for address, acc in ac.savePoint.cache: - ac.witnessCache.withValue(address, val) do: - update(val[], acc) - do: - ac.witnessCache[address] = witnessData(acc) - -func multiKeys(slots: HashSet[UInt256]): MultiKeysRef = - if slots.len == 0: return - new result - for x in slots: - result.add x.toBytesBE - result.sort() - -proc makeMultiKeys*(ac: LedgerRef): MultiKeysRef = - # this proc is called after we done executing a block - new result - for k, v in ac.witnessCache: - result.add(k, v.codeTouched, multiKeys(v.storageKeys)) - result.sort() - proc accessList*(ac: LedgerRef, address: Address) = ac.savePoint.accessList.add(address) diff --git a/execution_chain/evm/state.nim b/execution_chain/evm/state.nim index 06634c1d7..08cc3f20a 100644 --- a/execution_chain/evm/state.nim +++ 
b/execution_chain/evm/state.nim @@ -237,13 +237,6 @@ proc `status=`*(vmState: BaseVMState, status: bool) = if status: vmState.flags.incl ExecutionOK else: vmState.flags.excl ExecutionOK -proc collectWitnessData*(vmState: BaseVMState): bool = - CollectWitnessData in vmState.flags - -proc `collectWitnessData=`*(vmState: BaseVMState, status: bool) = - if status: vmState.flags.incl CollectWitnessData - else: vmState.flags.excl CollectWitnessData - func tracingEnabled*(vmState: BaseVMState): bool = vmState.tracer.isNil.not diff --git a/execution_chain/evm/types.nim b/execution_chain/evm/types.nim index 00a697856..eeaa4e9ff 100644 --- a/execution_chain/evm/types.nim +++ b/execution_chain/evm/types.nim @@ -21,7 +21,6 @@ export stack, memory type VMFlag* = enum ExecutionOK - CollectWitnessData BlockContext* = object timestamp* : EthTime diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index 33ae04923..9f51f2eb5 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -713,44 +713,45 @@ proc runLedgerBasicOperationsTests() = check 2.u256 in vals check 3.u256 in vals - test "Test MultiKeys - Set storage": - var - ac = LedgerRef.init(memDB.baseTxFrame()) - addr1 = initAddr(1) - - ac.setStorage(addr1, 1.u256, 1.u256) # Non-zero value - ac.setStorage(addr1, 2.u256, 0.u256) # Zero value - - ac.collectWitnessData() - let multikeys = ac.makeMultiKeys().keys - - check: - multikeys.len() == 1 - multikeys[0].storageMode == false - multikeys[0].address == addr1 - multikeys[0].storageKeys.keys.len() == 2 - multikeys[0].storageKeys.keys[0].storageSlot == 2.u256.toBytesBE() - multikeys[0].storageKeys.keys[1].storageSlot == 1.u256.toBytesBE() - - test "Test MultiKeys - Get storage": - var - ac = LedgerRef.init(memDB.baseTxFrame()) - addr1 = initAddr(1) - - ac.setStorage(addr1, 3.u256, 1.u256) - discard ac.getStorage(addr1, 3.u256) # Returns non-zero value - discard ac.getStorage(addr1, 4.u256) # Returns default zero value - - ac.collectWitnessData() - let multikeys = 
ac.makeMultiKeys().keys - - check: - multikeys.len() == 1 - multikeys[0].storageMode == false - multikeys[0].address == addr1 - multikeys[0].storageKeys.keys.len() == 2 - multikeys[0].storageKeys.keys[0].storageSlot == 4.u256.toBytesBE() - multikeys[0].storageKeys.keys[1].storageSlot == 3.u256.toBytesBE() + # TODO: Update these tests + # test "Test MultiKeys - Set storage": + # var + # ac = LedgerRef.init(memDB.baseTxFrame()) + # addr1 = initAddr(1) + + # ac.setStorage(addr1, 1.u256, 1.u256) # Non-zero value + # ac.setStorage(addr1, 2.u256, 0.u256) # Zero value + + # ac.collectWitnessData() + # let multikeys = ac.makeMultiKeys().keys + + # check: + # multikeys.len() == 1 + # multikeys[0].storageMode == false + # multikeys[0].address == addr1 + # multikeys[0].storageKeys.keys.len() == 2 + # multikeys[0].storageKeys.keys[0].storageSlot == 2.u256.toBytesBE() + # multikeys[0].storageKeys.keys[1].storageSlot == 1.u256.toBytesBE() + + # test "Test MultiKeys - Get storage": + # var + # ac = LedgerRef.init(memDB.baseTxFrame()) + # addr1 = initAddr(1) + + # ac.setStorage(addr1, 3.u256, 1.u256) + # discard ac.getStorage(addr1, 3.u256) # Returns non-zero value + # discard ac.getStorage(addr1, 4.u256) # Returns default zero value + + # ac.collectWitnessData() + # let multikeys = ac.makeMultiKeys().keys + + # check: + # multikeys.len() == 1 + # multikeys[0].storageMode == false + # multikeys[0].address == addr1 + # multikeys[0].storageKeys.keys.len() == 2 + # multikeys[0].storageKeys.keys[0].storageSlot == 4.u256.toBytesBE() + # multikeys[0].storageKeys.keys[1].storageSlot == 3.u256.toBytesBE() # ------------------------------------------------------------------------------ # Main function(s) From 99ced973b6606132d99c792fc2d9060c28e45136 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:14:16 +0800 Subject: [PATCH 16/26] Implement collection of witness keys using ordered list. 
--- execution_chain/db/ledger.nim | 70 +++++++++++++++++- tests/test_ledger.nim | 132 ++++++++++++++++++++++++---------- 2 files changed, 160 insertions(+), 42 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index c604d64d1..3c8951321 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -18,7 +18,6 @@ import minilru, ../utils/mergeutils, ../evm/code_bytes, - ../stateless/multi_keys, ../core/eip7702, "/.."/[constants, utils/utils], ./access_list as ac_access_list, @@ -39,6 +38,13 @@ const # in greater detail. slotsLruSize = 16 * 1024 + statelessEnabled = defined(stateless) + +when statelessEnabled: + import ../stateless/multi_keys + + export multi_keys + type AccountFlag = enum Alive @@ -84,6 +90,10 @@ type ## over and over again to the database to avoid the WAL and compation ## write amplification that ensues + when statelessEnabled: + witnessKeys: OrderedTableRef[(Address, KeyHash), KeyData] + + ReadOnlyLedger* = distinct LedgerRef TransactionState = enum @@ -130,12 +140,14 @@ when debugLedgerRef: template logTxt(info: static[string]): static[string] = "LedgerRef " & info -template toAccountKey(acc: AccountRef): Hash32 = +template toAccountKey*(acc: AccountRef): Hash32 = acc.accPath -template toAccountKey(eAddr: Address): Hash32 = +template toAccountKey*(eAddr: Address): Hash32 = eAddr.data.keccak256 +template toSlotKey*(slot: UInt256): Hash32 = + slot.toBytesBE.keccak256 proc beginSavepoint*(ac: LedgerRef): LedgerSpRef {.gcsafe.} @@ -152,6 +164,15 @@ proc getAccount( address: Address; shouldCreate = true; ): AccountRef = + when statelessEnabled: + let + keyHash = address.toAccountKey.data + lookupKey = (address, keyHash) + if not ac.witnessKeys.contains(lookupKey): + ac.witnessKeys[lookupKey] = KeyData( + storageMode: false, + hash: keyHash, + address: address) # search account from layers of cache var sp = ac.savePoint @@ -215,6 +236,7 @@ proc originalStorageValue( slot: UInt256; ac: LedgerRef; 
): UInt256 = + # share the same original storage between multiple # versions of account if acc.originalStorage.isNil: @@ -360,6 +382,9 @@ proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): Ledger result.slots = typeof(result.slots).init(slotsLruSize) discard result.beginSavepoint + when statelessEnabled: + result.witnessKeys = newOrderedTable[(Address, KeyHash), KeyData]() + proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef): LedgerRef = init(x, db, false) @@ -445,6 +470,18 @@ proc getNonce*(ac: LedgerRef, address: Address): AccountNonce = proc getCode*(ac: LedgerRef, address: Address, returnHash: static[bool] = false): auto = + when statelessEnabled: + let + keyHash = address.toAccountKey.data + lookupKey = (address, keyHash) + # We overwrite any existing record here so that codeTouched is always set to + # true even if an account was previously accessed without touching the code + ac.witnessKeys[lookupKey] = KeyData( + storageMode: false, + hash: keyHash, + address: address, + codeTouched: true) + let acc = ac.getAccount(address, false) if acc.isNil: when returnHash: @@ -501,12 +538,32 @@ proc resolveCode*(ac: LedgerRef, address: Address): CodeBytesRef = ac.getCode(delegateTo) proc getCommittedStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = + when statelessEnabled: + let + keyHash = slot.toSlotKey.data + lookupKey = (address, keyHash) + if not ac.witnessKeys.contains(lookupKey): + ac.witnessKeys[lookupKey] = KeyData( + storageMode: true, + hash: keyHash, + storageSlot: slot.toBytesBE()) + let acc = ac.getAccount(address, false) if acc.isNil: return acc.originalStorageValue(slot, ac) proc getStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = + when statelessEnabled: + let + keyHash = slot.toSlotKey.data + lookupKey = (address, keyHash) + if not ac.witnessKeys.contains(lookupKey): + ac.witnessKeys[lookupKey] = KeyData( + storageMode: true, + hash: keyHash, + storageSlot: slot.toBytesBE()) + let acc = 
ac.getAccount(address, false) if acc.isNil: return @@ -726,6 +783,9 @@ proc persist*(ac: LedgerRef, ac.isDirty = false + when statelessEnabled: + ac.witnessKeys.clear() + iterator addresses*(ac: LedgerRef): Address = # make sure all savepoint already committed doAssert(ac.savePoint.parentSavepoint.isNil) @@ -865,6 +925,10 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] storageProof +when statelessEnabled: + func getWitnessKeys(ac: LedgerRef): OrderedTableRef[(Address, KeyHash), KeyData] = + ac.witnessKeys + # ------------------------------------------------------------------------------ # Public virtual read-only methods # ------------------------------------------------------------------------------ diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index 9f51f2eb5..a8da82b17 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -25,6 +25,7 @@ import import results + const genesisFile = "tests/customgenesis/cancun123.json" hexPrivKey = "af1a9be9f1a54421cac82943820a0fe0f601bb5f4f6d0bccc81c613f0ce6ae22" @@ -713,45 +714,98 @@ proc runLedgerBasicOperationsTests() = check 2.u256 in vals check 3.u256 in vals - # TODO: Update these tests - # test "Test MultiKeys - Set storage": - # var - # ac = LedgerRef.init(memDB.baseTxFrame()) - # addr1 = initAddr(1) - - # ac.setStorage(addr1, 1.u256, 1.u256) # Non-zero value - # ac.setStorage(addr1, 2.u256, 0.u256) # Zero value - - # ac.collectWitnessData() - # let multikeys = ac.makeMultiKeys().keys - - # check: - # multikeys.len() == 1 - # multikeys[0].storageMode == false - # multikeys[0].address == addr1 - # multikeys[0].storageKeys.keys.len() == 2 - # multikeys[0].storageKeys.keys[0].storageSlot == 2.u256.toBytesBE() - # multikeys[0].storageKeys.keys[1].storageSlot == 1.u256.toBytesBE() - - # test "Test MultiKeys - Get storage": - # var - # ac = LedgerRef.init(memDB.baseTxFrame()) - # addr1 = initAddr(1) - - # ac.setStorage(addr1, 3.u256, 1.u256) - # discard 
ac.getStorage(addr1, 3.u256) # Returns non-zero value - # discard ac.getStorage(addr1, 4.u256) # Returns default zero value - - # ac.collectWitnessData() - # let multikeys = ac.makeMultiKeys().keys - - # check: - # multikeys.len() == 1 - # multikeys[0].storageMode == false - # multikeys[0].address == addr1 - # multikeys[0].storageKeys.keys.len() == 2 - # multikeys[0].storageKeys.keys[0].storageSlot == 4.u256.toBytesBE() - # multikeys[0].storageKeys.keys[1].storageSlot == 3.u256.toBytesBE() + when defined(stateless): + test "Witness keys - Get account": + var + ac = LedgerRef.init(memDB.baseTxFrame()) + addr1 = initAddr(1) + + discard ac.getAccount(addr1) + + let + witnessKeys = ac.getWitnessKeys() + keyData = witnessKeys.getOrDefault((addr1, addr1.toAccountKey.data)) + check: + witnessKeys.len() == 1 + keyData.address == addr1 + keyData.codeTouched == false + + test "Witness keys - Get code": + var + ac = LedgerRef.init(memDB.baseTxFrame()) + addr1 = initAddr(1) + + discard ac.getCode(addr1) + + let + witnessKeys = ac.getWitnessKeys() + keyData = witnessKeys.getOrDefault((addr1, addr1.toAccountKey.data)) + check: + witnessKeys.len() == 1 + keyData.address == addr1 + keyData.codeTouched == true + + test "Witness keys - Get storage": + var + ac = LedgerRef.init(memDB.baseTxFrame()) + addr1 = initAddr(1) + slot1 = 1.u256 + + discard ac.getStorage(addr1, slot1) + + let + witnessKeys = ac.getWitnessKeys() + keyData = witnessKeys.getOrDefault((addr1, slot1.toSlotKey.data)) + check: + witnessKeys.len() == 2 + keyData.storageSlot == slot1.toBytesBE() + + test "Witness keys - Get account, code and storage": + var + ac = LedgerRef.init(memDB.baseTxFrame()) + addr1 = initAddr(1) + addr2 = initAddr(2) + addr3 = initAddr(3) + slot1 = 1.u256 + + + discard ac.getAccount(addr1) + discard ac.getCode(addr2) + discard ac.getCode(addr1) + discard ac.getStorage(addr2, slot1) + discard ac.getStorage(addr1, slot1) + discard ac.getStorage(addr2, slot1) + discard ac.getAccount(addr3) + + 
let witnessKeys = ac.getWitnessKeys() + check witnessKeys.len() == 5 + + var keysList = newSeq[(Address, KeyData)]() + for k, v in witnessKeys: + let (adr, _) = k + keysList.add((adr, v)) + + check: + keysList[0][0] == addr1 + keysList[0][1].address == addr1 + keysList[0][1].codeTouched == true + + keysList[1][0] == addr2 + keysList[1][1].address == addr2 + keysList[1][1].codeTouched == true + + keysList[2][0] == addr2 + keysList[2][1].storageSlot == slot1.toBytesBE() + + keysList[3][0] == addr1 + keysList[3][1].storageSlot == slot1.toBytesBE() + + keysList[4][0] == addr3 + keysList[4][1].address == addr3 + keysList[4][1].codeTouched == false + + + # ------------------------------------------------------------------------------ # Main function(s) From 7838a4eaef9aac72d6a60239f47609012f485203 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 14:56:14 +0800 Subject: [PATCH 17/26] Improve implementation. No longer using multikeys. --- execution_chain/db/ledger.nim | 50 +++++++++++++++-------------------- tests/test_ledger.nim | 14 +++++----- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index 3c8951321..d19b6d518 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -41,9 +41,12 @@ const statelessEnabled = defined(stateless) when statelessEnabled: - import ../stateless/multi_keys - - export multi_keys + type + WitnessKey* = object + storageMode*: bool + address*: Address + codeTouched*: bool + storageSlot*: UInt256 type AccountFlag = enum @@ -91,7 +94,10 @@ type ## write amplification that ensues when statelessEnabled: - witnessKeys: OrderedTableRef[(Address, KeyHash), KeyData] + witnessKeys: OrderedTableRef[(Address, Hash32), WitnessKey] + ## Used to collect the keys of all read accounts, code and storage slots. 
+ ## Maps a tuple of address and hash of the key (address or slot) to the + ## witness key which can be either a storage key or an account key ReadOnlyLedger* = distinct LedgerRef @@ -165,13 +171,10 @@ proc getAccount( shouldCreate = true; ): AccountRef = when statelessEnabled: - let - keyHash = address.toAccountKey.data - lookupKey = (address, keyHash) + let lookupKey = (address, address.toAccountKey) if not ac.witnessKeys.contains(lookupKey): - ac.witnessKeys[lookupKey] = KeyData( + ac.witnessKeys[lookupKey] = WitnessKey( storageMode: false, - hash: keyHash, address: address) # search account from layers of cache @@ -383,7 +386,7 @@ proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): Ledger discard result.beginSavepoint when statelessEnabled: - result.witnessKeys = newOrderedTable[(Address, KeyHash), KeyData]() + result.witnessKeys = newOrderedTable[(Address, Hash32), WitnessKey]() proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef): LedgerRef = init(x, db, false) @@ -471,14 +474,11 @@ proc getCode*(ac: LedgerRef, address: Address, returnHash: static[bool] = false): auto = when statelessEnabled: - let - keyHash = address.toAccountKey.data - lookupKey = (address, keyHash) + let lookupKey = (address, address.toAccountKey) # We overwrite any existing record here so that codeTouched is always set to # true even if an account was previously accessed without touching the code - ac.witnessKeys[lookupKey] = KeyData( + ac.witnessKeys[lookupKey] = WitnessKey( storageMode: false, - hash: keyHash, address: address, codeTouched: true) @@ -539,14 +539,11 @@ proc resolveCode*(ac: LedgerRef, address: Address): CodeBytesRef = proc getCommittedStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = when statelessEnabled: - let - keyHash = slot.toSlotKey.data - lookupKey = (address, keyHash) + let lookupKey = (address, slot.toSlotKey) if not ac.witnessKeys.contains(lookupKey): - ac.witnessKeys[lookupKey] = KeyData( + ac.witnessKeys[lookupKey] = 
WitnessKey( storageMode: true, - hash: keyHash, - storageSlot: slot.toBytesBE()) + storageSlot: slot) let acc = ac.getAccount(address, false) if acc.isNil: @@ -555,14 +552,11 @@ proc getCommittedStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt2 proc getStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = when statelessEnabled: - let - keyHash = slot.toSlotKey.data - lookupKey = (address, keyHash) + let lookupKey = (address, slot.toSlotKey) if not ac.witnessKeys.contains(lookupKey): - ac.witnessKeys[lookupKey] = KeyData( + ac.witnessKeys[lookupKey] = WitnessKey( storageMode: true, - hash: keyHash, - storageSlot: slot.toBytesBE()) + storageSlot: slot) let acc = ac.getAccount(address, false) if acc.isNil: @@ -926,7 +920,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] storageProof when statelessEnabled: - func getWitnessKeys(ac: LedgerRef): OrderedTableRef[(Address, KeyHash), KeyData] = + func getWitnessKeys*(ac: LedgerRef): OrderedTableRef[(Address, Hash32), WitnessKey] = ac.witnessKeys # ------------------------------------------------------------------------------ diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index a8da82b17..55d9ba370 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -724,7 +724,7 @@ proc runLedgerBasicOperationsTests() = let witnessKeys = ac.getWitnessKeys() - keyData = witnessKeys.getOrDefault((addr1, addr1.toAccountKey.data)) + keyData = witnessKeys.getOrDefault((addr1, addr1.toAccountKey)) check: witnessKeys.len() == 1 keyData.address == addr1 @@ -739,7 +739,7 @@ proc runLedgerBasicOperationsTests() = let witnessKeys = ac.getWitnessKeys() - keyData = witnessKeys.getOrDefault((addr1, addr1.toAccountKey.data)) + keyData = witnessKeys.getOrDefault((addr1, addr1.toAccountKey)) check: witnessKeys.len() == 1 keyData.address == addr1 @@ -755,10 +755,10 @@ proc runLedgerBasicOperationsTests() = let witnessKeys = ac.getWitnessKeys() - keyData = 
witnessKeys.getOrDefault((addr1, slot1.toSlotKey.data)) + keyData = witnessKeys.getOrDefault((addr1, slot1.toSlotKey)) check: witnessKeys.len() == 2 - keyData.storageSlot == slot1.toBytesBE() + keyData.storageSlot == slot1 test "Witness keys - Get account, code and storage": var @@ -780,7 +780,7 @@ proc runLedgerBasicOperationsTests() = let witnessKeys = ac.getWitnessKeys() check witnessKeys.len() == 5 - var keysList = newSeq[(Address, KeyData)]() + var keysList = newSeq[(Address, WitnessKey)]() for k, v in witnessKeys: let (adr, _) = k keysList.add((adr, v)) @@ -795,10 +795,10 @@ proc runLedgerBasicOperationsTests() = keysList[1][1].codeTouched == true keysList[2][0] == addr2 - keysList[2][1].storageSlot == slot1.toBytesBE() + keysList[2][1].storageSlot == slot1 keysList[3][0] == addr1 - keysList[3][1].storageSlot == slot1.toBytesBE() + keysList[3][1].storageSlot == slot1 keysList[4][0] == addr3 keysList[4][1].address == addr3 From ea98562ef5af29ecb6e675e2fafa622614129d1c Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 14:57:54 +0800 Subject: [PATCH 18/26] Use witness keys. 
--- execution_chain/db/ledger.nim | 8 +-- execution_chain/stateless/multi_keys.nim | 56 --------------------- fluffy/evm/portal_evm.nim | 63 +++++++++++------------- fluffy/fluffy.nim.cfg | 1 + 4 files changed, 33 insertions(+), 95 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index 16da616b5..8bb7496ef 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -25,7 +25,7 @@ import ./aristo/aristo_blobify export - code_bytes, multi_keys + code_bytes const debugLedgerRef = false @@ -484,10 +484,6 @@ proc getCode*(ac: LedgerRef, let acc = ac.getAccount(address, false) if acc.isNil: - # We need to record that the code was read even if the account doesn't exist - # so that the returned multikeys correctly show that the code lookup occurred - ac.witnessCache[address] = WitnessData(codeTouched: true) - when returnHash: return (EMPTY_CODE_HASH, CodeBytesRef()) else: @@ -930,7 +926,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] storageProof when statelessEnabled: - func getWitnessKeys(ac: LedgerRef): OrderedTableRef[(Address, KeyHash), KeyData] = + func getWitnessKeys*(ac: LedgerRef): OrderedTableRef[(Address, KeyHash), KeyData] = ac.witnessKeys # ------------------------------------------------------------------------------ diff --git a/execution_chain/stateless/multi_keys.nim b/execution_chain/stateless/multi_keys.nim index 82ee48796..70d00e929 100644 --- a/execution_chain/stateless/multi_keys.nim +++ b/execution_chain/stateless/multi_keys.nim @@ -186,59 +186,3 @@ proc visitMatch*(m: var MultiKeysRef, mg: MatchGroup, depth: int): KeyData = doAssert(mg.isValidMatch, "Multiple identical keys are not allowed") m.keys[mg.group.first].visited = true result = m.keys[mg.group.first] - -func equalsStorageMode(m1: MultiKeysRef, m2: MultiKeysRef): bool = - doAssert(not m1.isNil()) - doAssert(not m2.isNil()) - - if m1.keys.len() != m2.keys.len(): - return false - - for i in 
0..m1.keys.high: - let - kd1 = m1.keys[i] - kd2 = m2.keys[i] - doAssert(kd1.storageMode) - doAssert(kd2.storageMode) - - if kd1.hash != kd2.hash or kd1.storageSlot != kd2.storageSlot: - return false - - return true - -func equals*(m1: MultiKeysRef, m2: MultiKeysRef): bool = - doAssert(not m1.isNil()) - doAssert(not m2.isNil()) - - if m1.keys.len() != m2.keys.len(): - return false - - for i in 0..m1.keys.high: - let - kd1 = m1.keys[i] - kd2 = m2.keys[i] - - if kd1.storageMode != kd2.storageMode: - return false - - if kd1.storageMode: - if kd1.hash != kd2.hash or kd1.storageSlot != kd2.storageSlot: - return false - else: - continue - - # Not storageMode - if kd1.hash != kd2.hash or kd1.address != kd2.address or kd1.codeTouched != kd2.codeTouched: - return false - - if kd1.storageKeys.isNil() or kd2.storageKeys.isNil(): - if kd1.storageKeys != kd2.storageKeys: - return false - else: - continue - - # storageKeys not nil - if not equalsStorageMode(kd1.storageKeys, kd2.storageKeys): - return false - - return true diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 6e5d4abc4..86ce2bcb0 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -144,19 +144,16 @@ proc call*( fetchedCode.incl(to) debug "Code to be executed", code = code.asSeq().to0xHex() - # Collects the keys of read or modified accounts, code and storage slots - vmState.ledger.collectWitnessData() var - lastMultiKeys: MultiKeysRef - multiKeys = vmState.ledger.makeMultiKeys() + lastWitnessKeys: OrderedTableRef[(Address, KeyHash), KeyData] + witnessKeys = vmState.ledger.getWitnessKeys() callResult: EvmResult[CallResult] evmCallCount = 0 - # If the multikeys did not change after the last execution then we can stop + # If the witness keys did not change after the last execution then we can stop # because we have already executed the transaction with the correct state - while evmCallCount < EVM_CALL_LIMIT and lastMultiKeys.isNil() or - not lastMultiKeys.equals(multiKeys): + 
while evmCallCount < EVM_CALL_LIMIT and lastWitnessKeys.isNil() or lastWitnessKeys != witnessKeys: debug "Starting PortalEvm execution", evmCallCount let sp = vmState.ledger.beginSavepoint() @@ -165,9 +162,8 @@ proc call*( vmState.ledger.rollback(sp) # all state changes from the call are reverted # Collect the keys after executing the transaction - lastMultiKeys = multiKeys - vmState.ledger.collectWitnessData() - multiKeys = vmState.ledger.makeMultiKeys() + lastWitnessKeys = witnessKeys + witnessKeys = vmState.ledger.getWitnessKeys() try: var @@ -176,30 +172,31 @@ proc call*( codeQueries = newSeq[CodeQuery]() # Loop through the collected keys and fetch all state concurrently - for k in multiKeys.keys: - if not k.storageMode and k.address != default(Address): - if k.address notin fetchedAccounts: - debug "Fetching account", address = k.address - let accFut = evm.stateNetwork.getAccount(header.stateRoot, k.address) - accountQueries.add(AccountQuery.init(k.address, accFut)) - - if k.codeTouched and k.address notin fetchedCode: - debug "Fetching code", address = k.address + for k, v in witnessKeys: + let (adr, _) = k + if v.storageMode: + let + slotKey = UInt256.fromBytesBE(v.storageSlot) + slotIdx = (adr, slotKey) + if slotIdx notin fetchedStorage: + debug "Fetching storage slot", address = adr, slotKey + let storageFut = evm.stateNetwork.getStorageAtByStateRoot( + header.stateRoot, adr, slotKey + ) + storageQueries.add(StorageQuery.init(adr, slotKey, storageFut)) + elif adr != default(Address): + doAssert(adr == v.address) + + if adr notin fetchedAccounts: + debug "Fetching account", address = adr + let accFut = evm.stateNetwork.getAccount(header.stateRoot, adr) + accountQueries.add(AccountQuery.init(adr, accFut)) + + if v.codeTouched and adr notin fetchedCode: + debug "Fetching code", address = adr let codeFut = - evm.stateNetwork.getCodeByStateRoot(header.stateRoot, k.address) - codeQueries.add(CodeQuery.init(k.address, codeFut)) - - if not k.storageKeys.isNil(): - 
for sk in k.storageKeys.keys: - let - slotKey = UInt256.fromBytesBE(sk.storageSlot) - slotIdx = (k.address, slotKey) - if slotIdx notin fetchedStorage: - debug "Fetching storage slot", address = k.address, slotKey - let storageFut = evm.stateNetwork.getStorageAtByStateRoot( - header.stateRoot, k.address, slotKey - ) - storageQueries.add(StorageQuery.init(k.address, slotKey, storageFut)) + evm.stateNetwork.getCodeByStateRoot(header.stateRoot, adr) + codeQueries.add(CodeQuery.init(adr, codeFut)) # Store fetched state in the in-memory EVM for q in accountQueries: diff --git a/fluffy/fluffy.nim.cfg b/fluffy/fluffy.nim.cfg index f6264499e..489cafa28 100644 --- a/fluffy/fluffy.nim.cfg +++ b/fluffy/fluffy.nim.cfg @@ -6,4 +6,5 @@ -d:"chronicles_line_numbers:0" @end +-d:"stateless" -d:"rocksdb_dynamic_linking" From f4461ef5b3033c1d3ac39cca68ba6e136a946699 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 15:52:28 +0800 Subject: [PATCH 19/26] Use OrderedTable instead of OrderedTableRef. --- execution_chain/db/ledger.nim | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index d19b6d518..84910ed5e 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -94,7 +94,7 @@ type ## write amplification that ensues when statelessEnabled: - witnessKeys: OrderedTableRef[(Address, Hash32), WitnessKey] + witnessKeys: OrderedTable[(Address, Hash32), WitnessKey] ## Used to collect the keys of all read accounts, code and storage slots. 
## Maps a tuple of address and hash of the key (address or slot) to the ## witness key which can be either a storage key or an account key @@ -386,7 +386,7 @@ proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): Ledger discard result.beginSavepoint when statelessEnabled: - result.witnessKeys = newOrderedTable[(Address, Hash32), WitnessKey]() + result.witnessKeys = initOrderedTable[(Address, Hash32), WitnessKey]() proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef): LedgerRef = init(x, db, false) @@ -920,7 +920,7 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] storageProof when statelessEnabled: - func getWitnessKeys*(ac: LedgerRef): OrderedTableRef[(Address, Hash32), WitnessKey] = + func getWitnessKeys*(ac: LedgerRef): OrderedTable[(Address, Hash32), WitnessKey] = ac.witnessKeys # ------------------------------------------------------------------------------ From 0f6b1b7af517b8a15ca3bb8fe62fb27df6a2bd5b Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 19:22:20 +0800 Subject: [PATCH 20/26] Use latest witness keys changes. 
--- fluffy/evm/portal_evm.nim | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 0c84b69de..a5ac17248 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -145,14 +145,12 @@ proc call*( debug "Code to be executed", code = code.asSeq().to0xHex() var - lastWitnessKeys: OrderedTableRef[(Address, Hash32), WitnessKey] + lastWitnessKeys: OrderedTable[(Address, Hash32), WitnessKey] witnessKeys = vmState.ledger.getWitnessKeys() callResult: EvmResult[CallResult] evmCallCount = 0 - # If the witness keys did not change after the last execution then we can stop - # because we have already executed the transaction with the correct state - while evmCallCount < EVM_CALL_LIMIT and (lastWitnessKeys.isNil() or lastWitnessKeys != witnessKeys): + while evmCallCount < EVM_CALL_LIMIT: debug "Starting PortalEvm execution", evmCallCount let sp = vmState.ledger.beginSavepoint() @@ -164,6 +162,12 @@ proc call*( lastWitnessKeys = witnessKeys witnessKeys = vmState.ledger.getWitnessKeys() + # If the witness keys did not change after the last execution then we can stop + # the execution loop because we have already executed the transaction with the + # correct state + if lastWitnessKeys == witnessKeys: + break + try: var accountQueries = newSeq[AccountQuery]() From c78a8767f1d4cacd4fa2b12bc7bfab87861f653e Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 19:29:19 +0800 Subject: [PATCH 21/26] Remove unneeded rocksdb flag after in memory db fix. 
--- fluffy/fluffy.nim.cfg | 2 -- 1 file changed, 2 deletions(-) diff --git a/fluffy/fluffy.nim.cfg b/fluffy/fluffy.nim.cfg index f6264499e..4c0d44247 100644 --- a/fluffy/fluffy.nim.cfg +++ b/fluffy/fluffy.nim.cfg @@ -5,5 +5,3 @@ @if release: -d:"chronicles_line_numbers:0" @end - --d:"rocksdb_dynamic_linking" From a0cd8f2bc38fd70a6917ad37d5344984e9aeb12d Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:07:10 +0800 Subject: [PATCH 22/26] Improvements. --- execution_chain/db/ledger.nim | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index 84910ed5e..f7430800c 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -48,6 +48,8 @@ when statelessEnabled: codeTouched*: bool storageSlot*: UInt256 + WitnessTable* = OrderedTable[(Address, Hash32), WitnessKey] + type AccountFlag = enum Alive @@ -94,7 +96,7 @@ type ## write amplification that ensues when statelessEnabled: - witnessKeys: OrderedTable[(Address, Hash32), WitnessKey] + witnessKeys: WitnessTable ## Used to collect the keys of all read accounts, code and storage slots. 
## Maps a tuple of address and hash of the key (address or slot) to the ## witness key which can be either a storage key or an account key @@ -175,7 +177,8 @@ proc getAccount( if not ac.witnessKeys.contains(lookupKey): ac.witnessKeys[lookupKey] = WitnessKey( storageMode: false, - address: address) + address: address, + codeTouched: false) # search account from layers of cache var sp = ac.savePoint @@ -239,7 +242,6 @@ proc originalStorageValue( slot: UInt256; ac: LedgerRef; ): UInt256 = - # share the same original storage between multiple # versions of account if acc.originalStorage.isNil: @@ -385,9 +387,6 @@ proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef, storeSlotHash: bool): Ledger result.slots = typeof(result.slots).init(slotsLruSize) discard result.beginSavepoint - when statelessEnabled: - result.witnessKeys = initOrderedTable[(Address, Hash32), WitnessKey]() - proc init*(x: typedesc[LedgerRef], db: CoreDbTxRef): LedgerRef = init(x, db, false) @@ -538,6 +537,8 @@ proc resolveCode*(ac: LedgerRef, address: Address): CodeBytesRef = ac.getCode(delegateTo) proc getCommittedStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = + let acc = ac.getAccount(address, false) + when statelessEnabled: let lookupKey = (address, slot.toSlotKey) if not ac.witnessKeys.contains(lookupKey): @@ -545,12 +546,13 @@ proc getCommittedStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt2 storageMode: true, storageSlot: slot) - let acc = ac.getAccount(address, false) if acc.isNil: return acc.originalStorageValue(slot, ac) proc getStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = + let acc = ac.getAccount(address, false) + when statelessEnabled: let lookupKey = (address, slot.toSlotKey) if not ac.witnessKeys.contains(lookupKey): @@ -558,7 +560,6 @@ proc getStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = storageMode: true, storageSlot: slot) - let acc = ac.getAccount(address, false) if acc.isNil: return acc.storageValue(slot, 
ac) @@ -640,6 +641,14 @@ proc setCode*(ac: LedgerRef, address: Address, code: seq[byte]) = proc setStorage*(ac: LedgerRef, address: Address, slot, value: UInt256) = let acc = ac.getAccount(address) acc.flags.incl {Alive} + + when statelessEnabled: + let lookupKey = (address, slot.toSlotKey) + if not ac.witnessKeys.contains(lookupKey): + ac.witnessKeys[lookupKey] = WitnessKey( + storageMode: true, + storageSlot: slot) + let oldValue = acc.storageValue(slot, ac) if oldValue != value: var acc = ac.makeDirty(address) @@ -777,9 +786,6 @@ proc persist*(ac: LedgerRef, ac.isDirty = false - when statelessEnabled: - ac.witnessKeys.clear() - iterator addresses*(ac: LedgerRef): Address = # make sure all savepoint already committed doAssert(ac.savePoint.parentSavepoint.isNil) @@ -920,9 +926,12 @@ proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256] storageProof when statelessEnabled: - func getWitnessKeys*(ac: LedgerRef): OrderedTable[(Address, Hash32), WitnessKey] = + func getWitnessKeys*(ac: LedgerRef): WitnessTable = ac.witnessKeys + proc clearWitnessKeys*(ac: LedgerRef) = + ac.witnessKeys.clear() + # ------------------------------------------------------------------------------ # Public virtual read-only methods # ------------------------------------------------------------------------------ From 34f6e2f33a4fb7261e9fc32a1b3deeafca118e9a Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:22:10 +0800 Subject: [PATCH 23/26] Add set storage test. 
--- execution_chain/db/ledger.nim | 2 +- tests/test_ledger.nim | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/execution_chain/db/ledger.nim b/execution_chain/db/ledger.nim index f7430800c..c97f014f2 100644 --- a/execution_chain/db/ledger.nim +++ b/execution_chain/db/ledger.nim @@ -148,7 +148,7 @@ when debugLedgerRef: template logTxt(info: static[string]): static[string] = "LedgerRef " & info -template toAccountKey*(acc: AccountRef): Hash32 = +template toAccountKey(acc: AccountRef): Hash32 = acc.accPath template toAccountKey*(eAddr: Address): Hash32 = diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index 55d9ba370..c3d011b9d 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -715,6 +715,7 @@ proc runLedgerBasicOperationsTests() = check 3.u256 in vals when defined(stateless): + test "Witness keys - Get account": var ac = LedgerRef.init(memDB.baseTxFrame()) @@ -760,6 +761,21 @@ proc runLedgerBasicOperationsTests() = witnessKeys.len() == 2 keyData.storageSlot == slot1 + test "Witness keys - Set storage": + var + ac = LedgerRef.init(memDB.baseTxFrame()) + addr1 = initAddr(1) + slot1 = 1.u256 + + ac.setStorage(addr1, slot1, slot1) + + let + witnessKeys = ac.getWitnessKeys() + keyData = witnessKeys.getOrDefault((addr1, slot1.toSlotKey)) + check: + witnessKeys.len() == 2 + keyData.storageSlot == slot1 + test "Witness keys - Get account, code and storage": var ac = LedgerRef.init(memDB.baseTxFrame()) From fa1910dee9f160abeb7fd468c6b20299f99f8cb2 Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Tue, 18 Mar 2025 20:30:47 +0800 Subject: [PATCH 24/26] Clear witness keys after each call. 
--- fluffy/evm/portal_evm.nim | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index a5ac17248..1d69809b1 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -145,7 +145,7 @@ proc call*( debug "Code to be executed", code = code.asSeq().to0xHex() var - lastWitnessKeys: OrderedTable[(Address, Hash32), WitnessKey] + lastWitnessKeys: WitnessTable witnessKeys = vmState.ledger.getWitnessKeys() callResult: EvmResult[CallResult] evmCallCount = 0 @@ -159,8 +159,9 @@ proc call*( vmState.ledger.rollback(sp) # all state changes from the call are reverted # Collect the keys after executing the transaction - lastWitnessKeys = witnessKeys + lastWitnessKeys = ensureMove(witnessKeys) witnessKeys = vmState.ledger.getWitnessKeys() + vmState.ledger.clearWitnessKeys() # If the witness keys did not change after the last execution then we can stop # the execution loop because we have already executed the transaction with the From 6c49854baf29fccd09b929ad431397378bd56a1a Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Wed, 19 Mar 2025 11:29:40 +0800 Subject: [PATCH 25/26] Implement second state fetch method and put behind a boolean flag. Improve comments. --- fluffy/evm/portal_evm.nim | 57 ++++++++++++++++++++++++++++---------- fluffy/rpc/rpc_eth_api.nim | 13 +++++++-- 2 files changed, 53 insertions(+), 17 deletions(-) diff --git a/fluffy/evm/portal_evm.nim b/fluffy/evm/portal_evm.nim index 1d69809b1..02a0feaef 100644 --- a/fluffy/evm/portal_evm.nim +++ b/fluffy/evm/portal_evm.nim @@ -58,8 +58,6 @@ logScope: # the worst case performance because all state accesses inside the EVM are # completely sequential. 
-# Limit the max number of calls to prevent infinite loops and/or DOS in the event -# of a bug in the implementation const EVM_CALL_LIMIT = 10000 type @@ -111,7 +109,10 @@ proc init*(T: type PortalEvm, hn: HistoryNetwork, sn: StateNetwork): T = PortalEvm(historyNetwork: hn, stateNetwork: sn, com: com) proc call*( - evm: PortalEvm, tx: TransactionArgs, blockNumOrHash: uint64 | Hash32 + evm: PortalEvm, + tx: TransactionArgs, + blockNumOrHash: uint64 | Hash32, + optimisticStateFetch = true, ): Future[Result[CallResult, string]] {.async: (raises: [CancelledError]).} = let to = tx.to.valueOr: @@ -150,6 +151,8 @@ proc call*( callResult: EvmResult[CallResult] evmCallCount = 0 + # Limit the max number of calls to prevent infinite loops and/or DOS in the + # event of a bug in the implementation. while evmCallCount < EVM_CALL_LIMIT: debug "Starting PortalEvm execution", evmCallCount @@ -163,21 +166,23 @@ proc call*( witnessKeys = vmState.ledger.getWitnessKeys() vmState.ledger.clearWitnessKeys() - # If the witness keys did not change after the last execution then we can stop - # the execution loop because we have already executed the transaction with the - # correct state - if lastWitnessKeys == witnessKeys: - break - try: var accountQueries = newSeq[AccountQuery]() storageQueries = newSeq[StorageQuery]() codeQueries = newSeq[CodeQuery]() - # Loop through the collected keys and fetch all state concurrently + # Loop through the collected keys and fetch the state concurrently. + # If optimisticStateFetch is enabled then we fetch state for all the witness + # keys and await all queries before continuing to the next call. + # If optimisticStateFetch is disabled then we only fetch and then await on + # one piece of state (the next in the ordered witness keys) while the remaining + # state queries are still issued in the background just incase the state is + # needed in the next iteration. 
+ var stateFetchDone = false for k, v in witnessKeys: let (adr, _) = k + if v.storageMode: let slotIdx = (adr, v.storageSlot) if slotIdx notin fetchedStorage: @@ -185,20 +190,42 @@ proc call*( let storageFut = evm.stateNetwork.getStorageAtByStateRoot( header.stateRoot, adr, v.storageSlot ) - storageQueries.add(StorageQuery.init(adr, v.storageSlot, storageFut)) + if not stateFetchDone: + storageQueries.add(StorageQuery.init(adr, v.storageSlot, storageFut)) + if not optimisticStateFetch: + stateFetchDone = true elif adr != default(Address): doAssert(adr == v.address) if adr notin fetchedAccounts: debug "Fetching account", address = adr let accFut = evm.stateNetwork.getAccount(header.stateRoot, adr) - accountQueries.add(AccountQuery.init(adr, accFut)) + if not stateFetchDone: + accountQueries.add(AccountQuery.init(adr, accFut)) + if not optimisticStateFetch: + stateFetchDone = true if v.codeTouched and adr notin fetchedCode: debug "Fetching code", address = adr - let codeFut = - evm.stateNetwork.getCodeByStateRoot(header.stateRoot, adr) - codeQueries.add(CodeQuery.init(adr, codeFut)) + let codeFut = evm.stateNetwork.getCodeByStateRoot(header.stateRoot, adr) + if not stateFetchDone: + codeQueries.add(CodeQuery.init(adr, codeFut)) + if not optimisticStateFetch: + stateFetchDone = true + + if optimisticStateFetch: + # If the witness keys did not change after the last execution then we can + # stop the execution loop because we have already executed the transaction + # with the correct state. + if lastWitnessKeys == witnessKeys: + break + else: + # When optimisticStateFetch is disabled and stateFetchDone is not set then + # we know that all the state has already been fetched in the last iteration + # of the loop and therefore we have already executed the transaction with + # the correct state. 
+ if not stateFetchDone: + break # Store fetched state in the in-memory EVM for q in accountQueries: diff --git a/fluffy/rpc/rpc_eth_api.nim b/fluffy/rpc/rpc_eth_api.nim index 5b215e593..468855c77 100644 --- a/fluffy/rpc/rpc_eth_api.nim +++ b/fluffy/rpc/rpc_eth_api.nim @@ -432,7 +432,7 @@ proc installEthApiHandlers*( ) rpcServer.rpc("eth_call") do( - tx: TransactionArgs, quantityTag: RtBlockIdentifier + tx: TransactionArgs, quantityTag: RtBlockIdentifier, optimisticStateFetch: Opt[bool] ) -> seq[byte]: # TODO: add documentation @@ -447,7 +447,16 @@ proc installEthApiHandlers*( sn = stateNetwork.getOrRaise() evm = portalEvm.getOrRaise() - let callResult = (await evm.call(tx, quantityTag.number.uint64)).valueOr: + let callResult = ( + await evm.call( + tx, + quantityTag.number.uint64, + if optimisticStateFetch.isNone(): + true + else: + optimisticStateFetch.get(), + ) + ).valueOr: raise newException(ValueError, error) if callResult.error.len() > 0: From f7ec7d6a8b8c16d81d72bdd2f5c552b507852e1f Mon Sep 17 00:00:00 2001 From: bhartnett <51288821+bhartnett@users.noreply.github.com> Date: Wed, 19 Mar 2025 11:35:16 +0800 Subject: [PATCH 26/26] Cleanup test. 
--- tests/test_multi_keys.nim | 54 --------------------------------------- 1 file changed, 54 deletions(-) diff --git a/tests/test_multi_keys.nim b/tests/test_multi_keys.nim index 3d00a74d5..ccc5a4f99 100644 --- a/tests/test_multi_keys.nim +++ b/tests/test_multi_keys.nim @@ -115,58 +115,4 @@ proc multiKeysMain*() = mg.group.first == 2 mg.group.last == 3 - test "Compare multikeys using equals": - let - keys1 = [ - "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", - "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606" - ] - keys2 = [ - "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", - "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606", - "0abc6a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b" - ] - storageKeys = [ - "0abc8a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b", - "0abc7a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b" - ] - - let - m1 = initMultiKeys(keys1, storageMode = false) - m2 = initMultiKeys(keys1, storageMode = false) - m3 = initMultiKeys(keys2, storageMode = false) - - m1.keys[0].storageKeys = initMultiKeys(storageKeys, storageMode = true) - m2.keys[0].storageKeys = initMultiKeys(storageKeys, storageMode = true) - m3.keys[0].storageKeys = initMultiKeys(storageKeys, storageMode = true) - - check: - m1.equals(m2) - m2.equals(m1) - not m2.equals(m3) - not m3.equals(m2) - - test "Compare multikeys using equals - storageMode": - let - keys1 = [ - "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", - "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606" - ] - keys2 = [ - "01237124bce7762869be690036144c12c256bdb06ee9073ad5ecca18a47c3254", - "0890cc5b491732f964182ce4bde5e2468318692ed446e008f621b26f8ff56606", - "0abc6a163140158288775c8912aed274fb9d6a3a260e9e95e03e70ba8df30f6b" - ] - - let - m1 = initMultiKeys(keys1, storageMode = true) - m2 = initMultiKeys(keys1, storageMode = true) - m3 = initMultiKeys(keys2, storageMode = 
true) - - check: - m1.equals(m2) - m2.equals(m1) - not m2.equals(m3) - not m3.equals(m2) - multiKeysMain()