From c47440c08cc72c4d7c82c5da1bb4ba0a7b7840e6 Mon Sep 17 00:00:00 2001 From: Pia Date: Wed, 17 Jul 2024 16:34:23 +0200 Subject: [PATCH] refactor type, module --- src/HdpExecutionStore.sol | 127 ++++++++++++------ src/datatypes/ComputationalTaskCodecs.sol | 64 --------- .../IterativeDynamicLayoutDatalakeCodecs.sol | 51 ------- src/datatypes/Task.sol | 8 ++ .../BlockSampledDatalakeCodecs.sol | 0 src/datatypes/datalake/ComputeCodecs.sol | 106 +++++++++++++++ src/datatypes/{ => datalake}/Datalake.sol | 3 +- .../TransactionsInBlockDatalakeCodecs.sol | 0 src/datatypes/module/ModuleCodecs.sol | 44 ++++++ test/BlockSampledHdpExecutionStore.t.sol | 121 ++++++++++++----- ...TransactionsInBlockHdpExecutionStore.t.sol | 121 ++++++++++++----- 11 files changed, 416 insertions(+), 229 deletions(-) delete mode 100644 src/datatypes/ComputationalTaskCodecs.sol delete mode 100644 src/datatypes/IterativeDynamicLayoutDatalakeCodecs.sol create mode 100644 src/datatypes/Task.sol rename src/datatypes/{ => datalake}/BlockSampledDatalakeCodecs.sol (100%) create mode 100644 src/datatypes/datalake/ComputeCodecs.sol rename src/datatypes/{ => datalake}/Datalake.sol (70%) rename src/datatypes/{ => datalake}/TransactionsInBlockDatalakeCodecs.sol (100%) create mode 100644 src/datatypes/module/ModuleCodecs.sol diff --git a/src/HdpExecutionStore.sol b/src/HdpExecutionStore.sol index 880f7b3..5f85acc 100644 --- a/src/HdpExecutionStore.sol +++ b/src/HdpExecutionStore.sol @@ -8,14 +8,9 @@ import {IFactsRegistry} from "./interfaces/IFactsRegistry.sol"; import {ISharpFactsAggregator} from "./interfaces/ISharpFactsAggregator.sol"; import {IAggregatorsFactory} from "./interfaces/IAggregatorsFactory.sol"; -import {BlockSampledDatalake, BlockSampledDatalakeCodecs} from "./datatypes/BlockSampledDatalakeCodecs.sol"; -import { - TransactionsInBlockDatalake, - TransactionsInBlockDatalakeCodecs -} from "./datatypes/TransactionsInBlockDatalakeCodecs.sol"; -import {IterativeDynamicLayoutDatalake} from "./datatypes/IterativeDynamicLayoutDatalakeCodecs.sol"; -import {IterativeDynamicLayoutDatalakeCodecs} from "./datatypes/IterativeDynamicLayoutDatalakeCodecs.sol"; -import {ComputationalTask, ComputationalTaskCodecs} from "./datatypes/ComputationalTaskCodecs.sol"; +import {BlockSampledDatalake, BlockSampledDatalakeCodecs} from "./datatypes/datalake/BlockSampledDatalakeCodecs.sol"; +import {TransactionsInBlockDatalake, TransactionsInBlockDatalakeCodecs} from "./datatypes/datalake/TransactionsInBlockDatalakeCodecs.sol"; +import {ComputationalTask, ComputationalTaskCodecs} from "./datatypes/datalake/ComputeCodecs.sol"; /// Caller is not authorized to perform the action error Unauthorized(); @@ -35,7 +30,6 @@ contract HdpExecutionStore is AccessControl { using MerkleProof for bytes32[]; using BlockSampledDatalakeCodecs for BlockSampledDatalake; using TransactionsInBlockDatalakeCodecs for TransactionsInBlockDatalake; - using IterativeDynamicLayoutDatalakeCodecs for IterativeDynamicLayoutDatalake; using ComputationalTaskCodecs for ComputationalTask; /// @notice The status of a task @@ -55,10 +49,16 @@ contract HdpExecutionStore is AccessControl { event MmrRootCached(uint256 mmrId, uint256 mmrSize, bytes32 mmrRoot); /// @notice emitted when a new task with block sampled datalake is scheduled - event TaskWithBlockSampledDatalakeScheduled(BlockSampledDatalake datalake, ComputationalTask task); + event TaskWithBlockSampledDatalakeScheduled( + BlockSampledDatalake datalake, + ComputationalTask task + ); /// @notice emitted when a new task with 
transactions in block datalake is scheduled - event TaskWithTransactionsInBlockDatalakeScheduled(TransactionsInBlockDatalake datalake, ComputationalTask task); + event TaskWithTransactionsInBlockDatalakeScheduled( + TransactionsInBlockDatalake datalake, + ComputationalTask task + ); /// @notice constant representing role of operator bytes32 public constant OPERATOR_ROLE = keccak256("OPERATOR_ROLE"); @@ -79,9 +79,14 @@ contract HdpExecutionStore is AccessControl { mapping(bytes32 => TaskResult) public cachedTasksResult; /// @notice mapping of chain id => mmr id => mmr size => mmr root - mapping(uint256 => mapping(uint256 => mapping(uint256 => bytes32))) public cachedMMRsRoots; - - constructor(IFactsRegistry factsRegistry, IAggregatorsFactory aggregatorsFactory, bytes32 programHash) { + mapping(uint256 => mapping(uint256 => mapping(uint256 => bytes32))) + public cachedMMRsRoots; + + constructor( + IFactsRegistry factsRegistry, + IAggregatorsFactory aggregatorsFactory, + bytes32 programHash + ) { SHARP_FACTS_REGISTRY = factsRegistry; AGGREGATORS_FACTORY = aggregatorsFactory; PROGRAM_HASH = programHash; @@ -99,11 +104,20 @@ contract HdpExecutionStore is AccessControl { /// @notice Caches the MMR root for a given MMR id /// @notice Get MMR size and root from the aggregator and cache it function cacheMmrRoot(uint256 mmrId) public { - ISharpFactsAggregator aggregator = AGGREGATORS_FACTORY.aggregatorsById(mmrId); - ISharpFactsAggregator.AggregatorState memory aggregatorState = aggregator.aggregatorState(); - cachedMMRsRoots[SEPOLIA_CHAIN_ID][mmrId][aggregatorState.mmrSize] = aggregatorState.poseidonMmrRoot; - - emit MmrRootCached(mmrId, aggregatorState.mmrSize, aggregatorState.poseidonMmrRoot); + ISharpFactsAggregator aggregator = AGGREGATORS_FACTORY.aggregatorsById( + mmrId + ); + ISharpFactsAggregator.AggregatorState + memory aggregatorState = aggregator.aggregatorState(); + cachedMMRsRoots[SEPOLIA_CHAIN_ID][mmrId][ + aggregatorState.mmrSize + ] = aggregatorState.poseidonMmrRoot; + + emit MmrRootCached( + mmrId, + aggregatorState.mmrSize, + aggregatorState.poseidonMmrRoot + ); } /// @notice Requests the execution of a task with a block sampled datalake @@ -122,9 +136,15 @@ contract HdpExecutionStore is AccessControl { } // Store the task result - cachedTasksResult[taskCommitment] = TaskResult({status: TaskStatus.SCHEDULED, result: ""}); - - emit TaskWithBlockSampledDatalakeScheduled(blockSampledDatalake, computationalTask); + cachedTasksResult[taskCommitment] = TaskResult({ + status: TaskStatus.SCHEDULED, + result: "" + }); + + emit TaskWithBlockSampledDatalakeScheduled( + blockSampledDatalake, + computationalTask + ); } /// @notice Requests the execution of a task with a transactions in block datalake @@ -143,9 +163,15 @@ contract HdpExecutionStore is AccessControl { } // Store the task result - cachedTasksResult[taskCommitment] = TaskResult({status: TaskStatus.SCHEDULED, result: ""}); - - emit TaskWithTransactionsInBlockDatalakeScheduled(transactionsInBlockDatalake, computationalTask); + cachedTasksResult[taskCommitment] = TaskResult({ + status: TaskStatus.SCHEDULED, + result: "" + }); + + emit TaskWithTransactionsInBlockDatalakeScheduled( + transactionsInBlockDatalake, + computationalTask + ); } /// @notice Authenticates the execution of a task is finalized @@ -172,7 +198,7 @@ contract HdpExecutionStore is AccessControl { bytes32[] calldata taskCommitments, bytes32[] calldata taskResults ) external onlyOperator { - assert (mmrIds.length == mmrSizes.length); + assert(mmrIds.length == 
mmrSizes.length); // Initialize an array of uint256 to store the program output uint256[] memory programOutput = new uint256[](4 + mmrIds.length * 4); @@ -195,7 +221,9 @@ contract HdpExecutionStore is AccessControl { bytes32 programOutputHash = keccak256(abi.encodePacked(programOutput)); // Compute GPS fact hash - bytes32 gpsFactHash = keccak256(abi.encode(PROGRAM_HASH, programOutputHash)); + bytes32 gpsFactHash = keccak256( + abi.encode(PROGRAM_HASH, programOutputHash) + ); // Ensure GPS fact is registered if (!SHARP_FACTS_REGISTRY.isValid(gpsFactHash)) { @@ -209,42 +237,63 @@ contract HdpExecutionStore is AccessControl { bytes32[] memory resultInclusionProof = resultsInclusionProofs[i]; // Convert the low and high 128 bits to a single 256 bit value - bytes32 resultMerkleRoot = bytes32((resultMerkleRootHigh << 128) | resultMerkleRootLow); - bytes32 taskMerkleRoot = bytes32((taskMerkleRootHigh << 128) | taskMerkleRootLow); + bytes32 resultMerkleRoot = bytes32( + (resultMerkleRootHigh << 128) | resultMerkleRootLow + ); + bytes32 taskMerkleRoot = bytes32( + (taskMerkleRootHigh << 128) | taskMerkleRootLow + ); // Compute the Merkle leaf of the task bytes32 taskCommitment = taskCommitments[i]; bytes32 taskMerkleLeaf = standardLeafHash(taskCommitment); // Ensure that the task is included in the batch, by verifying the Merkle proof - bool isVerifiedTask = taskInclusionProof.verify(taskMerkleRoot, taskMerkleLeaf); + bool isVerifiedTask = taskInclusionProof.verify( + taskMerkleRoot, + taskMerkleLeaf + ); if (!isVerifiedTask) { revert NotInBatch(); } // Compute the Merkle leaf of the task result - bytes32 taskResultCommitment = keccak256(abi.encode(taskCommitment, computationalTaskResult)); - bytes32 taskResultMerkleLeaf = standardLeafHash(taskResultCommitment); + bytes32 taskResultCommitment = keccak256( + abi.encode(taskCommitment, computationalTaskResult) + ); + bytes32 taskResultMerkleLeaf = standardLeafHash( + taskResultCommitment + ); // Ensure that the task result is included in the batch, by verifying the Merkle proof - bool isVerifiedResult = resultInclusionProof.verify(resultMerkleRoot, taskResultMerkleLeaf); + bool isVerifiedResult = resultInclusionProof.verify( + resultMerkleRoot, + taskResultMerkleLeaf + ); if (!isVerifiedResult) { revert NotInBatch(); } // Store the task result - cachedTasksResult[taskCommitment] = - TaskResult({status: TaskStatus.FINALIZED, result: computationalTaskResult}); + cachedTasksResult[taskCommitment] = TaskResult({ + status: TaskStatus.FINALIZED, + result: computationalTaskResult + }); } } /// @notice Load MMR root from cache with given mmrId and mmrSize - function loadMmrRoot(uint256 mmrId, uint256 mmrSize) public view returns (bytes32) { + function loadMmrRoot( + uint256 mmrId, + uint256 mmrSize + ) public view returns (bytes32) { return cachedMMRsRoots[SEPOLIA_CHAIN_ID][mmrId][mmrSize]; } /// @notice Returns the result of a finalized task - function getFinalizedTaskResult(bytes32 taskCommitment) external view returns (bytes32) { + function getFinalizedTaskResult( + bytes32 taskCommitment + ) external view returns (bytes32) { // Ensure task is finalized if (cachedTasksResult[taskCommitment].status != TaskStatus.FINALIZED) { revert NotFinalized(); @@ -253,7 +302,9 @@ contract HdpExecutionStore is AccessControl { } /// @notice Returns the status of a task - function getTaskStatus(bytes32 taskCommitment) external view returns (TaskStatus) { + function getTaskStatus( + bytes32 taskCommitment + ) external view returns (TaskStatus) { return 
cachedTasksResult[taskCommitment].status; } diff --git a/src/datatypes/ComputationalTaskCodecs.sol b/src/datatypes/ComputationalTaskCodecs.sol deleted file mode 100644 index 94f70be..0000000 --- a/src/datatypes/ComputationalTaskCodecs.sol +++ /dev/null @@ -1,64 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.4; - -/// @dev A ComputationalTask. -/// @param AggregateFnId The aggregate function id. -/// @param operator The operator to use (only COUNT). -/// @param valueToCompare The value to compare (COUNT/SLR). -/// The context is used to pass additional parameters to the aggregate function. -struct ComputationalTask { - AggregateFn aggregateFnId; - Operator operatorId; - uint256 valueToCompare; -} - -///@notice Aggregates functions. -enum AggregateFn { - AVG, - SUM, - MIN, - MAX, - COUNT, - MERKLE, - SLR -} - -///@notice Operators for COUNT. -enum Operator { - NONE, - EQ, - NEQ, - GT, - GTE, - LT, - LTE -} - -/// @notice Codecs for ComputationalTask. -/// @dev Represent a computational task with an aggregate function and context. -library ComputationalTaskCodecs { - /// @dev Encodes a ComputationalTask. - /// @param task The ComputationalTask to encode. - function encode(ComputationalTask memory task) internal pure returns (bytes memory) { - return abi.encode(task.aggregateFnId, task.operatorId, task.valueToCompare); - } - - /// @dev Get the commitment of a ComputationalTask. - /// @notice The commitment embeds the datalake commitment. - /// @param task The ComputationalTask to commit. - /// @param datalakeCommitment The commitment of the datalake. - function commit(ComputationalTask memory task, bytes32 datalakeCommitment) internal pure returns (bytes32) { - return keccak256(abi.encode(datalakeCommitment, task.aggregateFnId, task.operatorId, task.valueToCompare)); - } - - /// @dev Decodes a ComputationalTask. - /// @param data The encoded ComputationalTask. 
- function decode(bytes memory data) internal pure returns (ComputationalTask memory) { - (uint8 aggregateFnId, uint8 operator, uint256 valueToCompare) = abi.decode(data, (uint8, uint8, uint256)); - return ComputationalTask({ - aggregateFnId: AggregateFn(aggregateFnId), - operatorId: Operator(operator), - valueToCompare: valueToCompare - }); - } -} diff --git a/src/datatypes/IterativeDynamicLayoutDatalakeCodecs.sol b/src/datatypes/IterativeDynamicLayoutDatalakeCodecs.sol deleted file mode 100644 index d698494..0000000 --- a/src/datatypes/IterativeDynamicLayoutDatalakeCodecs.sol +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.4; - -import {DatalakeCode} from "./Datalake.sol"; - -struct IterativeDynamicLayoutDatalake { - uint256 blockNumber; - address account; - uint256 slotIndex; - uint256 initialKey; - uint256 keyBoundry; - uint256 increment; -} - -library IterativeDynamicLayoutDatalakeCodecs { - function encode(IterativeDynamicLayoutDatalake memory datalake) internal pure returns (bytes memory) { - return abi.encode( - DatalakeCode.IterativeDynamicLayout, - datalake.blockNumber, - datalake.account, - datalake.slotIndex, - datalake.initialKey, - datalake.keyBoundry, - datalake.increment - ); - } - - function commit(IterativeDynamicLayoutDatalake memory datalake) internal pure returns (bytes32) { - return keccak256(encode(datalake)); - } - - function decode(bytes memory data) internal pure returns (IterativeDynamicLayoutDatalake memory) { - ( - , - uint256 blockNumber, - address account, - uint256 slotIndex, - uint256 initialKey, - uint256 keyBoundry, - uint256 increment - ) = abi.decode(data, (DatalakeCode, uint256, address, uint256, uint256, uint256, uint256)); - return IterativeDynamicLayoutDatalake({ - blockNumber: blockNumber, - account: account, - slotIndex: slotIndex, - initialKey: initialKey, - keyBoundry: keyBoundry, - increment: increment - }); - } -} diff --git a/src/datatypes/Task.sol b/src/datatypes/Task.sol new file mode 100644 index 0000000..f4c2cc8 --- /dev/null +++ b/src/datatypes/Task.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +/// @notice Task type. +enum TaskCode { + Datalake, + Module +} diff --git a/src/datatypes/BlockSampledDatalakeCodecs.sol b/src/datatypes/datalake/BlockSampledDatalakeCodecs.sol similarity index 100% rename from src/datatypes/BlockSampledDatalakeCodecs.sol rename to src/datatypes/datalake/BlockSampledDatalakeCodecs.sol diff --git a/src/datatypes/datalake/ComputeCodecs.sol b/src/datatypes/datalake/ComputeCodecs.sol new file mode 100644 index 0000000..85955b0 --- /dev/null +++ b/src/datatypes/datalake/ComputeCodecs.sol @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {TaskCode} from "../Task.sol"; + +/// @dev A ComputationalTask. +/// @param AggregateFnId The aggregate function id. +/// @param operator The operator to use (only COUNT). +/// @param valueToCompare The value to compare (COUNT/SLR). +/// The context is used to pass additional parameters to the aggregate function. +struct ComputationalTask { + AggregateFn aggregateFnId; + Operator operatorId; + uint256 valueToCompare; +} + +///@notice Aggregates functions. +enum AggregateFn { + AVG, + SUM, + MIN, + MAX, + COUNT, + MERKLE, + SLR +} + +///@notice Operators for COUNT. +enum Operator { + NONE, + EQ, + NEQ, + GT, + GTE, + LT, + LTE +} + +/// @notice Codecs for ComputationalTask. +/// @dev Represent a computational task with an aggregate function and context. 
+library ComputationalTaskCodecs { + /// @dev Encodes a ComputationalTask. + /// @param task The ComputationalTask to encode. + function encode( + ComputationalTask memory task + ) internal pure returns (bytes memory) { + return + abi.encode( + task.aggregateFnId, + task.operatorId, + task.valueToCompare + ); + } + + /// @dev Encodes a ComputationalTask. + /// @param task The ComputationalTask to encode. + function encode_task( + ComputationalTask memory task, + bytes memory encodedDatalake + ) internal pure returns (bytes memory) { + return + abi.encode( + TaskCode.Module, + encodedDatalake, + abi.encode( + task.aggregateFnId, + task.operatorId, + task.valueToCompare + ) + ); + } + + /// @dev Get the commitment of a ComputationalTask. + /// @notice The commitment embeds the datalake commitment. + /// @param task The ComputationalTask to commit. + /// @param datalakeCommitment The commitment of the datalake. + function commit( + ComputationalTask memory task, + bytes32 datalakeCommitment + ) internal pure returns (bytes32) { + return + keccak256( + abi.encode( + datalakeCommitment, + task.aggregateFnId, + task.operatorId, + task.valueToCompare + ) + ); + } + + /// @dev Decodes a ComputationalTask. + /// @param data The encoded ComputationalTask. + function decode( + bytes memory data + ) internal pure returns (ComputationalTask memory) { + (uint8 aggregateFnId, uint8 operator, uint256 valueToCompare) = abi + .decode(data, (uint8, uint8, uint256)); + return + ComputationalTask({ + aggregateFnId: AggregateFn(aggregateFnId), + operatorId: Operator(operator), + valueToCompare: valueToCompare + }); + } +} diff --git a/src/datatypes/Datalake.sol b/src/datatypes/datalake/Datalake.sol similarity index 70% rename from src/datatypes/Datalake.sol rename to src/datatypes/datalake/Datalake.sol index 12ae393..1a2750f 100644 --- a/src/datatypes/Datalake.sol +++ b/src/datatypes/datalake/Datalake.sol @@ -4,6 +4,5 @@ pragma solidity ^0.8.4; /// @notice Datalake type. enum DatalakeCode { BlockSampled, - TransactionsInBlock, - IterativeDynamicLayout + TransactionsInBlock } diff --git a/src/datatypes/TransactionsInBlockDatalakeCodecs.sol b/src/datatypes/datalake/TransactionsInBlockDatalakeCodecs.sol similarity index 100% rename from src/datatypes/TransactionsInBlockDatalakeCodecs.sol rename to src/datatypes/datalake/TransactionsInBlockDatalakeCodecs.sol diff --git a/src/datatypes/module/ModuleCodecs.sol b/src/datatypes/module/ModuleCodecs.sol new file mode 100644 index 0000000..4ee695e --- /dev/null +++ b/src/datatypes/module/ModuleCodecs.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.4; + +import {TaskCode} from "../Task.sol"; + +/// @dev A module compute. +/// @param classHash The class hash of the module. +/// @param inputs The inputs to the module. +struct ModuleCompute { + uint256 classHash; + uint256[] inputs; +} + +/// @notice Codecs for ModuleCompute. +/// @dev Represent a computation perform by a module. +library ModuleComputeCodecs { + /// @dev Encodes a ModuleCompute. + /// @param module The ModuleCompute to encode. + function encode_task( + ModuleCompute memory module + ) internal pure returns (bytes memory) { + return abi.encode(TaskCode.Module, module.classHash, module.inputs); + } + + /// @dev Get the commitment of a ModuleCompute. + /// @param module The ModuleCompute to commit. 
+ function commit( + ModuleCompute memory module + ) internal pure returns (bytes32) { + return keccak256(abi.encode(module.classHash, module.inputs)); + } + + /// @dev Decodes a ModuleCompute. + /// @param data The encoded ModuleCompute. + function decode( + bytes memory data + ) internal pure returns (ModuleCompute memory) { + (, uint256 classHash, uint256[] memory inputs) = abi.decode( + data, + (TaskCode, uint256, uint256[]) + ); + return ModuleCompute(classHash, inputs); + } +} diff --git a/test/BlockSampledHdpExecutionStore.t.sol b/test/BlockSampledHdpExecutionStore.t.sol index 5ba6de7..692b346 100644 --- a/test/BlockSampledHdpExecutionStore.t.sol +++ b/test/BlockSampledHdpExecutionStore.t.sol @@ -3,9 +3,9 @@ pragma solidity ^0.8.4; import {Test} from "forge-std/Test.sol"; import {HdpExecutionStore} from "../src/HdpExecutionStore.sol"; -import {BlockSampledDatalake, BlockSampledDatalakeCodecs} from "../src/datatypes/BlockSampledDatalakeCodecs.sol"; -import {ComputationalTask, ComputationalTaskCodecs} from "../src/datatypes/ComputationalTaskCodecs.sol"; -import {AggregateFn, Operator} from "../src/datatypes/ComputationalTaskCodecs.sol"; +import {BlockSampledDatalake, BlockSampledDatalakeCodecs} from "../src/datatypes/datalake/BlockSampledDatalakeCodecs.sol"; +import {ComputationalTask, ComputationalTaskCodecs} from "../src/datatypes/datalake/ComputeCodecs.sol"; +import {AggregateFn, Operator} from "../src/datatypes/datalake/ComputeCodecs.sol"; import {IFactsRegistry} from "../src/interfaces/IFactsRegistry.sol"; import {ISharpFactsAggregator} from "../src/interfaces/ISharpFactsAggregator.sol"; import {IAggregatorsFactory} from "../src/interfaces/IAggregatorsFactory.sol"; @@ -22,7 +22,10 @@ contract MockFactsRegistry is IFactsRegistry { contract MockAggregatorsFactory is IAggregatorsFactory { mapping(uint256 => ISharpFactsAggregator) public aggregatorsById; - function createAggregator(uint256 id, ISharpFactsAggregator aggregator) external { + function createAggregator( + uint256 id, + ISharpFactsAggregator aggregator + ) external { aggregatorsById[id] = aggregator; } } @@ -37,12 +40,13 @@ contract MockSharpFactsAggregator is ISharpFactsAggregator { } function aggregatorState() external view returns (AggregatorState memory) { - return AggregatorState({ - poseidonMmrRoot: usedMmrRoot, - keccakMmrRoot: bytes32(0), - mmrSize: usedMmrSize, - continuableParentHash: bytes32(0) - }); + return + AggregatorState({ + poseidonMmrRoot: usedMmrRoot, + keccakMmrRoot: bytes32(0), + mmrSize: usedMmrSize, + continuableParentHash: bytes32(0) + }); } } @@ -71,18 +75,21 @@ contract HdpExecutionStoreTest is Test { // !! If want to fetch different input, modify helpers/target/bs_cached_input.json && helpers/target/bs_cached_output.json // !! 
And construct corresponding BlockSampledDatalake and ComputationalTask here - BlockSampledDatalake datalake = BlockSampledDatalake({ - blockRangeStart: 5858987, - blockRangeEnd: 5858997, - increment: 2, - sampledProperty: BlockSampledDatalakeCodecs.encodeSampledPropertyForHeaderProp(uint8(18)) - }); - - ComputationalTask computationalTask = ComputationalTask({ - aggregateFnId: AggregateFn.SLR, - operatorId: Operator.NONE, - valueToCompare: uint256(10000000) - }); + BlockSampledDatalake datalake = + BlockSampledDatalake({ + blockRangeStart: 5858987, + blockRangeEnd: 5858997, + increment: 2, + sampledProperty: BlockSampledDatalakeCodecs + .encodeSampledPropertyForHeaderProp(uint8(18)) + }); + + ComputationalTask computationalTask = + ComputationalTask({ + aggregateFnId: AggregateFn.SLR, + operatorId: Operator.NONE, + valueToCompare: uint256(10000000) + }); function setUp() public { // Registery for facts that has been processed through SHARP @@ -95,11 +102,18 @@ contract HdpExecutionStoreTest is Test { bytes[] memory taskEncodedCompare = new bytes[](1); taskEncodedCompare[0] = computationalTask.encode(); - _callPreprocessCli(abi.encode(taskEncodedCompare), abi.encode(datalakeEncodedCompare)); + _callPreprocessCli( + abi.encode(taskEncodedCompare), + abi.encode(datalakeEncodedCompare) + ); // Get program hash from compiled Cairo program programHash = _getProgramHash(); - hdp = new HdpExecutionStore(factsRegistry, aggregatorsFactory, programHash); + hdp = new HdpExecutionStore( + factsRegistry, + aggregatorsFactory, + programHash + ); // Parse from input file ( @@ -115,27 +129,37 @@ contract HdpExecutionStoreTest is Test { ) = _fetchCairoInput(); bytes32 computedDatalakeCommitment = datalake.commit(); - bytes32 computedTaskCommitment = computationalTask.commit(computedDatalakeCommitment); + bytes32 computedTaskCommitment = computationalTask.commit( + computedDatalakeCommitment + ); assertEq(fetchedTasksCommitments[0], computedTaskCommitment); // Mock SHARP facts aggregator - sharpFactsAggregator = new MockSharpFactsAggregator(fetchedMmrRoots[0], fetchedMmrSizes[0]); + sharpFactsAggregator = new MockSharpFactsAggregator( + fetchedMmrRoots[0], + fetchedMmrSizes[0] + ); // Create mock SHARP facts aggregator - aggregatorsFactory.createAggregator(fetchedMmrIds[0], sharpFactsAggregator); + aggregatorsFactory.createAggregator( + fetchedMmrIds[0], + sharpFactsAggregator + ); assertTrue(hdp.hasRole(keccak256("OPERATOR_ROLE"), address(this))); hdp.grantRole(keccak256("OPERATOR_ROLE"), proverAddress); } function testHdpExecutionFlow() public { - (uint256 taskRootLow, uint256 taskRootHigh) = Uint256Splitter.split128(uint256(bytes32(fetchedTasksMerkleRoot))); + (uint256 taskRootLow, uint256 taskRootHigh) = Uint256Splitter.split128( + uint256(bytes32(fetchedTasksMerkleRoot)) + ); - (uint256 resultRootLow, uint256 resultRootHigh) = - Uint256Splitter.split128(uint256(bytes32(fetchedResultsMerkleRoot))); + (uint256 resultRootLow, uint256 resultRootHigh) = Uint256Splitter + .split128(uint256(bytes32(fetchedResultsMerkleRoot))); // Cache MMR root - for(uint i = 0; i < fetchedMmrIds.length; i++) { + for (uint i = 0; i < fetchedMmrIds.length; i++) { hdp.cacheMmrRoot(fetchedMmrIds[i]); } // Compute fact hash from PIE file @@ -163,11 +187,18 @@ contract HdpExecutionStoreTest is Test { ); // Check if the task state is FINALIZED - HdpExecutionStore.TaskStatus taskStatusAfter = hdp.getTaskStatus(fetchedTasksCommitments[0]); - assertEq(uint256(taskStatusAfter), uint256(HdpExecutionStore.TaskStatus.FINALIZED)); + 
HdpExecutionStore.TaskStatus taskStatusAfter = hdp.getTaskStatus( + fetchedTasksCommitments[0] + ); + assertEq( + uint256(taskStatusAfter), + uint256(HdpExecutionStore.TaskStatus.FINALIZED) + ); // Check if the task result is stored - bytes32 taskResult = hdp.getFinalizedTaskResult(fetchedTasksCommitments[0]); + bytes32 taskResult = hdp.getFinalizedTaskResult( + fetchedTasksCommitments[0] + ); assertEq(taskResult, fetchedResults[0]); } @@ -182,7 +213,10 @@ contract HdpExecutionStoreTest is Test { return abi.decode(abiEncoded, (bytes32)); } - function _callPreprocessCli(bytes memory encodedTask, bytes memory encodedDatalake) internal { + function _callPreprocessCli( + bytes memory encodedTask, + bytes memory encodedDatalake + ) internal { string[] memory inputs = new string[](4); inputs[0] = "node"; inputs[1] = "./helpers/fetch_cairo_input.js"; @@ -191,7 +225,9 @@ contract HdpExecutionStoreTest is Test { vm.ffi(inputs); } - function bytesToString(bytes memory _data) public pure returns (string memory) { + function bytesToString( + bytes memory _data + ) public pure returns (string memory) { bytes memory buffer = new bytes(_data.length); for (uint256 i = 0; i < _data.length; i++) { bytes1 b = _data[i]; @@ -245,7 +281,18 @@ contract HdpExecutionStoreTest is Test { tasksCommitments, taskResults ) = abi.decode( - abiEncoded, (uint256[], uint256[], bytes32[], bytes32, bytes32, bytes32[][], bytes32[][], bytes32[], bytes32[]) + abiEncoded, + ( + uint256[], + uint256[], + bytes32[], + bytes32, + bytes32, + bytes32[][], + bytes32[][], + bytes32[], + bytes32[] + ) ); } } diff --git a/test/TransactionsInBlockHdpExecutionStore.t.sol b/test/TransactionsInBlockHdpExecutionStore.t.sol index ca4c8d3..75b5613 100644 --- a/test/TransactionsInBlockHdpExecutionStore.t.sol +++ b/test/TransactionsInBlockHdpExecutionStore.t.sol @@ -3,12 +3,9 @@ pragma solidity ^0.8.4; import {Test} from "forge-std/Test.sol"; import {HdpExecutionStore} from "../src/HdpExecutionStore.sol"; -import { - TransactionsInBlockDatalake, - TransactionsInBlockDatalakeCodecs -} from "../src/datatypes/TransactionsInBlockDatalakeCodecs.sol"; -import {ComputationalTask, ComputationalTaskCodecs} from "../src/datatypes/ComputationalTaskCodecs.sol"; -import {AggregateFn, Operator} from "../src/datatypes/ComputationalTaskCodecs.sol"; +import {TransactionsInBlockDatalake, TransactionsInBlockDatalakeCodecs} from "../src/datatypes/datalake/TransactionsInBlockDatalakeCodecs.sol"; +import {ComputationalTask, ComputationalTaskCodecs} from "../src/datatypes/datalake/ComputeCodecs.sol"; +import {AggregateFn, Operator} from "../src/datatypes/datalake/ComputeCodecs.sol"; import {IFactsRegistry} from "../src/interfaces/IFactsRegistry.sol"; import {ISharpFactsAggregator} from "../src/interfaces/ISharpFactsAggregator.sol"; import {IAggregatorsFactory} from "../src/interfaces/IAggregatorsFactory.sol"; @@ -25,7 +22,10 @@ contract MockFactsRegistry is IFactsRegistry { contract MockAggregatorsFactory is IAggregatorsFactory { mapping(uint256 => ISharpFactsAggregator) public aggregatorsById; - function createAggregator(uint256 id, ISharpFactsAggregator aggregator) external { + function createAggregator( + uint256 id, + ISharpFactsAggregator aggregator + ) external { aggregatorsById[id] = aggregator; } } @@ -40,12 +40,13 @@ contract MockSharpFactsAggregator is ISharpFactsAggregator { } function aggregatorState() external view returns (AggregatorState memory) { - return AggregatorState({ - poseidonMmrRoot: usedMmrRoot, - keccakMmrRoot: bytes32(0), - mmrSize: 
usedMmrSize, - continuableParentHash: bytes32(0) - }); + return + AggregatorState({ + poseidonMmrRoot: usedMmrRoot, + keccakMmrRoot: bytes32(0), + mmrSize: usedMmrSize, + continuableParentHash: bytes32(0) + }); } } @@ -74,17 +75,23 @@ contract HdpExecutionStoreTest is Test { // !! If want to fetch different input, modify helpers/target/tx_cached_input.json && helpers/target/tx_cached_output.json // !! And construct corresponding TransactionsInBlockDatalake and ComputationalTask here - TransactionsInBlockDatalake datalake = TransactionsInBlockDatalake({ - targetBlock: uint256(5605816), - startIndex: uint256(12), - endIndex: uint256(53), - increment: uint256(1), - includedTypes: uint256(0x00000101), - sampledProperty: TransactionsInBlockDatalakeCodecs.encodeSampledPropertyFortxReceipt(uint8(0)) - }); + TransactionsInBlockDatalake datalake = + TransactionsInBlockDatalake({ + targetBlock: uint256(5605816), + startIndex: uint256(12), + endIndex: uint256(53), + increment: uint256(1), + includedTypes: uint256(0x00000101), + sampledProperty: TransactionsInBlockDatalakeCodecs + .encodeSampledPropertyFortxReceipt(uint8(0)) + }); ComputationalTask computationalTask = - ComputationalTask({aggregateFnId: AggregateFn.SLR, operatorId: Operator.NONE, valueToCompare: uint256(50)}); + ComputationalTask({ + aggregateFnId: AggregateFn.SLR, + operatorId: Operator.NONE, + valueToCompare: uint256(50) + }); function setUp() public { // Registery for facts that has been processed through SHARP @@ -97,11 +104,18 @@ contract HdpExecutionStoreTest is Test { bytes[] memory taskEncodedCompare = new bytes[](1); taskEncodedCompare[0] = computationalTask.encode(); - _callPreprocessCli(abi.encode(taskEncodedCompare), abi.encode(datalakeEncodedCompare)); + _callPreprocessCli( + abi.encode(taskEncodedCompare), + abi.encode(datalakeEncodedCompare) + ); // Get program hash from compiled Cairo program programHash = _getProgramHash(); - hdp = new HdpExecutionStore(factsRegistry, aggregatorsFactory, programHash); + hdp = new HdpExecutionStore( + factsRegistry, + aggregatorsFactory, + programHash + ); // Parse from input file ( @@ -117,27 +131,37 @@ contract HdpExecutionStoreTest is Test { ) = _fetchCairoInput(); bytes32 computedDatalakeCommitment = datalake.commit(); - bytes32 computedTaskCommitment = computationalTask.commit(computedDatalakeCommitment); + bytes32 computedTaskCommitment = computationalTask.commit( + computedDatalakeCommitment + ); assertEq(fetchedTasksCommitments[0], computedTaskCommitment); // Mock SHARP facts aggregator - sharpFactsAggregator = new MockSharpFactsAggregator(fetchedMmrRoots[0], fetchedMmrSizes[0]); + sharpFactsAggregator = new MockSharpFactsAggregator( + fetchedMmrRoots[0], + fetchedMmrSizes[0] + ); // Create mock SHARP facts aggregator - aggregatorsFactory.createAggregator(fetchedMmrIds[0], sharpFactsAggregator); + aggregatorsFactory.createAggregator( + fetchedMmrIds[0], + sharpFactsAggregator + ); assertTrue(hdp.hasRole(keccak256("OPERATOR_ROLE"), address(this))); hdp.grantRole(keccak256("OPERATOR_ROLE"), proverAddress); } function testHdpExecutionFlow() public { - (uint256 taskRootLow, uint256 taskRootHigh) = Uint256Splitter.split128(uint256(bytes32(fetchedTasksMerkleRoot))); + (uint256 taskRootLow, uint256 taskRootHigh) = Uint256Splitter.split128( + uint256(bytes32(fetchedTasksMerkleRoot)) + ); - (uint256 resultRootLow, uint256 resultRootHigh) = - Uint256Splitter.split128(uint256(bytes32(fetchedResultsMerkleRoot))); + (uint256 resultRootLow, uint256 resultRootHigh) = Uint256Splitter + 
.split128(uint256(bytes32(fetchedResultsMerkleRoot))); // Cache MMR roots - for(uint i = 0; i < fetchedMmrIds.length; i++) { + for (uint i = 0; i < fetchedMmrIds.length; i++) { hdp.cacheMmrRoot(fetchedMmrIds[i]); } @@ -166,11 +190,18 @@ contract HdpExecutionStoreTest is Test { ); // Check if the task state is FINALIZED - HdpExecutionStore.TaskStatus taskStatusAfter = hdp.getTaskStatus(fetchedTasksCommitments[0]); - assertEq(uint256(taskStatusAfter), uint256(HdpExecutionStore.TaskStatus.FINALIZED)); + HdpExecutionStore.TaskStatus taskStatusAfter = hdp.getTaskStatus( + fetchedTasksCommitments[0] + ); + assertEq( + uint256(taskStatusAfter), + uint256(HdpExecutionStore.TaskStatus.FINALIZED) + ); // Check if the task result is stored - bytes32 taskResult = hdp.getFinalizedTaskResult(fetchedTasksCommitments[0]); + bytes32 taskResult = hdp.getFinalizedTaskResult( + fetchedTasksCommitments[0] + ); assertEq(taskResult, fetchedResults[0]); } @@ -185,7 +216,10 @@ contract HdpExecutionStoreTest is Test { return abi.decode(abiEncoded, (bytes32)); } - function _callPreprocessCli(bytes memory encodedTask, bytes memory encodedDatalake) internal { + function _callPreprocessCli( + bytes memory encodedTask, + bytes memory encodedDatalake + ) internal { string[] memory inputs = new string[](4); inputs[0] = "node"; inputs[1] = "./helpers/fetch_cairo_input.js"; @@ -194,7 +228,9 @@ contract HdpExecutionStoreTest is Test { vm.ffi(inputs); } - function bytesToString(bytes memory _data) public pure returns (string memory) { + function bytesToString( + bytes memory _data + ) public pure returns (string memory) { bytes memory buffer = new bytes(_data.length); for (uint256 i = 0; i < _data.length; i++) { bytes1 b = _data[i]; @@ -248,7 +284,18 @@ contract HdpExecutionStoreTest is Test { tasksCommitments, taskResults ) = abi.decode( - abiEncoded, (uint256[], uint256[], bytes32[], bytes32, bytes32, bytes32[][], bytes32[][], bytes32[], bytes32[]) + abiEncoded, + ( + uint256[], + uint256[], + bytes32[], + bytes32, + bytes32, + bytes32[][], + bytes32[][], + bytes32[], + bytes32[] + ) ); } }
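
Reviewer note (not part of the patch): a minimal, self-contained sketch of how the new module task type introduced above round-trips through ModuleComputeCodecs and the TaskCode tag from src/datatypes/Task.sol. It assumes the same Foundry/forge-std harness the existing tests use; the contract name, classHash, and input values are illustrative placeholders only.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.4;

import {Test} from "forge-std/Test.sol";
import {TaskCode} from "../src/datatypes/Task.sol";
import {ModuleCompute, ModuleComputeCodecs} from "../src/datatypes/module/ModuleCodecs.sol";

contract ModuleCodecsSketchTest is Test {
    using ModuleComputeCodecs for ModuleCompute;

    function testModuleTaskRoundTrip() public {
        // Illustrative module: a class hash and two felt inputs.
        uint256[] memory inputs = new uint256[](2);
        inputs[0] = 5605816;
        inputs[1] = 12;
        ModuleCompute memory module = ModuleCompute({
            classHash: uint256(0x1234),
            inputs: inputs
        });

        // encode_task prefixes the payload with TaskCode.Module so the
        // preprocessor can tell module tasks apart from datalake tasks.
        bytes memory encoded = module.encode_task();
        (TaskCode code, , ) = abi.decode(
            encoded,
            (TaskCode, uint256, uint256[])
        );
        assertEq(uint256(code), uint256(TaskCode.Module));

        // The commitment hashes only (classHash, inputs); the TaskCode tag
        // is not part of it.
        assertEq(
            module.commit(),
            keccak256(abi.encode(module.classHash, module.inputs))
        );

        // decode drops the tag and recovers the struct.
        ModuleCompute memory decoded = ModuleComputeCodecs.decode(encoded);
        assertEq(decoded.classHash, module.classHash);
        assertEq(decoded.inputs.length, module.inputs.length);
        assertEq(decoded.inputs[0], module.inputs[0]);
    }
}

The datalake counterpart, ComputationalTaskCodecs.encode_task, follows the same pattern: it wraps the encoded datalake together with the compute payload under a TaskCode tag.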