Skip to content

Commit

Permalink
refactor type, module
Browse files Browse the repository at this point in the history
  • Loading branch information
rkdud007 committed Jul 17, 2024
1 parent ec98fe7 commit c47440c
Show file tree
Hide file tree
Showing 11 changed files with 416 additions and 229 deletions.
127 changes: 89 additions & 38 deletions src/HdpExecutionStore.sol
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,9 @@ import {IFactsRegistry} from "./interfaces/IFactsRegistry.sol";
import {ISharpFactsAggregator} from "./interfaces/ISharpFactsAggregator.sol";
import {IAggregatorsFactory} from "./interfaces/IAggregatorsFactory.sol";

import {BlockSampledDatalake, BlockSampledDatalakeCodecs} from "./datatypes/BlockSampledDatalakeCodecs.sol";
import {
TransactionsInBlockDatalake,
TransactionsInBlockDatalakeCodecs
} from "./datatypes/TransactionsInBlockDatalakeCodecs.sol";
import {IterativeDynamicLayoutDatalake} from "./datatypes/IterativeDynamicLayoutDatalakeCodecs.sol";
import {IterativeDynamicLayoutDatalakeCodecs} from "./datatypes/IterativeDynamicLayoutDatalakeCodecs.sol";
import {ComputationalTask, ComputationalTaskCodecs} from "./datatypes/ComputationalTaskCodecs.sol";
import {BlockSampledDatalake, BlockSampledDatalakeCodecs} from "./datatypes/datalake/BlockSampledDatalakeCodecs.sol";
import {TransactionsInBlockDatalake, TransactionsInBlockDatalakeCodecs} from "./datatypes/datalake/TransactionsInBlockDatalakeCodecs.sol";
import {ComputationalTask, ComputationalTaskCodecs} from "./datatypes/datalake/ComputeCodecs.sol";

/// Caller is not authorized to perform the action
error Unauthorized();
Expand All @@ -35,7 +30,6 @@ contract HdpExecutionStore is AccessControl {
using MerkleProof for bytes32[];
using BlockSampledDatalakeCodecs for BlockSampledDatalake;
using TransactionsInBlockDatalakeCodecs for TransactionsInBlockDatalake;
using IterativeDynamicLayoutDatalakeCodecs for IterativeDynamicLayoutDatalake;
using ComputationalTaskCodecs for ComputationalTask;

/// @notice The status of a task
Expand All @@ -55,10 +49,16 @@ contract HdpExecutionStore is AccessControl {
event MmrRootCached(uint256 mmrId, uint256 mmrSize, bytes32 mmrRoot);

/// @notice emitted when a new task with block sampled datalake is scheduled
event TaskWithBlockSampledDatalakeScheduled(BlockSampledDatalake datalake, ComputationalTask task);
event TaskWithBlockSampledDatalakeScheduled(
BlockSampledDatalake datalake,
ComputationalTask task
);

/// @notice emitted when a new task with transactions in block datalake is scheduled
event TaskWithTransactionsInBlockDatalakeScheduled(TransactionsInBlockDatalake datalake, ComputationalTask task);
event TaskWithTransactionsInBlockDatalakeScheduled(
TransactionsInBlockDatalake datalake,
ComputationalTask task
);

/// @notice constant representing role of operator
bytes32 public constant OPERATOR_ROLE = keccak256("OPERATOR_ROLE");
Expand All @@ -79,9 +79,14 @@ contract HdpExecutionStore is AccessControl {
mapping(bytes32 => TaskResult) public cachedTasksResult;

/// @notice mapping of chain id => mmr id => mmr size => mmr root
mapping(uint256 => mapping(uint256 => mapping(uint256 => bytes32))) public cachedMMRsRoots;

constructor(IFactsRegistry factsRegistry, IAggregatorsFactory aggregatorsFactory, bytes32 programHash) {
mapping(uint256 => mapping(uint256 => mapping(uint256 => bytes32)))
public cachedMMRsRoots;

constructor(
IFactsRegistry factsRegistry,
IAggregatorsFactory aggregatorsFactory,
bytes32 programHash
) {
SHARP_FACTS_REGISTRY = factsRegistry;
AGGREGATORS_FACTORY = aggregatorsFactory;
PROGRAM_HASH = programHash;
Expand All @@ -99,11 +104,20 @@ contract HdpExecutionStore is AccessControl {
/// @notice Caches the MMR root for a given MMR id
/// @notice Get MMR size and root from the aggregator and cache it
function cacheMmrRoot(uint256 mmrId) public {
ISharpFactsAggregator aggregator = AGGREGATORS_FACTORY.aggregatorsById(mmrId);
ISharpFactsAggregator.AggregatorState memory aggregatorState = aggregator.aggregatorState();
cachedMMRsRoots[SEPOLIA_CHAIN_ID][mmrId][aggregatorState.mmrSize] = aggregatorState.poseidonMmrRoot;

emit MmrRootCached(mmrId, aggregatorState.mmrSize, aggregatorState.poseidonMmrRoot);
ISharpFactsAggregator aggregator = AGGREGATORS_FACTORY.aggregatorsById(
mmrId
);
ISharpFactsAggregator.AggregatorState
memory aggregatorState = aggregator.aggregatorState();
cachedMMRsRoots[SEPOLIA_CHAIN_ID][mmrId][
aggregatorState.mmrSize
] = aggregatorState.poseidonMmrRoot;

emit MmrRootCached(
mmrId,
aggregatorState.mmrSize,
aggregatorState.poseidonMmrRoot
);
}

/// @notice Requests the execution of a task with a block sampled datalake
Expand All @@ -122,9 +136,15 @@ contract HdpExecutionStore is AccessControl {
}

// Store the task result
cachedTasksResult[taskCommitment] = TaskResult({status: TaskStatus.SCHEDULED, result: ""});

emit TaskWithBlockSampledDatalakeScheduled(blockSampledDatalake, computationalTask);
cachedTasksResult[taskCommitment] = TaskResult({
status: TaskStatus.SCHEDULED,
result: ""
});

emit TaskWithBlockSampledDatalakeScheduled(
blockSampledDatalake,
computationalTask
);
}

/// @notice Requests the execution of a task with a transactions in block datalake
Expand All @@ -143,9 +163,15 @@ contract HdpExecutionStore is AccessControl {
}

// Store the task result
cachedTasksResult[taskCommitment] = TaskResult({status: TaskStatus.SCHEDULED, result: ""});

emit TaskWithTransactionsInBlockDatalakeScheduled(transactionsInBlockDatalake, computationalTask);
cachedTasksResult[taskCommitment] = TaskResult({
status: TaskStatus.SCHEDULED,
result: ""
});

emit TaskWithTransactionsInBlockDatalakeScheduled(
transactionsInBlockDatalake,
computationalTask
);
}

/// @notice Authenticates the execution of a task is finalized
Expand All @@ -172,7 +198,7 @@ contract HdpExecutionStore is AccessControl {
bytes32[] calldata taskCommitments,
bytes32[] calldata taskResults
) external onlyOperator {
assert (mmrIds.length == mmrSizes.length);
assert(mmrIds.length == mmrSizes.length);

// Initialize an array of uint256 to store the program output
uint256[] memory programOutput = new uint256[](4 + mmrIds.length * 4);
Expand All @@ -195,7 +221,9 @@ contract HdpExecutionStore is AccessControl {
bytes32 programOutputHash = keccak256(abi.encodePacked(programOutput));

// Compute GPS fact hash
bytes32 gpsFactHash = keccak256(abi.encode(PROGRAM_HASH, programOutputHash));
bytes32 gpsFactHash = keccak256(
abi.encode(PROGRAM_HASH, programOutputHash)
);

// Ensure GPS fact is registered
if (!SHARP_FACTS_REGISTRY.isValid(gpsFactHash)) {
Expand All @@ -209,42 +237,63 @@ contract HdpExecutionStore is AccessControl {
bytes32[] memory resultInclusionProof = resultsInclusionProofs[i];

// Convert the low and high 128 bits to a single 256 bit value
bytes32 resultMerkleRoot = bytes32((resultMerkleRootHigh << 128) | resultMerkleRootLow);
bytes32 taskMerkleRoot = bytes32((taskMerkleRootHigh << 128) | taskMerkleRootLow);
bytes32 resultMerkleRoot = bytes32(
(resultMerkleRootHigh << 128) | resultMerkleRootLow
);
bytes32 taskMerkleRoot = bytes32(
(taskMerkleRootHigh << 128) | taskMerkleRootLow
);

// Compute the Merkle leaf of the task
bytes32 taskCommitment = taskCommitments[i];
bytes32 taskMerkleLeaf = standardLeafHash(taskCommitment);
// Ensure that the task is included in the batch, by verifying the Merkle proof
bool isVerifiedTask = taskInclusionProof.verify(taskMerkleRoot, taskMerkleLeaf);
bool isVerifiedTask = taskInclusionProof.verify(
taskMerkleRoot,
taskMerkleLeaf
);

if (!isVerifiedTask) {
revert NotInBatch();
}

// Compute the Merkle leaf of the task result
bytes32 taskResultCommitment = keccak256(abi.encode(taskCommitment, computationalTaskResult));
bytes32 taskResultMerkleLeaf = standardLeafHash(taskResultCommitment);
bytes32 taskResultCommitment = keccak256(
abi.encode(taskCommitment, computationalTaskResult)
);
bytes32 taskResultMerkleLeaf = standardLeafHash(
taskResultCommitment
);
// Ensure that the task result is included in the batch, by verifying the Merkle proof
bool isVerifiedResult = resultInclusionProof.verify(resultMerkleRoot, taskResultMerkleLeaf);
bool isVerifiedResult = resultInclusionProof.verify(
resultMerkleRoot,
taskResultMerkleLeaf
);

if (!isVerifiedResult) {
revert NotInBatch();
}

// Store the task result
cachedTasksResult[taskCommitment] =
TaskResult({status: TaskStatus.FINALIZED, result: computationalTaskResult});
cachedTasksResult[taskCommitment] = TaskResult({
status: TaskStatus.FINALIZED,
result: computationalTaskResult
});
}
}

/// @notice Load MMR root from cache with given mmrId and mmrSize
function loadMmrRoot(uint256 mmrId, uint256 mmrSize) public view returns (bytes32) {
function loadMmrRoot(
uint256 mmrId,
uint256 mmrSize
) public view returns (bytes32) {
return cachedMMRsRoots[SEPOLIA_CHAIN_ID][mmrId][mmrSize];
}

/// @notice Returns the result of a finalized task
function getFinalizedTaskResult(bytes32 taskCommitment) external view returns (bytes32) {
function getFinalizedTaskResult(
bytes32 taskCommitment
) external view returns (bytes32) {
// Ensure task is finalized
if (cachedTasksResult[taskCommitment].status != TaskStatus.FINALIZED) {
revert NotFinalized();
Expand All @@ -253,7 +302,9 @@ contract HdpExecutionStore is AccessControl {
}

/// @notice Returns the status of a task
function getTaskStatus(bytes32 taskCommitment) external view returns (TaskStatus) {
function getTaskStatus(
bytes32 taskCommitment
) external view returns (TaskStatus) {
return cachedTasksResult[taskCommitment].status;
}

Expand Down
64 changes: 0 additions & 64 deletions src/datatypes/ComputationalTaskCodecs.sol

This file was deleted.

51 changes: 0 additions & 51 deletions src/datatypes/IterativeDynamicLayoutDatalakeCodecs.sol

This file was deleted.

8 changes: 8 additions & 0 deletions src/datatypes/Task.sol
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.4;

/// @notice Discriminator for the kind of task handled by the execution store.
/// @dev Introduced in the `src/datatypes` refactor alongside the removal of the
///      per-datalake codec files; presumably mirrors a task-type tag used by the
///      off-chain HDP encoder — TODO confirm against the encoding spec.
enum TaskCode {
    // Task defined over a datalake (e.g. block-sampled or transactions-in-block,
    // per the codecs imported in HdpExecutionStore.sol).
    Datalake,
    // Task defined by a module. NOTE(review): no module handling is visible in
    // this diff — verify semantics against the module datatype/codec.
    Module
}
Loading

0 comments on commit c47440c

Please sign in to comment.