Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove the EIP-4844 Toggle From Decompression Circuit #714

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 6 additions & 8 deletions prover/backend/blobdecompression/prove.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ import (
emPlonk "github.com/consensys/gnark/std/recursion/plonk"
)

// Generates a concrete proof for the decompression of the blob
// Prove generates a concrete proof for the decompression of the blob
func Prove(cfg *config.Config, req *Request) (*Response, error) {

// Parsing / validating the request
Expand Down Expand Up @@ -74,13 +74,11 @@ func Prove(cfg *config.Config, req *Request) (*Response, error) {
return nil, fmt.Errorf("could not parse the snark hash: %w", err)
}

assignment, pubInput, _snarkHash, err := blobdecompression.Assign(
utils.RightPad(blobBytes, expectedMaxUsableBytes),
dictStore,
req.Eip4844Enabled,
xBytes,
y,
)
if !req.Eip4844Enabled {
return nil, fmt.Errorf("EIP-4844 is mandatory")
}

assignment, pubInput, _snarkHash, err := blobdecompression.Assign(utils.RightPad(blobBytes, expectedMaxUsableBytes), dictStore, xBytes, y)

if err != nil {
return nil, fmt.Errorf("while generating the assignment: %w", err)
Expand Down
52 changes: 4 additions & 48 deletions prover/backend/blobsubmission/blobcompression_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package blobsubmission
import (
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"os"
Expand All @@ -19,8 +20,6 @@ import (
)

const (
_inFile = "./samples/sample0.json"
_outFile = "./samples/sample0-response.json"
_inFileEIP4844 = "./samples/sample1.json"
_outFileEIP4844 = "./samples/sample1-response.json"
_inFileEIP4844MaxSize = "./samples/sample-max-size.json"
Expand All @@ -30,49 +29,6 @@ const (
_inFileEIP4844TooLarge = "./samples/sample-too-large.json"
)

// blobsubmission with callData
// eip4844Enabled=false
func TestBlobSubmission(t *testing.T) {
	inFile, err := os.Open(_inFile)
	if err != nil {
		t.Fatalf("could not open %s: %v", _inFile, err)
	}
	defer inFile.Close()

	outFile, err := os.Open(_outFile)
	if err != nil {
		t.Fatalf("could not open %s: %v", _outFile, err)
	}
	defer outFile.Close()

	// Decode the request fixture and the expected response fixture.
	var req Request
	if err = json.NewDecoder(inFile).Decode(&req); err != nil {
		t.Fatalf("could not decode %++v: %v", req, err)
	}

	var want Response
	if err = json.NewDecoder(outFile).Decode(&want); err != nil {
		t.Fatalf("could not decode %++v: %v", want, err)
	}

	// Craft the response and compare it against the golden file.
	got, err := CraftResponse(&req)
	if assert.NoErrorf(t, err, "could not craft the response: %v", err) {
		assert.Equal(t, want, *got, "the response file should be the same")
	}

	// Bail out after the first failing file so the logs stay readable.
	if t.Failed() {
		t.Fatalf("Got errors for file %s, stopping the test", _outFile)
	}
}

// eip4844 blob submission
// eip4844Enabled = true
func TestBlobSubmissionEIP4844(t *testing.T) {
Expand Down Expand Up @@ -142,7 +98,7 @@ func TestBlobSubmissionEIP4844EmptyBlob(t *testing.T) {
t.Fatalf("could not decode %++v: %v", outExpected, err)
}

compressedStream, _ := b64.DecodeString(inp.CompressedData)
compressedStream, _ := base64.StdEncoding.DecodeString(inp.CompressedData)
// Check if len(compressedStream) is equal to 0
expectedLength := 0
actualLength := len(compressedStream)
Expand Down Expand Up @@ -189,7 +145,7 @@ func TestBlobSubmissionEIP4844MaxSize(t *testing.T) {
t.Fatalf("could not decode %++v: %v", _outFileEIP4844MaxSize, err)
}

compressedStream, _ := b64.DecodeString(inp.CompressedData)
compressedStream, _ := base64.StdEncoding.DecodeString(inp.CompressedData)
// Check if len(compressedStream) is equal to 131072
expectedLength := 131072
actualLength := len(compressedStream)
Expand Down Expand Up @@ -235,7 +191,7 @@ func TestBlobSubmissionEIP4844BlobTooLarge(t *testing.T) {
return
}

compressedStream, _ := b64.DecodeString(inp.CompressedData)
compressedStream, _ := base64.StdEncoding.DecodeString(inp.CompressedData)
// Check if the error message contains the expected substring
expectedErrorMsg := fmt.Sprintf("compressedStream length (%d) exceeds blob length (%d)", len(compressedStream), len(blob))
if !strings.Contains(err.Error(), expectedErrorMsg) {
Expand Down
176 changes: 92 additions & 84 deletions prover/backend/blobsubmission/craft.go
Original file line number Diff line number Diff line change
@@ -1,27 +1,29 @@
package blobsubmission

import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"hash"

"github.com/consensys/linea-monorepo/prover/crypto/mimc"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"

fr381 "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/consensys/linea-monorepo/prover/lib/compressor/blob/encode"
blob "github.com/consensys/linea-monorepo/prover/lib/compressor/blob/v1"
"github.com/consensys/linea-monorepo/prover/utils"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"golang.org/x/crypto/sha3"
"hash"
)

var b64 = base64.StdEncoding

// Prepare a response object by computing all the fields except for the proof.
func CraftResponseCalldata(req *Request) (*Response, error) {
func CraftResponse(req *Request) (*Response, error) {
if req == nil {
return nil, errors.New("crafting response: request must not be nil")
}

if !req.Eip4844Enabled { // no longer supported
return nil, errors.New("EIP-4844 is mandatory")
}

// Flat pass the request parameters to the response
var (
errs [4]error
Expand All @@ -35,7 +37,7 @@ func CraftResponseCalldata(req *Request) (*Response, error) {
parentZkRootHash, errs[0] = utils.HexDecodeString(req.ParentStateRootHash)
newZkRootHash, errs[1] = utils.HexDecodeString(req.FinalStateRootHash)
prevShnarf, errs[2] = utils.HexDecodeString(req.PrevShnarf)
compressedStream, errs[3] = b64.DecodeString(req.CompressedData)
compressedStream, errs[3] = base64.StdEncoding.DecodeString(req.CompressedData)

// Collect and wrap the errors if any, so that we get a friendly error message
if errors.Join(errs[:]...) != nil {
Expand All @@ -53,88 +55,129 @@ func CraftResponseCalldata(req *Request) (*Response, error) {
errsFiltered = append(errsFiltered, fmt.Errorf("bad compressed data: %w", errs[3]))
}
return nil, fmt.Errorf("crafting response:\n%w", errors.Join(errsFiltered...))

}

resp := &Response{
ConflationOrder: req.ConflationOrder,
// Reencode all the parameters to ensure that they are in 0x prefixed format
CompressedData: b64.EncodeToString(compressedStream),
ParentStateRootHash: utils.HexEncodeToString(parentZkRootHash),
FinalStateRootHash: utils.HexEncodeToString(newZkRootHash),
DataParentHash: req.DataParentHash,
PrevShnarf: utils.HexEncodeToString(prevShnarf),
Eip4844Enabled: req.Eip4844Enabled, // this is guaranteed to be false
// Pass the hex for empty commitments and proofs instead of passing
// empty string so that the response is always a valid hex string.
KzgProofContract: "0x",
KzgProofSidecar: "0x",
Commitment: "0x",
Eip4844Enabled: req.Eip4844Enabled, // this is guaranteed to be true
}

// copy compressedStream to kzg4844 blobPadded type
// check boundary conditions and add padding if necessary
blobPadded, err := compressedStreamToBlob(compressedStream)
if err != nil {
formatStr := "crafting response: compressedStreamToBlob: %w"
return nil, fmt.Errorf(formatStr, err)
}

// BlobToCommitment creates a commitment out of a data blob.
commitment, err := kzg4844.BlobToCommitment(&blobPadded)
if err != nil {
formatStr := "crafting response: BlobToCommitment: %w"
return nil, fmt.Errorf(formatStr, err)
}

// blobHash
blobHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment)
if !kzg4844.IsValidVersionedHash(blobHash[:]) {
formatStr := "crafting response: invalid versionedHash (blobHash, dataHash): %w"
return nil, fmt.Errorf(formatStr, err)
}

// Compute all the prover fields
snarkHash, err := encode.MiMCChecksumPackedData(compressedStream, fr381.Bits-1, encode.NoTerminalSymbol())
snarkHash, err := encode.MiMCChecksumPackedData(append(compressedStream, make([]byte, blob.MaxUsableBytes-len(compressedStream))...), fr381.Bits-1, encode.NoTerminalSymbol())
if err != nil {
return nil, fmt.Errorf("crafting response: could not compute snark hash: %w", err)
}

keccakHash := utils.KeccakHash(compressedStream)
x := evaluationChallenge(snarkHash, keccakHash)
y, err := EvalStream(compressedStream, x)
// ExpectedX
// Perform the modular reduction before passing to `ComputeProof`. That's needed because ComputeProof expects a reduced
// x point and our x point comes out of Keccak. Thus, it has no reason to be a valid field element as is.
// importantly, do not use `SetByteCanonical` as it will return an error because it expects a reduced input
xUnreduced := evaluationChallenge(snarkHash, blobHash[:])
var tmp fr381.Element
tmp.SetBytes(xUnreduced[:])
xPoint := kzg4844.Point(tmp.Bytes())

// KZG Proof Contract
kzgProofContract, yClaim, err := kzg4844.ComputeProof(&blobPadded, xPoint)
if err != nil {
errorMsg := "crafting response: could not compute y: %w"
return nil, fmt.Errorf(errorMsg, err)
formatStr := "kzgProofContract: kzg4844.ComputeProof error: %w"
return nil, fmt.Errorf(formatStr, err)
}

// ExpectedY
// A claimed evaluation value in a specific point.
y := make([]byte, len(yClaim))
copy(y[:], yClaim[:])

// KZG Proof Sidecar
kzgProofSidecar, err := kzg4844.ComputeBlobProof(&blobPadded, commitment)
if err != nil {
formatStr := "kzgProofSidecar: kzg4844.ComputeBlobProof error: %w"
return nil, fmt.Errorf(formatStr, err)
}

// newShnarf
parts := Shnarf{
OldShnarf: prevShnarf,
SnarkHash: snarkHash,
NewStateRootHash: newZkRootHash,
Y: y,
X: x,
X: xUnreduced,
}
if err = parts.Y.SetBytesCanonical(y); err != nil {
return nil, err
}
newShnarf := parts.Compute()

// Assign all the fields in the input
resp.DataHash = utils.HexEncodeToString(keccakHash)

// We return the unpadded blob-data and leave the coordinator the responsibility
// to perform the padding operation.
resp.CompressedData = req.CompressedData
resp.Commitment = utils.HexEncodeToString(commitment[:])
resp.KzgProofContract = utils.HexEncodeToString(kzgProofContract[:])
resp.KzgProofSidecar = utils.HexEncodeToString(kzgProofSidecar[:])
resp.DataHash = utils.HexEncodeToString(blobHash[:])
resp.SnarkHash = utils.HexEncodeToString(snarkHash)
xBytes, yBytes := x, y.Bytes()
resp.ExpectedX = utils.HexEncodeToString(xBytes)
resp.ExpectedY = utils.HexEncodeToString(yBytes[:])
resp.ExpectedX = utils.HexEncodeToString(xUnreduced)
resp.ExpectedY = utils.HexEncodeToString(y)
resp.ExpectedShnarf = utils.HexEncodeToString(newShnarf)

return resp, nil
}

// TODO @gbotrel this is not used? confirm with @Tabaie / @AlexandreBelling
// Computes the SNARK hash of a stream of byte. Returns the hex string. The hash
// can fail if the input stream does not have the right format.
func snarkHashV0(stream []byte) ([]byte, error) {
h := mimc.NewMiMC()

const blobBytes = 4096 * 32

if len(stream) > blobBytes {
return nil, fmt.Errorf("the compressed blob is too large : %v bytes, the limit is %v bytes", len(stream), blobBytes)
// Blob is populated with the compressedStream (with padding)
func compressedStreamToBlob(compressedStream []byte) (blob kzg4844.Blob, err error) {
// Error is returned when len(compressedStream) is larger than the 4844 data blob [131072]byte
if len(compressedStream) > len(blob) {
return blob, fmt.Errorf("compressedStream length (%d) exceeds blob length (%d)", len(compressedStream), len(blob))
}

if _, err := h.Write(stream); err != nil {
return nil, fmt.Errorf("cannot generate Snarkhash of the string `%x`, MiMC failed : %w", stream, err)
}
// Copy compressedStream to blob, padding with zeros
copy(blob[:len(compressedStream)], compressedStream)

// @alex: for consistency with the circuit, we need to hash the whole input
// stream padded.
if len(stream) < blobBytes {
h.Write(make([]byte, blobBytes-len(stream)))
// Sanity-check that the blob is right-padded with zeroes
for i := len(compressedStream); i < len(blob); i++ {
if blob[i] != 0 {
utils.Panic("blob not padded correctly at index blob[%d]", i)
}
}
return h.Sum(nil), nil
return blob, nil

}

// Returns an evaluation challenge point from a SNARK hash and a blob hash. The
// evaluation challenge is obtained as the hash of the SnarkHash and the keccak
// hash (or the blob hash once we go EIP4844) in that order. The digest is
// returned as a field element modulo the scalar field of the curve BLS12-381.
func evaluationChallenge(snarkHash, keccakHash []byte) (x []byte) {

// Use the keccak hash
h := sha3.NewLegacyKeccak256()
h.Write(snarkHash)
Expand All @@ -143,43 +186,8 @@ func evaluationChallenge(snarkHash, keccakHash []byte) (x []byte) {
return d
}

// Casts the byte stream into a vector of field elements and performs a
// polynomial evaluation in the scalar field of BLS12-381. The bytes are split
// in chunks of 32 bytes, each representing a field element in bigendian order;
// the stream length must therefore be a multiple of 32 or an error is
// returned. The input x is taken as an array of 32 bytes because the
// smart-contract generating it will be using the result of the keccak
// directly. The modular reduction is implicitly done during the evaluation of
// the compressed data polynomial representation.
// EvalStream evaluates, at point x_, the polynomial whose coefficients are the
// consecutive 32-byte chunks of stream (first chunk = constant term, each chunk
// read as a big-endian canonical field element of BLS12-381's scalar field).
// The stream length must be a multiple of 32. x_ is reduced modulo the field
// before use, since it typically comes straight out of a keccak digest.
func EvalStream(stream []byte, x_ []byte) (fr381.Element, error) {
	const chunkSize = 32

	if len(stream)%chunkSize != 0 {
		return fr381.Element{}, fmt.Errorf("stream length must be a multiple of 32; received length: %d", len(stream))
	}

	var x, y fr381.Element
	x.SetBytes(x_) // implicit modular reduction of the evaluation point

	// Horner's rule, consuming the highest-degree coefficient (the last
	// chunk) first. y starts at the zero element.
	for pos := len(stream); pos > 0; pos -= chunkSize {
		var coeff fr381.Element
		if err := coeff.SetBytesCanonical(stream[pos-chunkSize : pos]); err != nil {
			return fr381.Element{}, fmt.Errorf("stream is invalid: %w", err)
		}
		// Skip the multiplication for the very first (highest-degree) term.
		if pos != len(stream) {
			y.Mul(&y, &x)
		}
		y.Add(&y, &coeff)
	}

	return y, nil
}

// schnarfParts wrap the arguments needed to create a new Shnarf by calling
// the NewSchnarf() function.
// Shnarf wrap the arguments needed to create a new Shnarf by calling
// the NewShnarf() function.
type Shnarf struct {
OldShnarf, SnarkHash, NewStateRootHash []byte
X []byte
Expand Down
Loading
Loading