31 changes: 30 additions & 1 deletion core/txpool/blobpool/blobpool.go
@@ -1397,17 +1397,46 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
return available
}

// convertSidecar converts the legacy sidecars in the submitted transactions
// if the Osaka fork has been activated.
func (p *BlobPool) convertSidecar(txs []*types.Transaction) ([]*types.Transaction, []error) {
head := p.chain.CurrentBlock()
if !p.chain.Config().IsOsaka(head.Number, head.Time) {
return txs, make([]error, len(txs))
}
var errs []error
for _, tx := range txs {
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
errs = append(errs, errors.New("missing sidecar in blob transaction"))
continue
}
if sidecar.Version == types.BlobSidecarVersion0 {
if err := sidecar.ToV1(); err != nil {
errs = append(errs, err)
continue
}
}
errs = append(errs, nil)
}
return txs, errs
}

// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions).
//
// Note, if sync is set the method will block until all internal maintenance
// related to the add is finished. Only use this during tests for determinism.
func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
var (
errs []error
adds = make([]*types.Transaction, 0, len(txs))
errs = make([]error, len(txs))
)
txs, errs = p.convertSidecar(txs)
for i, tx := range txs {
if errs[i] != nil {
continue
}
Member Author:

One part is still missing:

During the Osaka activation, we should somehow convert the legacy sidecars to the new format. It should be done in a follow-up PR.
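
A hedged sketch of what that follow-up conversion pass might look like, reusing the ToV1 helper this PR relies on. The method name and the allTransactions accessor are hypothetical, standing in for however the pool would actually walk its resident transactions:

// Hedged sketch (not part of this PR): upgrade legacy sidecars already resident
// in the pool once the Osaka fork activates. allTransactions is a hypothetical
// accessor; the real pool would iterate its internal index instead.
func (p *BlobPool) upgradeResidentSidecars() {
	for _, tx := range p.allTransactions() { // hypothetical accessor
		sidecar := tx.BlobTxSidecar()
		if sidecar == nil || sidecar.Version != types.BlobSidecarVersion0 {
			continue
		}
		if err := sidecar.ToV1(); err != nil { // recompute the proofs as cell proofs
			log.Warn("Failed to upgrade legacy blob sidecar", "tx", tx.Hash(), "err", err)
		}
	}
}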

errs[i] = p.add(tx)
if errs[i] == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
84 changes: 74 additions & 10 deletions core/txpool/blobpool/blobpool_test.go
@@ -27,6 +27,7 @@ import (
"os"
"path/filepath"
"reflect"
"slices"
"sync"
"testing"

@@ -47,11 +48,12 @@ import (
)

var (
testBlobs []*kzg4844.Blob
testBlobCommits []kzg4844.Commitment
testBlobProofs []kzg4844.Proof
testBlobVHashes [][32]byte
testBlobIndices = make(map[[32]byte]int)
testBlobs []*kzg4844.Blob
testBlobCommits []kzg4844.Commitment
testBlobProofs []kzg4844.Proof
testBlobCellProofs [][]kzg4844.Proof
testBlobVHashes [][32]byte
testBlobIndices = make(map[[32]byte]int)
)

const testMaxBlobsPerBlock = 6
@@ -67,6 +69,9 @@ func init() {
testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
testBlobProofs = append(testBlobProofs, testBlobProof)

testBlobCellProof, _ := kzg4844.ComputeCellProofs(testBlob)
testBlobCellProofs = append(testBlobCellProofs, testBlobCellProof)

testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
testBlobIndices[testBlobVHash] = len(testBlobVHashes)
testBlobVHashes = append(testBlobVHashes, testBlobVHash)
@@ -416,24 +421,40 @@ func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
hashes = append(hashes, tx.vhashes...)
}
}
blobs, _, proofs, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
if err != nil {
t.Fatal(err)
}
blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1)
if err != nil {
t.Fatal(err)
}
// Cross validate what we received vs what we wanted
if len(blobs) != len(hashes) || len(proofs) != len(hashes) {
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), len(hashes))
if len(blobs1) != len(hashes) || len(proofs1) != len(hashes) {
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs1), len(proofs1), len(hashes))
return
}
if len(blobs2) != len(hashes) || len(proofs2) != len(hashes) {
t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want blobs %d, want proofs: %d", len(blobs2), len(proofs2), len(hashes), len(hashes))
return
}
for i, hash := range hashes {
// If an item is missing, but shouldn't, error
if blobs[i] == nil || proofs[i] == nil {
if blobs1[i] == nil || proofs1[i] == nil {
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
continue
}
if blobs2[i] == nil || proofs2[i] == nil {
t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
continue
}
// Item retrieved, make sure it matches the expectation
index := testBlobIndices[hash]
if *blobs[i] != *testBlobs[index] || proofs[i][0] != testBlobProofs[index] {
if *blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index] {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue
}
if *blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index]) {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue
}
@@ -1668,6 +1689,49 @@ func TestAdd(t *testing.T) {
}
}

// Tests that transactions with legacy sidecars are converted to the new
// format correctly when added to the pool.
func TestAddLegacyBlobTx(t *testing.T) {
var (
key1, _ = crypto.GenerateKey()
key2, _ = crypto.GenerateKey()

addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
)

statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true, false)

chain := &testBlockChain{
config: params.MergedTestChainConfig,
basefee: uint256.NewInt(1050),
blobfee: uint256.NewInt(105),
statedb: statedb,
}
pool := New(Config{Datadir: t.TempDir()}, chain, nil)
if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err)
}

// Attempt to add legacy blob transactions.
var (
tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0)
tx3 = makeMultiBlobTx(1, 1, 800, 70, 6, 12, key2, types.BlobSidecarVersion1)
)
errs := pool.Add([]*types.Transaction{tx1, tx2, tx3}, true)
for _, err := range errs {
if err != nil {
t.Fatalf("failed to add tx: %v", err)
}
}
verifyPoolInternals(t, pool)
pool.Close()
}

func TestGetBlobs(t *testing.T) {
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))

6 changes: 2 additions & 4 deletions core/txpool/validation.go
@@ -22,7 +22,6 @@ import (
"math/big"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -167,9 +166,8 @@ func validateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationO
if len(hashes) == 0 {
return errors.New("blobless blob transaction")
}
maxBlobs := eip4844.MaxBlobsPerBlock(opts.Config, head.Time)
if len(hashes) > maxBlobs {
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), maxBlobs)
if len(hashes) > params.BlobTxMaxBlobs {
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.BlobTxMaxBlobs)
Member:

What was the purpose of this change? I think you need to use the config to determine the correct max number of blobs?

Member Author:

Yeah, it's a very good question.

Originally, we capped the number of blobs within a single tx at eip4844.MaxBlobsPerBlock.

Then the PeerDAS EIP introduced the notion that the blob count of a single tx is capped at a constant 6:

Additionally, a limit of 6 blobs per transaction is introduced. Clients MUST enforce this limit when validating blob transactions at submission time, when received from the network, and during block production and processing.

So the original check is no longer meaningful, at least after the PeerDAS EIP.

In theory, we should apply the restriction accordingly:

  • Before PeerDAS, the blob count is capped by eip4844.MaxBlobsPerBlock
  • After PeerDAS, the blob count is capped by the constant threshold

In the txpool, we can simply unify the restriction under this constant threshold; it won't hurt much before the Osaka fork.

Please let me know whether this workaround makes sense.
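
For illustration, the fork-aware variant described in the bullets above could look roughly like the sketch below. The helper name maxBlobsPerTx is made up; every call it uses (IsOsaka, eip4844.MaxBlobsPerBlock, params.BlobTxMaxBlobs) already appears elsewhere in this diff, and the PR itself simply applies the constant cap unconditionally.

// Sketch of a fork-aware per-transaction blob cap, as discussed above.
func maxBlobsPerTx(config *params.ChainConfig, head *types.Header) int {
	if config.IsOsaka(head.Number, head.Time) {
		// After PeerDAS, the per-transaction blob count is capped at a constant.
		return params.BlobTxMaxBlobs
	}
	// Before PeerDAS, fall back to the per-block limit from EIP-4844.
	return eip4844.MaxBlobsPerBlock(config, head.Time)
}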

}
if len(sidecar.Blobs) != len(hashes) {
return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(sidecar.Blobs), len(hashes))
41 changes: 32 additions & 9 deletions internal/ethapi/api.go
@@ -1479,6 +1479,8 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c

// SendTransaction creates a transaction for the given argument, signs it and submits it to the
// transaction pool.
//
// This API is not capable of submitting blob transactions with sidecars.
func (api *TransactionAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.from()}
@@ -1499,7 +1501,7 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction
}

// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, api.b, false); err != nil {
if err := args.setDefaults(ctx, api.b, sidecarConfig{}); err != nil {
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
Expand All @@ -1516,10 +1518,19 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction
// on a given unsigned transaction, and returns it to the caller for further
// processing (signing + broadcast).
func (api *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
args.blobSidecarAllowed = true

// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, api.b, false); err != nil {
sidecarVersion := types.BlobSidecarVersion0
if len(args.Blobs) > 0 {
h := api.b.CurrentHeader()
if api.b.ChainConfig().IsOsaka(h.Number, h.Time) {
sidecarVersion = types.BlobSidecarVersion1
}
}
config := sidecarConfig{
blobSidecarAllowed: true,
blobSidecarVersion: sidecarVersion,
}
if err := args.setDefaults(ctx, api.b, config); err != nil {
return nil, err
}
// Assemble the transaction and obtain rlp
@@ -1576,8 +1587,6 @@ type SignTransactionResult struct {
// The node needs to have the private key of the account corresponding with
// the given from address and it needs to be unlocked.
func (api *TransactionAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
args.blobSidecarAllowed = true

if args.Gas == nil {
return nil, errors.New("gas not specified")
}
@@ -1587,7 +1596,19 @@ func (api *TransactionAPI) SignTransaction(ctx context.Context, args Transaction
if args.Nonce == nil {
return nil, errors.New("nonce not specified")
}
if err := args.setDefaults(ctx, api.b, false); err != nil {
sidecarVersion := types.BlobSidecarVersion0
if len(args.Blobs) > 0 {
h := api.b.CurrentHeader()
if api.b.ChainConfig().IsOsaka(h.Number, h.Time) {
sidecarVersion = types.BlobSidecarVersion1
}
}

config := sidecarConfig{
blobSidecarAllowed: true,
blobSidecarVersion: sidecarVersion,
}
if err := args.setDefaults(ctx, api.b, config); err != nil {
return nil, err
}
// Before actually sign the transaction, ensure the transaction fee is reasonable.
Expand All @@ -1603,7 +1624,7 @@ func (api *TransactionAPI) SignTransaction(ctx context.Context, args Transaction
// no longer retains the blobs, only the blob hashes. In this step, we need
// to put back the blob(s).
if args.IsEIP4844() {
signed = signed.WithBlobTxSidecar(types.NewBlobTxSidecar(types.BlobSidecarVersion0, args.Blobs, args.Commitments, args.Proofs))
signed = signed.WithBlobTxSidecar(types.NewBlobTxSidecar(sidecarVersion, args.Blobs, args.Commitments, args.Proofs))
Member Author:

Note, the legacy proofs submitted by users will be implicitly converted to cell proofs.
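
For context, a version-1 sidecar carries per-cell proofs computed from the raw blobs, so the conversion alluded to here can be pictured as the hedged sketch below. It uses only kzg4844.ComputeCellProofs and types.NewBlobTxSidecar as they appear elsewhere in this PR; the helper name and the exact internals of the real conversion are assumptions.

// Hedged sketch: assemble a version-1 sidecar by recomputing cell proofs from
// the raw blobs instead of reusing the legacy per-blob proofs.
func buildV1Sidecar(blobs []kzg4844.Blob, commitments []kzg4844.Commitment) (*types.BlobTxSidecar, error) {
	var cellProofs []kzg4844.Proof
	for i := range blobs {
		proofs, err := kzg4844.ComputeCellProofs(&blobs[i]) // cell proofs for one blob
		if err != nil {
			return nil, err
		}
		cellProofs = append(cellProofs, proofs...)
	}
	return types.NewBlobTxSidecar(types.BlobSidecarVersion1, blobs, commitments, cellProofs), nil
}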

}
data, err := signed.MarshalBinary()
if err != nil {
Expand Down Expand Up @@ -1638,11 +1659,13 @@ func (api *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) {

// Resend accepts an existing transaction and a new gas price and limit. It will remove
// the given transaction from the pool and reinsert it with the new gas price and limit.
//
// This API is not capable of submitting blob transactions with sidecars.
func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) {
if sendArgs.Nonce == nil {
return common.Hash{}, errors.New("missing transaction nonce in transaction spec")
}
if err := sendArgs.setDefaults(ctx, api.b, false); err != nil {
if err := sendArgs.setDefaults(ctx, api.b, sidecarConfig{}); err != nil {
return common.Hash{}, err
}
matchTx := sendArgs.ToTransaction(types.LegacyTxType)