From da7b970d3e54cc87ade4946de02d214c92f940ef Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Thu, 23 Apr 2026 15:21:32 +0200 Subject: [PATCH 1/6] add key rotation --- block/internal/executing/executor.go | 26 ++- block/internal/executing/executor_test.go | 95 ++++++++ block/internal/submitting/da_submitter.go | 9 +- .../internal/submitting/da_submitter_test.go | 91 ++++++++ block/internal/syncing/assert.go | 11 +- block/internal/syncing/da_retriever.go | 8 +- block/internal/syncing/p2p_handler.go | 12 +- block/internal/syncing/p2p_handler_test.go | 38 ++++ block/internal/syncing/raft_retriever.go | 2 +- docs/.vitepress/config.ts | 4 + docs/adr/adr-023-proposer-key-rotation.md | 151 +++++++++++++ docs/guides/create-genesis.md | 4 + .../operations/proposer-key-rotation.md | 195 ++++++++++++++++ docs/guides/operations/upgrades.md | 6 + node/failover.go | 2 +- node/full.go | 2 +- pkg/genesis/genesis.go | 34 ++- pkg/genesis/io.go | 4 +- pkg/genesis/proposer_schedule.go | 208 ++++++++++++++++++ pkg/genesis/proposer_schedule_test.go | 93 ++++++++ 20 files changed, 956 insertions(+), 39 deletions(-) create mode 100644 docs/adr/adr-023-proposer-key-rotation.md create mode 100644 docs/guides/operations/proposer-key-rotation.md create mode 100644 pkg/genesis/proposer_schedule.go create mode 100644 pkg/genesis/proposer_schedule_test.go diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index f5be5e1b40..4cc7f4984c 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -126,7 +126,7 @@ func NewExecutor( return nil, fmt.Errorf("failed to get address: %w", err) } - if !bytes.Equal(addr, genesis.ProposerAddress) { + if !genesis.HasScheduledProposer(addr) { return nil, common.ErrNotProposer } } @@ -696,6 +696,10 @@ func (e *Executor) RetrieveBatch(ctx context.Context) (*BatchData, error) { func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *BatchData) (*types.SignedHeader, 
*types.Data, error) { currentState := e.getLastState() headerTime := uint64(e.genesis.StartTime.UnixNano()) + proposer, err := e.genesis.ProposerAtHeight(height) + if err != nil { + return nil, nil, fmt.Errorf("resolve proposer for height %d: %w", height, err) + } var lastHeaderHash types.Hash var lastDataHash types.Hash @@ -728,22 +732,30 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba // Get signer info and validator hash var pubKey crypto.PubKey + var signerAddress []byte var validatorHash types.Hash if e.signer != nil { - var err error pubKey, err = e.signer.GetPublic() if err != nil { return nil, nil, fmt.Errorf("failed to get public key: %w", err) } - validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, pubKey) + signerAddress, err = e.signer.GetAddress() + if err != nil { + return nil, nil, fmt.Errorf("failed to get signer address: %w", err) + } + + if err := e.genesis.ValidateProposer(height, signerAddress, pubKey); err != nil { + return nil, nil, fmt.Errorf("signer does not match proposer schedule: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } } else { - var err error - validatorHash, err = e.options.ValidatorHasherProvider(e.genesis.ProposerAddress, nil) + validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, nil) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } @@ -763,13 +775,13 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba }, LastHeaderHash: lastHeaderHash, AppHash: currentState.AppHash, - ProposerAddress: e.genesis.ProposerAddress, + ProposerAddress: proposer.Address, ValidatorHash: validatorHash, }, Signature: lastSignature, Signer: types.Signer{ PubKey: pubKey, - Address: e.genesis.ProposerAddress, + Address: proposer.Address, }, } diff --git 
a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 1099cdb87d..cec6a3fecc 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,6 +1,7 @@ package executing import ( + "context" "testing" "time" @@ -12,6 +13,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" @@ -121,3 +123,96 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, executor.cache) assert.Equal(t, gen, executor.genesis) } + +func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + metrics := common.NopMetrics() + oldAddr, oldSignerInfo, _ := buildTestSigner(t) + newAddr, newSignerInfo, newSigner := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(2, newSignerInfo.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: entry1.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + executor, err := NewExecutor( + memStore, + nil, + nil, + newSigner, + cacheManager, + metrics, + config.DefaultConfig(), + gen, + nil, + nil, + zerolog.Nop(), + common.DefaultBlockOptions(), + make(chan error, 1), + nil, + ) + require.NoError(t, err) + + prevHeader := &types.SignedHeader{ + Header: types.Header{ + Version: types.InitStateVersion, + BaseHeader: 
types.BaseHeader{ + ChainID: gen.ChainID, + Height: 1, + Time: uint64(gen.StartTime.UnixNano()), + }, + AppHash: []byte("state-root-0"), + ProposerAddress: oldAddr, + DataHash: common.DataHashForEmptyTxs, + }, + Signature: types.Signature([]byte("sig-1")), + Signer: oldSignerInfo, + } + prevData := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 1, + Time: prevHeader.BaseHeader.Time, + }, + Txs: nil, + } + + batch, err := memStore.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) + require.NoError(t, batch.SetHeight(1)) + require.NoError(t, batch.Commit()) + + executor.setLastState(types.State{ + Version: types.InitStateVersion, + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + LastBlockHeight: 1, + LastBlockTime: prevHeader.Time(), + LastHeaderHash: prevHeader.Hash(), + AppHash: []byte("state-root-1"), + }) + + header, data, err := executor.CreateBlock(context.Background(), 2, &BatchData{ + Batch: &coreseq.Batch{}, + Time: time.Now(), + }) + require.NoError(t, err) + require.Equal(t, newAddr, header.ProposerAddress) + require.Equal(t, newAddr, header.Signer.Address) + require.Equal(t, uint64(2), data.Height()) +} diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index 83f56d9cb5..e53e351832 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -1,7 +1,6 @@ package submitting import ( - "bytes" "context" "encoding/json" "fmt" @@ -476,10 +475,6 @@ func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.Si return nil, nil, fmt.Errorf("failed to get address: %w", err) } - if len(genesis.ProposerAddress) > 0 && !bytes.Equal(addr, genesis.ProposerAddress) { - return nil, nil, fmt.Errorf("signer address mismatch with genesis proposer") - } - signerInfo := types.Signer{ PubKey: pubKey, Address: addr, @@ -494,6 +489,10 
@@ func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.Si continue } + if err := genesis.ValidateProposer(unsignedData.Height(), addr, pubKey); err != nil { + return nil, nil, fmt.Errorf("signer does not match proposer schedule for data at height %d: %w", unsignedData.Height(), err) + } + signature, err := signer.Sign(ctx, unsignedDataListBz[i]) if err != nil { return nil, nil, fmt.Errorf("failed to sign data: %w", err) diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index d25786018b..9c55b9bd6c 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -343,6 +343,97 @@ func TestDASubmitter_SubmitData_Success(t *testing.T) { assert.True(t, ok) } +func TestDASubmitter_SubmitData_UsesScheduledProposerForHeight(t *testing.T) { + submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) + ctx := context.Background() + dataNamespace := datypes.NamespaceFromString(testDataNamespace).Bytes() + + mockDA.On( + "Submit", + mock.Anything, + mock.AnythingOfType("[][]uint8"), + mock.AnythingOfType("float64"), + dataNamespace, + mock.Anything, + ).Return(func(_ context.Context, blobs [][]byte, _ float64, _ []byte, _ []byte) datypes.ResultSubmit { + return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(len(blobs)), Height: 2}} + }).Once() + + oldAddr, oldPub, _ := createTestSigner(t) + nextAddr, nextPub, nextSigner := createTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(gen.InitialHeight, oldPub) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(2, nextPub) + require.NoError(t, err) + + gen.ProposerAddress = entry1.Address + gen.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} + submitter.genesis = gen + + data1 := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 1, + Time: 
uint64(time.Now().UnixNano()), + }, + Txs: types.Txs{}, + } + + header1 := &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + ChainID: gen.ChainID, + Height: 1, + Time: uint64(time.Now().UnixNano()), + }, + ProposerAddress: oldAddr, + DataHash: common.DataHashForEmptyTxs, + }, + Signer: types.Signer{PubKey: oldPub, Address: oldAddr}, + } + + data := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 2, + Time: uint64(time.Now().UnixNano()), + }, + Txs: types.Txs{types.Tx("rotated-key-tx")}, + } + + header := &types.SignedHeader{ + Header: types.Header{ + BaseHeader: types.BaseHeader{ + ChainID: gen.ChainID, + Height: 2, + Time: uint64(time.Now().UnixNano()), + }, + ProposerAddress: nextAddr, + DataHash: data.DACommitment(), + }, + Signer: types.Signer{PubKey: nextPub, Address: nextAddr}, + } + + sig1 := types.Signature([]byte("sig-1")) + sig2 := types.Signature([]byte("sig-2")) + batch, err := st.NewBatch(ctx) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(header1, data1, &sig1)) + require.NoError(t, batch.SaveBlockData(header, data, &sig2)) + require.NoError(t, batch.SetHeight(2)) + require.NoError(t, batch.Commit()) + + signedDataList, marshalledData, err := cm.GetPendingData(ctx) + require.NoError(t, err) + err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nextSigner, gen) + require.NoError(t, err) + + _, ok := cm.GetDataDAIncludedByHeight(2) + assert.True(t, ok) + assert.NotEqual(t, oldAddr, nextAddr) +} + func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) ctx := context.Background() diff --git a/block/internal/syncing/assert.go b/block/internal/syncing/assert.go index 7c77400571..56000e744e 100644 --- a/block/internal/syncing/assert.go +++ b/block/internal/syncing/assert.go @@ -1,7 +1,6 @@ package syncing import ( - "bytes" "errors" "fmt" @@ -9,11 +8,11 @@ import ( "github.com/evstack/ev-node/types" ) 
-func assertExpectedProposer(genesis genesis.Genesis, proposerAddr []byte) error { - if !bytes.Equal(proposerAddr, genesis.ProposerAddress) { - return fmt.Errorf("unexpected proposer: got %x, expected %x", - proposerAddr, genesis.ProposerAddress) +func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, signer types.Signer) error { + if err := genesis.ValidateProposer(height, proposerAddr, signer.PubKey); err != nil { + return fmt.Errorf("unexpected proposer at height %d: %w", height, err) } + return nil } @@ -22,7 +21,7 @@ func assertValidSignedData(signedData *types.SignedData, genesis genesis.Genesis return errors.New("empty signed data") } - if err := assertExpectedProposer(genesis, signedData.Signer.Address); err != nil { + if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer); err != nil { return err } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index d4fa93ce04..1b3393f181 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -299,7 +299,7 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH return nil } - if err := r.assertExpectedProposer(header.ProposerAddress); err != nil { + if err := r.assertExpectedProposer(header); err != nil { r.logger.Debug().Err(err).Msg("unexpected proposer") return nil } @@ -355,9 +355,9 @@ func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { return &signedData.Data } -// assertExpectedProposer validates the proposer address -func (r *daRetriever) assertExpectedProposer(proposerAddr []byte) error { - return assertExpectedProposer(r.genesis, proposerAddr) +// assertExpectedProposer validates the proposer schedule entry for the header height. 
+func (r *daRetriever) assertExpectedProposer(header *types.SignedHeader) error { + return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer) } // assertValidSignedData validates signed data using the configured signature provider diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index a3778757a1..67e5a1b278 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,7 +81,7 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - if err := h.assertExpectedProposer(p2pHeader.ProposerAddress); err != nil { + if err := h.assertExpectedProposer(p2pHeader.SignedHeader); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err } @@ -125,11 +125,11 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC return nil } -// assertExpectedProposer validates the proposer address. -func (h *P2PHandler) assertExpectedProposer(proposerAddr []byte) error { - if !bytes.Equal(h.genesis.ProposerAddress, proposerAddr) { - return fmt.Errorf("proposer address mismatch: got %x, expected %x", - proposerAddr, h.genesis.ProposerAddress) +// assertExpectedProposer validates the proposer schedule entry for the header height. 
+func (h *P2PHandler) assertExpectedProposer(header *types.SignedHeader) error { + if err := assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { + return err } + return nil } diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 8bffc31ede..dc370a9482 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -215,6 +215,44 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(11)) } +func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) { + p := setupP2P(t) + ctx := context.Background() + + nextAddr, nextPub, nextSigner := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) + require.NoError(t, err) + + p.Genesis.ProposerAddress = entry1.Address + p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} + p.Genesis.DAEpochForcedInclusion = 1 + require.NoError(t, p.Genesis.Validate()) + p.Handler.genesis = p.Genesis + + header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, nextAddr, nextPub, nextSigner) + data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 11, 1)} + header.DataHash = data.DACommitment() + bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) + require.NoError(t, err) + sig, err := nextSigner.Sign(t.Context(), bz) + require.NoError(t, err) + header.Signature = sig + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, nil).Once() + p.DataStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(data, nil).Once() + + ch := make(chan common.DAHeightEvent, 1) + err = p.Handler.ProcessHeight(ctx, 11, ch) + require.NoError(t, err) + + events := collectEvents(t, ch, 
50*time.Millisecond) + require.Len(t, events, 1) + require.Equal(t, nextAddr, events[0].Header.ProposerAddress) +} + func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { p := setupP2P(t) ctx := t.Context() diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index aaebb7a458..b67fe86e09 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -125,7 +125,7 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl r.logger.Debug().Err(err).Msg("invalid header structure") return nil } - if err := assertExpectedProposer(r.genesis, header.ProposerAddress); err != nil { + if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { r.logger.Debug().Err(err).Msg("unexpected proposer") return nil } diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 0cfdf5c7ae..01bda4a8d9 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -297,6 +297,10 @@ function sidebarHome() { text: "Create genesis for your chain", link: "/guides/create-genesis", }, + { + text: "Rotate proposer key", + link: "/guides/operations/proposer-key-rotation", + }, { text: "Metrics", link: "/guides/metrics", diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md new file mode 100644 index 0000000000..7d4114709c --- /dev/null +++ b/docs/adr/adr-023-proposer-key-rotation.md @@ -0,0 +1,151 @@ +# ADR 023: Proposer Key Rotation via Height-Based Schedule + +## Changelog + +- 2026-04-23: Implemented proposer key rotation through a height-indexed proposer schedule + +## Context + +ev-node historically treated the proposer as a single static identity embedded in genesis via `proposer_address`. +That assumption leaked into block production, DA submission, and sync validation. 
As a result, rotating a compromised +or operationally obsolete proposer key required out-of-band coordination and effectively behaved like a manual +re-genesis from the point of view of node operators. + +This was suboptimal for three reasons: + +1. It made proposer rotation operationally risky and easy to get wrong. +2. Fresh nodes syncing from genesis had no protocol-visible record of when the proposer changed. +3. Validation only pinned the proposer address, not the scheduled public key that should be producing blocks. + +## Alternative Approaches + +### 1. Manual key swap only + +Operators can stop the sequencer, swap the local signer, redistribute config, and restart nodes. +This is insufficient because the chain itself does not encode when the proposer changed, so historical sync +and validation become ambiguous. + +### 2. Re-issue a new genesis on each rotation + +This treats every proposer rotation like a chain restart. It is operationally heavy, conflates upgrades with +rotations, and breaks continuity for nodes syncing historical data. + +### 3. Height-indexed proposer schedule in genesis (Chosen) + +Record proposer changes as an ordered schedule indexed by activation height. This preserves chain continuity while +making rotation rules explicit and replayable from genesis. + +## Decision + +ev-node now supports proposer rotation through a `proposer_schedule` field in genesis. + +Each entry declares: + +- `start_height` +- `address` +- `pub_key` + +The active proposer for block height `h` is the last entry whose `start_height <= h`. + +The legacy `proposer_address` field remains for backward compatibility. When no explicit schedule is present, +ev-node derives an implicit single-entry schedule beginning at `initial_height`. 
+ +When an explicit schedule is present: + +- the first entry must start at `initial_height` +- entries must be strictly increasing by `start_height` +- each entry's `address` must match the configured `pub_key` +- `proposer_address`, when present, must match the first schedule entry + +## Detailed Design + +### Data model + +Genesis gains: + +```json +"proposer_schedule": [ + { + "start_height": 1, + "address": "...", + "pub_key": "..." + }, + { + "start_height": 1250000, + "address": "...", + "pub_key": "..." + } +] +``` + +The existing `proposer_address` field is retained as a compatibility field and is normalized to the first +scheduled proposer when a schedule is present. + +### Validation rules + +The proposer schedule is now consulted in all proposer-sensitive paths: + +1. executor startup accepts any signer that appears somewhere in the schedule +2. block creation resolves the proposer for the exact height being produced +3. DA submission validates the configured signer against the scheduled proposer for each signed data height +4. sync validation validates incoming headers and signed data against the scheduled proposer for their heights + +This makes proposer rotation protocol-visible for both live nodes and nodes syncing historical data. + +### Operational procedure + +For a planned rotation: + +1. Choose activation height `H` +2. Add a new `proposer_schedule` entry with `start_height = H` +3. Distribute the updated genesis/config to node operators +4. Upgrade follower/full nodes before activation +5. Stop the old sequencer before `H` +6. Start the new sequencer with the replacement key at or after `H` + +The old proposer remains valid for heights `< H`, and the new proposer becomes valid at heights `>= H`. + +### Security considerations + +This design improves safety over address-only pinning by allowing validation against the scheduled public key. 
+It does not solve emergency rotation authorization by itself; a future design can add a separate upgrade authority +or rotation certificate flow if the network needs signer replacement without prior static scheduling. + +### Testing + +Coverage includes: + +- genesis schedule validation and height resolution +- sync acceptance of scheduled proposer rotation +- DA submission using a rotated proposer key at the configured height +- executor block creation using the proposer scheduled for the produced height + +## Status + +Implemented + +## Consequences + +### Positive + +- proposer rotation is now part of the chain configuration rather than an operator convention +- fresh nodes can validate historical proposer changes from genesis +- sync and DA validation can pin scheduled public keys, not just addresses +- routine key rotation no longer requires a chain restart + +### Negative + +- proposer schedule changes are consensus-visible and require coordinated rollout +- operators must distribute updated genesis/config before activation height +- emergency rotation still requires preplanned scheduling or a later authority-based mechanism + +### Neutral + +- legacy single-proposer deployments continue to work without defining `proposer_schedule` + +## References + +- [pkg/genesis/genesis.go](../../pkg/genesis/genesis.go) +- [pkg/genesis/proposer_schedule.go](../../pkg/genesis/proposer_schedule.go) +- [block/internal/executing/executor.go](../../block/internal/executing/executor.go) +- [block/internal/syncing/assert.go](../../block/internal/syncing/assert.go) diff --git a/docs/guides/create-genesis.md b/docs/guides/create-genesis.md index 5886325dab..365b491b82 100644 --- a/docs/guides/create-genesis.md +++ b/docs/guides/create-genesis.md @@ -125,6 +125,10 @@ Before doing so, add a `da_start_height` field to the genesis file, that corresp jq '.da_start_height = 1' ~/.$CHAIN_ID/config/genesis.json > temp.json && mv temp.json ~/.$CHAIN_ID/config/genesis.json ``` +:::tip +If 
you want to plan a future proposer key migration without restarting the chain, define a `proposer_schedule` in your genesis and roll it out as a coordinated upgrade. See [Rotate proposer key](./operations/proposer-key-rotation.md). +::: + ## Summary By following these steps, you will set up the genesis for your chain, initialize the validator, add a genesis account, and start the chain. This guide provides a basic framework for configuring and starting your chain using the gm-world binary. Make sure you initialized your chain correctly, and use the `gmd` command for all operations. diff --git a/docs/guides/operations/proposer-key-rotation.md b/docs/guides/operations/proposer-key-rotation.md new file mode 100644 index 0000000000..b25c43fc7a --- /dev/null +++ b/docs/guides/operations/proposer-key-rotation.md @@ -0,0 +1,195 @@ +# Rotate proposer key + +Use this guide to rotate a sequencer proposer key without restarting the chain. The active proposer is selected from `proposer_schedule` in `genesis.json` based on block height. + +## Before you start + +- This is a coordinated network upgrade. Every node must run a binary that supports `proposer_schedule`. +- Every node must use the same updated `genesis.json` before the activation height. +- `ev-node` loads `genesis.json` when the node starts. Updating the file on disk is not enough; you must restart nodes after replacing it. +- The old proposer key remains valid until the block before the activation height. If the old key cannot safely produce until then, stop the sequencer and coordinate operator recovery first. + +## How proposer rotation is stored in genesis + +`proposer_address`, `proposer_schedule[].address`, and `proposer_schedule[].pub_key` are base64-encoded strings in JSON. 
+ +```json +{ + "initial_height": 1, + "proposer_address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", + "proposer_schedule": [ + { + "start_height": 1, + "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", + "pub_key": "5l6vM0b0GqQYQw4x0cI6q7N2vD1cE+oV6rN5eQ7v6dM=" + }, + { + "start_height": 125000, + "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=", + "pub_key": "9r5mM4XjKx6h6sJv2Jf6dB5nQ0eU9l8cM1qT2wV3yZQ=" + } + ] +} +``` + +Rules enforced by `ev-node`: + +- `proposer_schedule[0].start_height` must equal `initial_height` +- schedule entries must be strictly increasing by `start_height` +- every `address` must match its `pub_key` +- if `proposer_address` is set, it must match the first schedule entry + +Keep all earlier schedule entries. Fresh full nodes need them to validate historical blocks. + +## 1. Pick an activation height + +Choose an activation height `H` far enough in the future that you can distribute the updated genesis and restart every non-producing node before the cutover. + +```bash +ACTIVATION_HEIGHT=125000 +GENESIS="$HOME/.evnode/config/genesis.json" +INITIAL_HEIGHT="$(jq -r '.initial_height' "$GENESIS")" +``` + +## 2. Get the current and replacement proposer public keys + +For a file-based signer, the signer public key is stored in `signer.json` as base64: + +```bash +OLD_SIGNER_DIR="$HOME/.evnode/config" +NEW_SIGNER_DIR="/secure/path/new-signer" + +OLD_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$OLD_SIGNER_DIR/signer.json")" +NEW_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$NEW_SIGNER_DIR/signer.json")" +``` + +If you use a KMS-backed signer, export the replacement Ed25519 public key from your signer flow and base64-encode the raw public key bytes in the same format. The runtime configuration stays the same as in the [AWS KMS signer guide](./aws-kms-signer.md). + +## 3. Derive proposer addresses from the public keys + +`ev-node` derives the proposer address as `sha256(raw_pubkey_bytes)`. 
The helper below prints the address in the base64 format used by `genesis.json`. + +```bash +proposer_address() { + python3 - "$1" <<'PY' +import base64 +import hashlib +import sys + +pub_key = base64.b64decode(sys.argv[1]) +address = hashlib.sha256(pub_key).digest() +print(base64.b64encode(address).decode()) +PY +} + +OLD_PROPOSER_ADDRESS="$(proposer_address "$OLD_PROPOSER_PUBKEY")" +NEW_PROPOSER_ADDRESS="$(proposer_address "$NEW_PROPOSER_PUBKEY")" +``` + +## 4. Update `genesis.json` + +### If your chain only has `proposer_address` today + +Create an explicit schedule with the current proposer at `initial_height` and the new proposer at `ACTIVATION_HEIGHT`. + +```bash +jq \ + --arg old_addr "$OLD_PROPOSER_ADDRESS" \ + --arg old_pub "$OLD_PROPOSER_PUBKEY" \ + --arg new_addr "$NEW_PROPOSER_ADDRESS" \ + --arg new_pub "$NEW_PROPOSER_PUBKEY" \ + --argjson initial_height "$INITIAL_HEIGHT" \ + --argjson activation_height "$ACTIVATION_HEIGHT" \ + ' + .proposer_address = $old_addr + | .proposer_schedule = [ + { + start_height: $initial_height, + address: $old_addr, + pub_key: $old_pub + }, + { + start_height: $activation_height, + address: $new_addr, + pub_key: $new_pub + } + ] + ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" +``` + +### If your chain already has `proposer_schedule` + +Append the new entry. Do not replace older entries, and make sure `ACTIVATION_HEIGHT` is greater than the last scheduled `start_height`. + +```bash +jq \ + --arg new_addr "$NEW_PROPOSER_ADDRESS" \ + --arg new_pub "$NEW_PROPOSER_PUBKEY" \ + --argjson activation_height "$ACTIVATION_HEIGHT" \ + ' + .proposer_schedule += [ + { + start_height: $activation_height, + address: $new_addr, + pub_key: $new_pub + } + ] + ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" +``` + +Verify the result before you distribute it: + +```bash +jq '{initial_height, proposer_address, proposer_schedule}' "$GENESIS" +``` + +## 5. 
Distribute the updated genesis and restart followers + +Copy the same `genesis.json` to every full node, replica, and failover node. Restart them after copying the file so they load the updated schedule. + +Do this before the chain reaches `ACTIVATION_HEIGHT`. + +## 6. Cut over the sequencer + +Wait until the chain reaches `ACTIVATION_HEIGHT - 1`, then stop the old sequencer and start it with the replacement signer. + +Example with a file-based signer: + +```bash +evnode start \ + --home "$HOME/.evnode" \ + --evnode.node.aggregator \ + --evnode.signer.signer_type file \ + --evnode.signer.signer_path "$NEW_SIGNER_DIR" \ + --evnode.signer.passphrase "$SIGNER_PASSPHRASE" +``` + +If you run a custom chain binary such as `gmd` or `appd`, use the same start command you already use for the sequencer and only change the signer configuration. + +## 7. Verify the first post-upgrade block + +Fetch the header at `ACTIVATION_HEIGHT` or the next produced block and confirm that it carries the new proposer address. + +```bash +curl -s http://127.0.0.1:26657/header \ + -H 'Content-Type: application/json' \ + -d "{\"jsonrpc\":\"2.0\",\"method\":\"header\",\"params\":{\"height\":\"${ACTIVATION_HEIGHT}\"},\"id\":1}" \ + | jq . +``` + +Some RPC clients render binary fields as hex instead of base64. If needed, convert the base64 genesis address before comparing: + +```bash +python3 - "$NEW_PROPOSER_ADDRESS" <<'PY' +import base64 +import sys + +print("0x" + base64.b64decode(sys.argv[1]).hex()) +PY +``` + +If the node at `ACTIVATION_HEIGHT` is still signed by the old key, stop block production and check three things first: + +1. every node was restarted after receiving the updated genesis +2. `proposer_schedule` contains the new entry at the intended height +3. 
the sequencer is actually running with the replacement signer diff --git a/docs/guides/operations/upgrades.md b/docs/guides/operations/upgrades.md index 0027f13c36..ac5f6dcbf1 100644 --- a/docs/guides/operations/upgrades.md +++ b/docs/guides/operations/upgrades.md @@ -38,6 +38,12 @@ May require state migration or coordinated network upgrade. 5. Run any migration scripts 6. Restart +### Proposer Key Rotation + +Rotating the proposer key is a coordinated upgrade even when the chain does not restart. All nodes must receive the same updated `genesis.json`, restart to load it, and be ready before the scheduled activation height. + +Use [Rotate proposer key](./proposer-key-rotation.md) for the exact `proposer_schedule` format, genesis update steps, and cutover procedure. + ## ev-node Upgrades ### Check Current Version diff --git a/node/failover.go b/node/failover.go index 42dac4e8bc..752b6aaba3 100644 --- a/node/failover.go +++ b/node/failover.go @@ -139,7 +139,7 @@ func setupFailoverState( headerSyncService.Store(), dataSyncService.Store(), p2pClient, - genesis.ProposerAddress, + genesis.InitialProposerAddress(), logger, nodeConfig, bestKnownHeightProvider, diff --git a/node/full.go b/node/full.go index bd44f9ef42..5d13beebbd 100644 --- a/node/full.go +++ b/node/full.go @@ -78,7 +78,7 @@ func newFullNode( logger zerolog.Logger, nodeOpts NodeOptions, ) (fn *FullNode, err error) { - logger.Debug().Hex("address", genesis.ProposerAddress).Msg("Proposer address") + logger.Debug().Hex("address", genesis.InitialProposerAddress()).Msg("Initial proposer address") blockMetrics, _ := metricsProvider(genesis.ChainID) diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index e1a401d9fc..a5079c72e6 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -1,6 +1,7 @@ package genesis import ( + "bytes" "fmt" "time" ) @@ -11,10 +12,11 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. 
// The app state or other fields are not included here. type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` + ProposerSchedule []ProposerScheduleEntry `json:"proposer_schedule,omitempty"` // DAStartHeight corresponds to the height at which the first DA header/data has been published. // This value is meant to be updated after genesis and shared to all syncing nodes for speeding up syncing via DA. DAStartHeight uint64 `json:"da_start_height"` @@ -56,8 +58,28 @@ func (g Genesis) Validate() error { return fmt.Errorf("start_time cannot be zero time") } - if g.ProposerAddress == nil { - return fmt.Errorf("proposer_address cannot be nil") + if len(g.ProposerSchedule) == 0 { + if len(g.ProposerAddress) == 0 { + return fmt.Errorf("proposer_address cannot be empty when proposer_schedule is unset") + } + } else { + if err := g.ProposerSchedule[0].validate(g.InitialHeight, true); err != nil { + return fmt.Errorf("invalid proposer_schedule[0]: %w", err) + } + if g.ProposerSchedule[0].StartHeight != g.InitialHeight { + return fmt.Errorf("proposer_schedule[0].start_height must equal initial_height (%d), got %d", g.InitialHeight, g.ProposerSchedule[0].StartHeight) + } + for i := 1; i < len(g.ProposerSchedule); i++ { + if err := g.ProposerSchedule[i].validate(g.InitialHeight, true); err != nil { + return fmt.Errorf("invalid proposer_schedule[%d]: %w", i, err) + } + if g.ProposerSchedule[i].StartHeight <= g.ProposerSchedule[i-1].StartHeight { + return fmt.Errorf("proposer_schedule must be strictly increasing: entry %d start_height %d is not greater than previous %d", i, g.ProposerSchedule[i].StartHeight, g.ProposerSchedule[i-1].StartHeight) + } + } + if 
len(g.ProposerAddress) > 0 && !bytes.Equal(g.ProposerAddress, g.ProposerSchedule[0].Address) { + return fmt.Errorf("proposer_address must match proposer_schedule[0].address") + } } if g.DAEpochForcedInclusion < 1 { diff --git a/pkg/genesis/io.go b/pkg/genesis/io.go index 8c9d88e955..dcf9048aa6 100644 --- a/pkg/genesis/io.go +++ b/pkg/genesis/io.go @@ -72,12 +72,12 @@ func LoadGenesis(genesisPath string) (Genesis, error) { return Genesis{}, err } - return genesis, nil + return genesis.normalized(), nil } // Save saves the genesis state to the specified file path. func (g Genesis) Save(genesisPath string) error { - genesisJSON, err := json.MarshalIndent(g, "", " ") + genesisJSON, err := json.MarshalIndent(g.normalized(), "", " ") if err != nil { return fmt.Errorf("failed to marshal genesis state: %w", err) } diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go new file mode 100644 index 0000000000..28d9abbea8 --- /dev/null +++ b/pkg/genesis/proposer_schedule.go @@ -0,0 +1,208 @@ +package genesis + +import ( + "bytes" + "crypto/sha256" + "fmt" + + "github.com/libp2p/go-libp2p/core/crypto" +) + +// ProposerScheduleEntry declares the proposer key that becomes active at start_height. +type ProposerScheduleEntry struct { + StartHeight uint64 `json:"start_height"` + Address []byte `json:"address"` + PubKey []byte `json:"pub_key,omitempty"` +} + +// NewProposerScheduleEntry creates a proposer schedule entry from a libp2p public key. 
+func NewProposerScheduleEntry(startHeight uint64, pubKey crypto.PubKey) (ProposerScheduleEntry, error) { + if pubKey == nil { + return ProposerScheduleEntry{}, fmt.Errorf("proposer pub_key cannot be nil") + } + + marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) + if err != nil { + return ProposerScheduleEntry{}, fmt.Errorf("marshal proposer pub_key: %w", err) + } + + return ProposerScheduleEntry{ + StartHeight: startHeight, + Address: proposerKeyAddress(pubKey), + PubKey: marshalledPubKey, + }, nil +} + +// PublicKey unmarshals the configured proposer public key. Legacy single-proposer +// configs may omit the pubkey and will return nil, nil here. +func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { + if len(e.PubKey) == 0 { + return nil, nil + } + + pubKey, err := crypto.UnmarshalPublicKey(e.PubKey) + if err != nil { + return nil, fmt.Errorf("unmarshal proposer pub_key: %w", err) + } + + return pubKey, nil +} + +func (e ProposerScheduleEntry) validate(initialHeight uint64, requirePubKey bool) error { + if e.StartHeight < initialHeight { + return fmt.Errorf("proposer schedule start_height must be >= initial_height (%d), got %d", initialHeight, e.StartHeight) + } + + if len(e.Address) == 0 { + return fmt.Errorf("proposer schedule address cannot be empty") + } + + if len(e.PubKey) == 0 { + if requirePubKey { + return fmt.Errorf("proposer schedule pub_key cannot be empty") + } + return nil + } + + pubKey, err := e.PublicKey() + if err != nil { + return err + } + + expectedAddress := proposerKeyAddress(pubKey) + if !bytes.Equal(expectedAddress, e.Address) { + return fmt.Errorf("proposer schedule address does not match pub_key: got %x, expected %x", e.Address, expectedAddress) + } + + return nil +} + +// EffectiveProposerSchedule returns the explicit proposer schedule when present, +// or derives a legacy single-entry schedule from proposer_address. 
+func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { + if len(g.ProposerSchedule) > 0 { + out := make([]ProposerScheduleEntry, len(g.ProposerSchedule)) + copy(out, g.ProposerSchedule) + return out + } + + if len(g.ProposerAddress) == 0 { + return nil + } + + return []ProposerScheduleEntry{{ + StartHeight: g.InitialHeight, + Address: cloneBytes(g.ProposerAddress), + }} +} + +// InitialProposerAddress returns the first proposer address for compatibility +// with code paths that still surface a single address externally. +func (g Genesis) InitialProposerAddress() []byte { + entry, err := g.ProposerAtHeight(g.InitialHeight) + if err != nil { + return nil + } + + return cloneBytes(entry.Address) +} + +func (g Genesis) normalized() Genesis { + normalized := g + if len(normalized.ProposerAddress) == 0 { + normalized.ProposerAddress = normalized.InitialProposerAddress() + } + return normalized +} + +// HasScheduledProposer reports whether the address appears in the effective proposer schedule. +func (g Genesis) HasScheduledProposer(address []byte) bool { + for _, entry := range g.EffectiveProposerSchedule() { + if bytes.Equal(entry.Address, address) { + return true + } + } + return false +} + +// ProposerAtHeight resolves the proposer that is active for the given block height. 
+func (g Genesis) ProposerAtHeight(height uint64) (ProposerScheduleEntry, error) { + schedule := g.EffectiveProposerSchedule() + if len(schedule) == 0 { + return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured") + } + + if height < schedule[0].StartHeight { + return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured for height %d before start_height %d", height, schedule[0].StartHeight) + } + + entry := schedule[0] + for i := 1; i < len(schedule); i++ { + if height < schedule[i].StartHeight { + break + } + entry = schedule[i] + } + + return ProposerScheduleEntry{ + StartHeight: entry.StartHeight, + Address: cloneBytes(entry.Address), + PubKey: cloneBytes(entry.PubKey), + }, nil +} + +// ValidateProposer checks that the provided proposer address and public key match +// the proposer schedule entry active at the given height. +func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.PubKey) error { + entry, err := g.ProposerAtHeight(height) + if err != nil { + return err + } + + if !bytes.Equal(entry.Address, address) { + return fmt.Errorf("unexpected proposer at height %d: got %x, expected %x", height, address, entry.Address) + } + + if len(entry.PubKey) == 0 { + return nil + } + + if pubKey == nil { + return fmt.Errorf("missing proposer pub_key at height %d", height) + } + + marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) + if err != nil { + return fmt.Errorf("marshal proposer pub_key: %w", err) + } + + if !bytes.Equal(entry.PubKey, marshalledPubKey) { + return fmt.Errorf("unexpected proposer pub_key at height %d", height) + } + + return nil +} + +func cloneBytes(src []byte) []byte { + if src == nil { + return nil + } + + out := make([]byte, len(src)) + copy(out, src) + return out +} + +func proposerKeyAddress(pubKey crypto.PubKey) []byte { + if pubKey == nil { + return nil + } + + raw, err := pubKey.Raw() + if err != nil { + return nil + } + + sum := sha256.Sum256(raw) + return sum[:] +} diff --git 
a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go new file mode 100644 index 0000000000..8da1457bc1 --- /dev/null +++ b/pkg/genesis/proposer_schedule_test.go @@ -0,0 +1,93 @@ +package genesis + +import ( + "crypto/rand" + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/stretchr/testify/require" +) + +func makeProposerScheduleEntry(t *testing.T, startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { + t.Helper() + + _, pubKey, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + + entry, err := NewProposerScheduleEntry(startHeight, pubKey) + require.NoError(t, err) + + return entry, pubKey +} + +func TestGenesisProposerAtHeight(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 3) + entry2, _ := makeProposerScheduleEntry(t, 10) + + genesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 3, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + require.NoError(t, genesis.Validate()) + + proposer, err := genesis.ProposerAtHeight(3) + require.NoError(t, err) + require.Equal(t, entry1.Address, proposer.Address) + + proposer, err = genesis.ProposerAtHeight(9) + require.NoError(t, err) + require.Equal(t, entry1.Address, proposer.Address) + + proposer, err = genesis.ProposerAtHeight(10) + require.NoError(t, err) + require.Equal(t, entry2.Address, proposer.Address) +} + +func TestGenesisValidateProposerSchedule(t *testing.T) { + entry1, pubKey1 := makeProposerScheduleEntry(t, 1) + entry2, pubKey2 := makeProposerScheduleEntry(t, 20) + + genesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + require.NoError(t, genesis.Validate()) + require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) + require.NoError(t, 
genesis.ValidateProposer(21, entry2.Address, pubKey2)) + require.Error(t, genesis.ValidateProposer(21, entry2.Address, pubKey1)) +} + +func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 1) + entry2, _ := makeProposerScheduleEntry(t, 50) + + rawGenesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + genesisPath := filepath.Join(t.TempDir(), "genesis.json") + genesisJSON, err := json.Marshal(rawGenesis) + require.NoError(t, err) + require.NoError(t, os.WriteFile(genesisPath, genesisJSON, 0o600)) + + loaded, err := LoadGenesis(genesisPath) + require.NoError(t, err) + require.Equal(t, entry1.Address, loaded.ProposerAddress) + require.Equal(t, rawGenesis.ProposerSchedule, loaded.ProposerSchedule) +} From 6a0f3679f5cb8d6933d1792b1fdd67670f9a46dd Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Thu, 23 Apr 2026 15:38:56 +0200 Subject: [PATCH 2/6] remove need for pubkey and add some tests --- block/internal/syncing/assert.go | 8 +++-- block/internal/syncing/da_retriever.go | 2 +- block/internal/syncing/p2p_handler.go | 6 +--- block/internal/syncing/raft_retriever.go | 2 +- .../operations/proposer-key-rotation.md | 23 +++++--------- pkg/genesis/genesis.go | 4 +-- pkg/genesis/proposer_schedule.go | 30 ++++++------------- pkg/genesis/proposer_schedule_test.go | 21 ++++++++++++- 8 files changed, 46 insertions(+), 50 deletions(-) diff --git a/block/internal/syncing/assert.go b/block/internal/syncing/assert.go index 56000e744e..1bed6db8b9 100644 --- a/block/internal/syncing/assert.go +++ b/block/internal/syncing/assert.go @@ -4,12 +4,14 @@ import ( "errors" "fmt" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" ) -func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, 
signer types.Signer) error { - if err := genesis.ValidateProposer(height, proposerAddr, signer.PubKey); err != nil { +func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, pubKey crypto.PubKey) error { + if err := genesis.ValidateProposer(height, proposerAddr, pubKey); err != nil { return fmt.Errorf("unexpected proposer at height %d: %w", height, err) } @@ -21,7 +23,7 @@ func assertValidSignedData(signedData *types.SignedData, genesis genesis.Genesis return errors.New("empty signed data") } - if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer); err != nil { + if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer.PubKey); err != nil { return err } diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 1b3393f181..75bc631e8f 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -357,7 +357,7 @@ func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { // assertExpectedProposer validates the proposer schedule entry for the header height. func (r *daRetriever) assertExpectedProposer(header *types.SignedHeader) error { - return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer) + return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) } // assertValidSignedData validates signed data using the configured signature provider diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 67e5a1b278..0e8a08cea3 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -127,9 +127,5 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC // assertExpectedProposer validates the proposer schedule entry for the header height. 
func (h *P2PHandler) assertExpectedProposer(header *types.SignedHeader) error { - if err := assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { - return err - } - - return nil + return assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) } diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index b67fe86e09..4cb15aec07 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -125,7 +125,7 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl r.logger.Debug().Err(err).Msg("invalid header structure") return nil } - if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer); err != nil { + if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey); err != nil { r.logger.Debug().Err(err).Msg("unexpected proposer") return nil } diff --git a/docs/guides/operations/proposer-key-rotation.md b/docs/guides/operations/proposer-key-rotation.md index b25c43fc7a..3c5667d50c 100644 --- a/docs/guides/operations/proposer-key-rotation.md +++ b/docs/guides/operations/proposer-key-rotation.md @@ -11,7 +11,7 @@ Use this guide to rotate a sequencer proposer key without restarting the chain. ## How proposer rotation is stored in genesis -`proposer_address`, `proposer_schedule[].address`, and `proposer_schedule[].pub_key` are base64-encoded strings in JSON. +`proposer_address` and `proposer_schedule[].address` are base64-encoded strings in JSON. ```json { @@ -20,13 +20,11 @@ Use this guide to rotate a sequencer proposer key without restarting the chain. 
"proposer_schedule": [ { "start_height": 1, - "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", - "pub_key": "5l6vM0b0GqQYQw4x0cI6q7N2vD1cE+oV6rN5eQ7v6dM=" + "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=" }, { "start_height": 125000, - "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=", - "pub_key": "9r5mM4XjKx6h6sJv2Jf6dB5nQ0eU9l8cM1qT2wV3yZQ=" + "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=" } ] } @@ -36,7 +34,6 @@ Rules enforced by `ev-node`: - `proposer_schedule[0].start_height` must equal `initial_height` - schedule entries must be strictly increasing by `start_height` -- every `address` must match its `pub_key` - if `proposer_address` is set, it must match the first schedule entry Keep all earlier schedule entries. Fresh full nodes need them to validate historical blocks. @@ -53,7 +50,7 @@ INITIAL_HEIGHT="$(jq -r '.initial_height' "$GENESIS")" ## 2. Get the current and replacement proposer public keys -For a file-based signer, the signer public key is stored in `signer.json` as base64: +For a file-based signer, the signer public key is stored in `signer.json` as base64. You only put the derived address into genesis, but you still need the public key once to compute that address. 
```bash OLD_SIGNER_DIR="$HOME/.evnode/config" @@ -95,9 +92,7 @@ Create an explicit schedule with the current proposer at `initial_height` and th ```bash jq \ --arg old_addr "$OLD_PROPOSER_ADDRESS" \ - --arg old_pub "$OLD_PROPOSER_PUBKEY" \ --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --arg new_pub "$NEW_PROPOSER_PUBKEY" \ --argjson initial_height "$INITIAL_HEIGHT" \ --argjson activation_height "$ACTIVATION_HEIGHT" \ ' @@ -105,13 +100,11 @@ jq \ | .proposer_schedule = [ { start_height: $initial_height, - address: $old_addr, - pub_key: $old_pub + address: $old_addr }, { start_height: $activation_height, - address: $new_addr, - pub_key: $new_pub + address: $new_addr } ] ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" @@ -124,14 +117,12 @@ Append the new entry. Do not replace older entries, and make sure `ACTIVATION_HE ```bash jq \ --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --arg new_pub "$NEW_PROPOSER_PUBKEY" \ --argjson activation_height "$ACTIVATION_HEIGHT" \ ' .proposer_schedule += [ { start_height: $activation_height, - address: $new_addr, - pub_key: $new_pub + address: $new_addr } ] ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index a5079c72e6..1cbe506e1c 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -63,14 +63,14 @@ func (g Genesis) Validate() error { return fmt.Errorf("proposer_address cannot be empty when proposer_schedule is unset") } } else { - if err := g.ProposerSchedule[0].validate(g.InitialHeight, true); err != nil { + if err := g.ProposerSchedule[0].validate(g.InitialHeight); err != nil { return fmt.Errorf("invalid proposer_schedule[0]: %w", err) } if g.ProposerSchedule[0].StartHeight != g.InitialHeight { return fmt.Errorf("proposer_schedule[0].start_height must equal initial_height (%d), got %d", g.InitialHeight, g.ProposerSchedule[0].StartHeight) } for i := 1; i < len(g.ProposerSchedule); i++ { - if err := 
g.ProposerSchedule[i].validate(g.InitialHeight, true); err != nil { + if err := g.ProposerSchedule[i].validate(g.InitialHeight); err != nil { return fmt.Errorf("invalid proposer_schedule[%d]: %w", i, err) } if g.ProposerSchedule[i].StartHeight <= g.ProposerSchedule[i-1].StartHeight { diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go index 28d9abbea8..3ac6e4f831 100644 --- a/pkg/genesis/proposer_schedule.go +++ b/pkg/genesis/proposer_schedule.go @@ -8,7 +8,8 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" ) -// ProposerScheduleEntry declares the proposer key that becomes active at start_height. +// ProposerScheduleEntry declares the proposer address that becomes active at start_height. +// PubKey is optional and can be used to pin the exact key material for a schedule entry. type ProposerScheduleEntry struct { StartHeight uint64 `json:"start_height"` Address []byte `json:"address"` @@ -33,8 +34,8 @@ func NewProposerScheduleEntry(startHeight uint64, pubKey crypto.PubKey) (Propose }, nil } -// PublicKey unmarshals the configured proposer public key. Legacy single-proposer -// configs may omit the pubkey and will return nil, nil here. +// PublicKey unmarshals the configured proposer public key. Address-only schedule +// entries may omit the pubkey and will return nil, nil here. 
func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { if len(e.PubKey) == 0 { return nil, nil @@ -48,7 +49,7 @@ func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { return pubKey, nil } -func (e ProposerScheduleEntry) validate(initialHeight uint64, requirePubKey bool) error { +func (e ProposerScheduleEntry) validate(initialHeight uint64) error { if e.StartHeight < initialHeight { return fmt.Errorf("proposer schedule start_height must be >= initial_height (%d), got %d", initialHeight, e.StartHeight) } @@ -58,9 +59,6 @@ func (e ProposerScheduleEntry) validate(initialHeight uint64, requirePubKey bool } if len(e.PubKey) == 0 { - if requirePubKey { - return fmt.Errorf("proposer schedule pub_key cannot be empty") - } return nil } @@ -92,7 +90,7 @@ func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { return []ProposerScheduleEntry{{ StartHeight: g.InitialHeight, - Address: cloneBytes(g.ProposerAddress), + Address: bytes.Clone(g.ProposerAddress), }} } @@ -104,7 +102,7 @@ func (g Genesis) InitialProposerAddress() []byte { return nil } - return cloneBytes(entry.Address) + return bytes.Clone(entry.Address) } func (g Genesis) normalized() Genesis { @@ -146,8 +144,8 @@ func (g Genesis) ProposerAtHeight(height uint64) (ProposerScheduleEntry, error) return ProposerScheduleEntry{ StartHeight: entry.StartHeight, - Address: cloneBytes(entry.Address), - PubKey: cloneBytes(entry.PubKey), + Address: bytes.Clone(entry.Address), + PubKey: bytes.Clone(entry.PubKey), }, nil } @@ -183,16 +181,6 @@ func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.P return nil } -func cloneBytes(src []byte) []byte { - if src == nil { - return nil - } - - out := make([]byte, len(src)) - copy(out, src) - return out -} - func proposerKeyAddress(pubKey crypto.PubKey) []byte { if pubKey == nil { return nil diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go index 8da1457bc1..48835deb89 100644 --- 
a/pkg/genesis/proposer_schedule_test.go +++ b/pkg/genesis/proposer_schedule_test.go @@ -51,7 +51,7 @@ func TestGenesisProposerAtHeight(t *testing.T) { require.Equal(t, entry2.Address, proposer.Address) } -func TestGenesisValidateProposerSchedule(t *testing.T) { +func TestGenesisValidateProposerScheduleWithPinnedPubKey(t *testing.T) { entry1, pubKey1 := makeProposerScheduleEntry(t, 1) entry2, pubKey2 := makeProposerScheduleEntry(t, 20) @@ -69,6 +69,25 @@ func TestGenesisValidateProposerSchedule(t *testing.T) { require.Error(t, genesis.ValidateProposer(21, entry2.Address, pubKey1)) } +func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { + entry1, pubKey1 := makeProposerScheduleEntry(t, 1) + entry2, pubKey2 := makeProposerScheduleEntry(t, 20) + entry1.PubKey = nil + entry2.PubKey = nil + + genesis := Genesis{ + ChainID: "test-chain", + StartTime: time.Now().UTC(), + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + require.NoError(t, genesis.Validate()) + require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) + require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) +} + func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { entry1, _ := makeProposerScheduleEntry(t, 1) entry2, _ := makeProposerScheduleEntry(t, 50) From 6ad13f299dc52a8467a98d9aff8260b882cb30d5 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Thu, 23 Apr 2026 17:06:36 +0200 Subject: [PATCH 3/6] comments and amendments --- block/internal/executing/executor_test.go | 126 +++++++++++ block/internal/syncing/p2p_handler_test.go | 38 ++++ docs/adr/adr-023-proposer-key-rotation.md | 24 ++- pkg/genesis/genesis_test.go | 175 +++++++++++++++ pkg/genesis/proposer_schedule.go | 19 +- pkg/genesis/proposer_schedule_test.go | 236 ++++++++++++++++++++- 6 files changed, 608 insertions(+), 10 deletions(-) diff --git a/block/internal/executing/executor_test.go 
b/block/internal/executing/executor_test.go index cec6a3fecc..5f2d4db7d8 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -216,3 +216,129 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { require.Equal(t, newAddr, header.Signer.Address) require.Equal(t, uint64(2), data.Height()) } + +// TestNewExecutor_RejectsSignerOutsideSchedule verifies that a signer whose +// address does not appear anywhere in the proposer schedule cannot start the +// executor. This prevents a misconfigured replacement key from coming up as +// an aggregator on a chain it was never scheduled on. +func TestNewExecutor_RejectsSignerOutsideSchedule(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + _, scheduledSigner, _ := buildTestSigner(t) + _, _, strayerSigner := buildTestSigner(t) + + entry, err := genesis.NewProposerScheduleEntry(1, scheduledSigner.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now(), + ProposerAddress: entry.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + _, err = NewExecutor( + memStore, nil, nil, strayerSigner, cacheManager, + common.NopMetrics(), config.DefaultConfig(), gen, + nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), + make(chan error, 1), nil, + ) + require.ErrorIs(t, err, common.ErrNotProposer) +} + +// TestExecutor_CreateBlock_RejectsSignerAtWrongHeight verifies that a signer +// which is scheduled (so startup succeeds) but not active at the current +// height cannot produce a block. This guards the per-height proposer check +// inside CreateBlock — without it, a rotation could be jumped ahead or +// rolled back by whichever signer the operator happens to start. 
+func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + oldAddr, oldSignerInfo, oldSigner := buildTestSigner(t) + _, newSignerInfo, _ := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) + require.NoError(t, err) + // Second entry activates at height 5. The old signer is scheduled at + // height 1 and is NOT the proposer for height 5+. + entry2, err := genesis.NewProposerScheduleEntry(5, newSignerInfo.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: time.Now().Add(-time.Second), + ProposerAddress: entry1.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + // Start the executor as the old signer — it IS in the schedule at + // height 1, so NewExecutor must accept it. + executor, err := NewExecutor( + memStore, nil, nil, oldSigner, cacheManager, + common.NopMetrics(), config.DefaultConfig(), gen, + nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), + make(chan error, 1), nil, + ) + require.NoError(t, err) + + // Seed a height-4 block so CreateBlock(5) has a parent to reference. 
+ prevHeader := &types.SignedHeader{ + Header: types.Header{ + Version: types.InitStateVersion, + BaseHeader: types.BaseHeader{ + ChainID: gen.ChainID, + Height: 4, + Time: uint64(gen.StartTime.UnixNano()), + }, + AppHash: []byte("state-root-4"), + ProposerAddress: oldAddr, + DataHash: common.DataHashForEmptyTxs, + }, + Signature: types.Signature([]byte("sig-4")), + Signer: oldSignerInfo, + } + prevData := &types.Data{ + Metadata: &types.Metadata{ + ChainID: gen.ChainID, + Height: 4, + Time: prevHeader.BaseHeader.Time, + }, + } + + batch, err := memStore.NewBatch(context.Background()) + require.NoError(t, err) + require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) + require.NoError(t, batch.SetHeight(4)) + require.NoError(t, batch.Commit()) + + executor.setLastState(types.State{ + Version: types.InitStateVersion, + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + LastBlockHeight: 4, + LastBlockTime: prevHeader.Time(), + LastHeaderHash: prevHeader.Hash(), + AppHash: []byte("state-root-4"), + }) + + // Height 5 belongs to the NEW signer per the schedule — the old + // signer must be rejected even though it's a known schedule member. 
+ _, _, err = executor.CreateBlock(context.Background(), 5, &BatchData{ + Batch: &coreseq.Batch{}, + Time: time.Now(), + }) + require.Error(t, err) + require.Contains(t, err.Error(), "proposer") +} diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index dc370a9482..1ba3b86e27 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -253,6 +253,44 @@ func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) require.Equal(t, nextAddr, events[0].Header.ProposerAddress) } +// TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation verifies +// the counterpart to the rotation-allows test: a signer that IS in the schedule +// but only active at a later height must not be accepted for blocks before the +// activation height. Without the per-height check, any scheduled signer could +// forge blocks outside their active window. +func TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation(t *testing.T) { + p := setupP2P(t) + ctx := context.Background() + + nextAddr, nextPub, nextSigner := buildTestSigner(t) + + entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) + require.NoError(t, err) + entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) + require.NoError(t, err) + + p.Genesis.ProposerAddress = entry1.Address + p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} + p.Genesis.DAEpochForcedInclusion = 1 + require.NoError(t, p.Genesis.Validate()) + p.Handler.genesis = p.Genesis + + // entry2 is scheduled but only active at height 11. Height 10 still + // belongs to entry1, so a header from the next signer at height 10 + // must be rejected. 
+ header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 10, nextAddr, nextPub, nextSigner) + header.DataHash = common.DataHashForEmptyTxs + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() + + ch := make(chan common.DAHeightEvent, 1) + err = p.Handler.ProcessHeight(ctx, 10, ch) + require.Error(t, err) + + require.Empty(t, collectEvents(t, ch, 50*time.Millisecond)) + p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) +} + func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { p := setupP2P(t) ctx := t.Context() diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md index 7d4114709c..2c2934bf28 100644 --- a/docs/adr/adr-023-proposer-key-rotation.md +++ b/docs/adr/adr-023-proposer-key-rotation.md @@ -27,18 +27,32 @@ and validation become ambiguous. ### 2. Re-issue a new genesis on each rotation -This treats every proposer rotation like a chain restart. It is operationally heavy, conflates upgrades with -rotations, and breaks continuity for nodes syncing historical data. +This treats every proposer rotation like a chain restart: a new `chain_id`, state reset back to `initial_height`, +and existing block history discarded. It is operationally heavy, conflates upgrades with rotations, and breaks +continuity for nodes syncing historical data. ### 3. Height-indexed proposer schedule in genesis (Chosen) -Record proposer changes as an ordered schedule indexed by activation height. This preserves chain continuity while -making rotation rules explicit and replayable from genesis. +Record proposer changes as an ordered schedule indexed by activation height. The `genesis.json` file is updated +with a new schedule entry and redistributed, but the chain keeps its `chain_id`, continues from the current +height, preserves all block history, and fresh nodes can still validate the entire chain end-to-end across +rotation boundaries. 
The rollout is still coordinated — every node must receive the updated `genesis.json` and +restart before the activation height — but none of the chain's state or provenance is reset. ## Decision ev-node now supports proposer rotation through a `proposer_schedule` field in genesis. +### What this is not + +This is **not** a re-genesis. Re-genesis — in the sense we mean it above — would involve issuing a new `chain_id`, +resetting height to `initial_height`, and discarding existing block history. Proposer key rotation does none of +that: the `chain_id` is unchanged, block height keeps progressing, all previous blocks remain valid, and fresh +nodes can sync the chain from genesis across any number of rotation boundaries. + +The `genesis.json` file itself is updated (a new `proposer_schedule` entry is appended) and operators must +restart every node to reload it. The file changes; the chain's state does not. + Each entry declares: - `start_height` @@ -137,7 +151,7 @@ Implemented - proposer schedule changes are consensus-visible and require coordinated rollout - operators must distribute updated genesis/config before activation height -- emergency rotation still requires preplanned scheduling or a later authority-based mechanism +- emergency rotation still requires prior scheduling or a later authority-based mechanism ### Neutral diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index da3cc14b1f..a5aca88586 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -1,10 +1,13 @@ package genesis import ( + "crypto/rand" "testing" "time" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewGenesis(t *testing.T) { @@ -135,3 +138,175 @@ func TestGenesis_Validate(t *testing.T) { }) } } + +func TestGenesis_ValidateProposerSchedule(t *testing.T) { + validTime := time.Now().UTC() + + newEntry := func(startHeight uint64) (ProposerScheduleEntry, 
crypto.PubKey) { + _, pub, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + entry, err := NewProposerScheduleEntry(startHeight, pub) + require.NoError(t, err) + return entry, pub + } + + entry1, _ := newEntry(1) + entry10, _ := newEntry(10) + entry20, _ := newEntry(20) + + tests := []struct { + name string + mutate func() Genesis + wantErr string + }{ + { + name: "valid - schedule without proposer_address", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + }, + { + name: "valid - schedule with matching proposer_address", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: entry1.Address, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + }, + { + name: "invalid - first entry start_height != initial_height", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 5, + ProposerSchedule: []ProposerScheduleEntry{entry10, entry20}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "start_height must equal initial_height", + }, + { + name: "invalid - first entry start_height below initial_height", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 5, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "start_height must be >= initial_height", + }, + { + name: "invalid - non-increasing (equal start_heights)", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry1}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "strictly increasing", + }, + { + name: "invalid - non-increasing (decreasing start_heights)", + 
mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry20, entry10}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "start_height must equal initial_height", + }, + { + name: "invalid - entry address does not match pub_key", + mutate: func() Genesis { + tampered := entry10 + tampered.Address = append([]byte(nil), entry10.Address...) + tampered.Address[0] ^= 0xFF + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, tampered}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "address does not match pub_key", + }, + { + name: "invalid - proposer_address mismatches schedule[0].address", + mutate: func() Genesis { + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerAddress: entry10.Address, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "proposer_address must match proposer_schedule[0].address", + }, + { + name: "invalid - empty address in entry", + mutate: func() Genesis { + empty := entry10 + empty.Address = nil + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, empty}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "address cannot be empty", + }, + { + name: "invalid - malformed pub_key bytes", + mutate: func() Genesis { + bad := entry10 + bad.PubKey = []byte{0x00, 0x01, 0x02} + return Genesis{ + ChainID: "c", + StartTime: validTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, bad}, + DAEpochForcedInclusion: 1, + } + }, + wantErr: "unmarshal proposer pub_key", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.mutate().Validate() + if tt.wantErr == "" { + require.NoError(t, err) + return + } + require.Error(t, err) + require.Contains(t, err.Error(), 
tt.wantErr) + }) + } +} diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go index 3ac6e4f831..c53684535a 100644 --- a/pkg/genesis/proposer_schedule.go +++ b/pkg/genesis/proposer_schedule.go @@ -80,7 +80,13 @@ func (e ProposerScheduleEntry) validate(initialHeight uint64) error { func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { if len(g.ProposerSchedule) > 0 { out := make([]ProposerScheduleEntry, len(g.ProposerSchedule)) - copy(out, g.ProposerSchedule) + for i, entry := range g.ProposerSchedule { + out[i] = ProposerScheduleEntry{ + StartHeight: entry.StartHeight, + Address: bytes.Clone(entry.Address), + PubKey: bytes.Clone(entry.PubKey), + } + } return out } @@ -162,6 +168,17 @@ func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.P } if len(entry.PubKey) == 0 { + // Address-only schedule entry. Without a pinned pubkey we still + // have to bind the caller-provided pubkey to the scheduled + // address, otherwise a forger can pair the scheduled address + // with an arbitrary key and later satisfy signature checks that + // trust Signer.PubKey. + if pubKey != nil { + derived := proposerKeyAddress(pubKey) + if !bytes.Equal(entry.Address, derived) { + return fmt.Errorf("proposer pub_key does not match scheduled address at height %d", height) + } + } return nil } diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go index 48835deb89..950481526f 100644 --- a/pkg/genesis/proposer_schedule_test.go +++ b/pkg/genesis/proposer_schedule_test.go @@ -1,6 +1,7 @@ package genesis import ( + "bytes" "crypto/rand" "encoding/json" "os" @@ -10,8 +11,14 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/require" + + "github.com/evstack/ev-node/pkg/signer/noop" ) +// testGenesisStartTime is a fixed timestamp for genesis fixtures so tests do +// not depend on wall-clock time. 
+var testGenesisStartTime = time.Unix(1_700_000_000, 0).UTC() + func makeProposerScheduleEntry(t *testing.T, startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { t.Helper() @@ -30,7 +37,7 @@ func TestGenesisProposerAtHeight(t *testing.T) { genesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 3, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -57,7 +64,7 @@ func TestGenesisValidateProposerScheduleWithPinnedPubKey(t *testing.T) { genesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 1, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -77,7 +84,7 @@ func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { genesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 1, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -88,13 +95,234 @@ func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) } +func TestNewProposerScheduleEntry_NilPubKey(t *testing.T) { + _, err := NewProposerScheduleEntry(1, nil) + require.Error(t, err) +} + +func TestProposerAtHeight_BeforeFirstStartHeight(t *testing.T) { + entry, _ := makeProposerScheduleEntry(t, 5) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 5, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + _, err := genesis.ProposerAtHeight(4) + require.Error(t, err) + require.Contains(t, err.Error(), "before start_height") +} + +func TestProposerAtHeight_NoProposerConfigured(t *testing.T) { + genesis := Genesis{ChainID: "c", InitialHeight: 1} + _, err := genesis.ProposerAtHeight(1) + require.Error(t, err) + require.Contains(t, err.Error(), 
"no proposer configured") +} + +func TestProposerAtHeight_ReturnedEntryIsCopy(t *testing.T) { + entry, _ := makeProposerScheduleEntry(t, 1) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + got, err := genesis.ProposerAtHeight(1) + require.NoError(t, err) + got.Address[0] ^= 0xFF + got.PubKey[0] ^= 0xFF + + same, err := genesis.ProposerAtHeight(1) + require.NoError(t, err) + require.Equal(t, entry.Address, same.Address) + require.Equal(t, entry.PubKey, same.PubKey) +} + +func TestValidateProposer_WrongAddress(t *testing.T) { + entry, pubKey := makeProposerScheduleEntry(t, 1) + other, _ := makeProposerScheduleEntry(t, 1) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + err := genesis.ValidateProposer(1, other.Address, pubKey) + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected proposer at height 1") +} + +func TestValidateProposer_MissingPubKey(t *testing.T) { + entry, _ := makeProposerScheduleEntry(t, 1) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + err := genesis.ValidateProposer(1, entry.Address, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing proposer pub_key") +} + +// TestValidateProposer_AddressOnly_RejectsForgedPubKey ensures that an address-only +// schedule entry still binds the caller-provided pubkey to the scheduled address. +// Without this check, a forger could claim Signer.Address = scheduled_addr with an +// arbitrary Signer.PubKey and later pass signature validation that trusts that pubkey. 
+func TestValidateProposer_AddressOnly_RejectsForgedPubKey(t *testing.T) { + scheduled, _ := makeProposerScheduleEntry(t, 1) + _, attackerPub := makeProposerScheduleEntry(t, 1) + + scheduled.PubKey = nil // address-only entry + + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{scheduled}, + DAEpochForcedInclusion: 1, + } + + // Scheduled address paired with a different pubkey must be rejected. + err := genesis.ValidateProposer(1, scheduled.Address, attackerPub) + require.Error(t, err) + require.Contains(t, err.Error(), "does not match scheduled address") +} + +func TestValidateProposer_UsesActiveEntryAtHeight(t *testing.T) { + entry1, pub1 := makeProposerScheduleEntry(t, 1) + entry2, pub2 := makeProposerScheduleEntry(t, 10) + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + // entry2 signer trying to sign height within entry1's active range must fail. + require.Error(t, genesis.ValidateProposer(9, entry2.Address, pub2)) + // entry1 signer trying to sign height within entry2's active range must fail. 
+ require.Error(t, genesis.ValidateProposer(10, entry1.Address, pub1)) +} + +func TestHasScheduledProposer(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 1) + entry2, _ := makeProposerScheduleEntry(t, 10) + unknown, _ := makeProposerScheduleEntry(t, 99) + + explicit := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + require.True(t, explicit.HasScheduledProposer(entry1.Address)) + require.True(t, explicit.HasScheduledProposer(entry2.Address)) + require.False(t, explicit.HasScheduledProposer(unknown.Address)) + + legacy := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerAddress: entry1.Address, + DAEpochForcedInclusion: 1, + } + require.True(t, legacy.HasScheduledProposer(entry1.Address)) + require.False(t, legacy.HasScheduledProposer(entry2.Address)) + + empty := Genesis{ChainID: "c", InitialHeight: 1} + require.False(t, empty.HasScheduledProposer(entry1.Address)) +} + +func TestEffectiveProposerSchedule_ExplicitScheduleIsDeepCopy(t *testing.T) { + entry1, _ := makeProposerScheduleEntry(t, 1) + entry2, _ := makeProposerScheduleEntry(t, 10) + origAddr := bytes.Clone(entry1.Address) + origPub := bytes.Clone(entry1.PubKey) + + genesis := Genesis{ + ChainID: "c", + StartTime: testGenesisStartTime, + InitialHeight: 1, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, + DAEpochForcedInclusion: 1, + } + + // Mutating returned byte slices must not corrupt the genesis-backed data. 
+ got := genesis.EffectiveProposerSchedule() + got[0].Address[0] ^= 0xFF + got[0].PubKey[0] ^= 0xFF + + require.Equal(t, origAddr, genesis.ProposerSchedule[0].Address) + require.Equal(t, origPub, genesis.ProposerSchedule[0].PubKey) +} + +func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { + addr := []byte("some-address-bytes") + legacy := Genesis{ + ChainID: "c", + InitialHeight: 7, + ProposerAddress: addr, + } + schedule := legacy.EffectiveProposerSchedule() + require.Len(t, schedule, 1) + require.Equal(t, uint64(7), schedule[0].StartHeight) + require.Equal(t, addr, schedule[0].Address) + require.Empty(t, schedule[0].PubKey) + + // mutating the derived slice must not affect the genesis backing data. + schedule[0].Address[0] ^= 0xFF + require.Equal(t, addr, legacy.ProposerAddress) +} + +func TestEffectiveProposerSchedule_Empty(t *testing.T) { + require.Nil(t, Genesis{}.EffectiveProposerSchedule()) +} + +func TestInitialProposerAddress_EmptyGenesisReturnsNil(t *testing.T) { + require.Nil(t, Genesis{InitialHeight: 1}.InitialProposerAddress()) +} + +// TestProposerKeyAddressMatchesSignerGetAddress pins the invariant that the +// genesis-side address derivation matches the signer implementations. If a +// signer ever changes its address formula this test will fail and flag the +// break instead of silently producing rejected blocks after a key rotation. 
+func TestProposerKeyAddressMatchesSignerGetAddress(t *testing.T) { + priv, pub, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + + s, err := noop.NewNoopSigner(priv) + require.NoError(t, err) + + signerAddr, err := s.GetAddress() + require.NoError(t, err) + + genesisAddr := proposerKeyAddress(pub) + require.Equal(t, signerAddr, genesisAddr) + + entry, err := NewProposerScheduleEntry(1, pub) + require.NoError(t, err) + require.Equal(t, signerAddr, entry.Address) +} + func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { entry1, _ := makeProposerScheduleEntry(t, 1) entry2, _ := makeProposerScheduleEntry(t, 50) rawGenesis := Genesis{ ChainID: "test-chain", - StartTime: time.Now().UTC(), + StartTime: testGenesisStartTime, InitialHeight: 1, ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, From 45a7d75cb67b6d3ed6b3c02b9979945fd8a2b85c Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 24 Apr 2026 13:02:32 +0200 Subject: [PATCH 4/6] comment changes --- block/internal/executing/executor.go | 7 +- block/internal/executing/executor_test.go | 94 ++++++++++++++++++++-- block/internal/syncing/p2p_handler.go | 9 +++ block/internal/syncing/p2p_handler_test.go | 17 ++++ docs/adr/adr-023-proposer-key-rotation.md | 12 +-- pkg/genesis/genesis_test.go | 6 +- pkg/genesis/proposer_schedule_test.go | 3 +- 7 files changed, 131 insertions(+), 17 deletions(-) diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 4cc7f4984c..3fe5ba5d10 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -755,7 +755,12 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } } else { - validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, nil) + pubKey, err = proposer.PublicKey() + if err != nil { + return nil, nil, 
fmt.Errorf("failed to get scheduled proposer public key: %w", err) + } + + validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index 5f2d4db7d8..c6f6ccc1a5 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -7,6 +7,7 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,6 +21,8 @@ import ( "github.com/evstack/ev-node/types" ) +var fixedExecutorTestStartTime = time.Unix(1_700_000_000, 0).UTC() + func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create in-memory store ds := sync.MutexWrap(datastore.NewMapDatastore()) @@ -143,7 +146,7 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), + StartTime: fixedExecutorTestStartTime, ProposerAddress: entry1.Address, ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -209,7 +212,7 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { header, data, err := executor.CreateBlock(context.Background(), 2, &BatchData{ Batch: &coreseq.Batch{}, - Time: time.Now(), + Time: fixedExecutorTestStartTime.Add(time.Second), }) require.NoError(t, err) require.Equal(t, newAddr, header.ProposerAddress) @@ -217,6 +220,85 @@ func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { require.Equal(t, uint64(2), data.Height()) } +func TestExecutor_CreateBlock_BasedSequencerUsesScheduledPubKey(t *testing.T) { + ds := sync.MutexWrap(datastore.NewMapDatastore()) + memStore := 
store.New(ds) + + cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) + require.NoError(t, err) + + _, signerInfo, _ := buildTestSigner(t) + entry, err := genesis.NewProposerScheduleEntry(1, signerInfo.PubKey) + require.NoError(t, err) + + gen := genesis.Genesis{ + ChainID: "test-chain", + InitialHeight: 1, + StartTime: fixedExecutorTestStartTime, + ProposerAddress: entry.Address, + ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, + DAEpochForcedInclusion: 1, + } + + cfg := config.DefaultConfig() + cfg.Node.BasedSequencer = true + + wantValidatorHash := types.Hash{0x01} + hasherCalled := false + options := common.DefaultBlockOptions() + options.ValidatorHasherProvider = func(address []byte, pubKey crypto.PubKey) (types.Hash, error) { + hasherCalled = true + require.Equal(t, entry.Address, address) + require.NotNil(t, pubKey) + + marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) + require.NoError(t, err) + require.Equal(t, entry.PubKey, marshalledPubKey) + + return wantValidatorHash, nil + } + + executor, err := NewExecutor( + memStore, + nil, + nil, + nil, + cacheManager, + common.NopMetrics(), + cfg, + gen, + nil, + nil, + zerolog.Nop(), + options, + make(chan error, 1), + nil, + ) + require.NoError(t, err) + + executor.setLastState(types.State{ + Version: types.InitStateVersion, + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + AppHash: []byte("state-root-1"), + }) + + header, data, err := executor.CreateBlock(context.Background(), 1, &BatchData{ + Batch: &coreseq.Batch{}, + Time: fixedExecutorTestStartTime, + }) + require.NoError(t, err) + require.True(t, hasherCalled) + require.Equal(t, wantValidatorHash, header.ValidatorHash) + require.Equal(t, entry.Address, header.Signer.Address) + require.NotNil(t, header.Signer.PubKey) + + marshalledPubKey, err := crypto.MarshalPublicKey(header.Signer.PubKey) + require.NoError(t, err) + require.Equal(t, entry.PubKey, marshalledPubKey) + require.Equal(t, 
uint64(1), data.Height()) +} + // TestNewExecutor_RejectsSignerOutsideSchedule verifies that a signer whose // address does not appear anywhere in the proposer schedule cannot start the // executor. This prevents a misconfigured replacement key from coming up as @@ -237,7 +319,7 @@ func TestNewExecutor_RejectsSignerOutsideSchedule(t *testing.T) { gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, - StartTime: time.Now(), + StartTime: fixedExecutorTestStartTime, ProposerAddress: entry.Address, ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, DAEpochForcedInclusion: 1, @@ -277,7 +359,7 @@ func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { gen := genesis.Genesis{ ChainID: "test-chain", InitialHeight: 1, - StartTime: time.Now().Add(-time.Second), + StartTime: fixedExecutorTestStartTime, ProposerAddress: entry1.Address, ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, DAEpochForcedInclusion: 1, @@ -300,7 +382,7 @@ func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { BaseHeader: types.BaseHeader{ ChainID: gen.ChainID, Height: 4, - Time: uint64(gen.StartTime.UnixNano()), + Time: uint64(fixedExecutorTestStartTime.Add(4 * time.Second).UnixNano()), }, AppHash: []byte("state-root-4"), ProposerAddress: oldAddr, @@ -337,7 +419,7 @@ func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { // signer must be rejected even though it's a known schedule member. 
_, _, err = executor.CreateBlock(context.Background(), 5, &BatchData{ Batch: &coreseq.Batch{}, - Time: time.Now(), + Time: fixedExecutorTestStartTime.Add(5 * time.Second), }) require.Error(t, err) require.Contains(t, err.Error(), "proposer") diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 0e8a08cea3..2150bd2933 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,6 +81,15 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } + if headerHeight := p2pHeader.Height(); headerHeight != height { + err := fmt.Errorf("header height mismatch: requested %d, got %d", height, headerHeight) + h.logger.Warn(). + Uint64("requested_height", height). + Uint64("header_height", headerHeight). + Err(err). + Msg("discarding mismatched header from P2P") + return err + } if err := h.assertExpectedProposer(p2pHeader.SignedHeader); err != nil { h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") return err diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 1ba3b86e27..84a875ffbf 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -215,6 +215,23 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(11)) } +func TestP2PHandler_ProcessHeight_RejectsHeaderHeightMismatch(t *testing.T) { + p := setupP2P(t) + ctx := context.Background() + + header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 12, p.ProposerAddr, p.ProposerPub, p.Signer) + + p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() + + ch := make(chan common.DAHeightEvent, 1) + err := p.Handler.ProcessHeight(ctx, 10, ch) + require.Error(t, err) + require.Contains(t, err.Error(), "header height mismatch") + + require.Empty(t, 
collectEvents(t, ch, 50*time.Millisecond)) + p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) +} + func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) { p := setupP2P(t) ctx := context.Background() diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md index 2c2934bf28..1c1b067982 100644 --- a/docs/adr/adr-023-proposer-key-rotation.md +++ b/docs/adr/adr-023-proposer-key-rotation.md @@ -57,7 +57,7 @@ Each entry declares: - `start_height` - `address` -- `pub_key` +- `pub_key` (optional; when present, it must match `address`) The active proposer for block height `h` is the last entry whose `start_height <= h`. @@ -68,8 +68,9 @@ When an explicit schedule is present: - the first entry must start at `initial_height` - entries must be strictly increasing by `start_height` -- each entry's `address` must match the configured `pub_key` -- `proposer_address`, when present, must match the first schedule entry +- if `pub_key` is present, the entry's `address` must match it +- entries without `pub_key` are interpreted by `address` only +- `proposer_address`, when present, must match the first schedule entry's `address` ## Detailed Design @@ -86,8 +87,7 @@ Genesis gains: }, { "start_height": 1250000, - "address": "...", - "pub_key": "..." + "address": "..." } ] ``` @@ -121,7 +121,7 @@ The old proposer remains valid for heights `< H`, and the new proposer becomes v ### Security considerations -This design improves safety over address-only pinning by allowing validation against the scheduled public key. +This design improves safety by allowing validation against the scheduled public key when one is pinned. It does not solve emergency rotation authorization by itself; a future design can add a separate upgrade authority or rotation certificate flow if the network needs signer replacement without prior static scheduling. 
diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index a5aca88586..9c850f3963 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -140,7 +140,7 @@ func TestGenesis_Validate(t *testing.T) { } func TestGenesis_ValidateProposerSchedule(t *testing.T) { - validTime := time.Now().UTC() + validTime := time.Unix(1_700_000_000, 0).UTC() newEntry := func(startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { _, pub, err := crypto.GenerateEd25519Key(rand.Reader) @@ -230,11 +230,11 @@ func TestGenesis_ValidateProposerSchedule(t *testing.T) { ChainID: "c", StartTime: validTime, InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry20, entry10}, + ProposerSchedule: []ProposerScheduleEntry{entry1, entry20, entry10}, DAEpochForcedInclusion: 1, } }, - wantErr: "start_height must equal initial_height", + wantErr: "strictly increasing", }, { name: "invalid - entry address does not match pub_key", diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go index 950481526f..88f87590ea 100644 --- a/pkg/genesis/proposer_schedule_test.go +++ b/pkg/genesis/proposer_schedule_test.go @@ -270,6 +270,7 @@ func TestEffectiveProposerSchedule_ExplicitScheduleIsDeepCopy(t *testing.T) { func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { addr := []byte("some-address-bytes") + origAddr := bytes.Clone(addr) legacy := Genesis{ ChainID: "c", InitialHeight: 7, @@ -283,7 +284,7 @@ func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { // mutating the derived slice must not affect the genesis backing data. 
schedule[0].Address[0] ^= 0xFF - require.Equal(t, addr, legacy.ProposerAddress) + require.Equal(t, origAddr, legacy.ProposerAddress) } func TestEffectiveProposerSchedule_Empty(t *testing.T) { From 8db31f9c919cab358f0e7acde149bd34eac642bb Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 24 Apr 2026 16:31:21 +0200 Subject: [PATCH 5/6] redo the design --- apps/evm/go.mod | 1 + apps/grpc/go.mod | 1 + apps/testapp/go.mod | 5 +- apps/testapp/kv/kvexecutor.go | 14 +- apps/testapp/kv/kvexecutor_test.go | 12 +- block/internal/common/replay.go | 44 ++- block/internal/executing/executor.go | 116 ++++-- .../executing/executor_benchmark_test.go | 4 +- .../internal/executing/executor_logic_test.go | 40 +- block/internal/executing/executor_test.go | 303 ---------------- block/internal/reaping/bench_test.go | 4 +- block/internal/submitting/da_submitter.go | 15 +- .../internal/submitting/da_submitter_test.go | 91 ----- block/internal/syncing/assert.go | 15 +- block/internal/syncing/da_retriever.go | 10 - block/internal/syncing/da_retriever_test.go | 15 +- block/internal/syncing/p2p_handler.go | 18 - block/internal/syncing/p2p_handler_test.go | 91 +---- block/internal/syncing/raft_retriever.go | 4 - block/internal/syncing/syncer.go | 54 ++- block/internal/syncing/syncer_test.go | 58 ++- .../types/src/proto/evnode.v1.messages.rs | 147 +++++++- .../types/src/proto/evnode.v1.services.rs | 306 +++++++++++++++- core/README.md | 11 +- core/execution/dummy.go | 4 +- core/execution/dummy_test.go | 14 +- core/execution/execution.go | 19 +- docs/.vitepress/config.ts | 4 - ...r-023-execution-owned-proposer-rotation.md | 83 +++++ docs/adr/adr-023-proposer-key-rotation.md | 165 --------- .../custom/implement-executor.md | 33 +- docs/guides/create-genesis.md | 4 - .../operations/proposer-key-rotation.md | 186 ---------- docs/guides/operations/upgrades.md | 6 - docs/reference/interfaces/executor.md | 20 +- execution/evm/execution.go | 22 +- execution/evm/go.mod | 5 + execution/evm/test/go.mod 
| 1 + execution/grpc/client.go | 12 +- execution/grpc/client_test.go | 16 +- execution/grpc/go.mod | 5 + execution/grpc/server.go | 8 +- execution/grpc/server_test.go | 11 +- go.mod | 2 + node/execution_test.go | 2 +- node/failover.go | 2 +- node/full.go | 2 +- pkg/genesis/genesis.go | 34 +- pkg/genesis/genesis_test.go | 175 --------- pkg/genesis/io.go | 4 +- pkg/genesis/proposer_schedule.go | 213 ----------- pkg/genesis/proposer_schedule_test.go | 341 ------------------ pkg/rpc/server/server.go | 11 +- pkg/telemetry/executor_tracing.go | 9 +- pkg/telemetry/executor_tracing_test.go | 4 +- proto/evnode/v1/evnode.proto | 2 + proto/evnode/v1/execution.proto | 8 + proto/evnode/v1/state.proto | 1 + test/e2e/go.mod | 1 + test/mocks/execution.go | 38 +- test/mocks/height_aware_executor.go | 27 +- types/header.go | 14 +- types/pb/evnode/v1/evnode.pb.go | 20 +- types/pb/evnode/v1/execution.pb.go | 42 ++- types/pb/evnode/v1/state.pb.go | 36 +- types/serialization.go | 50 ++- types/state.go | 33 +- 67 files changed, 1180 insertions(+), 1888 deletions(-) create mode 100644 docs/adr/adr-023-execution-owned-proposer-rotation.md delete mode 100644 docs/adr/adr-023-proposer-key-rotation.md delete mode 100644 docs/guides/operations/proposer-key-rotation.md delete mode 100644 pkg/genesis/proposer_schedule.go delete mode 100644 pkg/genesis/proposer_schedule_test.go diff --git a/apps/evm/go.mod b/apps/evm/go.mod index 2dcdda8469..a215b11b07 100644 --- a/apps/evm/go.mod +++ b/apps/evm/go.mod @@ -4,6 +4,7 @@ go 1.25.7 replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/evm => ../../execution/evm ) diff --git a/apps/grpc/go.mod b/apps/grpc/go.mod index 66caa09cb6..64a32c143d 100644 --- a/apps/grpc/go.mod +++ b/apps/grpc/go.mod @@ -4,6 +4,7 @@ go 1.25.7 replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/grpc => 
../../execution/grpc ) diff --git a/apps/testapp/go.mod b/apps/testapp/go.mod index 652a4615c0..7285464eb2 100644 --- a/apps/testapp/go.mod +++ b/apps/testapp/go.mod @@ -2,7 +2,10 @@ module github.com/evstack/ev-node/apps/testapp go 1.25.7 -replace github.com/evstack/ev-node => ../../. +replace ( + github.com/evstack/ev-node => ../../. + github.com/evstack/ev-node/core => ../../core +) require ( github.com/evstack/ev-node v1.1.1 diff --git a/apps/testapp/kv/kvexecutor.go b/apps/testapp/kv/kvexecutor.go index aef3aedf3a..1a3ec4b776 100644 --- a/apps/testapp/kv/kvexecutor.go +++ b/apps/testapp/kv/kvexecutor.go @@ -239,16 +239,16 @@ func (k *KVExecutor) GetTxs(ctx context.Context) ([][]byte, error) { // ExecuteTxs processes each transaction assumed to be in the format "key=value". // It updates the database accordingly using a batch and removes the executed transactions from the mempool. // Invalid transactions are filtered out and logged, but execution continues. -func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { select { case <-ctx.Done(): - return nil, ctx.Err() + return execution.ExecuteResult{}, ctx.Err() default: } batch, err := k.db.Batch(ctx) if err != nil { - return nil, fmt.Errorf("failed to create database batch: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to create database batch: %w", err) } validTxCount := 0 @@ -291,7 +291,7 @@ func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight u err = batch.Put(ctx, dsKey, []byte(value)) if err != nil { // This error is unlikely for Put unless the context is cancelled. 
- return nil, fmt.Errorf("failed to stage put operation in batch for key '%s': %w", key, err) + return execution.ExecuteResult{}, fmt.Errorf("failed to stage put operation in batch for key '%s': %w", key, err) } validTxCount++ } @@ -304,7 +304,7 @@ func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight u // Commit the batch to apply all changes atomically err = batch.Commit(ctx) if err != nil { - return nil, fmt.Errorf("failed to commit transaction batch: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to commit transaction batch: %w", err) } k.blocksProduced.Add(1) @@ -315,10 +315,10 @@ func (k *KVExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight u if err != nil { // This is problematic, state was changed but root calculation failed. // May need more robust error handling or recovery logic. - return nil, fmt.Errorf("failed to compute state root after executing transactions: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to compute state root after executing transactions: %w", err) } - return stateRoot, nil + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } // SetFinal marks a block as finalized at the specified height. 
diff --git a/apps/testapp/kv/kvexecutor_test.go b/apps/testapp/kv/kvexecutor_test.go index 97280aee10..486fa576f8 100644 --- a/apps/testapp/kv/kvexecutor_test.go +++ b/apps/testapp/kv/kvexecutor_test.go @@ -105,13 +105,13 @@ func TestExecuteTxs_Valid(t *testing.T) { []byte("key2=value2"), } - stateRoot, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) + result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) if err != nil { t.Fatalf("ExecuteTxs failed: %v", err) } // Check that stateRoot contains the updated key-value pairs - rootStr := string(stateRoot) + rootStr := string(result.UpdatedStateRoot) if !strings.Contains(rootStr, "key1:value1;") || !strings.Contains(rootStr, "key2:value2;") { t.Errorf("State root does not contain expected key-values: %s", rootStr) } @@ -134,13 +134,13 @@ func TestExecuteTxs_Invalid(t *testing.T) { []byte(""), } - stateRoot, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) + result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), []byte("")) if err != nil { t.Fatalf("ExecuteTxs should handle gibberish gracefully, got error: %v", err) } // State root should still be computed (empty block is valid) - if stateRoot == nil { + if result.UpdatedStateRoot == nil { t.Error("Expected non-nil state root even with all invalid transactions") } @@ -152,13 +152,13 @@ func TestExecuteTxs_Invalid(t *testing.T) { []byte(""), } - stateRoot2, err := exec.ExecuteTxs(ctx, mixedTxs, 2, time.Now(), stateRoot) + result2, err := exec.ExecuteTxs(ctx, mixedTxs, 2, time.Now(), result.UpdatedStateRoot) if err != nil { t.Fatalf("ExecuteTxs should filter invalid transactions and process valid ones, got error: %v", err) } // State root should contain only the valid transactions - rootStr := string(stateRoot2) + rootStr := string(result2.UpdatedStateRoot) if !strings.Contains(rootStr, "valid_key:valid_value") || !strings.Contains(rootStr, "another_valid:value2") { t.Errorf("State root should contain valid transactions: %s", rootStr) 
} diff --git a/block/internal/common/replay.go b/block/internal/common/replay.go index ba13a5a4b7..a120450d22 100644 --- a/block/internal/common/replay.go +++ b/block/internal/common/replay.go @@ -152,11 +152,12 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { if height == s.genesis.InitialHeight { // For the first block, use genesis state. prevState = types.State{ - ChainID: s.genesis.ChainID, - InitialHeight: s.genesis.InitialHeight, - LastBlockHeight: s.genesis.InitialHeight - 1, - LastBlockTime: s.genesis.StartTime, - AppHash: header.AppHash, // Genesis app hash (input to first block execution) + ChainID: s.genesis.ChainID, + InitialHeight: s.genesis.InitialHeight, + LastBlockHeight: s.genesis.InitialHeight - 1, + LastBlockTime: s.genesis.StartTime, + AppHash: header.AppHash, // Genesis app hash (input to first block execution) + NextProposerAddress: append([]byte(nil), s.genesis.ProposerAddress...), } } else { // GetStateAtHeight(height-1) returns the state AFTER block height-1 was executed, @@ -179,10 +180,25 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { Int("tx_count", len(rawTxs)). 
Msg("executing transactions on execution layer") - newAppHash, err := s.exec.ExecuteTxs(ctx, rawTxs, height, header.Time(), prevState.AppHash) + result, err := s.exec.ExecuteTxs(ctx, rawTxs, height, header.Time(), prevState.AppHash) if err != nil { return fmt.Errorf("failed to execute transactions: %w", err) } + newAppHash := result.UpdatedStateRoot + if len(result.NextProposerAddress) > 0 { + if len(header.NextProposerAddress) == 0 { + return fmt.Errorf("next proposer mismatch at height %d: header empty, execution %x", height, result.NextProposerAddress) + } + if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { + return fmt.Errorf("next proposer mismatch at height %d: header %x, execution %x", + height, + header.NextProposerAddress, + result.NextProposerAddress, + ) + } + } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { + return fmt.Errorf("next proposer mismatch at height %d: header %x, execution unchanged", height, header.NextProposerAddress) + } // The result of ExecuteTxs (newAppHash) should match the stored state at this height. // Note: header.AppHash is the PREVIOUS state's app hash (input), not the expected output. @@ -207,6 +223,22 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { Msg("app hash mismatch during replay") return err } + if len(expectedState.NextProposerAddress) > 0 { + expectedNextProposer := header.NextProposerAddress + if len(expectedNextProposer) == 0 { + expectedNextProposer = result.NextProposerAddress + } + if len(expectedNextProposer) == 0 { + expectedNextProposer = header.ProposerAddress + } + if !bytes.Equal(expectedNextProposer, expectedState.NextProposerAddress) { + return fmt.Errorf("next proposer mismatch at height %d: expected %x got %x", + height, + expectedState.NextProposerAddress, + expectedNextProposer, + ) + } + } s.logger.Debug(). Uint64("height", height). Str("app_hash", hex.EncodeToString(newAppHash)). 
diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index 3fe5ba5d10..de825db24b 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -121,14 +121,6 @@ func NewExecutor( return nil, errors.New("signer cannot be nil") } - addr, err := signer.GetAddress() - if err != nil { - return nil, fmt.Errorf("failed to get address: %w", err) - } - - if !genesis.HasScheduledProposer(addr) { - return nil, common.ErrNotProposer - } } if raftNode != nil && reflect.ValueOf(raftNode).IsNil() { raftNode = nil @@ -242,15 +234,22 @@ func (e *Executor) initializeState() error { } state = types.State{ - ChainID: e.genesis.ChainID, - InitialHeight: e.genesis.InitialHeight, - LastBlockHeight: e.genesis.InitialHeight - 1, - LastBlockTime: e.genesis.StartTime, - AppHash: stateRoot, + ChainID: e.genesis.ChainID, + InitialHeight: e.genesis.InitialHeight, + LastBlockHeight: e.genesis.InitialHeight - 1, + LastBlockTime: e.genesis.StartTime, + AppHash: stateRoot, + NextProposerAddress: e.initialProposerAddress(e.ctx), // DA start height is usually 0 at InitChain unless it is a re-genesis or a based sequencer. DAHeight: e.genesis.DAStartHeight, } } + if len(state.NextProposerAddress) == 0 { + state.NextProposerAddress = e.initialProposerAddress(e.ctx) + } + if err := e.assertConfiguredSigner(state.NextProposerAddress); err != nil { + return err + } if e.raftNode != nil { // Ensure node is fully synced before producing any blocks @@ -379,6 +378,32 @@ func (e *Executor) initializeState() error { return nil } +func (e *Executor) initialProposerAddress(ctx context.Context) []byte { + if e.exec != nil { + info, err := e.exec.GetExecutionInfo(ctx) + if err != nil { + e.logger.Warn().Err(err).Msg("failed to get execution info for proposer, falling back to genesis proposer") + } else if len(info.NextProposerAddress) > 0 { + return append([]byte(nil), info.NextProposerAddress...) 
+ } + } + return append([]byte(nil), e.genesis.ProposerAddress...) +} + +func (e *Executor) assertConfiguredSigner(expectedProposer []byte) error { + if e.config.Node.BasedSequencer { + return nil + } + addr, err := e.signer.GetAddress() + if err != nil { + return fmt.Errorf("failed to get address: %w", err) + } + if !bytes.Equal(addr, expectedProposer) { + return common.ErrNotProposer + } + return nil +} + // executionLoop handles block production and aggregation func (e *Executor) executionLoop() { e.logger.Info().Msg("starting execution loop") @@ -547,6 +572,13 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to apply block: %w", err) } + if !bytes.Equal(newState.NextProposerAddress, header.ProposerAddress) { + header.NextProposerAddress = append([]byte(nil), newState.NextProposerAddress...) + header.InvalidateHash() + } else if len(header.NextProposerAddress) > 0 { + header.NextProposerAddress = nil + header.InvalidateHash() + } // set the DA height in the sequencer newState.DAHeight = e.sequencer.GetDAHeight() @@ -696,9 +728,9 @@ func (e *Executor) RetrieveBatch(ctx context.Context) (*BatchData, error) { func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *BatchData) (*types.SignedHeader, *types.Data, error) { currentState := e.getLastState() headerTime := uint64(e.genesis.StartTime.UnixNano()) - proposer, err := e.genesis.ProposerAtHeight(height) - if err != nil { - return nil, nil, fmt.Errorf("resolve proposer for height %d: %w", height, err) + proposerAddress := currentState.NextProposerAddress + if len(proposerAddress) == 0 { + proposerAddress = e.genesis.ProposerAddress } var lastHeaderHash types.Hash @@ -732,35 +764,29 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba // Get signer info and validator hash var pubKey crypto.PubKey - var signerAddress []byte var validatorHash types.Hash if e.signer != nil { + var err error pubKey, err = 
e.signer.GetPublic() if err != nil { return nil, nil, fmt.Errorf("failed to get public key: %w", err) } - - signerAddress, err = e.signer.GetAddress() + addr, err := e.signer.GetAddress() if err != nil { - return nil, nil, fmt.Errorf("failed to get signer address: %w", err) + return nil, nil, fmt.Errorf("failed to get address: %w", err) } - - if err := e.genesis.ValidateProposer(height, signerAddress, pubKey); err != nil { - return nil, nil, fmt.Errorf("signer does not match proposer schedule: %w", err) + if !bytes.Equal(addr, proposerAddress) { + return nil, nil, common.ErrNotProposer } - validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) + validatorHash, err = e.options.ValidatorHasherProvider(proposerAddress, pubKey) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } } else { - pubKey, err = proposer.PublicKey() - if err != nil { - return nil, nil, fmt.Errorf("failed to get scheduled proposer public key: %w", err) - } - - validatorHash, err = e.options.ValidatorHasherProvider(proposer.Address, pubKey) + var err error + validatorHash, err = e.options.ValidatorHasherProvider(proposerAddress, nil) if err != nil { return nil, nil, fmt.Errorf("failed to get validator hash: %w", err) } @@ -780,13 +806,13 @@ func (e *Executor) CreateBlock(ctx context.Context, height uint64, batchData *Ba }, LastHeaderHash: lastHeaderHash, AppHash: currentState.AppHash, - ProposerAddress: proposer.Address, + ProposerAddress: proposerAddress, ValidatorHash: validatorHash, }, Signature: lastSignature, Signer: types.Signer{ PubKey: pubKey, - Address: proposer.Address, + Address: proposerAddress, }, } @@ -830,14 +856,24 @@ func (e *Executor) ApplyBlock(ctx context.Context, header types.Header, data *ty // Execute transactions execCtx := context.WithValue(ctx, types.HeaderContextKey, header) - newAppHash, err := e.executeTxsWithRetry(execCtx, rawTxs, header, currentState) + result, err := e.executeTxsWithRetry(execCtx, rawTxs, 
header, currentState) if err != nil { e.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } + if len(result.NextProposerAddress) > 0 { + if len(header.NextProposerAddress) == 0 { + header.NextProposerAddress = append([]byte(nil), result.NextProposerAddress...) + } else if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) + } + header.InvalidateHash() + } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) + } // Create new state - newState, err := currentState.NextState(header, newAppHash) + newState, err := currentState.NextState(header, result.UpdatedStateRoot) if err != nil { return types.State{}, fmt.Errorf("failed to create next state: %w", err) } @@ -868,12 +904,12 @@ func (e *Executor) signHeader(ctx context.Context, header *types.Header) (types. // executeTxsWithRetry executes transactions with retry logic. // NOTE: the function retries the execution client call regardless of the error. Some execution clients errors are irrecoverable, and will eventually halt the node, as expected. 
-func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) ([]byte, error) { +func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) (coreexecutor.ExecuteResult, error) { for attempt := 1; attempt <= common.MaxRetriesBeforeHalt; attempt++ { - newAppHash, err := e.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) + result, err := e.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) if err != nil { if attempt == common.MaxRetriesBeforeHalt { - return nil, fmt.Errorf("failed to execute transactions: %w", err) + return coreexecutor.ExecuteResult{}, fmt.Errorf("failed to execute transactions: %w", err) } e.logger.Error().Err(err). @@ -886,14 +922,14 @@ func (e *Executor) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, hea case <-time.After(common.MaxRetriesTimeout): continue case <-e.ctx.Done(): - return nil, fmt.Errorf("context cancelled during retry: %w", e.ctx.Err()) + return coreexecutor.ExecuteResult{}, fmt.Errorf("context cancelled during retry: %w", e.ctx.Err()) } } - return newAppHash, nil + return result, nil } - return nil, nil + return coreexecutor.ExecuteResult{}, nil } // sendCriticalError sends a critical error to the error channel without blocking diff --git a/block/internal/executing/executor_benchmark_test.go b/block/internal/executing/executor_benchmark_test.go index be71d8fe26..da13a5f760 100644 --- a/block/internal/executing/executor_benchmark_test.go +++ b/block/internal/executing/executor_benchmark_test.go @@ -149,8 +149,8 @@ func (s *stubExecClient) InitChain(context.Context, time.Time, uint64, string) ( return s.stateRoot, nil } func (s *stubExecClient) GetTxs(context.Context) ([][]byte, error) { return nil, nil } -func (s *stubExecClient) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) ([]byte, error) { - return 
s.stateRoot, nil +func (s *stubExecClient) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) (coreexec.ExecuteResult, error) { + return coreexec.ExecuteResult{UpdatedStateRoot: s.stateRoot}, nil } func (s *stubExecClient) SetFinal(context.Context, uint64) error { return nil } func (s *stubExecClient) GetExecutionInfo(context.Context) (coreexec.ExecutionInfo, error) { diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 1498bf5f79..0b1f86769a 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -19,6 +19,7 @@ import ( "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" + coreexec "github.com/evstack/ev-node/core/execution" coreseq "github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" @@ -68,6 +69,43 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { require.NoError(t, err) assert.Equal(t, 0, len(data.Txs)) assert.EqualValues(t, common.DataHashForEmptyTxs, sh.DataHash) + assert.Empty(t, sh.NextProposerAddress) + + state, err := fx.MemStore.GetState(context.Background()) + require.NoError(t, err) + assert.Equal(t, fx.Exec.genesis.ProposerAddress, state.NextProposerAddress) +} + +func TestProduceBlock_CommitsExecutionNextProposer(t *testing.T) { + fx := setupTestExecutor(t, 1000) + defer fx.Cancel() + + nextAddr, _, _ := buildTestSigner(t) + + fx.MockSeq.EXPECT().GetNextBatch(mock.Anything, mock.AnythingOfType("sequencer.GetNextBatchRequest")). 
+ RunAndReturn(func(ctx context.Context, req coreseq.GetNextBatchRequest) (*coreseq.GetNextBatchResponse, error) { + return &coreseq.GetNextBatchResponse{Batch: &coreseq.Batch{Transactions: nil}, Timestamp: time.Now()}, nil + }).Once() + + fx.MockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.AnythingOfType("time.Time"), fx.InitStateRoot). + Return(coreexec.ExecuteResult{ + UpdatedStateRoot: []byte("new_root"), + NextProposerAddress: nextAddr, + }, nil).Once() + + fx.MockSeq.EXPECT().GetDAHeight().Return(uint64(0)).Once() + + require.NoError(t, fx.Exec.ProduceBlock(fx.Exec.ctx)) + + header, data, err := fx.MemStore.GetBlockData(context.Background(), 1) + require.NoError(t, err) + require.NoError(t, header.ValidateBasicWithData(data)) + assert.Equal(t, nextAddr, header.NextProposerAddress) + + state, err := fx.MemStore.GetState(context.Background()) + require.NoError(t, err) + assert.Equal(t, nextAddr, state.NextProposerAddress) + assert.Equal(t, header.Hash(), state.LastHeaderHash) } func TestProduceBlock_OutputPassesValidation(t *testing.T) { @@ -220,7 +258,7 @@ func TestExecutor_executeTxsWithRetry(t *testing.T) { if tt.expectSuccess { require.NoError(t, err) - assert.Equal(t, tt.expectHash, result) + assert.Equal(t, tt.expectHash, result.UpdatedStateRoot) } else { require.Error(t, err) if tt.expectError != "" { diff --git a/block/internal/executing/executor_test.go b/block/internal/executing/executor_test.go index c6f6ccc1a5..1099cdb87d 100644 --- a/block/internal/executing/executor_test.go +++ b/block/internal/executing/executor_test.go @@ -1,28 +1,23 @@ package executing import ( - "context" "testing" "time" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/sync" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/evstack/ev-node/block/internal/cache" "github.com/evstack/ev-node/block/internal/common" - coreseq 
"github.com/evstack/ev-node/core/sequencer" "github.com/evstack/ev-node/pkg/config" "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/pkg/store" "github.com/evstack/ev-node/types" ) -var fixedExecutorTestStartTime = time.Unix(1_700_000_000, 0).UTC() - func TestExecutor_BroadcasterIntegration(t *testing.T) { // Create in-memory store ds := sync.MutexWrap(datastore.NewMapDatastore()) @@ -126,301 +121,3 @@ func TestExecutor_NilBroadcasters(t *testing.T) { assert.Equal(t, cacheManager, executor.cache) assert.Equal(t, gen, executor.genesis) } - -func TestExecutor_CreateBlock_UsesScheduledProposerForHeight(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - metrics := common.NopMetrics() - oldAddr, oldSignerInfo, _ := buildTestSigner(t) - newAddr, newSignerInfo, newSigner := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(2, newSignerInfo.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry1.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - executor, err := NewExecutor( - memStore, - nil, - nil, - newSigner, - cacheManager, - metrics, - config.DefaultConfig(), - gen, - nil, - nil, - zerolog.Nop(), - common.DefaultBlockOptions(), - make(chan error, 1), - nil, - ) - require.NoError(t, err) - - prevHeader := &types.SignedHeader{ - Header: types.Header{ - Version: types.InitStateVersion, - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(gen.StartTime.UnixNano()), - }, - AppHash: []byte("state-root-0"), - ProposerAddress: oldAddr, - DataHash: 
common.DataHashForEmptyTxs, - }, - Signature: types.Signature([]byte("sig-1")), - Signer: oldSignerInfo, - } - prevData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 1, - Time: prevHeader.BaseHeader.Time, - }, - Txs: nil, - } - - batch, err := memStore.NewBatch(context.Background()) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) - require.NoError(t, batch.SetHeight(1)) - require.NoError(t, batch.Commit()) - - executor.setLastState(types.State{ - Version: types.InitStateVersion, - ChainID: gen.ChainID, - InitialHeight: gen.InitialHeight, - LastBlockHeight: 1, - LastBlockTime: prevHeader.Time(), - LastHeaderHash: prevHeader.Hash(), - AppHash: []byte("state-root-1"), - }) - - header, data, err := executor.CreateBlock(context.Background(), 2, &BatchData{ - Batch: &coreseq.Batch{}, - Time: fixedExecutorTestStartTime.Add(time.Second), - }) - require.NoError(t, err) - require.Equal(t, newAddr, header.ProposerAddress) - require.Equal(t, newAddr, header.Signer.Address) - require.Equal(t, uint64(2), data.Height()) -} - -func TestExecutor_CreateBlock_BasedSequencerUsesScheduledPubKey(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - _, signerInfo, _ := buildTestSigner(t) - entry, err := genesis.NewProposerScheduleEntry(1, signerInfo.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - cfg := config.DefaultConfig() - cfg.Node.BasedSequencer = true - - wantValidatorHash := types.Hash{0x01} - hasherCalled := false - options := common.DefaultBlockOptions() - options.ValidatorHasherProvider = 
func(address []byte, pubKey crypto.PubKey) (types.Hash, error) { - hasherCalled = true - require.Equal(t, entry.Address, address) - require.NotNil(t, pubKey) - - marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) - require.NoError(t, err) - require.Equal(t, entry.PubKey, marshalledPubKey) - - return wantValidatorHash, nil - } - - executor, err := NewExecutor( - memStore, - nil, - nil, - nil, - cacheManager, - common.NopMetrics(), - cfg, - gen, - nil, - nil, - zerolog.Nop(), - options, - make(chan error, 1), - nil, - ) - require.NoError(t, err) - - executor.setLastState(types.State{ - Version: types.InitStateVersion, - ChainID: gen.ChainID, - InitialHeight: gen.InitialHeight, - AppHash: []byte("state-root-1"), - }) - - header, data, err := executor.CreateBlock(context.Background(), 1, &BatchData{ - Batch: &coreseq.Batch{}, - Time: fixedExecutorTestStartTime, - }) - require.NoError(t, err) - require.True(t, hasherCalled) - require.Equal(t, wantValidatorHash, header.ValidatorHash) - require.Equal(t, entry.Address, header.Signer.Address) - require.NotNil(t, header.Signer.PubKey) - - marshalledPubKey, err := crypto.MarshalPublicKey(header.Signer.PubKey) - require.NoError(t, err) - require.Equal(t, entry.PubKey, marshalledPubKey) - require.Equal(t, uint64(1), data.Height()) -} - -// TestNewExecutor_RejectsSignerOutsideSchedule verifies that a signer whose -// address does not appear anywhere in the proposer schedule cannot start the -// executor. This prevents a misconfigured replacement key from coming up as -// an aggregator on a chain it was never scheduled on. 
-func TestNewExecutor_RejectsSignerOutsideSchedule(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - _, scheduledSigner, _ := buildTestSigner(t) - _, _, strayerSigner := buildTestSigner(t) - - entry, err := genesis.NewProposerScheduleEntry(1, scheduledSigner.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - _, err = NewExecutor( - memStore, nil, nil, strayerSigner, cacheManager, - common.NopMetrics(), config.DefaultConfig(), gen, - nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), - make(chan error, 1), nil, - ) - require.ErrorIs(t, err, common.ErrNotProposer) -} - -// TestExecutor_CreateBlock_RejectsSignerAtWrongHeight verifies that a signer -// which is scheduled (so startup succeeds) but not active at the current -// height cannot produce a block. This guards the per-height proposer check -// inside CreateBlock — without it, a rotation could be jumped ahead or -// rolled back by whichever signer the operator happens to start. -func TestExecutor_CreateBlock_RejectsSignerAtWrongHeight(t *testing.T) { - ds := sync.MutexWrap(datastore.NewMapDatastore()) - memStore := store.New(ds) - - cacheManager, err := cache.NewManager(config.DefaultConfig(), memStore, zerolog.Nop()) - require.NoError(t, err) - - oldAddr, oldSignerInfo, oldSigner := buildTestSigner(t) - _, newSignerInfo, _ := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(1, oldSignerInfo.PubKey) - require.NoError(t, err) - // Second entry activates at height 5. The old signer is scheduled at - // height 1 and is NOT the proposer for height 5+. 
- entry2, err := genesis.NewProposerScheduleEntry(5, newSignerInfo.PubKey) - require.NoError(t, err) - - gen := genesis.Genesis{ - ChainID: "test-chain", - InitialHeight: 1, - StartTime: fixedExecutorTestStartTime, - ProposerAddress: entry1.Address, - ProposerSchedule: []genesis.ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - // Start the executor as the old signer — it IS in the schedule at - // height 1, so NewExecutor must accept it. - executor, err := NewExecutor( - memStore, nil, nil, oldSigner, cacheManager, - common.NopMetrics(), config.DefaultConfig(), gen, - nil, nil, zerolog.Nop(), common.DefaultBlockOptions(), - make(chan error, 1), nil, - ) - require.NoError(t, err) - - // Seed a height-4 block so CreateBlock(5) has a parent to reference. - prevHeader := &types.SignedHeader{ - Header: types.Header{ - Version: types.InitStateVersion, - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 4, - Time: uint64(fixedExecutorTestStartTime.Add(4 * time.Second).UnixNano()), - }, - AppHash: []byte("state-root-4"), - ProposerAddress: oldAddr, - DataHash: common.DataHashForEmptyTxs, - }, - Signature: types.Signature([]byte("sig-4")), - Signer: oldSignerInfo, - } - prevData := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 4, - Time: prevHeader.BaseHeader.Time, - }, - } - - batch, err := memStore.NewBatch(context.Background()) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(prevHeader, prevData, &prevHeader.Signature)) - require.NoError(t, batch.SetHeight(4)) - require.NoError(t, batch.Commit()) - - executor.setLastState(types.State{ - Version: types.InitStateVersion, - ChainID: gen.ChainID, - InitialHeight: gen.InitialHeight, - LastBlockHeight: 4, - LastBlockTime: prevHeader.Time(), - LastHeaderHash: prevHeader.Hash(), - AppHash: []byte("state-root-4"), - }) - - // Height 5 belongs to the NEW signer per the schedule — the old - // signer must be rejected even though it's a known 
schedule member. - _, _, err = executor.CreateBlock(context.Background(), 5, &BatchData{ - Batch: &coreseq.Batch{}, - Time: fixedExecutorTestStartTime.Add(5 * time.Second), - }) - require.Error(t, err) - require.Contains(t, err.Error(), "proposer") -} diff --git a/block/internal/reaping/bench_test.go b/block/internal/reaping/bench_test.go index 5ec0aaa69d..3c879148a1 100644 --- a/block/internal/reaping/bench_test.go +++ b/block/internal/reaping/bench_test.go @@ -60,8 +60,8 @@ func (e *infiniteExecutor) GetTxs(_ context.Context) ([][]byte, error) { return txs, nil } -func (e *infiniteExecutor) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) ([]byte, error) { - return nil, nil +func (e *infiniteExecutor) ExecuteTxs(_ context.Context, _ [][]byte, _ uint64, _ time.Time, _ []byte) (coreexecutor.ExecuteResult, error) { + return coreexecutor.ExecuteResult{}, nil } func (e *infiniteExecutor) FilterTxs(_ context.Context, txs [][]byte, _ uint64, _ uint64, _ bool) ([]coreexecutor.FilterStatus, error) { diff --git a/block/internal/submitting/da_submitter.go b/block/internal/submitting/da_submitter.go index e53e351832..f5f4a829bf 100644 --- a/block/internal/submitting/da_submitter.go +++ b/block/internal/submitting/da_submitter.go @@ -1,6 +1,7 @@ package submitting import ( + "bytes" "context" "encoding/json" "fmt" @@ -362,6 +363,14 @@ func (s *DASubmitter) signEnvelopesParallel( // signAndCacheEnvelope signs a single header and caches the result. 
func (s *DASubmitter) signAndCacheEnvelope(ctx context.Context, header *types.SignedHeader, marshalledHeader []byte, signer signer.Signer) ([]byte, error) { + addr, err := signer.GetAddress() + if err != nil { + return nil, fmt.Errorf("failed to get signer address: %w", err) + } + if len(header.Signer.Address) > 0 && !bytes.Equal(addr, header.Signer.Address) { + return nil, fmt.Errorf("envelope signer address mismatch: got %x, expected %x", addr, header.Signer.Address) + } + // Sign the pre-marshalled header content envelopeSignature, err := signer.Sign(ctx, marshalledHeader) if err != nil { @@ -460,7 +469,7 @@ func (s *DASubmitter) SubmitData(ctx context.Context, unsignedDataList []*types. } // signData signs unsigned SignedData structs returned from cache -func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer, genesis genesis.Genesis) ([]*types.SignedData, [][]byte, error) { +func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.SignedData, unsignedDataListBz [][]byte, signer signer.Signer, _ genesis.Genesis) ([]*types.SignedData, [][]byte, error) { if signer == nil { return nil, nil, fmt.Errorf("signer is nil") } @@ -489,10 +498,6 @@ func (s *DASubmitter) signData(ctx context.Context, unsignedDataList []*types.Si continue } - if err := genesis.ValidateProposer(unsignedData.Height(), addr, pubKey); err != nil { - return nil, nil, fmt.Errorf("signer does not match proposer schedule for data at height %d: %w", unsignedData.Height(), err) - } - signature, err := signer.Sign(ctx, unsignedDataListBz[i]) if err != nil { return nil, nil, fmt.Errorf("failed to sign data: %w", err) diff --git a/block/internal/submitting/da_submitter_test.go b/block/internal/submitting/da_submitter_test.go index 9c55b9bd6c..d25786018b 100644 --- a/block/internal/submitting/da_submitter_test.go +++ b/block/internal/submitting/da_submitter_test.go @@ -343,97 +343,6 @@ func 
TestDASubmitter_SubmitData_Success(t *testing.T) { assert.True(t, ok) } -func TestDASubmitter_SubmitData_UsesScheduledProposerForHeight(t *testing.T) { - submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) - ctx := context.Background() - dataNamespace := datypes.NamespaceFromString(testDataNamespace).Bytes() - - mockDA.On( - "Submit", - mock.Anything, - mock.AnythingOfType("[][]uint8"), - mock.AnythingOfType("float64"), - dataNamespace, - mock.Anything, - ).Return(func(_ context.Context, blobs [][]byte, _ float64, _ []byte, _ []byte) datypes.ResultSubmit { - return datypes.ResultSubmit{BaseResult: datypes.BaseResult{Code: datypes.StatusSuccess, SubmittedCount: uint64(len(blobs)), Height: 2}} - }).Once() - - oldAddr, oldPub, _ := createTestSigner(t) - nextAddr, nextPub, nextSigner := createTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(gen.InitialHeight, oldPub) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(2, nextPub) - require.NoError(t, err) - - gen.ProposerAddress = entry1.Address - gen.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} - submitter.genesis = gen - - data1 := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - Txs: types.Txs{}, - } - - header1 := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 1, - Time: uint64(time.Now().UnixNano()), - }, - ProposerAddress: oldAddr, - DataHash: common.DataHashForEmptyTxs, - }, - Signer: types.Signer{PubKey: oldPub, Address: oldAddr}, - } - - data := &types.Data{ - Metadata: &types.Metadata{ - ChainID: gen.ChainID, - Height: 2, - Time: uint64(time.Now().UnixNano()), - }, - Txs: types.Txs{types.Tx("rotated-key-tx")}, - } - - header := &types.SignedHeader{ - Header: types.Header{ - BaseHeader: types.BaseHeader{ - ChainID: gen.ChainID, - Height: 2, - Time: uint64(time.Now().UnixNano()), - }, - ProposerAddress: 
nextAddr, - DataHash: data.DACommitment(), - }, - Signer: types.Signer{PubKey: nextPub, Address: nextAddr}, - } - - sig1 := types.Signature([]byte("sig-1")) - sig2 := types.Signature([]byte("sig-2")) - batch, err := st.NewBatch(ctx) - require.NoError(t, err) - require.NoError(t, batch.SaveBlockData(header1, data1, &sig1)) - require.NoError(t, batch.SaveBlockData(header, data, &sig2)) - require.NoError(t, batch.SetHeight(2)) - require.NoError(t, batch.Commit()) - - signedDataList, marshalledData, err := cm.GetPendingData(ctx) - require.NoError(t, err) - err = submitter.SubmitData(ctx, signedDataList, marshalledData, cm, nextSigner, gen) - require.NoError(t, err) - - _, ok := cm.GetDataDAIncludedByHeight(2) - assert.True(t, ok) - assert.NotEqual(t, oldAddr, nextAddr) -} - func TestDASubmitter_SubmitData_SkipsEmptyData(t *testing.T) { submitter, st, cm, mockDA, gen := setupDASubmitterTest(t) ctx := context.Background() diff --git a/block/internal/syncing/assert.go b/block/internal/syncing/assert.go index 1bed6db8b9..3a23a06876 100644 --- a/block/internal/syncing/assert.go +++ b/block/internal/syncing/assert.go @@ -4,27 +4,16 @@ import ( "errors" "fmt" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/evstack/ev-node/pkg/genesis" "github.com/evstack/ev-node/types" ) -func assertExpectedProposer(genesis genesis.Genesis, height uint64, proposerAddr []byte, pubKey crypto.PubKey) error { - if err := genesis.ValidateProposer(height, proposerAddr, pubKey); err != nil { - return fmt.Errorf("unexpected proposer at height %d: %w", height, err) - } - - return nil -} - func assertValidSignedData(signedData *types.SignedData, genesis genesis.Genesis) error { if signedData == nil || signedData.Txs == nil { return errors.New("empty signed data") } - - if err := assertExpectedProposer(genesis, signedData.Height(), signedData.Signer.Address, signedData.Signer.PubKey); err != nil { - return err + if signedData.Signer.PubKey == nil { + return errors.New("missing signer public 
key in signed data") } dataBytes, err := signedData.Data.MarshalBinary() diff --git a/block/internal/syncing/da_retriever.go b/block/internal/syncing/da_retriever.go index 75bc631e8f..62405cf61d 100644 --- a/block/internal/syncing/da_retriever.go +++ b/block/internal/syncing/da_retriever.go @@ -299,11 +299,6 @@ func (r *daRetriever) tryDecodeHeader(bz []byte, daHeight uint64) *types.SignedH return nil } - if err := r.assertExpectedProposer(header); err != nil { - r.logger.Debug().Err(err).Msg("unexpected proposer") - return nil - } - if isValidEnvelope && !r.strictMode { r.logger.Info().Uint64("height", header.Height()).Msg("valid DA envelope detected, switching to STRICT MODE") r.strictMode = true @@ -355,11 +350,6 @@ func (r *daRetriever) tryDecodeData(bz []byte, daHeight uint64) *types.Data { return &signedData.Data } -// assertExpectedProposer validates the proposer schedule entry for the header height. -func (r *daRetriever) assertExpectedProposer(header *types.SignedHeader) error { - return assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) -} - // assertValidSignedData validates signed data using the configured signature provider func (r *daRetriever) assertValidSignedData(signedData *types.SignedData) error { return assertValidSignedData(signedData, r.genesis) diff --git a/block/internal/syncing/da_retriever_test.go b/block/internal/syncing/da_retriever_test.go index 3b587def1f..c2786c36b0 100644 --- a/block/internal/syncing/da_retriever_test.go +++ b/block/internal/syncing/da_retriever_test.go @@ -215,15 +215,18 @@ func TestDARetriever_TryDecodeHeaderAndData_Basic(t *testing.T) { assert.Nil(t, r.tryDecodeData([]byte("junk"), 1)) } -func TestDARetriever_tryDecodeData_InvalidSignatureOrProposer(t *testing.T) { +func TestDARetriever_tryDecodeData_InvalidSignature(t *testing.T) { - goodAddr, pub, signer := buildSyncTestSigner(t) - badAddr := []byte("not-the-proposer") - gen := genesis.Genesis{ChainID: "tchain", 
InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: badAddr} + addr, pub, signer := buildSyncTestSigner(t) + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} r := newTestDARetriever(t, nil, config.DefaultConfig(), gen) - // Signed data is made by goodAddr; retriever expects badAddr -> should be rejected - db, _ := makeSignedDataBytes(t, gen.ChainID, 7, goodAddr, pub, signer, 1) + _, signedData := makeSignedDataBytes(t, gen.ChainID, 7, addr, pub, signer, 1) + require.NotEmpty(t, signedData.Signature) + signedData.Signature[0] ^= 0x01 + db, err := signedData.MarshalBinary() + require.NoError(t, err) + assert.Nil(t, r.tryDecodeData(db, 55)) } diff --git a/block/internal/syncing/p2p_handler.go b/block/internal/syncing/p2p_handler.go index 2150bd2933..87a8b6a093 100644 --- a/block/internal/syncing/p2p_handler.go +++ b/block/internal/syncing/p2p_handler.go @@ -81,19 +81,6 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC } return err } - if headerHeight := p2pHeader.Height(); headerHeight != height { - err := fmt.Errorf("header height mismatch: requested %d, got %d", height, headerHeight) - h.logger.Warn(). - Uint64("requested_height", height). - Uint64("header_height", headerHeight). - Err(err). - Msg("discarding mismatched header from P2P") - return err - } - if err := h.assertExpectedProposer(p2pHeader.SignedHeader); err != nil { - h.logger.Debug().Uint64("height", height).Err(err).Msg("invalid header from P2P") - return err - } p2pData, err := h.dataStore.GetByHeight(ctx, height) if err != nil { @@ -133,8 +120,3 @@ func (h *P2PHandler) ProcessHeight(ctx context.Context, height uint64, heightInC h.logger.Debug().Uint64("height", height).Msg("processed event from P2P") return nil } - -// assertExpectedProposer validates the proposer schedule entry for the header height. 
-func (h *P2PHandler) assertExpectedProposer(header *types.SignedHeader) error { - return assertExpectedProposer(h.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey) -} diff --git a/block/internal/syncing/p2p_handler_test.go b/block/internal/syncing/p2p_handler_test.go index 84a875ffbf..e92a996550 100644 --- a/block/internal/syncing/p2p_handler_test.go +++ b/block/internal/syncing/p2p_handler_test.go @@ -194,7 +194,7 @@ func TestP2PHandler_ProcessHeight_SkipsWhenHeaderMissing(t *testing.T) { p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(9)) } -func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { +func TestP2PHandler_ProcessHeight_AcceptsNonGenesisProposer(t *testing.T) { p := setupP2P(t) ctx := context.Background() var err error @@ -203,58 +203,11 @@ func TestP2PHandler_ProcessHeight_SkipsOnProposerMismatch(t *testing.T) { require.NotEqual(t, string(p.Genesis.ProposerAddress), string(badAddr)) header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, badAddr, pub, signer) - header.DataHash = common.DataHashForEmptyTxs - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(11)).Return(header, nil).Once() - - ch := make(chan common.DAHeightEvent, 1) - err = p.Handler.ProcessHeight(ctx, 11, ch) - require.Error(t, err) - - require.Empty(t, collectEvents(t, ch, 50*time.Millisecond)) - p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(11)) -} - -func TestP2PHandler_ProcessHeight_RejectsHeaderHeightMismatch(t *testing.T) { - p := setupP2P(t) - ctx := context.Background() - - header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 12, p.ProposerAddr, p.ProposerPub, p.Signer) - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() - - ch := make(chan common.DAHeightEvent, 1) - err := p.Handler.ProcessHeight(ctx, 10, ch) - require.Error(t, err) - require.Contains(t, err.Error(), "header height mismatch") - - require.Empty(t, collectEvents(t, ch, 
50*time.Millisecond)) - p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) -} - -func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) { - p := setupP2P(t) - ctx := context.Background() - - nextAddr, nextPub, nextSigner := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) - require.NoError(t, err) - - p.Genesis.ProposerAddress = entry1.Address - p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} - p.Genesis.DAEpochForcedInclusion = 1 - require.NoError(t, p.Genesis.Validate()) - p.Handler.genesis = p.Genesis - - header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 11, nextAddr, nextPub, nextSigner) data := &types.P2PData{Data: makeData(p.Genesis.ChainID, 11, 1)} header.DataHash = data.DACommitment() bz, err := types.DefaultAggregatorNodeSignatureBytesProvider(&header.Header) require.NoError(t, err) - sig, err := nextSigner.Sign(t.Context(), bz) + sig, err := signer.Sign(t.Context(), bz) require.NoError(t, err) header.Signature = sig @@ -267,45 +220,7 @@ func TestP2PHandler_ProcessHeight_AllowsScheduledProposerRotation(t *testing.T) events := collectEvents(t, ch, 50*time.Millisecond) require.Len(t, events, 1) - require.Equal(t, nextAddr, events[0].Header.ProposerAddress) -} - -// TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation verifies -// the counterpart to the rotation-allows test: a signer that IS in the schedule -// but only active at a later height must not be accepted for blocks before the -// activation height. Without the per-height check, any scheduled signer could -// forge blocks outside their active window. 
-func TestP2PHandler_ProcessHeight_RejectsScheduledProposerBeforeActivation(t *testing.T) { - p := setupP2P(t) - ctx := context.Background() - - nextAddr, nextPub, nextSigner := buildTestSigner(t) - - entry1, err := genesis.NewProposerScheduleEntry(p.Genesis.InitialHeight, p.ProposerPub) - require.NoError(t, err) - entry2, err := genesis.NewProposerScheduleEntry(11, nextPub) - require.NoError(t, err) - - p.Genesis.ProposerAddress = entry1.Address - p.Genesis.ProposerSchedule = []genesis.ProposerScheduleEntry{entry1, entry2} - p.Genesis.DAEpochForcedInclusion = 1 - require.NoError(t, p.Genesis.Validate()) - p.Handler.genesis = p.Genesis - - // entry2 is scheduled but only active at height 11. Height 10 still - // belongs to entry1, so a header from the next signer at height 10 - // must be rejected. - header := p2pMakeSignedHeader(t, p.Genesis.ChainID, 10, nextAddr, nextPub, nextSigner) - header.DataHash = common.DataHashForEmptyTxs - - p.HeaderStore.EXPECT().GetByHeight(mock.Anything, uint64(10)).Return(header, nil).Once() - - ch := make(chan common.DAHeightEvent, 1) - err = p.Handler.ProcessHeight(ctx, 10, ch) - require.Error(t, err) - - require.Empty(t, collectEvents(t, ch, 50*time.Millisecond)) - p.DataStore.AssertNotCalled(t, "GetByHeight", mock.Anything, uint64(10)) + require.Equal(t, badAddr, events[0].Header.ProposerAddress) } func TestP2PHandler_ProcessedHeightSkipsPreviouslyHandledBlocks(t *testing.T) { diff --git a/block/internal/syncing/raft_retriever.go b/block/internal/syncing/raft_retriever.go index 4cb15aec07..a0a527f208 100644 --- a/block/internal/syncing/raft_retriever.go +++ b/block/internal/syncing/raft_retriever.go @@ -125,10 +125,6 @@ func (r *raftRetriever) consumeRaftBlock(ctx context.Context, state *raft.RaftBl r.logger.Debug().Err(err).Msg("invalid header structure") return nil } - if err := assertExpectedProposer(r.genesis, header.Height(), header.ProposerAddress, header.Signer.PubKey); err != nil { - 
r.logger.Debug().Err(err).Msg("unexpected proposer") - return nil - } var data types.Data if err := data.UnmarshalBinary(state.Data); err != nil { diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 40e3c9523f..9bcbd0f3ee 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -320,14 +320,18 @@ func (s *Syncer) initializeState() error { } state = types.State{ - ChainID: s.genesis.ChainID, - InitialHeight: s.genesis.InitialHeight, - LastBlockHeight: s.genesis.InitialHeight - 1, - LastBlockTime: s.genesis.StartTime, - DAHeight: s.genesis.DAStartHeight, - AppHash: stateRoot, + ChainID: s.genesis.ChainID, + InitialHeight: s.genesis.InitialHeight, + LastBlockHeight: s.genesis.InitialHeight - 1, + LastBlockTime: s.genesis.StartTime, + DAHeight: s.genesis.DAStartHeight, + AppHash: stateRoot, + NextProposerAddress: s.initialProposerAddress(s.ctx), } } + if len(state.NextProposerAddress) == 0 { + state.NextProposerAddress = s.initialProposerAddress(s.ctx) + } if state.DAHeight != 0 && state.DAHeight < s.genesis.DAStartHeight { return fmt.Errorf("DA height (%d) is lower than DA start height (%d)", state.DAHeight, s.genesis.DAStartHeight) } @@ -398,6 +402,18 @@ func (s *Syncer) initializeState() error { return nil } +func (s *Syncer) initialProposerAddress(ctx context.Context) []byte { + if s.exec != nil { + info, err := s.exec.GetExecutionInfo(ctx) + if err != nil { + s.logger.Warn().Err(err).Msg("failed to get execution info for proposer, falling back to genesis proposer") + } else if len(info.NextProposerAddress) > 0 { + return append([]byte(nil), info.NextProposerAddress...) + } + } + return append([]byte(nil), s.genesis.ProposerAddress...) 
+} + // processLoop is the main coordination loop for processing events func (s *Syncer) processLoop(ctx context.Context) { s.logger.Info().Msg("starting process loop") @@ -816,14 +832,24 @@ func (s *Syncer) ApplyBlock(ctx context.Context, header types.Header, data *type // Execute transactions ctx = context.WithValue(ctx, types.HeaderContextKey, header) - newAppHash, err := s.executeTxsWithRetry(ctx, rawTxs, header, currentState) + result, err := s.executeTxsWithRetry(ctx, rawTxs, header, currentState) if err != nil { s.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } + if len(result.NextProposerAddress) > 0 { + if len(header.NextProposerAddress) == 0 { + return types.State{}, fmt.Errorf("next proposer mismatch: header empty, execution %x", result.NextProposerAddress) + } + if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) + } + } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { + return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) + } // Create new state - newState, err := currentState.NextState(header, newAppHash) + newState, err := currentState.NextState(header, result.UpdatedStateRoot) if err != nil { return types.State{}, fmt.Errorf("failed to create next state: %w", err) } @@ -833,12 +859,12 @@ func (s *Syncer) ApplyBlock(ctx context.Context, header types.Header, data *type // executeTxsWithRetry executes transactions with retry logic. // NOTE: the function retries the execution client call regardless of the error. Some execution clients errors are irrecoverable, and will eventually halt the node, as expected. 
-func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) ([]byte, error) { +func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, header types.Header, currentState types.State) (coreexecutor.ExecuteResult, error) { for attempt := 1; attempt <= common.MaxRetriesBeforeHalt; attempt++ { - newAppHash, err := s.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) + result, err := s.exec.ExecuteTxs(ctx, rawTxs, header.Height(), header.Time(), currentState.AppHash) if err != nil { if attempt == common.MaxRetriesBeforeHalt { - return nil, fmt.Errorf("failed to execute transactions: %w", err) + return coreexecutor.ExecuteResult{}, fmt.Errorf("failed to execute transactions: %w", err) } s.logger.Error().Err(err). @@ -851,14 +877,14 @@ func (s *Syncer) executeTxsWithRetry(ctx context.Context, rawTxs [][]byte, heade case <-time.After(common.MaxRetriesTimeout): continue case <-ctx.Done(): - return nil, fmt.Errorf("context cancelled during retry: %w", ctx.Err()) + return coreexecutor.ExecuteResult{}, fmt.Errorf("context cancelled during retry: %w", ctx.Err()) } } - return newAppHash, nil + return result, nil } - return nil, nil + return coreexecutor.ExecuteResult{}, nil } // ValidateBlock validates a synced block diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 67c87e06ed..696d2c939f 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -146,7 +146,7 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { addr, pub, signer := buildSyncTestSigner(t) cfg := config.DefaultConfig() - gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second), ProposerAddress: addr} + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second)} mockExec := testmocks.NewMockExecutor(t) 
mockExec.EXPECT().InitChain(mock.Anything, mock.Anything, uint64(1), "tchain").Return([]byte("app0"), nil).Once() @@ -191,6 +191,60 @@ func TestSyncer_validateBlock_DataHashMismatch(t *testing.T) { require.Error(t, err) } +func TestSyncer_ValidateBlock_UsesStateNextProposer(t *testing.T) { + addr, _, _ := buildSyncTestSigner(t) + badAddr, badPub, badSigner := buildSyncTestSigner(t) + + gen := genesis.Genesis{ChainID: "tchain", InitialHeight: 1, StartTime: time.Now().Add(-time.Second)} + data := makeData(gen.ChainID, 1, 1) + _, header := makeSignedHeaderBytes(t, gen.ChainID, 1, badAddr, badPub, badSigner, []byte("app0"), data, nil) + + s := &Syncer{logger: zerolog.Nop()} + state := types.State{ + ChainID: gen.ChainID, + InitialHeight: gen.InitialHeight, + LastBlockHeight: gen.InitialHeight - 1, + LastBlockTime: gen.StartTime, + AppHash: []byte("app0"), + NextProposerAddress: addr, + } + + err := s.ValidateBlock(t.Context(), state, data, header) + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected proposer") +} + +func TestSyncer_ApplyBlockRejectsExecutionNextProposerMismatch(t *testing.T) { + addr, _, _ := buildSyncTestSigner(t) + headerNext := []byte("header-next-proposer") + execNext := []byte("execution-next-proposer") + + mockExec := testmocks.NewMockExecutor(t) + data := makeData("tchain", 1, 1) + header := types.Header{ + BaseHeader: types.BaseHeader{ChainID: "tchain", Height: 1, Time: uint64(time.Now().UnixNano())}, + ProposerAddress: addr, + NextProposerAddress: headerNext, + } + currentState := types.State{AppHash: []byte("app0"), NextProposerAddress: addr} + + mockExec.EXPECT().ExecuteTxs(mock.Anything, mock.Anything, uint64(1), mock.Anything, currentState.AppHash). 
+ Return(execution.ExecuteResult{ + UpdatedStateRoot: []byte("app1"), + NextProposerAddress: execNext, + }, nil).Once() + + s := &Syncer{ + exec: mockExec, + ctx: t.Context(), + logger: zerolog.Nop(), + } + + _, err := s.ApplyBlock(t.Context(), header, data, currentState) + require.Error(t, err) + require.Contains(t, err.Error(), "next proposer mismatch") +} + func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { ds := dssync.MutexWrap(datastore.NewMapDatastore()) st := store.New(ds) @@ -936,7 +990,7 @@ func TestSyncer_executeTxsWithRetry(t *testing.T) { if tt.expectSuccess { require.NoError(t, err) - assert.Equal(t, tt.expectHash, result) + assert.Equal(t, tt.expectHash, result.UpdatedStateRoot) } else { require.Error(t, err) if tt.expectError != "" { diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs b/client/crates/types/src/proto/evnode.v1.messages.rs index 019046d0b7..495aac85d6 100644 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ b/client/crates/types/src/proto/evnode.v1.messages.rs @@ -65,6 +65,9 @@ pub struct Header { /// Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, + /// Proposer address selected by this block's execution result for the next block. + #[prost(bytes = "vec", tag = "13")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] @@ -76,6 +79,19 @@ pub struct SignedHeader { #[prost(message, optional, tag = "3")] pub signer: ::core::option::Option, } +/// DAHeaderEnvelope is a wrapper around SignedHeader for DA submission. +/// It is binary compatible with SignedHeader (fields 1-3) but adds an envelope signature. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct DaHeaderEnvelope { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(bytes = "vec", tag = "4")] + pub envelope_signature: ::prost::alloc::vec::Vec, +} /// Signer is a signer of a block in the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Signer { @@ -139,6 +155,28 @@ pub struct Vote { #[prost(bytes = "vec", tag = "5")] pub validator_address: ::prost::alloc::vec::Vec, } +/// P2PSignedHeader +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pSignedHeader { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(uint64, optional, tag = "4")] + pub da_height_hint: ::core::option::Option, +} +/// P2PData +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pData { + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + #[prost(bytes = "vec", repeated, tag = "2")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(uint64, optional, tag = "3")] + pub da_height_hint: ::core::option::Option, +} /// State is the state of the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct State { @@ -158,6 +196,26 @@ pub struct State { pub app_hash: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "9")] pub last_header_hash: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "10")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// RaftBlockState represents a replicated block state +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct RaftBlockState { + #[prost(uint64, tag = "1")] + pub height: u64, + #[prost(uint64, tag = "2")] + pub last_submitted_da_header_height: u64, + #[prost(uint64, tag = "3")] + pub last_submitted_da_data_height: u64, + #[prost(bytes = "vec", tag = "4")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "5")] + pub timestamp: u64, + #[prost(bytes = "vec", tag = "6")] + pub header: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "7")] + pub data: ::prost::alloc::vec::Vec, } /// SequencerDACheckpoint tracks the position in the DA where transactions were last processed #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -212,6 +270,17 @@ pub struct Batch { #[prost(bytes = "vec", repeated, tag = "1")] pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } +/// BlockData contains data retrieved from a single DA height. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockData { + #[prost(uint64, tag = "1")] + pub height: u64, + /// Unix timestamp in nanoseconds + #[prost(int64, tag = "2")] + pub timestamp: i64, + #[prost(bytes = "vec", repeated, tag = "3")] + pub blobs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} /// InitChainRequest contains the genesis parameters for chain initialization #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct InitChainRequest { @@ -231,9 +300,6 @@ pub struct InitChainResponse { /// Hash representing initial state #[prost(bytes = "vec", tag = "1")] pub state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed bytes for transactions in a block - #[prost(uint64, tag = "2")] - pub max_bytes: u64, } /// GetTxsRequest is the request for fetching transactions /// @@ -272,6 +338,10 @@ pub struct ExecuteTxsResponse { /// Maximum allowed transaction size (may change with protocol updates) #[prost(uint64, tag = "2")] pub max_bytes: u64, + /// Proposer address that should sign the next block. + /// Empty means the current proposer remains active. 
+ #[prost(bytes = "vec", tag = "3")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SetFinalRequest marks a block as finalized #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -285,6 +355,77 @@ pub struct SetFinalRequest { /// Empty response, errors are returned via gRPC status #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct SetFinalResponse {} +/// GetExecutionInfoRequest requests execution layer parameters +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoRequest {} +/// GetExecutionInfoResponse contains execution layer parameters +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoResponse { + /// Maximum gas allowed for transactions in a block + /// For non-gas-based execution layers, this should be 0 + #[prost(uint64, tag = "1")] + pub max_gas: u64, + /// Proposer address that should sign the next block from the execution + /// layer's current view. Empty means unchanged or unavailable. 
+ #[prost(bytes = "vec", tag = "2")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// FilterTxsRequest contains transactions to validate and filter +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsRequest { + /// All transactions (force-included + mempool) + #[prost(bytes = "vec", repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Maximum cumulative size allowed (0 means no size limit) + #[prost(uint64, tag = "2")] + pub max_bytes: u64, + /// Maximum cumulative gas allowed (0 means no gas limit) + #[prost(uint64, tag = "3")] + pub max_gas: u64, + /// Whether force-included transactions are present + #[prost(bool, tag = "4")] + pub has_force_included_transaction: bool, +} +/// FilterTxsResponse contains the filter status for each transaction +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsResponse { + /// Filter status for each transaction (same length as txs in request) + #[prost(enumeration = "FilterStatus", repeated, tag = "1")] + pub statuses: ::prost::alloc::vec::Vec, +} +/// FilterStatus represents the result of filtering a transaction +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FilterStatus { + /// Transaction will make it to the next batch + FilterOk = 0, + /// Transaction will be filtered out because invalid (too big, malformed, etc.) + FilterRemove = 1, + /// Transaction is valid but postponed for later processing due to size/gas constraint + FilterPostpone = 2, +} +impl FilterStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::FilterOk => "FILTER_OK", + Self::FilterRemove => "FILTER_REMOVE", + Self::FilterPostpone => "FILTER_POSTPONE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FILTER_OK" => Some(Self::FilterOk), + "FILTER_REMOVE" => Some(Self::FilterRemove), + "FILTER_POSTPONE" => Some(Self::FilterPostpone), + _ => None, + } + } +} /// Block contains all the components of a complete block #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.services.rs index b34ae918b7..ef7fed4048 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.services.rs @@ -439,6 +439,9 @@ pub struct Header { /// Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, + /// Proposer address selected by this block's execution result for the next block. + #[prost(bytes = "vec", tag = "13")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] @@ -450,6 +453,19 @@ pub struct SignedHeader { #[prost(message, optional, tag = "3")] pub signer: ::core::option::Option, } +/// DAHeaderEnvelope is a wrapper around SignedHeader for DA submission. +/// It is binary compatible with SignedHeader (fields 1-3) but adds an envelope signature. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct DaHeaderEnvelope { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(bytes = "vec", tag = "4")] + pub envelope_signature: ::prost::alloc::vec::Vec, +} /// Signer is a signer of a block in the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Signer { @@ -513,6 +529,28 @@ pub struct Vote { #[prost(bytes = "vec", tag = "5")] pub validator_address: ::prost::alloc::vec::Vec, } +/// P2PSignedHeader +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pSignedHeader { + #[prost(message, optional, tag = "1")] + pub header: ::core::option::Option
, + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "3")] + pub signer: ::core::option::Option, + #[prost(uint64, optional, tag = "4")] + pub da_height_hint: ::core::option::Option, +} +/// P2PData +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct P2pData { + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + #[prost(bytes = "vec", repeated, tag = "2")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(uint64, optional, tag = "3")] + pub da_height_hint: ::core::option::Option, +} /// State is the state of the blockchain. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct State { @@ -532,6 +570,26 @@ pub struct State { pub app_hash: ::prost::alloc::vec::Vec, #[prost(bytes = "vec", tag = "9")] pub last_header_hash: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "10")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// RaftBlockState represents a replicated block state +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct RaftBlockState { + #[prost(uint64, tag = "1")] + pub height: u64, + #[prost(uint64, tag = "2")] + pub last_submitted_da_header_height: u64, + #[prost(uint64, tag = "3")] + pub last_submitted_da_data_height: u64, + #[prost(bytes = "vec", tag = "4")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "5")] + pub timestamp: u64, + #[prost(bytes = "vec", tag = "6")] + pub header: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "7")] + pub data: ::prost::alloc::vec::Vec, } /// SequencerDACheckpoint tracks the position in the DA where transactions were last processed #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -957,6 +1015,17 @@ pub struct Batch { #[prost(bytes = "vec", repeated, tag = "1")] pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, } +/// BlockData contains data retrieved from a single DA height. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct BlockData { + #[prost(uint64, tag = "1")] + pub height: u64, + /// Unix timestamp in nanoseconds + #[prost(int64, tag = "2")] + pub timestamp: i64, + #[prost(bytes = "vec", repeated, tag = "3")] + pub blobs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} /// InitChainRequest contains the genesis parameters for chain initialization #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct InitChainRequest { @@ -976,9 +1045,6 @@ pub struct InitChainResponse { /// Hash representing initial state #[prost(bytes = "vec", tag = "1")] pub state_root: ::prost::alloc::vec::Vec, - /// Maximum allowed bytes for transactions in a block - #[prost(uint64, tag = "2")] - pub max_bytes: u64, } /// GetTxsRequest is the request for fetching transactions /// @@ -1017,6 +1083,10 @@ pub struct ExecuteTxsResponse { /// Maximum allowed transaction size (may change with protocol updates) #[prost(uint64, tag = "2")] pub max_bytes: u64, + /// Proposer address that should sign the next block. + /// Empty means the current proposer remains active. 
+ #[prost(bytes = "vec", tag = "3")] + pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SetFinalRequest marks a block as finalized #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] @@ -1030,6 +1100,77 @@ pub struct SetFinalRequest { /// Empty response, errors are returned via gRPC status #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct SetFinalResponse {} +/// GetExecutionInfoRequest requests execution layer parameters +#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoRequest {} +/// GetExecutionInfoResponse contains execution layer parameters +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct GetExecutionInfoResponse { + /// Maximum gas allowed for transactions in a block + /// For non-gas-based execution layers, this should be 0 + #[prost(uint64, tag = "1")] + pub max_gas: u64, + /// Proposer address that should sign the next block from the execution + /// layer's current view. Empty means unchanged or unavailable. 
+ #[prost(bytes = "vec", tag = "2")] + pub next_proposer_address: ::prost::alloc::vec::Vec, +} +/// FilterTxsRequest contains transactions to validate and filter +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsRequest { + /// All transactions (force-included + mempool) + #[prost(bytes = "vec", repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Maximum cumulative size allowed (0 means no size limit) + #[prost(uint64, tag = "2")] + pub max_bytes: u64, + /// Maximum cumulative gas allowed (0 means no gas limit) + #[prost(uint64, tag = "3")] + pub max_gas: u64, + /// Whether force-included transactions are present + #[prost(bool, tag = "4")] + pub has_force_included_transaction: bool, +} +/// FilterTxsResponse contains the filter status for each transaction +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct FilterTxsResponse { + /// Filter status for each transaction (same length as txs in request) + #[prost(enumeration = "FilterStatus", repeated, tag = "1")] + pub statuses: ::prost::alloc::vec::Vec, +} +/// FilterStatus represents the result of filtering a transaction +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum FilterStatus { + /// Transaction will make it to the next batch + FilterOk = 0, + /// Transaction will be filtered out because invalid (too big, malformed, etc.) + FilterRemove = 1, + /// Transaction is valid but postponed for later processing due to size/gas constraint + FilterPostpone = 2, +} +impl FilterStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::FilterOk => "FILTER_OK", + Self::FilterRemove => "FILTER_REMOVE", + Self::FilterPostpone => "FILTER_POSTPONE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FILTER_OK" => Some(Self::FilterOk), + "FILTER_REMOVE" => Some(Self::FilterRemove), + "FILTER_POSTPONE" => Some(Self::FilterPostpone), + _ => None, + } + } +} /// Generated client implementations. pub mod executor_service_client { #![allow( @@ -1219,6 +1360,58 @@ pub mod executor_service_client { .insert(GrpcMethod::new("evnode.v1.ExecutorService", "SetFinal")); self.inner.unary(req, path, codec).await } + /// GetExecutionInfo returns current execution layer parameters + pub async fn get_execution_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/evnode.v1.ExecutorService/GetExecutionInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("evnode.v1.ExecutorService", "GetExecutionInfo"), + ); + self.inner.unary(req, path, codec).await + } + /// FilterTxs validates force-included transactions and calculates gas for all transactions + pub async fn filter_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/evnode.v1.ExecutorService/FilterTxs", + ); + let mut req = 
request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("evnode.v1.ExecutorService", "FilterTxs")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -1263,6 +1456,22 @@ pub mod executor_service_server { tonic::Response, tonic::Status, >; + /// GetExecutionInfo returns current execution layer parameters + async fn get_execution_info( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// FilterTxs validates force-included transactions and calculates gas for all transactions + async fn filter_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// ExecutorService defines the execution layer interface for EVNode #[derive(Debug)] @@ -1521,6 +1730,97 @@ pub mod executor_service_server { }; Box::pin(fut) } + "/evnode.v1.ExecutorService/GetExecutionInfo" => { + #[allow(non_camel_case_types)] + struct GetExecutionInfoSvc(pub Arc); + impl< + T: ExecutorService, + > tonic::server::UnaryService + for GetExecutionInfoSvc { + type Response = super::GetExecutionInfoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_execution_info(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetExecutionInfoSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) 
+ .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/evnode.v1.ExecutorService/FilterTxs" => { + #[allow(non_camel_case_types)] + struct FilterTxsSvc(pub Arc); + impl< + T: ExecutorService, + > tonic::server::UnaryService + for FilterTxsSvc { + type Response = super::FilterTxsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::filter_txs(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = FilterTxsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/core/README.md b/core/README.md index 8f30a3a20f..0138cc004b 100644 --- a/core/README.md +++ b/core/README.md @@ -20,13 +20,20 @@ The `Executor` interface defines how the execution layer processes transactions // Executor defines the interface for the execution layer. type Executor interface { // InitChain initializes the chain based on the genesis information. 
- InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, maxBytes uint64, err error) + InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, err error) // GetTxs retrieves transactions from the mempool. GetTxs(ctx context.Context) ([][]byte, error) // ExecuteTxs executes a block of transactions against the current state. - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, maxBytes uint64, err error) + ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) // SetFinal marks a block height as final. SetFinal(ctx context.Context, blockHeight uint64) error + // GetExecutionInfo returns execution parameters used by ev-node. + GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) +} + +type ExecuteResult struct { + UpdatedStateRoot []byte + NextProposerAddress []byte } ``` diff --git a/core/execution/dummy.go b/core/execution/dummy.go index d6fb38959e..8953ded2a7 100644 --- a/core/execution/dummy.go +++ b/core/execution/dummy.go @@ -61,7 +61,7 @@ func (e *DummyExecutor) InjectTx(tx []byte) { } // ExecuteTxs simulate execution of transactions. 
-func (e *DummyExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (e *DummyExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (ExecuteResult, error) { e.mu.Lock() defer e.mu.Unlock() @@ -73,7 +73,7 @@ func (e *DummyExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeigh pending := hash.Sum(nil) e.pendingRoots[blockHeight] = pending e.removeExecutedTxs(txs) - return pending, nil + return ExecuteResult{UpdatedStateRoot: pending}, nil } // SetFinal marks block at given height as finalized. diff --git a/core/execution/dummy_test.go b/core/execution/dummy_test.go index f6be3d400b..e77f1a39c6 100644 --- a/core/execution/dummy_test.go +++ b/core/execution/dummy_test.go @@ -131,13 +131,13 @@ func TestExecuteTxs(t *testing.T) { prevStateRoot := executor.GetStateRoot() txsToExecute := [][]byte{tx1, tx3} - newStateRoot, err := executor.ExecuteTxs(ctx, txsToExecute, blockHeight, timestamp, prevStateRoot) + result, err := executor.ExecuteTxs(ctx, txsToExecute, blockHeight, timestamp, prevStateRoot) if err != nil { t.Fatalf("ExecuteTxs returned error: %v", err) } - if bytes.Equal(newStateRoot, prevStateRoot) { + if bytes.Equal(result.UpdatedStateRoot, prevStateRoot) { t.Error("stateRoot should have changed after ExecuteTxs") } @@ -167,7 +167,7 @@ func TestSetFinal(t *testing.T) { prevStateRoot := executor.GetStateRoot() txs := [][]byte{[]byte("tx1"), []byte("tx2")} - pendingRoot, _ := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + pendingResult, _ := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) // Set the block as final err := executor.SetFinal(ctx, blockHeight) @@ -177,8 +177,8 @@ func TestSetFinal(t *testing.T) { // Verify that the state root was updated newStateRoot := executor.GetStateRoot() - if !bytes.Equal(newStateRoot, pendingRoot) { - t.Errorf("Expected 
state root to be updated to pending root %v, got %v", pendingRoot, newStateRoot) + if !bytes.Equal(newStateRoot, pendingResult.UpdatedStateRoot) { + t.Errorf("Expected state root to be updated to pending root %v, got %v", pendingResult.UpdatedStateRoot, newStateRoot) } // Verify that the pending root was removed @@ -398,7 +398,7 @@ func TestExecuteTxsWithInvalidPrevStateRoot(t *testing.T) { timestamp := time.Now() txs := [][]byte{[]byte("tx1"), []byte("tx2")} - newStateRoot, err := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, invalidPrevStateRoot) + result, err := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, invalidPrevStateRoot) // The dummy executor doesn't validate the previous state root, so it should still work // This is a characteristic of the dummy implementation @@ -406,7 +406,7 @@ func TestExecuteTxsWithInvalidPrevStateRoot(t *testing.T) { t.Fatalf("ExecuteTxs with invalid prevStateRoot returned error: %v", err) } - if len(newStateRoot) == 0 { + if len(result.UpdatedStateRoot) == 0 { t.Error("Expected non-empty state root even with invalid prevStateRoot") } diff --git a/core/execution/execution.go b/core/execution/execution.go index f3ebe1da91..477407e1da 100644 --- a/core/execution/execution.go +++ b/core/execution/execution.go @@ -63,9 +63,9 @@ type Executor interface { // - prevStateRoot: Previous block's state root hash // // Returns: - // - updatedStateRoot: New state root after executing transactions + // - result: New execution result after executing transactions // - err: Any execution errors - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) + ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) // SetFinal marks a block as finalized at the specified height. 
// Requirements: @@ -132,6 +132,21 @@ type ExecutionInfo struct { // MaxGas is the maximum gas allowed for transactions in a block. // For non-gas-based execution layers, this should be 0. MaxGas uint64 + + // NextProposerAddress is the proposer address that should sign the next + // block from the execution layer's current view. Empty means unchanged or + // unavailable, and callers should fall back to their current proposer. + NextProposerAddress []byte +} + +// ExecuteResult contains execution output that consensus must persist. +type ExecuteResult struct { + // UpdatedStateRoot is the new state root after executing transactions. + UpdatedStateRoot []byte + + // NextProposerAddress is the proposer address selected by execution for the + // next block. Empty means the current proposer remains active. + NextProposerAddress []byte } // HeightProvider is an optional interface that execution clients can implement diff --git a/docs/.vitepress/config.ts b/docs/.vitepress/config.ts index 01bda4a8d9..0cfdf5c7ae 100644 --- a/docs/.vitepress/config.ts +++ b/docs/.vitepress/config.ts @@ -297,10 +297,6 @@ function sidebarHome() { text: "Create genesis for your chain", link: "/guides/create-genesis", }, - { - text: "Rotate proposer key", - link: "/guides/operations/proposer-key-rotation", - }, { text: "Metrics", link: "/guides/metrics", diff --git a/docs/adr/adr-023-execution-owned-proposer-rotation.md b/docs/adr/adr-023-execution-owned-proposer-rotation.md new file mode 100644 index 0000000000..c89a78412b --- /dev/null +++ b/docs/adr/adr-023-execution-owned-proposer-rotation.md @@ -0,0 +1,83 @@ +# ADR 023: Execution-Owned Proposer Rotation + +## Changelog + +- 2026-04-24: Initial ADR. + +## Status + +Proposed + +## Context + +ev-node originally selected the block proposer from genesis. That made proposer changes a consensus configuration concern and pushed key rotation into a static schedule. 
This is too rigid for EVM rollups and other execution environments where proposer selection should be governed by execution state. + +The replacement design moves proposer selection into the execution environment. ev-node remains responsible for signing, propagating, validating, and persisting blocks, but it consumes proposer updates returned by execution. + +## Decision + +`Executor.ExecuteTxs` returns an execution result containing: + +- `UpdatedStateRoot`: the state root after executing the block. +- `NextProposerAddress`: the address expected to sign the next block. + +`GetExecutionInfo` also exposes `NextProposerAddress` for startup. If execution returns an empty proposer at startup, ev-node falls back to `genesis.proposer_address`. + +An empty `NextProposerAddress` from `ExecuteTxs` means the proposer is unchanged. ev-node must not write a redundant header field in that case, preserving compatibility with existing headers and hash chains. + +When execution returns a non-empty next proposer: + +- The producing node commits it to `Header.NextProposerAddress` before signing the header. +- Syncing nodes require the signed header value to match the execution result. +- `State.NextProposerAddress` is updated and used as the expected signer for `LastBlockHeight + 1`. + +`Header.NextProposerAddress` lets header-only paths and DA envelope validation see proposer transitions without replaying execution first. The execution result remains the authority; mismatches between the signed header and execution are invalid. + +## EVM System Contract Model + +For ev-reth, proposer selection should be implemented as execution state, likely through a system contract. The contract stores the active next proposer address and exposes controlled update methods. + +The controlling address can be a multisig or security council. This keeps operational key rotation in execution state instead of requiring a new genesis file or node-side schedule. 
A future ev-reth implementation should read the contract during block execution and return the selected proposer through `ExecuteTxsResponse.next_proposer_address`. + +This ADR does not define the system contract ABI. The contract should be specified with ev-reth because access control, call routing, and predeploy/system-contract conventions are execution-environment details. + +## Security Considerations + +The security council or multisig becomes the authority for proposer updates. It must use a threshold and operational process appropriate for production signer rotation. + +The system contract must restrict writes to the configured authority. Unauthorized proposer updates are consensus-critical because they determine who can sign the next block. + +ev-node validates the execution output against the signed header. A malicious proposer cannot advertise one next proposer in the header while execution derives another. + +If the execution interface returns an empty proposer, ev-node treats the proposer as unchanged. At startup, empty execution info falls back to genesis so existing execution implementations remain usable. + +Compromise of the security council can still rotate the proposer to an attacker. This ADR reduces node configuration risk; it does not eliminate governance-key risk. + +## Consequences + +Positive: + +- Proposer rotation becomes deterministic execution state. +- EVM chains can use a system contract and multisig-controlled rotation. +- Existing chains keep working when execution returns an empty proposer. +- Header verification can follow rotations once the rotating block is known. + +Negative: + +- The execution API changes and all execution adapters must return `ExecuteResult`. +- Proposer updates become consensus-critical execution outputs. +- ev-reth needs a separate system-contract design and implementation. + +## Alternatives Considered + +Genesis proposer schedule: + +- Rejected. 
It makes rotation a static node/genesis concern and is not a good fit for security-council or multisig-controlled EVM deployments. + +Node-local proposer configuration: + +- Rejected. Nodes could disagree about the active proposer unless every operator updates configuration at the same time. + +Execution-only proposer without header commitment: + +- Rejected. Syncing nodes can replay execution, but header and DA envelope paths benefit from having the selected next proposer committed in the signed header when it changes. diff --git a/docs/adr/adr-023-proposer-key-rotation.md b/docs/adr/adr-023-proposer-key-rotation.md deleted file mode 100644 index 1c1b067982..0000000000 --- a/docs/adr/adr-023-proposer-key-rotation.md +++ /dev/null @@ -1,165 +0,0 @@ -# ADR 023: Proposer Key Rotation via Height-Based Schedule - -## Changelog - -- 2026-04-23: Implemented proposer key rotation through a height-indexed proposer schedule - -## Context - -ev-node historically treated the proposer as a single static identity embedded in genesis via `proposer_address`. -That assumption leaked into block production, DA submission, and sync validation. As a result, rotating a compromised -or operationally obsolete proposer key required out-of-band coordination and effectively behaved like a manual -re-genesis from the point of view of node operators. - -This was suboptimal for three reasons: - -1. It made proposer rotation operationally risky and easy to get wrong. -2. Fresh nodes syncing from genesis had no protocol-visible record of when the proposer changed. -3. Validation only pinned the proposer address, not the scheduled public key that should be producing blocks. - -## Alternative Approaches - -### 1. Manual key swap only - -Operators can stop the sequencer, swap the local signer, redistribute config, and restart nodes. -This is insufficient because the chain itself does not encode when the proposer changed, so historical sync -and validation become ambiguous. - -### 2. 
Re-issue a new genesis on each rotation - -This treats every proposer rotation like a chain restart: a new `chain_id`, state reset back to `initial_height`, -and existing block history discarded. It is operationally heavy, conflates upgrades with rotations, and breaks -continuity for nodes syncing historical data. - -### 3. Height-indexed proposer schedule in genesis (Chosen) - -Record proposer changes as an ordered schedule indexed by activation height. The `genesis.json` file is updated -with a new schedule entry and redistributed, but the chain keeps its `chain_id`, continues from the current -height, preserves all block history, and fresh nodes can still validate the entire chain end-to-end across -rotation boundaries. The rollout is still coordinated — every node must receive the updated `genesis.json` and -restart before the activation height — but none of the chain's state or provenance is reset. - -## Decision - -ev-node now supports proposer rotation through a `proposer_schedule` field in genesis. - -### What this is not - -This is **not** a re-genesis. Re-genesis — in the sense we mean it above — would involve issuing a new `chain_id`, -resetting height to `initial_height`, and discarding existing block history. Proposer key rotation does none of -that: the `chain_id` is unchanged, block height keeps progressing, all previous blocks remain valid, and fresh -nodes can sync the chain from genesis across any number of rotation boundaries. - -The `genesis.json` file itself is updated (a new `proposer_schedule` entry is appended) and operators must -restart every node to reload it. The file changes; the chain's state does not. - -Each entry declares: - -- `start_height` -- `address` -- `pub_key` (optional; when present, it must match `address`) - -The active proposer for block height `h` is the last entry whose `start_height <= h`. - -The legacy `proposer_address` field remains for backward compatibility. 
When no explicit schedule is present, -ev-node derives an implicit single-entry schedule beginning at `initial_height`. - -When an explicit schedule is present: - -- the first entry must start at `initial_height` -- entries must be strictly increasing by `start_height` -- if `pub_key` is present, the entry's `address` must match it -- entries without `pub_key` are interpreted by `address` only -- `proposer_address`, when present, must match the first schedule entry's `address` - -## Detailed Design - -### Data model - -Genesis gains: - -```json -"proposer_schedule": [ - { - "start_height": 1, - "address": "...", - "pub_key": "..." - }, - { - "start_height": 1250000, - "address": "..." - } -] -``` - -The existing `proposer_address` field is retained as a compatibility field and is normalized to the first -scheduled proposer when a schedule is present. - -### Validation rules - -The proposer schedule is now consulted in all proposer-sensitive paths: - -1. executor startup accepts any signer that appears somewhere in the schedule -2. block creation resolves the proposer for the exact height being produced -3. DA submission validates the configured signer against the scheduled proposer for each signed data height -4. sync validation validates incoming headers and signed data against the scheduled proposer for their heights - -This makes proposer rotation protocol-visible for both live nodes and nodes syncing historical data. - -### Operational procedure - -For a planned rotation: - -1. Choose activation height `H` -2. Add a new `proposer_schedule` entry with `start_height = H` -3. Distribute the updated genesis/config to node operators -4. Upgrade follower/full nodes before activation -5. Stop the old sequencer before `H` -6. Start the new sequencer with the replacement key at or after `H` - -The old proposer remains valid for heights `< H`, and the new proposer becomes valid at heights `>= H`. 
- -### Security considerations - -This design improves safety by allowing validation against the scheduled public key when one is pinned. -It does not solve emergency rotation authorization by itself; a future design can add a separate upgrade authority -or rotation certificate flow if the network needs signer replacement without prior static scheduling. - -### Testing - -Coverage includes: - -- genesis schedule validation and height resolution -- sync acceptance of scheduled proposer rotation -- DA submission using a rotated proposer key at the configured height -- executor block creation using the proposer scheduled for the produced height - -## Status - -Implemented - -## Consequences - -### Positive - -- proposer rotation is now part of the chain configuration rather than an operator convention -- fresh nodes can validate historical proposer changes from genesis -- sync and DA validation can pin scheduled public keys, not just addresses -- routine key rotation no longer requires a chain restart - -### Negative - -- proposer schedule changes are consensus-visible and require coordinated rollout -- operators must distribute updated genesis/config before activation height -- emergency rotation still requires prior scheduling or a later authority-based mechanism - -### Neutral - -- legacy single-proposer deployments continue to work without defining `proposer_schedule` - -## References - -- [pkg/genesis/genesis.go](../../pkg/genesis/genesis.go) -- [pkg/genesis/proposer_schedule.go](../../pkg/genesis/proposer_schedule.go) -- [block/internal/executing/executor.go](../../block/internal/executing/executor.go) -- [block/internal/syncing/assert.go](../../block/internal/syncing/assert.go) diff --git a/docs/getting-started/custom/implement-executor.md b/docs/getting-started/custom/implement-executor.md index 7a1d51886f..6e6bd11c14 100644 --- a/docs/getting-started/custom/implement-executor.md +++ b/docs/getting-started/custom/implement-executor.md @@ -6,10 +6,11 @@ The 
Executor interface is the boundary between ev-node and your execution layer. ```go type Executor interface { - InitChain(ctx context.Context, genesis Genesis) ([]byte, error) + InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) ([]byte, error) GetTxs(ctx context.Context) ([][]byte, error) - ExecuteTxs(ctx context.Context, txs [][]byte, height uint64, timestamp time.Time) (*ExecutionResult, error) + ExecuteTxs(ctx context.Context, txs [][]byte, height uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) SetFinal(ctx context.Context, height uint64) error + GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) } ``` @@ -95,7 +96,8 @@ func (e *MyExecutor) ExecuteTxs( txs [][]byte, height uint64, timestamp time.Time, -) (*ExecutionResult, error) + prevStateRoot []byte, +) (execution.ExecuteResult, error) ``` **Parameters:** @@ -103,17 +105,17 @@ func (e *MyExecutor) ExecuteTxs( - `txs` — Ordered transactions to execute - `height` — Block height - `timestamp` — Block timestamp +- `prevStateRoot` — Previous block's state root **Returns:** -- `ExecutionResult` containing new state root and gas used +- `execution.ExecuteResult` containing the new state root and optional next proposer address - Error only for system failures (not tx failures) **Responsibilities:** - Execute each transaction in order - Update state -- Track gas usage - Handle transaction failures gracefully - Return new state root @@ -125,30 +127,27 @@ func (e *MyExecutor) ExecuteTxs( txs [][]byte, height uint64, timestamp time.Time, -) (*ExecutionResult, error) { - var totalGas uint64 - + prevStateRoot []byte, +) (execution.ExecuteResult, error) { for _, txBytes := range txs { tx, err := DecodeTx(txBytes) if err != nil { continue // Skip invalid tx } - gas, err := e.executeTx(tx) - if err != nil { + if err := e.executeTx(tx); err != nil { // Log but continue - tx failure != block failure continue } - - totalGas += gas } 
// Commit state changes stateRoot := e.db.Commit() - return &ExecutionResult{ - StateRoot: stateRoot, - GasUsed: totalGas, + return execution.ExecuteResult{ + UpdatedStateRoot: stateRoot, + // Empty keeps the current proposer. + NextProposerAddress: nil, }, nil } ``` @@ -210,9 +209,9 @@ func TestExecuteTxs(t *testing.T) { require.NoError(t, err) // Execute - result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now()) + result, err := exec.ExecuteTxs(ctx, txs, 1, time.Now(), initialStateRoot) require.NoError(t, err) - require.NotEmpty(t, result.StateRoot) + require.NotEmpty(t, result.UpdatedStateRoot) } ``` diff --git a/docs/guides/create-genesis.md b/docs/guides/create-genesis.md index 365b491b82..5886325dab 100644 --- a/docs/guides/create-genesis.md +++ b/docs/guides/create-genesis.md @@ -125,10 +125,6 @@ Before doing so, add a `da_start_height` field to the genesis file, that corresp jq '.da_start_height = 1' ~/.$CHAIN_ID/config/genesis.json > temp.json && mv temp.json ~/.$CHAIN_ID/config/genesis.json ``` -:::tip -If you want to plan a future proposer key migration without restarting the chain, define a `proposer_schedule` in your genesis and roll it out as a coordinated upgrade. See [Rotate proposer key](./operations/proposer-key-rotation.md). -::: - ## Summary By following these steps, you will set up the genesis for your chain, initialize the validator, add a genesis account, and start the chain. This guide provides a basic framework for configuring and starting your chain using the gm-world binary. Make sure you initialized your chain correctly, and use the `gmd` command for all operations. diff --git a/docs/guides/operations/proposer-key-rotation.md b/docs/guides/operations/proposer-key-rotation.md deleted file mode 100644 index 3c5667d50c..0000000000 --- a/docs/guides/operations/proposer-key-rotation.md +++ /dev/null @@ -1,186 +0,0 @@ -# Rotate proposer key - -Use this guide to rotate a sequencer proposer key without restarting the chain. 
The active proposer is selected from `proposer_schedule` in `genesis.json` based on block height. - -## Before you start - -- This is a coordinated network upgrade. Every node must run a binary that supports `proposer_schedule`. -- Every node must use the same updated `genesis.json` before the activation height. -- `ev-node` loads `genesis.json` when the node starts. Updating the file on disk is not enough; you must restart nodes after replacing it. -- The old proposer key remains valid until the block before the activation height. If the old key cannot safely produce until then, stop the sequencer and coordinate operator recovery first. - -## How proposer rotation is stored in genesis - -`proposer_address` and `proposer_schedule[].address` are base64-encoded strings in JSON. - -```json -{ - "initial_height": 1, - "proposer_address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=", - "proposer_schedule": [ - { - "start_height": 1, - "address": "0FQmA4Hn9dn8m4ZpM4+fV4e8KhkWjI4V2Vt1j9Qm5pA=" - }, - { - "start_height": 125000, - "address": "Y7z5v9mQm4Nw6mD0a2yR9kD2B0qv5iJj1Q1R7gD4B7Q=" - } - ] -} -``` - -Rules enforced by `ev-node`: - -- `proposer_schedule[0].start_height` must equal `initial_height` -- schedule entries must be strictly increasing by `start_height` -- if `proposer_address` is set, it must match the first schedule entry - -Keep all earlier schedule entries. Fresh full nodes need them to validate historical blocks. - -## 1. Pick an activation height - -Choose an activation height `H` far enough in the future that you can distribute the updated genesis and restart every non-producing node before the cutover. - -```bash -ACTIVATION_HEIGHT=125000 -GENESIS="$HOME/.evnode/config/genesis.json" -INITIAL_HEIGHT="$(jq -r '.initial_height' "$GENESIS")" -``` - -## 2. Get the current and replacement proposer public keys - -For a file-based signer, the signer public key is stored in `signer.json` as base64. 
You only put the derived address into genesis, but you still need the public key once to compute that address. - -```bash -OLD_SIGNER_DIR="$HOME/.evnode/config" -NEW_SIGNER_DIR="/secure/path/new-signer" - -OLD_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$OLD_SIGNER_DIR/signer.json")" -NEW_PROPOSER_PUBKEY="$(jq -r '.pub_key' "$NEW_SIGNER_DIR/signer.json")" -``` - -If you use a KMS-backed signer, export the replacement Ed25519 public key from your signer flow and base64-encode the raw public key bytes in the same format. The runtime configuration stays the same as in the [AWS KMS signer guide](./aws-kms-signer.md). - -## 3. Derive proposer addresses from the public keys - -`ev-node` derives the proposer address as `sha256(raw_pubkey_bytes)`. The helper below prints the address in the base64 format used by `genesis.json`. - -```bash -proposer_address() { - python3 - "$1" <<'PY' -import base64 -import hashlib -import sys - -pub_key = base64.b64decode(sys.argv[1]) -address = hashlib.sha256(pub_key).digest() -print(base64.b64encode(address).decode()) -PY -} - -OLD_PROPOSER_ADDRESS="$(proposer_address "$OLD_PROPOSER_PUBKEY")" -NEW_PROPOSER_ADDRESS="$(proposer_address "$NEW_PROPOSER_PUBKEY")" -``` - -## 4. Update `genesis.json` - -### If your chain only has `proposer_address` today - -Create an explicit schedule with the current proposer at `initial_height` and the new proposer at `ACTIVATION_HEIGHT`. - -```bash -jq \ - --arg old_addr "$OLD_PROPOSER_ADDRESS" \ - --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --argjson initial_height "$INITIAL_HEIGHT" \ - --argjson activation_height "$ACTIVATION_HEIGHT" \ - ' - .proposer_address = $old_addr - | .proposer_schedule = [ - { - start_height: $initial_height, - address: $old_addr - }, - { - start_height: $activation_height, - address: $new_addr - } - ] - ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" -``` - -### If your chain already has `proposer_schedule` - -Append the new entry. 
Do not replace older entries, and make sure `ACTIVATION_HEIGHT` is greater than the last scheduled `start_height`. - -```bash -jq \ - --arg new_addr "$NEW_PROPOSER_ADDRESS" \ - --argjson activation_height "$ACTIVATION_HEIGHT" \ - ' - .proposer_schedule += [ - { - start_height: $activation_height, - address: $new_addr - } - ] - ' "$GENESIS" > "$GENESIS.tmp" && mv "$GENESIS.tmp" "$GENESIS" -``` - -Verify the result before you distribute it: - -```bash -jq '{initial_height, proposer_address, proposer_schedule}' "$GENESIS" -``` - -## 5. Distribute the updated genesis and restart followers - -Copy the same `genesis.json` to every full node, replica, and failover node. Restart them after copying the file so they load the updated schedule. - -Do this before the chain reaches `ACTIVATION_HEIGHT`. - -## 6. Cut over the sequencer - -Wait until the chain reaches `ACTIVATION_HEIGHT - 1`, then stop the old sequencer and start it with the replacement signer. - -Example with a file-based signer: - -```bash -evnode start \ - --home "$HOME/.evnode" \ - --evnode.node.aggregator \ - --evnode.signer.signer_type file \ - --evnode.signer.signer_path "$NEW_SIGNER_DIR" \ - --evnode.signer.passphrase "$SIGNER_PASSPHRASE" -``` - -If you run a custom chain binary such as `gmd` or `appd`, use the same start command you already use for the sequencer and only change the signer configuration. - -## 7. Verify the first post-upgrade block - -Fetch the header at `ACTIVATION_HEIGHT` or the next produced block and confirm that it carries the new proposer address. - -```bash -curl -s http://127.0.0.1:26657/header \ - -H 'Content-Type: application/json' \ - -d "{\"jsonrpc\":\"2.0\",\"method\":\"header\",\"params\":{\"height\":\"${ACTIVATION_HEIGHT}\"},\"id\":1}" \ - | jq . -``` - -Some RPC clients render binary fields as hex instead of base64. 
If needed, convert the base64 genesis address before comparing: - -```bash -python3 - "$NEW_PROPOSER_ADDRESS" <<'PY' -import base64 -import sys - -print("0x" + base64.b64decode(sys.argv[1]).hex()) -PY -``` - -If the node at `ACTIVATION_HEIGHT` is still signed by the old key, stop block production and check three things first: - -1. every node was restarted after receiving the updated genesis -2. `proposer_schedule` contains the new entry at the intended height -3. the sequencer is actually running with the replacement signer diff --git a/docs/guides/operations/upgrades.md b/docs/guides/operations/upgrades.md index ac5f6dcbf1..0027f13c36 100644 --- a/docs/guides/operations/upgrades.md +++ b/docs/guides/operations/upgrades.md @@ -38,12 +38,6 @@ May require state migration or coordinated network upgrade. 5. Run any migration scripts 6. Restart -### Proposer Key Rotation - -Rotating the proposer key is a coordinated upgrade even when the chain does not restart. All nodes must receive the same updated `genesis.json`, restart to load it, and be ready before the scheduled activation height. - -Use [Rotate proposer key](./proposer-key-rotation.md) for the exact `proposer_schedule` format, genesis update steps, and cutover procedure. - ## ev-node Upgrades ### Check Current Version diff --git a/docs/reference/interfaces/executor.md b/docs/reference/interfaces/executor.md index 5cb0e9f8d8..31b425474d 100644 --- a/docs/reference/interfaces/executor.md +++ b/docs/reference/interfaces/executor.md @@ -8,7 +8,7 @@ The Executor interface defines how ev-node communicates with execution layers. 
```go type Executor interface { InitChain(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) (stateRoot []byte, err error) GetTxs(ctx context.Context) ([][]byte, error) - ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) + ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) SetFinal(ctx context.Context, blockHeight uint64) error GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) FilterTxs(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]FilterStatus, error) @@ -64,7 +64,7 @@ GetTxs(ctx context.Context) ([][]byte, error) Processes transactions to produce a new block state. ```go -ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) +ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (result ExecuteResult, err error) ``` **Parameters:** @@ -76,7 +76,15 @@ ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time **Returns:** -- `updatedStateRoot` - New state root after execution +- `result.UpdatedStateRoot` - New state root after execution +- `result.NextProposerAddress` - Address expected to sign the next block. Empty means the proposer is unchanged. + +```go +type ExecuteResult struct { + UpdatedStateRoot []byte + NextProposerAddress []byte +} +``` **Requirements:** @@ -115,10 +123,14 @@ GetExecutionInfo(ctx context.Context) (ExecutionInfo, error) ```go type ExecutionInfo struct { - MaxGas uint64 // Maximum gas per block (0 = no gas-based limiting) + MaxGas uint64 + NextProposerAddress []byte } ``` +- `MaxGas` - Maximum gas per block (0 = no gas-based limiting) +- `NextProposerAddress` - Execution layer's current next proposer.
Empty at startup means ev-node falls back to `genesis.proposer_address`. + ### FilterTxs Validates and filters transactions for block inclusion. diff --git a/execution/evm/execution.go b/execution/evm/execution.go index 3067893360..ecca8bd642 100644 --- a/execution/evm/execution.go +++ b/execution/evm/execution.go @@ -344,7 +344,7 @@ func (c *EngineClient) GetTxs(ctx context.Context) ([][]byte, error) { // - Checks for already-promoted blocks to enable idempotent execution // - Saves ExecMeta with payloadID after forkchoiceUpdatedV3 for crash recovery // - Updates ExecMeta to "promoted" after successful execution -func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) { +func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { // 1. Check for idempotent execution stateRoot, payloadID, found, idempotencyErr := c.reconcileExecutionAtHeight(ctx, blockHeight, timestamp, txs) @@ -353,22 +353,26 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight // Continue execution on error, as it might be transient } else if found { if stateRoot != nil { - return stateRoot, nil + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } if payloadID != nil { // Found in-progress execution, attempt to resume - return c.processPayload(ctx, *payloadID, txs) + stateRoot, err := c.processPayload(ctx, *payloadID, txs) + if err != nil { + return execution.ExecuteResult{}, err + } + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } } prevBlockHash, prevHeaderStateRoot, prevGasLimit, _, err := c.getBlockInfo(ctx, blockHeight-1) if err != nil { - return nil, fmt.Errorf("failed to get block info: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("failed to get block info: %w", err) } // It's possible that the 
prev state root passed in is nil if this is the first block. // If so, we can't do a comparison. Otherwise, we compare the roots. if len(prevStateRoot) > 0 && !bytes.Equal(prevStateRoot, prevHeaderStateRoot.Bytes()) { - return nil, fmt.Errorf("prevStateRoot mismatch at height %d: consensus=%x execution=%x", blockHeight-1, prevStateRoot, prevHeaderStateRoot.Bytes()) + return execution.ExecuteResult{}, fmt.Errorf("prevStateRoot mismatch at height %d: consensus=%x execution=%x", blockHeight-1, prevStateRoot, prevHeaderStateRoot.Bytes()) } // 2. Prepare payload attributes @@ -445,7 +449,7 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight return nil }, MaxPayloadStatusRetries, InitialRetryBackoff, "ExecuteTxs forkchoice") if err != nil { - return nil, err + return execution.ExecuteResult{}, err } // Save ExecMeta with payloadID for crash recovery (Stage="started") @@ -453,7 +457,11 @@ func (c *EngineClient) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight c.saveExecMeta(ctx, blockHeight, timestamp.Unix(), newPayloadID[:], nil, nil, txs, ExecStageStarted) // 4. Process the payload (get, submit, finalize) - return c.processPayload(ctx, *newPayloadID, txs) + stateRoot, err = c.processPayload(ctx, *newPayloadID, txs) + if err != nil { + return execution.ExecuteResult{}, err + } + return execution.ExecuteResult{UpdatedStateRoot: stateRoot}, nil } // setHead updates the head block hash without changing safe or finalized. 
diff --git a/execution/evm/go.mod b/execution/evm/go.mod index 5a014af738..17b6831d67 100644 --- a/execution/evm/go.mod +++ b/execution/evm/go.mod @@ -2,6 +2,11 @@ module github.com/evstack/ev-node/execution/evm go 1.25.7 +replace ( + github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core +) + require ( github.com/ethereum/go-ethereum v1.17.2 github.com/evstack/ev-node v1.1.0 diff --git a/execution/evm/test/go.mod b/execution/evm/test/go.mod index 78b3552949..23aadab045 100644 --- a/execution/evm/test/go.mod +++ b/execution/evm/test/go.mod @@ -199,5 +199,6 @@ require ( replace ( github.com/evstack/ev-node => ../../../ + github.com/evstack/ev-node/core => ../../../core github.com/evstack/ev-node/execution/evm => ../ ) diff --git a/execution/grpc/client.go b/execution/grpc/client.go index efb9d2f840..fcc805a1c2 100644 --- a/execution/grpc/client.go +++ b/execution/grpc/client.go @@ -99,7 +99,7 @@ func (c *Client) GetTxs(ctx context.Context) ([][]byte, error) { // This method sends transactions to the execution service for processing and // returns the updated state root after execution. The execution service ensures // deterministic execution and validates the state transition. 
-func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (updatedStateRoot []byte, err error) { +func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { req := connect.NewRequest(&pb.ExecuteTxsRequest{ Txs: txs, BlockHeight: blockHeight, @@ -109,10 +109,13 @@ func (c *Client) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint6 resp, err := c.client.ExecuteTxs(ctx, req) if err != nil { - return nil, fmt.Errorf("grpc client: failed to execute txs: %w", err) + return execution.ExecuteResult{}, fmt.Errorf("grpc client: failed to execute txs: %w", err) } - return resp.Msg.UpdatedStateRoot, nil + return execution.ExecuteResult{ + UpdatedStateRoot: resp.Msg.UpdatedStateRoot, + NextProposerAddress: resp.Msg.NextProposerAddress, + }, nil } // SetFinal marks a block as finalized at the specified height. @@ -145,7 +148,8 @@ func (c *Client) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, } return execution.ExecutionInfo{ - MaxGas: resp.Msg.MaxGas, + MaxGas: resp.Msg.MaxGas, + NextProposerAddress: resp.Msg.NextProposerAddress, }, nil } diff --git a/execution/grpc/client_test.go b/execution/grpc/client_test.go index 59ec6416ff..3a9477823d 100644 --- a/execution/grpc/client_test.go +++ b/execution/grpc/client_test.go @@ -13,7 +13,7 @@ import ( type mockExecutor struct { initChainFunc func(ctx context.Context, genesisTime time.Time, initialHeight uint64, chainID string) ([]byte, error) getTxsFunc func(ctx context.Context) ([][]byte, error) - executeTxsFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) + executeTxsFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) setFinalFunc func(ctx context.Context, blockHeight uint64) 
error getExecutionInfoFunc func(ctx context.Context) (execution.ExecutionInfo, error) filterTxsFunc func(ctx context.Context, txs [][]byte, maxBytes, maxGas uint64, hasForceIncludedTransaction bool) ([]execution.FilterStatus, error) @@ -33,11 +33,11 @@ func (m *mockExecutor) GetTxs(ctx context.Context) ([][]byte, error) { return [][]byte{[]byte("tx1"), []byte("tx2")}, nil } -func (m *mockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (m *mockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { if m.executeTxsFunc != nil { return m.executeTxsFunc(ctx, txs, blockHeight, timestamp, prevStateRoot) } - return []byte("updated_state_root"), nil + return execution.ExecuteResult{UpdatedStateRoot: []byte("updated_state_root")}, nil } func (m *mockExecutor) SetFinal(ctx context.Context, blockHeight uint64) error { @@ -151,7 +151,7 @@ func TestClient_ExecuteTxs(t *testing.T) { expectedStateRoot := []byte("new_state_root") mockExec := &mockExecutor{ - executeTxsFunc: func(ctx context.Context, txsIn [][]byte, bh uint64, ts time.Time, psr []byte) ([]byte, error) { + executeTxsFunc: func(ctx context.Context, txsIn [][]byte, bh uint64, ts time.Time, psr []byte) (execution.ExecuteResult, error) { if len(txsIn) != len(txs) { t.Errorf("expected %d txs, got %d", len(txs), len(txsIn)) } @@ -164,7 +164,7 @@ func TestClient_ExecuteTxs(t *testing.T) { if string(psr) != string(prevStateRoot) { t.Errorf("expected prev state root %s, got %s", prevStateRoot, psr) } - return expectedStateRoot, nil + return execution.ExecuteResult{UpdatedStateRoot: expectedStateRoot}, nil }, } @@ -177,13 +177,13 @@ func TestClient_ExecuteTxs(t *testing.T) { client := NewClient(server.URL) // Test ExecuteTxs - stateRoot, err := client.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + result, err := 
client.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) if err != nil { t.Fatalf("unexpected error: %v", err) } - if string(stateRoot) != string(expectedStateRoot) { - t.Errorf("expected state root %s, got %s", expectedStateRoot, stateRoot) + if string(result.UpdatedStateRoot) != string(expectedStateRoot) { + t.Errorf("expected state root %s, got %s", expectedStateRoot, result.UpdatedStateRoot) } } diff --git a/execution/grpc/go.mod b/execution/grpc/go.mod index 2312dd9d25..7817bb8f91 100644 --- a/execution/grpc/go.mod +++ b/execution/grpc/go.mod @@ -2,6 +2,11 @@ module github.com/evstack/ev-node/execution/grpc go 1.25.7 +replace ( + github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core +) + require ( connectrpc.com/connect v1.19.2 connectrpc.com/grpcreflect v1.3.0 diff --git a/execution/grpc/server.go b/execution/grpc/server.go index e0488b7655..1123d60fe7 100644 --- a/execution/grpc/server.go +++ b/execution/grpc/server.go @@ -102,7 +102,7 @@ func (s *Server) ExecuteTxs( return nil, connect.NewError(connect.CodeInvalidArgument, errors.New("prev_state_root is required")) } - updatedStateRoot, err := s.executor.ExecuteTxs( + result, err := s.executor.ExecuteTxs( ctx, req.Msg.Txs, req.Msg.BlockHeight, @@ -114,7 +114,8 @@ func (s *Server) ExecuteTxs( } return connect.NewResponse(&pb.ExecuteTxsResponse{ - UpdatedStateRoot: updatedStateRoot, + UpdatedStateRoot: result.UpdatedStateRoot, + NextProposerAddress: result.NextProposerAddress, }), nil } @@ -150,7 +151,8 @@ func (s *Server) GetExecutionInfo( } return connect.NewResponse(&pb.GetExecutionInfoResponse{ - MaxGas: info.MaxGas, + MaxGas: info.MaxGas, + NextProposerAddress: info.NextProposerAddress, }), nil } diff --git a/execution/grpc/server_test.go b/execution/grpc/server_test.go index e2a01b4bc4..559d8457b2 100644 --- a/execution/grpc/server_test.go +++ b/execution/grpc/server_test.go @@ -9,6 +9,7 @@ import ( "connectrpc.com/connect" 
"google.golang.org/protobuf/types/known/timestamppb" + "github.com/evstack/ev-node/core/execution" pb "github.com/evstack/ev-node/types/pb/evnode/v1" ) @@ -190,7 +191,7 @@ func TestServer_ExecuteTxs(t *testing.T) { tests := []struct { name string req *pb.ExecuteTxsRequest - mockFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) + mockFunc func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) wantErr bool wantCode connect.Code }{ @@ -202,8 +203,8 @@ func TestServer_ExecuteTxs(t *testing.T) { Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, }, - mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) ([]byte, error) { - return expectedStateRoot, nil + mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) (execution.ExecuteResult, error) { + return execution.ExecuteResult{UpdatedStateRoot: expectedStateRoot}, nil }, wantErr: false, }, @@ -245,8 +246,8 @@ func TestServer_ExecuteTxs(t *testing.T) { Timestamp: timestamppb.New(timestamp), PrevStateRoot: prevStateRoot, }, - mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) ([]byte, error) { - return nil, errors.New("execute txs failed") + mockFunc: func(ctx context.Context, t [][]byte, bh uint64, ts time.Time, psr []byte) (execution.ExecuteResult, error) { + return execution.ExecuteResult{}, errors.New("execute txs failed") }, wantErr: true, wantCode: connect.CodeInternal, diff --git a/go.mod b/go.mod index 2b517a48af..37b153773f 100644 --- a/go.mod +++ b/go.mod @@ -51,6 +51,8 @@ require ( gotest.tools/v3 v3.5.2 ) +replace github.com/evstack/ev-node/core => ./core + require ( cloud.google.com/go v0.123.0 // indirect cloud.google.com/go/auth v0.18.2 // indirect diff --git a/node/execution_test.go b/node/execution_test.go index 91133a218d..8f75cf87b6 100644 
--- a/node/execution_test.go +++ b/node/execution_test.go @@ -98,7 +98,7 @@ func executeTransactions(t *testing.T, executor coreexecutor.Executor, ctx conte timestamp := time.Now() newStateRoot, err := executor.ExecuteTxs(ctx, txs, blockHeight, timestamp, stateRoot) require.NoError(t, err) - return newStateRoot + return newStateRoot.UpdatedStateRoot } func finalizeExecution(t *testing.T, executor coreexecutor.Executor, ctx context.Context) { diff --git a/node/failover.go b/node/failover.go index 752b6aaba3..42dac4e8bc 100644 --- a/node/failover.go +++ b/node/failover.go @@ -139,7 +139,7 @@ func setupFailoverState( headerSyncService.Store(), dataSyncService.Store(), p2pClient, - genesis.InitialProposerAddress(), + genesis.ProposerAddress, logger, nodeConfig, bestKnownHeightProvider, diff --git a/node/full.go b/node/full.go index 5d13beebbd..bd44f9ef42 100644 --- a/node/full.go +++ b/node/full.go @@ -78,7 +78,7 @@ func newFullNode( logger zerolog.Logger, nodeOpts NodeOptions, ) (fn *FullNode, err error) { - logger.Debug().Hex("address", genesis.InitialProposerAddress()).Msg("Initial proposer address") + logger.Debug().Hex("address", genesis.ProposerAddress).Msg("Proposer address") blockMetrics, _ := metricsProvider(genesis.ChainID) diff --git a/pkg/genesis/genesis.go b/pkg/genesis/genesis.go index 1cbe506e1c..e1a401d9fc 100644 --- a/pkg/genesis/genesis.go +++ b/pkg/genesis/genesis.go @@ -1,7 +1,6 @@ package genesis import ( - "bytes" "fmt" "time" ) @@ -12,11 +11,10 @@ const ChainIDFlag = "chain_id" // This genesis struct only contains the fields required by evolve. // The app state or other fields are not included here. 
type Genesis struct { - ChainID string `json:"chain_id"` - StartTime time.Time `json:"start_time"` - InitialHeight uint64 `json:"initial_height"` - ProposerAddress []byte `json:"proposer_address"` - ProposerSchedule []ProposerScheduleEntry `json:"proposer_schedule,omitempty"` + ChainID string `json:"chain_id"` + StartTime time.Time `json:"start_time"` + InitialHeight uint64 `json:"initial_height"` + ProposerAddress []byte `json:"proposer_address"` // DAStartHeight corresponds to the height at which the first DA header/data has been published. // This value is meant to be updated after genesis and shared to all syncing nodes for speeding up syncing via DA. DAStartHeight uint64 `json:"da_start_height"` @@ -58,28 +56,8 @@ func (g Genesis) Validate() error { return fmt.Errorf("start_time cannot be zero time") } - if len(g.ProposerSchedule) == 0 { - if len(g.ProposerAddress) == 0 { - return fmt.Errorf("proposer_address cannot be empty when proposer_schedule is unset") - } - } else { - if err := g.ProposerSchedule[0].validate(g.InitialHeight); err != nil { - return fmt.Errorf("invalid proposer_schedule[0]: %w", err) - } - if g.ProposerSchedule[0].StartHeight != g.InitialHeight { - return fmt.Errorf("proposer_schedule[0].start_height must equal initial_height (%d), got %d", g.InitialHeight, g.ProposerSchedule[0].StartHeight) - } - for i := 1; i < len(g.ProposerSchedule); i++ { - if err := g.ProposerSchedule[i].validate(g.InitialHeight); err != nil { - return fmt.Errorf("invalid proposer_schedule[%d]: %w", i, err) - } - if g.ProposerSchedule[i].StartHeight <= g.ProposerSchedule[i-1].StartHeight { - return fmt.Errorf("proposer_schedule must be strictly increasing: entry %d start_height %d is not greater than previous %d", i, g.ProposerSchedule[i].StartHeight, g.ProposerSchedule[i-1].StartHeight) - } - } - if len(g.ProposerAddress) > 0 && !bytes.Equal(g.ProposerAddress, g.ProposerSchedule[0].Address) { - return fmt.Errorf("proposer_address must match 
proposer_schedule[0].address") - } + if g.ProposerAddress == nil { + return fmt.Errorf("proposer_address cannot be nil") } if g.DAEpochForcedInclusion < 1 { diff --git a/pkg/genesis/genesis_test.go b/pkg/genesis/genesis_test.go index 9c850f3963..da3cc14b1f 100644 --- a/pkg/genesis/genesis_test.go +++ b/pkg/genesis/genesis_test.go @@ -1,13 +1,10 @@ package genesis import ( - "crypto/rand" "testing" "time" - "github.com/libp2p/go-libp2p/core/crypto" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestNewGenesis(t *testing.T) { @@ -138,175 +135,3 @@ func TestGenesis_Validate(t *testing.T) { }) } } - -func TestGenesis_ValidateProposerSchedule(t *testing.T) { - validTime := time.Unix(1_700_000_000, 0).UTC() - - newEntry := func(startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { - _, pub, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - entry, err := NewProposerScheduleEntry(startHeight, pub) - require.NoError(t, err) - return entry, pub - } - - entry1, _ := newEntry(1) - entry10, _ := newEntry(10) - entry20, _ := newEntry(20) - - tests := []struct { - name string - mutate func() Genesis - wantErr string - }{ - { - name: "valid - schedule without proposer_address", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - }, - { - name: "valid - schedule with matching proposer_address", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: entry1.Address, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - }, - { - name: "invalid - first entry start_height != initial_height", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 5, - ProposerSchedule: []ProposerScheduleEntry{entry10, 
entry20}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "start_height must equal initial_height", - }, - { - name: "invalid - first entry start_height below initial_height", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 5, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "start_height must be >= initial_height", - }, - { - name: "invalid - non-increasing (equal start_heights)", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry1}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "strictly increasing", - }, - { - name: "invalid - non-increasing (decreasing start_heights)", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry20, entry10}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "strictly increasing", - }, - { - name: "invalid - entry address does not match pub_key", - mutate: func() Genesis { - tampered := entry10 - tampered.Address = append([]byte(nil), entry10.Address...) 
- tampered.Address[0] ^= 0xFF - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, tampered}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "address does not match pub_key", - }, - { - name: "invalid - proposer_address mismatches schedule[0].address", - mutate: func() Genesis { - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerAddress: entry10.Address, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry10}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "proposer_address must match proposer_schedule[0].address", - }, - { - name: "invalid - empty address in entry", - mutate: func() Genesis { - empty := entry10 - empty.Address = nil - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, empty}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "address cannot be empty", - }, - { - name: "invalid - malformed pub_key bytes", - mutate: func() Genesis { - bad := entry10 - bad.PubKey = []byte{0x00, 0x01, 0x02} - return Genesis{ - ChainID: "c", - StartTime: validTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, bad}, - DAEpochForcedInclusion: 1, - } - }, - wantErr: "unmarshal proposer pub_key", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.mutate().Validate() - if tt.wantErr == "" { - require.NoError(t, err) - return - } - require.Error(t, err) - require.Contains(t, err.Error(), tt.wantErr) - }) - } -} diff --git a/pkg/genesis/io.go b/pkg/genesis/io.go index dcf9048aa6..8c9d88e955 100644 --- a/pkg/genesis/io.go +++ b/pkg/genesis/io.go @@ -72,12 +72,12 @@ func LoadGenesis(genesisPath string) (Genesis, error) { return Genesis{}, err } - return genesis.normalized(), nil + return genesis, nil } // Save saves the genesis state to the specified file path. 
func (g Genesis) Save(genesisPath string) error { - genesisJSON, err := json.MarshalIndent(g.normalized(), "", " ") + genesisJSON, err := json.MarshalIndent(g, "", " ") if err != nil { return fmt.Errorf("failed to marshal genesis state: %w", err) } diff --git a/pkg/genesis/proposer_schedule.go b/pkg/genesis/proposer_schedule.go deleted file mode 100644 index c53684535a..0000000000 --- a/pkg/genesis/proposer_schedule.go +++ /dev/null @@ -1,213 +0,0 @@ -package genesis - -import ( - "bytes" - "crypto/sha256" - "fmt" - - "github.com/libp2p/go-libp2p/core/crypto" -) - -// ProposerScheduleEntry declares the proposer address that becomes active at start_height. -// PubKey is optional and can be used to pin the exact key material for a schedule entry. -type ProposerScheduleEntry struct { - StartHeight uint64 `json:"start_height"` - Address []byte `json:"address"` - PubKey []byte `json:"pub_key,omitempty"` -} - -// NewProposerScheduleEntry creates a proposer schedule entry from a libp2p public key. -func NewProposerScheduleEntry(startHeight uint64, pubKey crypto.PubKey) (ProposerScheduleEntry, error) { - if pubKey == nil { - return ProposerScheduleEntry{}, fmt.Errorf("proposer pub_key cannot be nil") - } - - marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) - if err != nil { - return ProposerScheduleEntry{}, fmt.Errorf("marshal proposer pub_key: %w", err) - } - - return ProposerScheduleEntry{ - StartHeight: startHeight, - Address: proposerKeyAddress(pubKey), - PubKey: marshalledPubKey, - }, nil -} - -// PublicKey unmarshals the configured proposer public key. Address-only schedule -// entries may omit the pubkey and will return nil, nil here. 
-func (e ProposerScheduleEntry) PublicKey() (crypto.PubKey, error) { - if len(e.PubKey) == 0 { - return nil, nil - } - - pubKey, err := crypto.UnmarshalPublicKey(e.PubKey) - if err != nil { - return nil, fmt.Errorf("unmarshal proposer pub_key: %w", err) - } - - return pubKey, nil -} - -func (e ProposerScheduleEntry) validate(initialHeight uint64) error { - if e.StartHeight < initialHeight { - return fmt.Errorf("proposer schedule start_height must be >= initial_height (%d), got %d", initialHeight, e.StartHeight) - } - - if len(e.Address) == 0 { - return fmt.Errorf("proposer schedule address cannot be empty") - } - - if len(e.PubKey) == 0 { - return nil - } - - pubKey, err := e.PublicKey() - if err != nil { - return err - } - - expectedAddress := proposerKeyAddress(pubKey) - if !bytes.Equal(expectedAddress, e.Address) { - return fmt.Errorf("proposer schedule address does not match pub_key: got %x, expected %x", e.Address, expectedAddress) - } - - return nil -} - -// EffectiveProposerSchedule returns the explicit proposer schedule when present, -// or derives a legacy single-entry schedule from proposer_address. -func (g Genesis) EffectiveProposerSchedule() []ProposerScheduleEntry { - if len(g.ProposerSchedule) > 0 { - out := make([]ProposerScheduleEntry, len(g.ProposerSchedule)) - for i, entry := range g.ProposerSchedule { - out[i] = ProposerScheduleEntry{ - StartHeight: entry.StartHeight, - Address: bytes.Clone(entry.Address), - PubKey: bytes.Clone(entry.PubKey), - } - } - return out - } - - if len(g.ProposerAddress) == 0 { - return nil - } - - return []ProposerScheduleEntry{{ - StartHeight: g.InitialHeight, - Address: bytes.Clone(g.ProposerAddress), - }} -} - -// InitialProposerAddress returns the first proposer address for compatibility -// with code paths that still surface a single address externally. 
-func (g Genesis) InitialProposerAddress() []byte { - entry, err := g.ProposerAtHeight(g.InitialHeight) - if err != nil { - return nil - } - - return bytes.Clone(entry.Address) -} - -func (g Genesis) normalized() Genesis { - normalized := g - if len(normalized.ProposerAddress) == 0 { - normalized.ProposerAddress = normalized.InitialProposerAddress() - } - return normalized -} - -// HasScheduledProposer reports whether the address appears in the effective proposer schedule. -func (g Genesis) HasScheduledProposer(address []byte) bool { - for _, entry := range g.EffectiveProposerSchedule() { - if bytes.Equal(entry.Address, address) { - return true - } - } - return false -} - -// ProposerAtHeight resolves the proposer that is active for the given block height. -func (g Genesis) ProposerAtHeight(height uint64) (ProposerScheduleEntry, error) { - schedule := g.EffectiveProposerSchedule() - if len(schedule) == 0 { - return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured") - } - - if height < schedule[0].StartHeight { - return ProposerScheduleEntry{}, fmt.Errorf("no proposer configured for height %d before start_height %d", height, schedule[0].StartHeight) - } - - entry := schedule[0] - for i := 1; i < len(schedule); i++ { - if height < schedule[i].StartHeight { - break - } - entry = schedule[i] - } - - return ProposerScheduleEntry{ - StartHeight: entry.StartHeight, - Address: bytes.Clone(entry.Address), - PubKey: bytes.Clone(entry.PubKey), - }, nil -} - -// ValidateProposer checks that the provided proposer address and public key match -// the proposer schedule entry active at the given height. 
-func (g Genesis) ValidateProposer(height uint64, address []byte, pubKey crypto.PubKey) error { - entry, err := g.ProposerAtHeight(height) - if err != nil { - return err - } - - if !bytes.Equal(entry.Address, address) { - return fmt.Errorf("unexpected proposer at height %d: got %x, expected %x", height, address, entry.Address) - } - - if len(entry.PubKey) == 0 { - // Address-only schedule entry. Without a pinned pubkey we still - // have to bind the caller-provided pubkey to the scheduled - // address, otherwise a forger can pair the scheduled address - // with an arbitrary key and later satisfy signature checks that - // trust Signer.PubKey. - if pubKey != nil { - derived := proposerKeyAddress(pubKey) - if !bytes.Equal(entry.Address, derived) { - return fmt.Errorf("proposer pub_key does not match scheduled address at height %d", height) - } - } - return nil - } - - if pubKey == nil { - return fmt.Errorf("missing proposer pub_key at height %d", height) - } - - marshalledPubKey, err := crypto.MarshalPublicKey(pubKey) - if err != nil { - return fmt.Errorf("marshal proposer pub_key: %w", err) - } - - if !bytes.Equal(entry.PubKey, marshalledPubKey) { - return fmt.Errorf("unexpected proposer pub_key at height %d", height) - } - - return nil -} - -func proposerKeyAddress(pubKey crypto.PubKey) []byte { - if pubKey == nil { - return nil - } - - raw, err := pubKey.Raw() - if err != nil { - return nil - } - - sum := sha256.Sum256(raw) - return sum[:] -} diff --git a/pkg/genesis/proposer_schedule_test.go b/pkg/genesis/proposer_schedule_test.go deleted file mode 100644 index 88f87590ea..0000000000 --- a/pkg/genesis/proposer_schedule_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package genesis - -import ( - "bytes" - "crypto/rand" - "encoding/json" - "os" - "path/filepath" - "testing" - "time" - - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/stretchr/testify/require" - - "github.com/evstack/ev-node/pkg/signer/noop" -) - -// testGenesisStartTime is a fixed timestamp for 
genesis fixtures so tests do -// not depend on wall-clock time. -var testGenesisStartTime = time.Unix(1_700_000_000, 0).UTC() - -func makeProposerScheduleEntry(t *testing.T, startHeight uint64) (ProposerScheduleEntry, crypto.PubKey) { - t.Helper() - - _, pubKey, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - entry, err := NewProposerScheduleEntry(startHeight, pubKey) - require.NoError(t, err) - - return entry, pubKey -} - -func TestGenesisProposerAtHeight(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 3) - entry2, _ := makeProposerScheduleEntry(t, 10) - - genesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 3, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - require.NoError(t, genesis.Validate()) - - proposer, err := genesis.ProposerAtHeight(3) - require.NoError(t, err) - require.Equal(t, entry1.Address, proposer.Address) - - proposer, err = genesis.ProposerAtHeight(9) - require.NoError(t, err) - require.Equal(t, entry1.Address, proposer.Address) - - proposer, err = genesis.ProposerAtHeight(10) - require.NoError(t, err) - require.Equal(t, entry2.Address, proposer.Address) -} - -func TestGenesisValidateProposerScheduleWithPinnedPubKey(t *testing.T) { - entry1, pubKey1 := makeProposerScheduleEntry(t, 1) - entry2, pubKey2 := makeProposerScheduleEntry(t, 20) - - genesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - require.NoError(t, genesis.Validate()) - require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) - require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) - require.Error(t, genesis.ValidateProposer(21, entry2.Address, pubKey1)) -} - -func TestGenesisValidateAddressOnlyProposerSchedule(t *testing.T) { - entry1, pubKey1 := makeProposerScheduleEntry(t, 
1) - entry2, pubKey2 := makeProposerScheduleEntry(t, 20) - entry1.PubKey = nil - entry2.PubKey = nil - - genesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - require.NoError(t, genesis.Validate()) - require.NoError(t, genesis.ValidateProposer(1, entry1.Address, pubKey1)) - require.NoError(t, genesis.ValidateProposer(21, entry2.Address, pubKey2)) -} - -func TestNewProposerScheduleEntry_NilPubKey(t *testing.T) { - _, err := NewProposerScheduleEntry(1, nil) - require.Error(t, err) -} - -func TestProposerAtHeight_BeforeFirstStartHeight(t *testing.T) { - entry, _ := makeProposerScheduleEntry(t, 5) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 5, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - _, err := genesis.ProposerAtHeight(4) - require.Error(t, err) - require.Contains(t, err.Error(), "before start_height") -} - -func TestProposerAtHeight_NoProposerConfigured(t *testing.T) { - genesis := Genesis{ChainID: "c", InitialHeight: 1} - _, err := genesis.ProposerAtHeight(1) - require.Error(t, err) - require.Contains(t, err.Error(), "no proposer configured") -} - -func TestProposerAtHeight_ReturnedEntryIsCopy(t *testing.T) { - entry, _ := makeProposerScheduleEntry(t, 1) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - got, err := genesis.ProposerAtHeight(1) - require.NoError(t, err) - got.Address[0] ^= 0xFF - got.PubKey[0] ^= 0xFF - - same, err := genesis.ProposerAtHeight(1) - require.NoError(t, err) - require.Equal(t, entry.Address, same.Address) - require.Equal(t, entry.PubKey, same.PubKey) -} - -func TestValidateProposer_WrongAddress(t *testing.T) { - entry, pubKey := makeProposerScheduleEntry(t, 1) - other, _ := 
makeProposerScheduleEntry(t, 1) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - err := genesis.ValidateProposer(1, other.Address, pubKey) - require.Error(t, err) - require.Contains(t, err.Error(), "unexpected proposer at height 1") -} - -func TestValidateProposer_MissingPubKey(t *testing.T) { - entry, _ := makeProposerScheduleEntry(t, 1) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry}, - DAEpochForcedInclusion: 1, - } - - err := genesis.ValidateProposer(1, entry.Address, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "missing proposer pub_key") -} - -// TestValidateProposer_AddressOnly_RejectsForgedPubKey ensures that an address-only -// schedule entry still binds the caller-provided pubkey to the scheduled address. -// Without this check, a forger could claim Signer.Address = scheduled_addr with an -// arbitrary Signer.PubKey and later pass signature validation that trusts that pubkey. -func TestValidateProposer_AddressOnly_RejectsForgedPubKey(t *testing.T) { - scheduled, _ := makeProposerScheduleEntry(t, 1) - _, attackerPub := makeProposerScheduleEntry(t, 1) - - scheduled.PubKey = nil // address-only entry - - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{scheduled}, - DAEpochForcedInclusion: 1, - } - - // Scheduled address paired with a different pubkey must be rejected. 
- err := genesis.ValidateProposer(1, scheduled.Address, attackerPub) - require.Error(t, err) - require.Contains(t, err.Error(), "does not match scheduled address") -} - -func TestValidateProposer_UsesActiveEntryAtHeight(t *testing.T) { - entry1, pub1 := makeProposerScheduleEntry(t, 1) - entry2, pub2 := makeProposerScheduleEntry(t, 10) - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - // entry2 signer trying to sign height within entry1's active range must fail. - require.Error(t, genesis.ValidateProposer(9, entry2.Address, pub2)) - // entry1 signer trying to sign height within entry2's active range must fail. - require.Error(t, genesis.ValidateProposer(10, entry1.Address, pub1)) -} - -func TestHasScheduledProposer(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 1) - entry2, _ := makeProposerScheduleEntry(t, 10) - unknown, _ := makeProposerScheduleEntry(t, 99) - - explicit := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - require.True(t, explicit.HasScheduledProposer(entry1.Address)) - require.True(t, explicit.HasScheduledProposer(entry2.Address)) - require.False(t, explicit.HasScheduledProposer(unknown.Address)) - - legacy := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerAddress: entry1.Address, - DAEpochForcedInclusion: 1, - } - require.True(t, legacy.HasScheduledProposer(entry1.Address)) - require.False(t, legacy.HasScheduledProposer(entry2.Address)) - - empty := Genesis{ChainID: "c", InitialHeight: 1} - require.False(t, empty.HasScheduledProposer(entry1.Address)) -} - -func TestEffectiveProposerSchedule_ExplicitScheduleIsDeepCopy(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 1) - entry2, _ := makeProposerScheduleEntry(t, 10) - 
origAddr := bytes.Clone(entry1.Address) - origPub := bytes.Clone(entry1.PubKey) - - genesis := Genesis{ - ChainID: "c", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - // Mutating returned byte slices must not corrupt the genesis-backed data. - got := genesis.EffectiveProposerSchedule() - got[0].Address[0] ^= 0xFF - got[0].PubKey[0] ^= 0xFF - - require.Equal(t, origAddr, genesis.ProposerSchedule[0].Address) - require.Equal(t, origPub, genesis.ProposerSchedule[0].PubKey) -} - -func TestEffectiveProposerSchedule_LegacyFallback(t *testing.T) { - addr := []byte("some-address-bytes") - origAddr := bytes.Clone(addr) - legacy := Genesis{ - ChainID: "c", - InitialHeight: 7, - ProposerAddress: addr, - } - schedule := legacy.EffectiveProposerSchedule() - require.Len(t, schedule, 1) - require.Equal(t, uint64(7), schedule[0].StartHeight) - require.Equal(t, addr, schedule[0].Address) - require.Empty(t, schedule[0].PubKey) - - // mutating the derived slice must not affect the genesis backing data. - schedule[0].Address[0] ^= 0xFF - require.Equal(t, origAddr, legacy.ProposerAddress) -} - -func TestEffectiveProposerSchedule_Empty(t *testing.T) { - require.Nil(t, Genesis{}.EffectiveProposerSchedule()) -} - -func TestInitialProposerAddress_EmptyGenesisReturnsNil(t *testing.T) { - require.Nil(t, Genesis{InitialHeight: 1}.InitialProposerAddress()) -} - -// TestProposerKeyAddressMatchesSignerGetAddress pins the invariant that the -// genesis-side address derivation matches the signer implementations. If a -// signer ever changes its address formula this test will fail and flag the -// break instead of silently producing rejected blocks after a key rotation. 
-func TestProposerKeyAddressMatchesSignerGetAddress(t *testing.T) { - priv, pub, err := crypto.GenerateEd25519Key(rand.Reader) - require.NoError(t, err) - - s, err := noop.NewNoopSigner(priv) - require.NoError(t, err) - - signerAddr, err := s.GetAddress() - require.NoError(t, err) - - genesisAddr := proposerKeyAddress(pub) - require.Equal(t, signerAddr, genesisAddr) - - entry, err := NewProposerScheduleEntry(1, pub) - require.NoError(t, err) - require.Equal(t, signerAddr, entry.Address) -} - -func TestLoadGenesisNormalizesLegacyProposerAddressFromSchedule(t *testing.T) { - entry1, _ := makeProposerScheduleEntry(t, 1) - entry2, _ := makeProposerScheduleEntry(t, 50) - - rawGenesis := Genesis{ - ChainID: "test-chain", - StartTime: testGenesisStartTime, - InitialHeight: 1, - ProposerSchedule: []ProposerScheduleEntry{entry1, entry2}, - DAEpochForcedInclusion: 1, - } - - genesisPath := filepath.Join(t.TempDir(), "genesis.json") - genesisJSON, err := json.Marshal(rawGenesis) - require.NoError(t, err) - require.NoError(t, os.WriteFile(genesisPath, genesisJSON, 0o600)) - - loaded, err := LoadGenesis(genesisPath) - require.NoError(t, err) - require.Equal(t, entry1.Address, loaded.ProposerAddress) - require.Equal(t, rawGenesis.ProposerSchedule, loaded.ProposerSchedule) -} diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go index 419f8b6631..24eb133124 100644 --- a/pkg/rpc/server/server.go +++ b/pkg/rpc/server/server.go @@ -142,11 +142,12 @@ func (s *StoreServer) GetState( // Convert state to protobuf type pbState := &pb.State{ - AppHash: state.AppHash, - LastBlockHeight: state.LastBlockHeight, - LastBlockTime: timestamppb.New(state.LastBlockTime), - DaHeight: state.DAHeight, - ChainId: state.ChainID, + AppHash: state.AppHash, + LastBlockHeight: state.LastBlockHeight, + LastBlockTime: timestamppb.New(state.LastBlockTime), + DaHeight: state.DAHeight, + ChainId: state.ChainID, + NextProposerAddress: state.NextProposerAddress, Version: &pb.Version{ Block: 
state.Version.Block, App: state.Version.App, diff --git a/pkg/telemetry/executor_tracing.go b/pkg/telemetry/executor_tracing.go index 0f5507e04f..365ae7dd14 100644 --- a/pkg/telemetry/executor_tracing.go +++ b/pkg/telemetry/executor_tracing.go @@ -2,6 +2,7 @@ package telemetry import ( "context" + "encoding/hex" "time" "go.opentelemetry.io/otel" @@ -58,7 +59,7 @@ func (t *tracedExecutor) GetTxs(ctx context.Context) ([][]byte, error) { return txs, err } -func (t *tracedExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (t *tracedExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { ctx, span := t.tracer.Start(ctx, "Executor.ExecuteTxs", trace.WithAttributes( attribute.Int("tx.count", len(txs)), @@ -68,12 +69,14 @@ func (t *tracedExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeig ) defer span.End() - stateRoot, err := t.inner.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + result, err := t.inner.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) + } else if len(result.NextProposerAddress) > 0 { + span.SetAttributes(attribute.String("next_proposer_address", hex.EncodeToString(result.NextProposerAddress))) } - return stateRoot, err + return result, err } func (t *tracedExecutor) SetFinal(ctx context.Context, blockHeight uint64) error { diff --git a/pkg/telemetry/executor_tracing_test.go b/pkg/telemetry/executor_tracing_test.go index e53c8919af..a1715c928f 100644 --- a/pkg/telemetry/executor_tracing_test.go +++ b/pkg/telemetry/executor_tracing_test.go @@ -183,10 +183,10 @@ func TestWithTracingExecutor_ExecuteTxs_Success(t *testing.T) { ExecuteTxs(mock.Anything, txs, blockHeight, timestamp, prevStateRoot). 
Return(expectedStateRoot, nil) - stateRoot, err := traced.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) + result, err := traced.ExecuteTxs(ctx, txs, blockHeight, timestamp, prevStateRoot) require.NoError(t, err) - require.Equal(t, expectedStateRoot, stateRoot) + require.Equal(t, expectedStateRoot, result.UpdatedStateRoot) // verify span spans := sr.Ended() diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index e60bd56e0d..0002f818f6 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -38,6 +38,8 @@ message Header { bytes validator_hash = 11; // Chain ID the block belongs to string chain_id = 12; + // Proposer address selected by this block's execution result for the next block. + bytes next_proposer_address = 13; reserved 5, 7, 9; } diff --git a/proto/evnode/v1/execution.proto b/proto/evnode/v1/execution.proto index a3abbea36a..13d19db336 100644 --- a/proto/evnode/v1/execution.proto +++ b/proto/evnode/v1/execution.proto @@ -77,6 +77,10 @@ message ExecuteTxsResponse { // Maximum allowed transaction size (may change with protocol updates) uint64 max_bytes = 2; + + // Proposer address that should sign the next block. + // Empty means the current proposer remains active. + bytes next_proposer_address = 3; } // SetFinalRequest marks a block as finalized @@ -98,6 +102,10 @@ message GetExecutionInfoResponse { // Maximum gas allowed for transactions in a block // For non-gas-based execution layers, this should be 0 uint64 max_gas = 1; + + // Proposer address that should sign the next block from the execution + // layer's current view. Empty means unchanged or unavailable. 
+ bytes next_proposer_address = 2; } // FilterStatus represents the result of filtering a transaction diff --git a/proto/evnode/v1/state.proto b/proto/evnode/v1/state.proto index 1e8f35422d..7788c0123e 100644 --- a/proto/evnode/v1/state.proto +++ b/proto/evnode/v1/state.proto @@ -16,6 +16,7 @@ message State { uint64 da_height = 6; bytes app_hash = 8; bytes last_header_hash = 9; + bytes next_proposer_address = 10; reserved 7; } diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 9ffb941fe7..6d58d40d17 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -23,6 +23,7 @@ require ( replace ( github.com/evstack/ev-node => ../../ + github.com/evstack/ev-node/core => ../../core github.com/evstack/ev-node/execution/evm => ../../execution/evm github.com/evstack/ev-node/execution/evm/test => ../../execution/evm/test ) diff --git a/test/mocks/execution.go b/test/mocks/execution.go index 706e556291..8c973524e7 100644 --- a/test/mocks/execution.go +++ b/test/mocks/execution.go @@ -40,23 +40,29 @@ func (_m *MockExecutor) EXPECT() *MockExecutor_Expecter { } // ExecuteTxs provides a mock function for the type MockExecutor -func (_mock *MockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (_mock *MockExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { ret := _mock.Called(ctx, txs, blockHeight, timestamp, prevStateRoot) if len(ret) == 0 { panic("no return value specified for ExecuteTxs") } - var r0 []byte + var r0 execution.ExecuteResult var r1 error - if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) ([]byte, error)); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) (execution.ExecuteResult, error)); ok { return returnFunc(ctx, txs, blockHeight, timestamp, prevStateRoot) } - if returnFunc, ok := 
ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) []byte); ok { + if returnFunc, ok := ret.Get(0).(func(context.Context, [][]byte, uint64, time.Time, []byte) execution.ExecuteResult); ok { r0 = returnFunc(ctx, txs, blockHeight, timestamp, prevStateRoot) } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) + switch result := ret.Get(0).(type) { + case nil: + case execution.ExecuteResult: + r0 = result + case []byte: + r0 = execution.ExecuteResult{UpdatedStateRoot: result} + default: + r0 = ret.Get(0).(execution.ExecuteResult) } } if returnFunc, ok := ret.Get(1).(func(context.Context, [][]byte, uint64, time.Time, []byte) error); ok { @@ -115,12 +121,12 @@ func (_c *MockExecutor_ExecuteTxs_Call) Run(run func(ctx context.Context, txs [] return _c } -func (_c *MockExecutor_ExecuteTxs_Call) Return(updatedStateRoot []byte, err error) *MockExecutor_ExecuteTxs_Call { - _c.Call.Return(updatedStateRoot, err) +func (_c *MockExecutor_ExecuteTxs_Call) Return(result interface{}, err error) *MockExecutor_ExecuteTxs_Call { + _c.Call.Return(result, err) return _c } -func (_c *MockExecutor_ExecuteTxs_Call) RunAndReturn(run func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error)) *MockExecutor_ExecuteTxs_Call { +func (_c *MockExecutor_ExecuteTxs_Call) RunAndReturn(run func(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error)) *MockExecutor_ExecuteTxs_Call { _c.Call.Return(run) return _c } @@ -213,6 +219,20 @@ func (_c *MockExecutor_FilterTxs_Call) RunAndReturn(run func(ctx context.Context // GetExecutionInfo provides a mock function for the type MockExecutor func (_mock *MockExecutor) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) { + if len(_mock.ExpectedCalls) == 0 { + return execution.ExecutionInfo{}, nil + } + hasExpectation := false + for _, call := range _mock.ExpectedCalls { + if 
call.Method == "GetExecutionInfo" { + hasExpectation = true + break + } + } + if !hasExpectation { + return execution.ExecutionInfo{}, nil + } + ret := _mock.Called(ctx) if len(ret) == 0 { diff --git a/test/mocks/height_aware_executor.go b/test/mocks/height_aware_executor.go index 354534c484..9e512d291b 100644 --- a/test/mocks/height_aware_executor.go +++ b/test/mocks/height_aware_executor.go @@ -44,9 +44,18 @@ func (m *MockHeightAwareExecutor) GetTxs(ctx context.Context) ([][]byte, error) } // ExecuteTxs implements the Executor interface. -func (m *MockHeightAwareExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) ([]byte, error) { +func (m *MockHeightAwareExecutor) ExecuteTxs(ctx context.Context, txs [][]byte, blockHeight uint64, timestamp time.Time, prevStateRoot []byte) (execution.ExecuteResult, error) { args := m.Called(ctx, txs, blockHeight, timestamp, prevStateRoot) - return args.Get(0).([]byte), args.Error(1) + switch result := args.Get(0).(type) { + case nil: + return execution.ExecuteResult{}, args.Error(1) + case execution.ExecuteResult: + return result, args.Error(1) + case []byte: + return execution.ExecuteResult{UpdatedStateRoot: result}, args.Error(1) + default: + return args.Get(0).(execution.ExecuteResult), args.Error(1) + } } // SetFinal implements the Executor interface. @@ -63,6 +72,20 @@ func (m *MockHeightAwareExecutor) GetLatestHeight(ctx context.Context) (uint64, // GetExecutionInfo implements the Executor interface. 
func (m *MockHeightAwareExecutor) GetExecutionInfo(ctx context.Context) (execution.ExecutionInfo, error) { + if len(m.ExpectedCalls) == 0 { + return execution.ExecutionInfo{}, nil + } + hasExpectation := false + for _, call := range m.ExpectedCalls { + if call.Method == "GetExecutionInfo" { + hasExpectation = true + break + } + } + if !hasExpectation { + return execution.ExecutionInfo{}, nil + } + args := m.Called(ctx) return args.Get(0).(execution.ExecutionInfo), args.Error(1) } diff --git a/types/header.go b/types/header.go index 2b5e2881b9..7beb9a9728 100644 --- a/types/header.go +++ b/types/header.go @@ -82,6 +82,11 @@ type Header struct { // pubkey can't be recovered by the signature (e.g. ed25519). ProposerAddress []byte // original proposer of the block + // NextProposerAddress is selected by executing this block and becomes the + // proposer expected for the next block. Empty means the current proposer + // remains active. + NextProposerAddress []byte + // Legacy holds fields that were removed from the canonical header JSON/Go // representation but may still be required for backwards compatible binary // serialization (e.g. legacy signing payloads). @@ -124,11 +129,15 @@ func (h *Header) Time() time.Time { // Verify verifies the header. 
func (h *Header) Verify(untrstH *Header) error { - if !bytes.Equal(untrstH.ProposerAddress, h.ProposerAddress) { + expectedProposer := h.ProposerAddress + if len(h.NextProposerAddress) > 0 { + expectedProposer = h.NextProposerAddress + } + if !bytes.Equal(untrstH.ProposerAddress, expectedProposer) { return &header.VerifyError{ Reason: fmt.Errorf("%w: expected proposer (%X) got (%X)", ErrProposerVerificationFailed, - h.ProposerAddress, + expectedProposer, untrstH.ProposerAddress, ), } @@ -270,6 +279,7 @@ func (h Header) Clone() Header { clone.AppHash = cloneBytes(h.AppHash) clone.ValidatorHash = cloneBytes(h.ValidatorHash) clone.ProposerAddress = cloneBytes(h.ProposerAddress) + clone.NextProposerAddress = cloneBytes(h.NextProposerAddress) clone.Legacy = h.Legacy.Clone() clone.cachedHash = nil diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index b0a866e76e..55ab34b0ff 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -102,9 +102,11 @@ type Header struct { // validatorhash for compatibility with tendermint light client. ValidatorHash []byte `protobuf:"bytes,11,opt,name=validator_hash,json=validatorHash,proto3" json:"validator_hash,omitempty"` // Chain ID the block belongs to - ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Proposer address selected by this block's execution result for the next block. 
+ NextProposerAddress []byte `protobuf:"bytes,13,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Header) Reset() { @@ -200,6 +202,13 @@ func (x *Header) GetChainId() string { return "" } +func (x *Header) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // SignedHeader is a header with a signature and a signer. type SignedHeader struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -792,7 +801,7 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x16evnode/v1/evnode.proto\x12\tevnode.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"1\n" + "\aVersion\x12\x14\n" + "\x05block\x18\x01 \x01(\x04R\x05block\x12\x10\n" + - "\x03app\x18\x02 \x01(\x04R\x03app\"\xc3\x02\n" + + "\x03app\x18\x02 \x01(\x04R\x03app\"\xf7\x02\n" + "\x06Header\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x16\n" + "\x06height\x18\x02 \x01(\x04R\x06height\x12\x12\n" + @@ -803,7 +812,8 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x10proposer_address\x18\n" + " \x01(\fR\x0fproposerAddress\x12%\n" + "\x0evalidator_hash\x18\v \x01(\fR\rvalidatorHash\x12\x19\n" + - "\bchain_id\x18\f \x01(\tR\achainIdJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + + "\bchain_id\x18\f \x01(\tR\achainId\x122\n" + + "\x15next_proposer_address\x18\r \x01(\fR\x13nextProposerAddressJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + "\"\x88\x01\n" + "\fSignedHeader\x12)\n" + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + diff --git a/types/pb/evnode/v1/execution.pb.go b/types/pb/evnode/v1/execution.pb.go index 2b33c910d2..86d2ae8031 100644 --- a/types/pb/evnode/v1/execution.pb.go +++ b/types/pb/evnode/v1/execution.pb.go @@ -347,9 +347,12 @@ type ExecuteTxsResponse struct { // New state root after executing transactions UpdatedStateRoot []byte 
`protobuf:"bytes,1,opt,name=updated_state_root,json=updatedStateRoot,proto3" json:"updated_state_root,omitempty"` // Maximum allowed transaction size (may change with protocol updates) - MaxBytes uint64 `protobuf:"varint,2,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + MaxBytes uint64 `protobuf:"varint,2,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Proposer address that should sign the next block. + // Empty means the current proposer remains active. + NextProposerAddress []byte `protobuf:"bytes,3,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExecuteTxsResponse) Reset() { @@ -396,6 +399,13 @@ func (x *ExecuteTxsResponse) GetMaxBytes() uint64 { return 0 } +func (x *ExecuteTxsResponse) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // SetFinalRequest marks a block as finalized type SetFinalRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -521,9 +531,12 @@ type GetExecutionInfoResponse struct { state protoimpl.MessageState `protogen:"open.v1"` // Maximum gas allowed for transactions in a block // For non-gas-based execution layers, this should be 0 - MaxGas uint64 `protobuf:"varint,1,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + MaxGas uint64 `protobuf:"varint,1,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` + // Proposer address that should sign the next block from the execution + // layer's current view. Empty means unchanged or unavailable. 
+ NextProposerAddress []byte `protobuf:"bytes,2,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetExecutionInfoResponse) Reset() { @@ -563,6 +576,13 @@ func (x *GetExecutionInfoResponse) GetMaxGas() uint64 { return 0 } +func (x *GetExecutionInfoResponse) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // FilterTxsRequest contains transactions to validate and filter type FilterTxsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -701,16 +721,18 @@ const file_evnode_v1_execution_proto_rawDesc = "" + "\x03txs\x18\x01 \x03(\fR\x03txs\x12!\n" + "\fblock_height\x18\x02 \x01(\x04R\vblockHeight\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12&\n" + - "\x0fprev_state_root\x18\x04 \x01(\fR\rprevStateRoot\"_\n" + + "\x0fprev_state_root\x18\x04 \x01(\fR\rprevStateRoot\"\x93\x01\n" + "\x12ExecuteTxsResponse\x12,\n" + "\x12updated_state_root\x18\x01 \x01(\fR\x10updatedStateRoot\x12\x1b\n" + - "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\"4\n" + + "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\x122\n" + + "\x15next_proposer_address\x18\x03 \x01(\fR\x13nextProposerAddress\"4\n" + "\x0fSetFinalRequest\x12!\n" + "\fblock_height\x18\x01 \x01(\x04R\vblockHeight\"\x12\n" + "\x10SetFinalResponse\"\x19\n" + - "\x17GetExecutionInfoRequest\"3\n" + + "\x17GetExecutionInfoRequest\"g\n" + "\x18GetExecutionInfoResponse\x12\x17\n" + - "\amax_gas\x18\x01 \x01(\x04R\x06maxGas\"\x9f\x01\n" + + "\amax_gas\x18\x01 \x01(\x04R\x06maxGas\x122\n" + + "\x15next_proposer_address\x18\x02 \x01(\fR\x13nextProposerAddress\"\x9f\x01\n" + "\x10FilterTxsRequest\x12\x10\n" + "\x03txs\x18\x01 \x03(\fR\x03txs\x12\x1b\n" + "\tmax_bytes\x18\x02 \x01(\x04R\bmaxBytes\x12\x17\n" + diff --git a/types/pb/evnode/v1/state.pb.go b/types/pb/evnode/v1/state.pb.go index 
a76c7efb28..868164e407 100644 --- a/types/pb/evnode/v1/state.pb.go +++ b/types/pb/evnode/v1/state.pb.go @@ -24,17 +24,18 @@ const ( // State is the state of the blockchain. type State struct { - state protoimpl.MessageState `protogen:"open.v1"` - Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - InitialHeight uint64 `protobuf:"varint,3,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` - LastBlockHeight uint64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3" json:"last_block_time,omitempty"` - DaHeight uint64 `protobuf:"varint,6,opt,name=da_height,json=daHeight,proto3" json:"da_height,omitempty"` - AppHash []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - LastHeaderHash []byte `protobuf:"bytes,9,opt,name=last_header_hash,json=lastHeaderHash,proto3" json:"last_header_hash,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + InitialHeight uint64 `protobuf:"varint,3,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + LastBlockHeight uint64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_block_time,json=lastBlockTime,proto3" json:"last_block_time,omitempty"` + DaHeight uint64 `protobuf:"varint,6,opt,name=da_height,json=daHeight,proto3" 
json:"da_height,omitempty"` + AppHash []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastHeaderHash []byte `protobuf:"bytes,9,opt,name=last_header_hash,json=lastHeaderHash,proto3" json:"last_header_hash,omitempty"` + NextProposerAddress []byte `protobuf:"bytes,10,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *State) Reset() { @@ -123,6 +124,13 @@ func (x *State) GetLastHeaderHash() []byte { return nil } +func (x *State) GetNextProposerAddress() []byte { + if x != nil { + return x.NextProposerAddress + } + return nil +} + // RaftBlockState represents a replicated block state type RaftBlockState struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -275,7 +283,7 @@ var File_evnode_v1_state_proto protoreflect.FileDescriptor const file_evnode_v1_state_proto_rawDesc = "" + "\n" + - "\x15evnode/v1/state.proto\x12\tevnode.v1\x1a\x16evnode/v1/evnode.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xcf\x02\n" + + "\x15evnode/v1/state.proto\x12\tevnode.v1\x1a\x16evnode/v1/evnode.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x83\x03\n" + "\x05State\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x19\n" + "\bchain_id\x18\x02 \x01(\tR\achainId\x12%\n" + @@ -284,7 +292,9 @@ const file_evnode_v1_state_proto_rawDesc = "" + "\x0flast_block_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\rlastBlockTime\x12\x1b\n" + "\tda_height\x18\x06 \x01(\x04R\bdaHeight\x12\x19\n" + "\bapp_hash\x18\b \x01(\fR\aappHash\x12(\n" + - "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHashJ\x04\b\a\x10\b\"\x8e\x02\n" + + "\x10last_header_hash\x18\t \x01(\fR\x0elastHeaderHash\x122\n" + + "\x15next_proposer_address\x18\n" + + " \x01(\fR\x13nextProposerAddressJ\x04\b\a\x10\b\"\x8e\x02\n" + "\x0eRaftBlockState\x12\x16\n" + "\x06height\x18\x01 \x01(\x04R\x06height\x12D\n" + 
"\x1flast_submitted_da_header_height\x18\x02 \x01(\x04R\x1blastSubmittedDaHeaderHeight\x12@\n" + diff --git a/types/serialization.go b/types/serialization.go index dd131dd3bd..114de41194 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -89,6 +89,7 @@ func (h *Header) MarshalBinary() ([]byte, error) { ph.AppHash = h.AppHash ph.ProposerAddress = h.ProposerAddress ph.ValidatorHash = h.ValidatorHash + ph.NextProposerAddress = h.NextProposerAddress if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) } @@ -238,6 +239,7 @@ func (sh *SignedHeader) MarshalBinary() ([]byte, error) { ph.DataHash = sh.DataHash ph.AppHash = sh.AppHash ph.ProposerAddress = sh.ProposerAddress + ph.NextProposerAddress = sh.NextProposerAddress ph.ValidatorHash = sh.ValidatorHash if unknown := encodeLegacyUnknownFields(sh.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) @@ -378,14 +380,15 @@ func (h *Header) ToProto() *pb.Header { Block: h.Version.Block, App: h.Version.App, }, - Height: h.BaseHeader.Height, - Time: h.BaseHeader.Time, - LastHeaderHash: h.LastHeaderHash[:], - DataHash: h.DataHash[:], - AppHash: h.AppHash[:], - ProposerAddress: h.ProposerAddress[:], - ChainId: h.BaseHeader.ChainID, - ValidatorHash: h.ValidatorHash, + Height: h.BaseHeader.Height, + Time: h.BaseHeader.Time, + LastHeaderHash: h.LastHeaderHash[:], + DataHash: h.DataHash[:], + AppHash: h.AppHash[:], + ProposerAddress: h.ProposerAddress[:], + ChainId: h.BaseHeader.ChainID, + ValidatorHash: h.ValidatorHash, + NextProposerAddress: h.NextProposerAddress, } if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { pHeader.ProtoReflect().SetUnknown(unknown) @@ -436,6 +439,11 @@ func (h *Header) FromProto(other *pb.Header) error { } else { h.ValidatorHash = nil } + if other.NextProposerAddress != nil { + h.NextProposerAddress = append([]byte(nil), other.NextProposerAddress...) 
+ } else { + h.NextProposerAddress = nil + } legacy, err := decodeLegacyHeaderFields(other) if err != nil { @@ -533,6 +541,7 @@ func (s *State) MarshalBinary() ([]byte, error) { ps.DaHeight = s.DAHeight ps.AppHash = s.AppHash ps.LastHeaderHash = s.LastHeaderHash + ps.NextProposerAddress = s.NextProposerAddress bz, err := proto.Marshal(ps) @@ -554,13 +563,14 @@ func (s *State) ToProto() (*pb.State, error) { Block: s.Version.Block, App: s.Version.App, }, - ChainId: s.ChainID, - InitialHeight: s.InitialHeight, - LastBlockHeight: s.LastBlockHeight, - LastBlockTime: ×tamppb.Timestamp{Seconds: secs, Nanos: nanos}, - DaHeight: s.DAHeight, - AppHash: s.AppHash[:], - LastHeaderHash: s.LastHeaderHash[:], + ChainId: s.ChainID, + InitialHeight: s.InitialHeight, + LastBlockHeight: s.LastBlockHeight, + LastBlockTime: ×tamppb.Timestamp{Seconds: secs, Nanos: nanos}, + DaHeight: s.DAHeight, + AppHash: s.AppHash[:], + LastHeaderHash: s.LastHeaderHash[:], + NextProposerAddress: s.NextProposerAddress, }, nil } @@ -596,6 +606,11 @@ func (s *State) FromProto(other *pb.State) error { s.LastHeaderHash = nil } s.DAHeight = other.GetDaHeight() + if other.NextProposerAddress != nil { + s.NextProposerAddress = append([]byte(nil), other.NextProposerAddress...) + } else { + s.NextProposerAddress = nil + } return nil } @@ -888,5 +903,10 @@ func marshalLegacyHeader(h *Header) ([]byte, error) { payload = append(payload, clone.BaseHeader.ChainID...) } + // next proposer address + if len(clone.NextProposerAddress) > 0 { + payload = appendBytesField(payload, 13, clone.NextProposerAddress) + } + return payload, nil } diff --git a/types/state.go b/types/state.go index ccc383d79b..2d51e761af 100644 --- a/types/state.go +++ b/types/state.go @@ -37,20 +37,32 @@ type State struct { // the latest AppHash we've received from calling abci.Commit() AppHash []byte + + // NextProposerAddress is the proposer expected to sign LastBlockHeight+1. 
+ // It is initialized from genesis and then updated from execution results. + NextProposerAddress []byte } -func (s *State) NextState(header Header, stateRoot []byte) (State, error) { +func (s *State) NextState(header Header, stateRoot []byte, nextProposerAddress ...[]byte) (State, error) { height := header.Height() + nextProposer := header.NextProposerAddress + if len(nextProposerAddress) > 0 && len(nextProposerAddress[0]) > 0 { + nextProposer = nextProposerAddress[0] + } + if len(nextProposer) == 0 { + nextProposer = header.ProposerAddress + } return State{ - Version: s.Version, - ChainID: s.ChainID, - InitialHeight: s.InitialHeight, - LastBlockHeight: height, - LastBlockTime: header.Time(), - AppHash: stateRoot, - LastHeaderHash: header.Hash(), - DAHeight: s.DAHeight, + Version: s.Version, + ChainID: s.ChainID, + InitialHeight: s.InitialHeight, + LastBlockHeight: height, + LastBlockTime: header.Time(), + AppHash: stateRoot, + LastHeaderHash: header.Hash(), + DAHeight: s.DAHeight, + NextProposerAddress: cloneBytes(nextProposer), }, nil } @@ -64,6 +76,9 @@ func (s State) AssertValidForNextState(header *SignedHeader, data *Data) error { if err := Validate(header, data); err != nil { return fmt.Errorf("header-data validation failed: %w", err) } + if len(s.NextProposerAddress) > 0 && !bytes.Equal(header.ProposerAddress, s.NextProposerAddress) { + return fmt.Errorf("unexpected proposer - got: %x, want: %x", header.ProposerAddress, s.NextProposerAddress) + } return nil } From 6b2db4b9347cf9f043444f4d52cd7bec06942ff0 Mon Sep 17 00:00:00 2001 From: tac0turtle Date: Fri, 24 Apr 2026 18:05:05 +0200 Subject: [PATCH 6/6] revert header change --- block/internal/common/replay.go | 34 ++++--------------- block/internal/executing/executor.go | 19 +---------- .../internal/executing/executor_logic_test.go | 4 +-- block/internal/syncing/syncer.go | 12 +------ block/internal/syncing/syncer_test.go | 14 ++++---- .../types/src/proto/evnode.v1.messages.rs | 3 -- 
.../types/src/proto/evnode.v1.services.rs | 3 -- ...r-023-execution-owned-proposer-rotation.md | 15 ++++---- proto/evnode/v1/evnode.proto | 5 +-- types/header.go | 26 +++----------- types/pb/evnode/v1/evnode.pb.go | 22 ++++-------- types/serialization.go | 30 +++++----------- types/signed_header_test.go | 24 +++---------- types/state.go | 2 +- 14 files changed, 48 insertions(+), 165 deletions(-) diff --git a/block/internal/common/replay.go b/block/internal/common/replay.go index a120450d22..426961422d 100644 --- a/block/internal/common/replay.go +++ b/block/internal/common/replay.go @@ -185,19 +185,10 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { return fmt.Errorf("failed to execute transactions: %w", err) } newAppHash := result.UpdatedStateRoot - if len(result.NextProposerAddress) > 0 { - if len(header.NextProposerAddress) == 0 { - return fmt.Errorf("next proposer mismatch at height %d: header empty, execution %x", height, result.NextProposerAddress) - } - if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { - return fmt.Errorf("next proposer mismatch at height %d: header %x, execution %x", - height, - header.NextProposerAddress, - result.NextProposerAddress, - ) - } - } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { - return fmt.Errorf("next proposer mismatch at height %d: header %x, execution unchanged", height, header.NextProposerAddress) + + newState, err := prevState.NextState(header.Header, newAppHash, result.NextProposerAddress) + if err != nil { + return fmt.Errorf("calculate next state: %w", err) } // The result of ExecuteTxs (newAppHash) should match the stored state at this height. 
@@ -224,18 +215,11 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { return err } if len(expectedState.NextProposerAddress) > 0 { - expectedNextProposer := header.NextProposerAddress - if len(expectedNextProposer) == 0 { - expectedNextProposer = result.NextProposerAddress - } - if len(expectedNextProposer) == 0 { - expectedNextProposer = header.ProposerAddress - } - if !bytes.Equal(expectedNextProposer, expectedState.NextProposerAddress) { + if !bytes.Equal(newState.NextProposerAddress, expectedState.NextProposerAddress) { return fmt.Errorf("next proposer mismatch at height %d: expected %x got %x", height, expectedState.NextProposerAddress, - expectedNextProposer, + newState.NextProposerAddress, ) } } @@ -251,12 +235,6 @@ func (s *Replayer) replayBlock(ctx context.Context, height uint64) error { Msg("replayBlock: ExecuteTxs completed (no stored state to verify against)") } - // Calculate new state - newState, err := prevState.NextState(header.Header, newAppHash) - if err != nil { - return fmt.Errorf("calculate next state: %w", err) - } - // Persist the new state batch, err := s.store.NewBatch(ctx) if err != nil { diff --git a/block/internal/executing/executor.go b/block/internal/executing/executor.go index de825db24b..4d62f8600d 100644 --- a/block/internal/executing/executor.go +++ b/block/internal/executing/executor.go @@ -572,13 +572,6 @@ func (e *Executor) ProduceBlock(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to apply block: %w", err) } - if !bytes.Equal(newState.NextProposerAddress, header.ProposerAddress) { - header.NextProposerAddress = append([]byte(nil), newState.NextProposerAddress...) 
- header.InvalidateHash() - } else if len(header.NextProposerAddress) > 0 { - header.NextProposerAddress = nil - header.InvalidateHash() - } // set the DA height in the sequencer newState.DAHeight = e.sequencer.GetDAHeight() @@ -861,19 +854,9 @@ func (e *Executor) ApplyBlock(ctx context.Context, header types.Header, data *ty e.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } - if len(result.NextProposerAddress) > 0 { - if len(header.NextProposerAddress) == 0 { - header.NextProposerAddress = append([]byte(nil), result.NextProposerAddress...) - } else if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) - } - header.InvalidateHash() - } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) - } // Create new state - newState, err := currentState.NextState(header, result.UpdatedStateRoot) + newState, err := currentState.NextState(header, result.UpdatedStateRoot, result.NextProposerAddress) if err != nil { return types.State{}, fmt.Errorf("failed to create next state: %w", err) } diff --git a/block/internal/executing/executor_logic_test.go b/block/internal/executing/executor_logic_test.go index 0b1f86769a..f010dd80ef 100644 --- a/block/internal/executing/executor_logic_test.go +++ b/block/internal/executing/executor_logic_test.go @@ -69,14 +69,13 @@ func TestProduceBlock_EmptyBatch_SetsEmptyDataHash(t *testing.T) { require.NoError(t, err) assert.Equal(t, 0, len(data.Txs)) assert.EqualValues(t, common.DataHashForEmptyTxs, sh.DataHash) - assert.Empty(t, sh.NextProposerAddress) state, err := fx.MemStore.GetState(context.Background()) 
require.NoError(t, err) assert.Equal(t, fx.Exec.genesis.ProposerAddress, state.NextProposerAddress) } -func TestProduceBlock_CommitsExecutionNextProposer(t *testing.T) { +func TestProduceBlock_PersistsExecutionNextProposer(t *testing.T) { fx := setupTestExecutor(t, 1000) defer fx.Cancel() @@ -100,7 +99,6 @@ func TestProduceBlock_CommitsExecutionNextProposer(t *testing.T) { header, data, err := fx.MemStore.GetBlockData(context.Background(), 1) require.NoError(t, err) require.NoError(t, header.ValidateBasicWithData(data)) - assert.Equal(t, nextAddr, header.NextProposerAddress) state, err := fx.MemStore.GetState(context.Background()) require.NoError(t, err) diff --git a/block/internal/syncing/syncer.go b/block/internal/syncing/syncer.go index 9bcbd0f3ee..c615c1f38d 100644 --- a/block/internal/syncing/syncer.go +++ b/block/internal/syncing/syncer.go @@ -837,19 +837,9 @@ func (s *Syncer) ApplyBlock(ctx context.Context, header types.Header, data *type s.sendCriticalError(fmt.Errorf("failed to execute transactions: %w", err)) return types.State{}, fmt.Errorf("failed to execute transactions: %w", err) } - if len(result.NextProposerAddress) > 0 { - if len(header.NextProposerAddress) == 0 { - return types.State{}, fmt.Errorf("next proposer mismatch: header empty, execution %x", result.NextProposerAddress) - } - if !bytes.Equal(header.NextProposerAddress, result.NextProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution %x", header.NextProposerAddress, result.NextProposerAddress) - } - } else if len(header.NextProposerAddress) > 0 && !bytes.Equal(header.NextProposerAddress, header.ProposerAddress) { - return types.State{}, fmt.Errorf("next proposer mismatch: header %x, execution unchanged", header.NextProposerAddress) - } // Create new state - newState, err := currentState.NextState(header, result.UpdatedStateRoot) + newState, err := currentState.NextState(header, result.UpdatedStateRoot, result.NextProposerAddress) if err != nil { 
return types.State{}, fmt.Errorf("failed to create next state: %w", err) } diff --git a/block/internal/syncing/syncer_test.go b/block/internal/syncing/syncer_test.go index 696d2c939f..2b4bce1aa6 100644 --- a/block/internal/syncing/syncer_test.go +++ b/block/internal/syncing/syncer_test.go @@ -214,17 +214,15 @@ func TestSyncer_ValidateBlock_UsesStateNextProposer(t *testing.T) { require.Contains(t, err.Error(), "unexpected proposer") } -func TestSyncer_ApplyBlockRejectsExecutionNextProposerMismatch(t *testing.T) { +func TestSyncer_ApplyBlockPersistsExecutionNextProposer(t *testing.T) { addr, _, _ := buildSyncTestSigner(t) - headerNext := []byte("header-next-proposer") execNext := []byte("execution-next-proposer") mockExec := testmocks.NewMockExecutor(t) data := makeData("tchain", 1, 1) header := types.Header{ - BaseHeader: types.BaseHeader{ChainID: "tchain", Height: 1, Time: uint64(time.Now().UnixNano())}, - ProposerAddress: addr, - NextProposerAddress: headerNext, + BaseHeader: types.BaseHeader{ChainID: "tchain", Height: 1, Time: uint64(time.Now().UnixNano())}, + ProposerAddress: addr, } currentState := types.State{AppHash: []byte("app0"), NextProposerAddress: addr} @@ -240,9 +238,9 @@ func TestSyncer_ApplyBlockRejectsExecutionNextProposerMismatch(t *testing.T) { logger: zerolog.Nop(), } - _, err := s.ApplyBlock(t.Context(), header, data, currentState) - require.Error(t, err) - require.Contains(t, err.Error(), "next proposer mismatch") + newState, err := s.ApplyBlock(t.Context(), header, data, currentState) + require.NoError(t, err) + require.Equal(t, execNext, newState.NextProposerAddress) } func TestProcessHeightEvent_SyncsAndUpdatesState(t *testing.T) { diff --git a/client/crates/types/src/proto/evnode.v1.messages.rs b/client/crates/types/src/proto/evnode.v1.messages.rs index 495aac85d6..e6038f54ce 100644 --- a/client/crates/types/src/proto/evnode.v1.messages.rs +++ b/client/crates/types/src/proto/evnode.v1.messages.rs @@ -65,9 +65,6 @@ pub struct Header { /// 
Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, - /// Proposer address selected by this block's execution result for the next block. - #[prost(bytes = "vec", tag = "13")] - pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/client/crates/types/src/proto/evnode.v1.services.rs b/client/crates/types/src/proto/evnode.v1.services.rs index ef7fed4048..013e96db37 100644 --- a/client/crates/types/src/proto/evnode.v1.services.rs +++ b/client/crates/types/src/proto/evnode.v1.services.rs @@ -439,9 +439,6 @@ pub struct Header { /// Chain ID the block belongs to #[prost(string, tag = "12")] pub chain_id: ::prost::alloc::string::String, - /// Proposer address selected by this block's execution result for the next block. - #[prost(bytes = "vec", tag = "13")] - pub next_proposer_address: ::prost::alloc::vec::Vec, } /// SignedHeader is a header with a signature and a signer. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] diff --git a/docs/adr/adr-023-execution-owned-proposer-rotation.md b/docs/adr/adr-023-execution-owned-proposer-rotation.md index c89a78412b..9f78ef27e8 100644 --- a/docs/adr/adr-023-execution-owned-proposer-rotation.md +++ b/docs/adr/adr-023-execution-owned-proposer-rotation.md @@ -27,11 +27,11 @@ An empty `NextProposerAddress` from `ExecuteTxs` means the proposer is unchanged When execution returns a non-empty next proposer: -- The producing node commits it to `Header.NextProposerAddress` before signing the header. -- Syncing nodes require the signed header value to match the execution result. - `State.NextProposerAddress` is updated and used as the expected signer for `LastBlockHeight + 1`. +- Full nodes validate the next block signer against the previous state's `NextProposerAddress`. +- Header encoding remains unchanged. 
`Header.ProposerAddress` continues to identify the signer of the current block only. -`Header.NextProposerAddress` lets header-only paths and DA envelope validation see proposer transitions without replaying execution first. The execution result remains the authority; mismatches between the signed header and execution are invalid. +The execution result is the authority for proposer rotation. Header-only paths cannot derive proposer transitions without either replaying execution or using a future proof/certificate mechanism. This preserves header compatibility while keeping the rotation rule deterministic for full nodes. ## EVM System Contract Model @@ -47,7 +47,7 @@ The security council or multisig becomes the authority for proposer updates. It The system contract must restrict writes to the configured authority. Unauthorized proposer updates are consensus-critical because they determine who can sign the next block. -ev-node validates the execution output against the signed header. A malicious proposer cannot advertise one next proposer in the header while execution derives another. +ev-node validates each block's signer against the proposer address stored in the previous state. A malicious proposer cannot rotate the next signer through node-local configuration; the rotation must be derived from execution. If the execution interface returns an empty proposer, ev-node treats the proposer as unchanged. At startup, empty execution info falls back to genesis so existing execution implementations remain usable. @@ -60,13 +60,14 @@ Positive: - Proposer rotation becomes deterministic execution state. - EVM chains can use a system contract and multisig-controlled rotation. - Existing chains keep working when execution returns an empty proposer. -- Header verification can follow rotations once the rotating block is known. +- Existing header encoding remains compatible because no new header field is required. 
Negative: - The execution API changes and all execution adapters must return `ExecuteResult`. - Proposer updates become consensus-critical execution outputs. - ev-reth needs a separate system-contract design and implementation. +- Header-only/light-client paths cannot follow proposer rotation without execution replay or a later proof design. ## Alternatives Considered @@ -78,6 +79,6 @@ Node-local proposer configuration: - Rejected. Nodes could disagree about the active proposer unless every operator updates configuration at the same time. -Execution-only proposer without header commitment: +Header commitment for next proposer: -- Rejected. Syncing nodes can replay execution, but header and DA envelope paths benefit from having the selected next proposer committed in the signed header when it changes. +- Rejected for the first version. It would expose rotations to header-only paths, but it changes the signed header and hash encoding. Keeping rotation in execution/state avoids a header compatibility break. diff --git a/proto/evnode/v1/evnode.proto b/proto/evnode/v1/evnode.proto index 0002f818f6..a86f234998 100644 --- a/proto/evnode/v1/evnode.proto +++ b/proto/evnode/v1/evnode.proto @@ -38,10 +38,7 @@ message Header { bytes validator_hash = 11; // Chain ID the block belongs to string chain_id = 12; - // Proposer address selected by this block's execution result for the next block. - bytes next_proposer_address = 13; - - reserved 5, 7, 9; + reserved 5, 7, 9, 13; } // SignedHeader is a header with a signature and a signer. diff --git a/types/header.go b/types/header.go index 7beb9a9728..3049425ebe 100644 --- a/types/header.go +++ b/types/header.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "context" "encoding" "errors" @@ -43,7 +42,8 @@ var ( // ErrNoProposerAddress is returned when the proposer address is not set. ErrNoProposerAddress = errors.New("no proposer address") - // ErrProposerVerificationFailed is returned when the proposer verification fails. 
+ // ErrProposerVerificationFailed is deprecated. Proposer authorization is + // enforced through State validation because proposer rotation is execution-owned. ErrProposerVerificationFailed = errors.New("proposer verification failed") // ErrInvalidTimestamp is returned when the timestamp is invalid. @@ -82,11 +82,6 @@ type Header struct { // pubkey can't be recovered by the signature (e.g. ed25519). ProposerAddress []byte // original proposer of the block - // NextProposerAddress is selected by executing this block and becomes the - // proposer expected for the next block. Empty means the current proposer - // remains active. - NextProposerAddress []byte - // Legacy holds fields that were removed from the canonical header JSON/Go // representation but may still be required for backwards compatible binary // serialization (e.g. legacy signing payloads). @@ -129,19 +124,9 @@ func (h *Header) Time() time.Time { // Verify verifies the header. func (h *Header) Verify(untrstH *Header) error { - expectedProposer := h.ProposerAddress - if len(h.NextProposerAddress) > 0 { - expectedProposer = h.NextProposerAddress - } - if !bytes.Equal(untrstH.ProposerAddress, expectedProposer) { - return &header.VerifyError{ - Reason: fmt.Errorf("%w: expected proposer (%X) got (%X)", - ErrProposerVerificationFailed, - expectedProposer, - untrstH.ProposerAddress, - ), - } - } + // Proposer rotation is execution/state-owned. The trusted header alone no + // longer contains enough information to authorize the signer of the next + // header, so full nodes enforce proposer validity through State validation. 
return nil } @@ -279,7 +264,6 @@ func (h Header) Clone() Header { clone.AppHash = cloneBytes(h.AppHash) clone.ValidatorHash = cloneBytes(h.ValidatorHash) clone.ProposerAddress = cloneBytes(h.ProposerAddress) - clone.NextProposerAddress = cloneBytes(h.NextProposerAddress) clone.Legacy = h.Legacy.Clone() clone.cachedHash = nil diff --git a/types/pb/evnode/v1/evnode.pb.go b/types/pb/evnode/v1/evnode.pb.go index 55ab34b0ff..d98acd10a2 100644 --- a/types/pb/evnode/v1/evnode.pb.go +++ b/types/pb/evnode/v1/evnode.pb.go @@ -102,11 +102,9 @@ type Header struct { // validatorhash for compatibility with tendermint light client. ValidatorHash []byte `protobuf:"bytes,11,opt,name=validator_hash,json=validatorHash,proto3" json:"validator_hash,omitempty"` // Chain ID the block belongs to - ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - // Proposer address selected by this block's execution result for the next block. - NextProposerAddress []byte `protobuf:"bytes,13,opt,name=next_proposer_address,json=nextProposerAddress,proto3" json:"next_proposer_address,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + ChainId string `protobuf:"bytes,12,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Header) Reset() { @@ -202,13 +200,6 @@ func (x *Header) GetChainId() string { return "" } -func (x *Header) GetNextProposerAddress() []byte { - if x != nil { - return x.NextProposerAddress - } - return nil -} - // SignedHeader is a header with a signature and a signer. 
type SignedHeader struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -801,7 +792,7 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x16evnode/v1/evnode.proto\x12\tevnode.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"1\n" + "\aVersion\x12\x14\n" + "\x05block\x18\x01 \x01(\x04R\x05block\x12\x10\n" + - "\x03app\x18\x02 \x01(\x04R\x03app\"\xf7\x02\n" + + "\x03app\x18\x02 \x01(\x04R\x03app\"\xc9\x02\n" + "\x06Header\x12,\n" + "\aversion\x18\x01 \x01(\v2\x12.evnode.v1.VersionR\aversion\x12\x16\n" + "\x06height\x18\x02 \x01(\x04R\x06height\x12\x12\n" + @@ -812,9 +803,8 @@ const file_evnode_v1_evnode_proto_rawDesc = "" + "\x10proposer_address\x18\n" + " \x01(\fR\x0fproposerAddress\x12%\n" + "\x0evalidator_hash\x18\v \x01(\fR\rvalidatorHash\x12\x19\n" + - "\bchain_id\x18\f \x01(\tR\achainId\x122\n" + - "\x15next_proposer_address\x18\r \x01(\fR\x13nextProposerAddressJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + - "\"\x88\x01\n" + + "\bchain_id\x18\f \x01(\tR\achainIdJ\x04\b\x05\x10\x06J\x04\b\a\x10\bJ\x04\b\t\x10\n" + + "J\x04\b\r\x10\x0e\"\x88\x01\n" + "\fSignedHeader\x12)\n" + "\x06header\x18\x01 \x01(\v2\x11.evnode.v1.HeaderR\x06header\x12\x1c\n" + "\tsignature\x18\x02 \x01(\fR\tsignature\x12)\n" + diff --git a/types/serialization.go b/types/serialization.go index 114de41194..b16e7d549d 100644 --- a/types/serialization.go +++ b/types/serialization.go @@ -89,7 +89,6 @@ func (h *Header) MarshalBinary() ([]byte, error) { ph.AppHash = h.AppHash ph.ProposerAddress = h.ProposerAddress ph.ValidatorHash = h.ValidatorHash - ph.NextProposerAddress = h.NextProposerAddress if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) } @@ -239,7 +238,6 @@ func (sh *SignedHeader) MarshalBinary() ([]byte, error) { ph.DataHash = sh.DataHash ph.AppHash = sh.AppHash ph.ProposerAddress = sh.ProposerAddress - ph.NextProposerAddress = sh.NextProposerAddress ph.ValidatorHash = sh.ValidatorHash if unknown := 
encodeLegacyUnknownFields(sh.Legacy); len(unknown) > 0 { ph.ProtoReflect().SetUnknown(unknown) @@ -380,15 +378,14 @@ func (h *Header) ToProto() *pb.Header { Block: h.Version.Block, App: h.Version.App, }, - Height: h.BaseHeader.Height, - Time: h.BaseHeader.Time, - LastHeaderHash: h.LastHeaderHash[:], - DataHash: h.DataHash[:], - AppHash: h.AppHash[:], - ProposerAddress: h.ProposerAddress[:], - ChainId: h.BaseHeader.ChainID, - ValidatorHash: h.ValidatorHash, - NextProposerAddress: h.NextProposerAddress, + Height: h.BaseHeader.Height, + Time: h.BaseHeader.Time, + LastHeaderHash: h.LastHeaderHash[:], + DataHash: h.DataHash[:], + AppHash: h.AppHash[:], + ProposerAddress: h.ProposerAddress[:], + ChainId: h.BaseHeader.ChainID, + ValidatorHash: h.ValidatorHash, } if unknown := encodeLegacyUnknownFields(h.Legacy); len(unknown) > 0 { pHeader.ProtoReflect().SetUnknown(unknown) @@ -439,12 +436,6 @@ func (h *Header) FromProto(other *pb.Header) error { } else { h.ValidatorHash = nil } - if other.NextProposerAddress != nil { - h.NextProposerAddress = append([]byte(nil), other.NextProposerAddress...) - } else { - h.NextProposerAddress = nil - } - legacy, err := decodeLegacyHeaderFields(other) if err != nil { return err @@ -903,10 +894,5 @@ func marshalLegacyHeader(h *Header) ([]byte, error) { payload = append(payload, clone.BaseHeader.ChainID...) } - // next proposer address - if len(clone.NextProposerAddress) > 0 { - payload = appendBytesField(payload, 13, clone.NextProposerAddress) - } - return payload, nil } diff --git a/types/signed_header_test.go b/types/signed_header_test.go index c159e674cc..c89299964f 100644 --- a/types/signed_header_test.go +++ b/types/signed_header_test.go @@ -70,32 +70,16 @@ func testVerify(t *testing.T, trusted *SignedHeader, untrustedAdj *SignedHeader, }, err: nil, }, - // 4. Test proposer verification - // changes the proposed address to a random address - // Expect failure + // 4. Test proposer rotation at the header layer. 
+ // Proposer authorization is state-owned, so header verification only + // checks the chain link and allows a different proposer address. { prepare: func() (*SignedHeader, bool) { untrusted := *untrustedAdj untrusted.ProposerAddress = GetRandomBytes(32) return &untrusted, true }, - err: &header.VerifyError{ - Reason: ErrProposerVerificationFailed, - }, - }, - // 5. Test proposer verification for non-adjacent headers - // changes the proposed address to a random address and updates height - // Expect failure - { - prepare: func() (*SignedHeader, bool) { - untrusted := *untrustedAdj - untrusted.ProposerAddress = GetRandomBytes(32) - untrusted.BaseHeader.Height++ - return &untrusted, true - }, - err: &header.VerifyError{ - Reason: ErrProposerVerificationFailed, - }, + err: nil, }, } diff --git a/types/state.go b/types/state.go index 2d51e761af..10f11a51ae 100644 --- a/types/state.go +++ b/types/state.go @@ -45,7 +45,7 @@ type State struct { func (s *State) NextState(header Header, stateRoot []byte, nextProposerAddress ...[]byte) (State, error) { height := header.Height() - nextProposer := header.NextProposerAddress + nextProposer := s.NextProposerAddress if len(nextProposerAddress) > 0 && len(nextProposerAddress[0]) > 0 { nextProposer = nextProposerAddress[0] }