From b61e6bc35c296464fd872f00fb96a0d460ab44e1 Mon Sep 17 00:00:00 2001 From: Matthias Fasching <5011972+fasmat@users.noreply.github.com> Date: Thu, 30 May 2024 08:29:20 +0200 Subject: [PATCH] Backport 5999: Increase ATX limits to 6.0 Mio (#6000) Increase ATX limits to 6.0 Mio (#5999) --- CHANGELOG.md | 11 +++++++++-- common/types/activation.go | 2 +- common/types/activation_scale.go | 4 ++-- common/types/block.go | 10 +++++----- common/types/block_scale.go | 4 ++-- fetch/wire_types.go | 16 ++++++++-------- fetch/wire_types_scale.go | 16 ++++++++-------- hare3/types.go | 10 +++++----- hare3/types_scale.go | 4 ++-- p2p/server/server.go | 11 +++-------- p2p/server/server_scale.go | 4 ++-- p2p/server/server_test.go | 3 ++- 12 files changed, 49 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 657a922c69..969a7dc2d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,13 +2,20 @@ See [RELEASE](./RELEASE.md) for workflow instructions. +## Release v1.5.7 + +### Improvements + +* [#5999](https://github.com/spacemeshos/go-spacemesh/pull/5999) Increase limits to allow up to 6.0 Mio ATXs per epoch. + ## Release v1.5.6 ### Improvements -* [#5943](https://github.com/spacemeshos/go-spacemesh/pull/5943) Fix timing out querying proof in 1:N in a presence of a broken Poet. +* [#5943](https://github.com/spacemeshos/go-spacemesh/pull/5943) Fix timing out querying proof in 1:N in a presence of + a broken Poet. - Previously, every identitiy waited for the full timeout time (~20 minutes) before giving up. + Previously, every identity waited for the full timeout time (~20 minutes) before giving up. 
## Release v1.5.5 diff --git a/common/types/activation.go b/common/types/activation.go index 66f61665d8..ef1819019c 100644 --- a/common/types/activation.go +++ b/common/types/activation.go @@ -378,7 +378,7 @@ func ATXIDsToHashes(ids []ATXID) []Hash32 { type EpochActiveSet struct { Epoch EpochID - Set []ATXID `scale:"max=5500000"` // to be in line with `EpochData` in fetch/wire_types.go + Set []ATXID `scale:"max=6000000"` // to be in line with `EpochData` in fetch/wire_types.go } var MaxEpochActiveSetSize = scale.MustGetMaxElements[EpochActiveSet]("Set") diff --git a/common/types/activation_scale.go b/common/types/activation_scale.go index 6a814f3616..70088eedbd 100644 --- a/common/types/activation_scale.go +++ b/common/types/activation_scale.go @@ -91,7 +91,7 @@ func (t *EpochActiveSet) EncodeScale(enc *scale.Encoder) (total int, err error) total += n } { - n, err := scale.EncodeStructSliceWithLimit(enc, t.Set, 5500000) + n, err := scale.EncodeStructSliceWithLimit(enc, t.Set, 6000000) if err != nil { return total, err } @@ -110,7 +110,7 @@ func (t *EpochActiveSet) DecodeScale(dec *scale.Decoder) (total int, err error) t.Epoch = EpochID(field) } { - field, n, err := scale.DecodeStructSliceWithLimit[ATXID](dec, 5500000) + field, n, err := scale.DecodeStructSliceWithLimit[ATXID](dec, 6000000) if err != nil { return total, err } diff --git a/common/types/block.go b/common/types/block.go index 7bd641d98a..d45721d0b9 100644 --- a/common/types/block.go +++ b/common/types/block.go @@ -77,14 +77,14 @@ type InnerBlock struct { // In this case they will get all 50 available slots in all 4032 layers of the epoch. // Additionally every other identity on the network that successfully published an ATX will get 1 slot. // - // If we expect 5.5 Mio ATXs that would be a total of 5.5 Mio + 50 * 4032 = 5 701 600 slots. + // If we expect 6.0 Mio ATXs that would be a total of 6.0 Mio + 50 * 4032 = 6 201 600 slots. 
// Since these are randomly distributed across the epoch, we can expect an average of n * p = - // 5 701 600 / 4032 = 1414.1 rewards in a block with a standard deviation of sqrt(n * p * (1 - p)) = - // sqrt(3 701 600 * 1/4032 * 4031/4032) = 37.6 + // 6 201 600 / 4032 = 1538.1 rewards in a block with a standard deviation of sqrt(n * p * (1 - p)) = + // sqrt(6 201 600 * 1/4032 * 4031/4032) = 39.2 // - // This means that we can expect a maximum of 1414.1 + 6*37.6 = 1639.7 rewards per block with + // This means that we can expect a maximum of 1538.1 + 6*39.2 = 1773.4 rewards per block with + // > 99.9997% probability. - Rewards []AnyReward `scale:"max=1650"` + Rewards []AnyReward `scale:"max=1775"` TxIDs []TransactionID `scale:"max=100000"` } diff --git a/common/types/block_scale.go b/common/types/block_scale.go index a9558ab426..963650bbd3 100644 --- a/common/types/block_scale.go +++ b/common/types/block_scale.go @@ -45,7 +45,7 @@ func (t *InnerBlock) EncodeScale(enc *scale.Encoder) (total int, err error) { total += n } { - n, err := scale.EncodeStructSliceWithLimit(enc, t.Rewards, 1650) + n, err := scale.EncodeStructSliceWithLimit(enc, t.Rewards, 1775) if err != nil { return total, err } @@ -79,7 +79,7 @@ func (t *InnerBlock) DecodeScale(dec *scale.Decoder) (total int, err error) { t.TickHeight = uint64(field) } { - field, n, err := scale.DecodeStructSliceWithLimit[AnyReward](dec, 1650) + field, n, err := scale.DecodeStructSliceWithLimit[AnyReward](dec, 1775) if err != nil { return total, err } diff --git a/fetch/wire_types.go b/fetch/wire_types.go index 62b5faab72..3c8d1ba2e8 100644 --- a/fetch/wire_types.go +++ b/fetch/wire_types.go @@ -37,7 +37,7 @@ type RequestMessage struct { type ResponseMessage struct { Hash types.Hash32 // keep in line with limit of Response.Data in `p2p/server/server.go` - Data []byte `scale:"max=183500800"` // 175 MiB > 5.5 mio ATX * 32 bytes per ID + Data []byte `scale:"max=209715200"` // 200 MiB > 6.0 mio ATX * 32 bytes per ID } // 
RequestBatch is a batch of requests and a hash of all requests as ID. @@ -116,7 +116,7 @@ type MeshHashes struct { } type MaliciousIDs struct { - NodeIDs []types.NodeID `scale:"max=5500000"` // to be in line with `EpochData.AtxIDs` below + NodeIDs []types.NodeID `scale:"max=6000000"` // to be in line with `EpochData.AtxIDs` below } type EpochData struct { @@ -128,7 +128,7 @@ type EpochData struct { // - the size of `Rewards` in the type `InnerBlock` in common/types/block.go // - the size of `Ballots` in the type `LayerData` below // - the size of `Proposals` in the type `Value` in hare3/types.go - AtxIDs []types.ATXID `scale:"max=5500000"` + AtxIDs []types.ATXID `scale:"max=6000000"` } // LayerData is the data response for a given layer ID. @@ -139,14 +139,14 @@ type LayerData struct { // In this case they will get all 50 available slots in all 4032 layers of the epoch. // Additionally every other identity on the network that successfully published an ATX will get 1 slot. // - // If we expect 5.5 Mio ATXs that would be a total of 5.5 Mio + 50 * 4032 = 4 701 600 slots. + // If we expect 6.0 Mio ATXs that would be a total of 6.0 Mio + 50 * 4032 = 6 201 600 slots. // Since these are randomly distributed across the epoch, we can expect an average of n * p = - // 5 701 600 / 4032 = 1414.1 ballots in a layer with a standard deviation of sqrt(n * p * (1 - p)) = - // sqrt(3 701 600 * 1/4032 * 4031/4032) = 37.6 + // 6 201 600 / 4032 = 1538.1 ballots in a layer with a standard deviation of sqrt(n * p * (1 - p)) = + // sqrt(6 201 600 * 1/4032 * 4031/4032) = 39.2 // - // This means that we can expect a maximum of 1414.1 + 6*37.6 = 1639.7 ballots per layer with + // This means that we can expect a maximum of 1538.1 + 6*39.2 = 1773.4 ballots per layer with // > 99.9997% probability. 
- Ballots []types.BallotID `scale:"max=1650"` + Ballots []types.BallotID `scale:"max=1775"` } type OpinionRequest struct { diff --git a/fetch/wire_types_scale.go b/fetch/wire_types_scale.go index 929ccc5775..a3ce049e4b 100644 --- a/fetch/wire_types_scale.go +++ b/fetch/wire_types_scale.go @@ -55,7 +55,7 @@ func (t *ResponseMessage) EncodeScale(enc *scale.Encoder) (total int, err error) total += n } { - n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 183500800) + n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 209715200) if err != nil { return total, err } @@ -73,7 +73,7 @@ func (t *ResponseMessage) DecodeScale(dec *scale.Decoder) (total int, err error) total += n } { - field, n, err := scale.DecodeByteSliceWithLimit(dec, 183500800) + field, n, err := scale.DecodeByteSliceWithLimit(dec, 209715200) if err != nil { return total, err } @@ -235,7 +235,7 @@ func (t *MeshHashes) DecodeScale(dec *scale.Decoder) (total int, err error) { func (t *MaliciousIDs) EncodeScale(enc *scale.Encoder) (total int, err error) { { - n, err := scale.EncodeStructSliceWithLimit(enc, t.NodeIDs, 5500000) + n, err := scale.EncodeStructSliceWithLimit(enc, t.NodeIDs, 6000000) if err != nil { return total, err } @@ -246,7 +246,7 @@ func (t *MaliciousIDs) EncodeScale(enc *scale.Encoder) (total int, err error) { func (t *MaliciousIDs) DecodeScale(dec *scale.Decoder) (total int, err error) { { - field, n, err := scale.DecodeStructSliceWithLimit[types.NodeID](dec, 5500000) + field, n, err := scale.DecodeStructSliceWithLimit[types.NodeID](dec, 6000000) if err != nil { return total, err } @@ -258,7 +258,7 @@ func (t *MaliciousIDs) DecodeScale(dec *scale.Decoder) (total int, err error) { func (t *EpochData) EncodeScale(enc *scale.Encoder) (total int, err error) { { - n, err := scale.EncodeStructSliceWithLimit(enc, t.AtxIDs, 5500000) + n, err := scale.EncodeStructSliceWithLimit(enc, t.AtxIDs, 6000000) if err != nil { return total, err } @@ -269,7 +269,7 @@ func (t *EpochData) EncodeScale(enc 
*scale.Encoder) (total int, err error) { func (t *EpochData) DecodeScale(dec *scale.Decoder) (total int, err error) { { - field, n, err := scale.DecodeStructSliceWithLimit[types.ATXID](dec, 5500000) + field, n, err := scale.DecodeStructSliceWithLimit[types.ATXID](dec, 6000000) if err != nil { return total, err } @@ -281,7 +281,7 @@ func (t *EpochData) DecodeScale(dec *scale.Decoder) (total int, err error) { func (t *LayerData) EncodeScale(enc *scale.Encoder) (total int, err error) { { - n, err := scale.EncodeStructSliceWithLimit(enc, t.Ballots, 1650) + n, err := scale.EncodeStructSliceWithLimit(enc, t.Ballots, 1775) if err != nil { return total, err } @@ -292,7 +292,7 @@ func (t *LayerData) EncodeScale(enc *scale.Encoder) (total int, err error) { func (t *LayerData) DecodeScale(dec *scale.Decoder) (total int, err error) { { - field, n, err := scale.DecodeStructSliceWithLimit[types.BallotID](dec, 1650) + field, n, err := scale.DecodeStructSliceWithLimit[types.BallotID](dec, 1775) if err != nil { return total, err } diff --git a/hare3/types.go b/hare3/types.go index 9815a3becd..4507df33da 100644 --- a/hare3/types.go +++ b/hare3/types.go @@ -82,14 +82,14 @@ type Value struct { // In this case they will get all 50 available slots in all 4032 layers of the epoch. // Additionally every other identity on the network that successfully published an ATX will get 1 slot. // - // If we expect 5.5 Mio ATXs that would be a total of 5.5 Mio + 50 * 4032 = 5 701 600 slots. + // If we expect 6.0 Mio ATXs that would be a total of 6.0 Mio + 50 * 4032 = 6 201 600 slots. 
// Since these are randomly distributed across the epoch, we can expect an average of n * p = - // 5 701 600 / 4032 = 1414.1 eligibilities in a layer with a standard deviation of sqrt(n * p * (1 - p)) = - // sqrt(3 701 600 * 1/4032 * 4031/4032) = 37.6 + // 6 201 600 / 4032 = 1538.1 eligibilities in a layer with a standard deviation of sqrt(n * p * (1 - p)) = + // sqrt(6 201 600 * 1/4032 * 4031/4032) = 39.2 // - // This means that we can expect a maximum of 1414.1 + 6*37.6 = 1639.7 eligibilities in a layer with + // This means that we can expect a maximum of 1538.1 + 6*39.2 = 1773.4 eligibilities in a layer with // > 99.9997% probability. - Proposals []types.ProposalID `scale:"max=1650"` + Proposals []types.ProposalID `scale:"max=1775"` // Reference is set in messages for commit and notify rounds. Reference *types.Hash32 } diff --git a/hare3/types_scale.go b/hare3/types_scale.go index 542d176655..8cdbad39c6 100644 --- a/hare3/types_scale.go +++ b/hare3/types_scale.go @@ -48,7 +48,7 @@ func (t *IterRound) DecodeScale(dec *scale.Decoder) (total int, err error) { func (t *Value) EncodeScale(enc *scale.Encoder) (total int, err error) { { - n, err := scale.EncodeStructSliceWithLimit(enc, t.Proposals, 1650) + n, err := scale.EncodeStructSliceWithLimit(enc, t.Proposals, 1775) if err != nil { return total, err } @@ -66,7 +66,7 @@ func (t *Value) EncodeScale(enc *scale.Encoder) (total int, err error) { func (t *Value) DecodeScale(dec *scale.Decoder) (total int, err error) { { - field, n, err := scale.DecodeStructSliceWithLimit[types.ProposalID](dec, 1650) + field, n, err := scale.DecodeStructSliceWithLimit[types.ProposalID](dec, 1775) if err != nil { return total, err } diff --git a/p2p/server/server.go b/p2p/server/server.go index 9ea44a506d..4ff74fadbd 100644 --- a/p2p/server/server.go +++ b/p2p/server/server.go @@ -127,11 +127,6 @@ func NewServerError(msg string) *ServerError { return &ServerError{msg: msg} } -func (*ServerError) Is(target error) bool { - _, ok := 
target.(*ServerError) - return ok -} - func (err *ServerError) Error() string { return fmt.Sprintf("peer error: %s", err.msg) } @@ -141,7 +136,7 @@ func (err *ServerError) Error() string { // Response is a server response. type Response struct { // keep in line with limit of ResponseMessage.Data in `fetch/wire_types.go` - Data []byte `scale:"max=183500800"` // 120 MiB > 3.5 mio ATX * 32 bytes per ID + Data []byte `scale:"max=209715200"` // 200 MiB > 6.0 mio ATX * 32 bytes per ID Error string `scale:"max=1024"` // TODO(mafa): make error code instead of string } @@ -364,11 +359,11 @@ func (s *Server) StreamRequest( ) } - serverError := errors.Is(err, &ServerError{}) + var srvError *ServerError took := time.Since(start).Seconds() switch { case s.metrics == nil: - case serverError: + case errors.As(err, &srvError): s.metrics.clientServerError.Inc() s.metrics.clientLatency.Observe(took) case err != nil: diff --git a/p2p/server/server_scale.go b/p2p/server/server_scale.go index ded1d1033e..03cce911d5 100644 --- a/p2p/server/server_scale.go +++ b/p2p/server/server_scale.go @@ -9,7 +9,7 @@ import ( func (t *Response) EncodeScale(enc *scale.Encoder) (total int, err error) { { - n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 183500800) + n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 209715200) if err != nil { return total, err } @@ -27,7 +27,7 @@ func (t *Response) EncodeScale(enc *scale.Encoder) (total int, err error) { func (t *Response) DecodeScale(dec *scale.Decoder) (total int, err error) { { - field, n, err := scale.DecodeByteSliceWithLimit(dec, 183500800) + field, n, err := scale.DecodeByteSliceWithLimit(dec, 209715200) if err != nil { return total, err } diff --git a/p2p/server/server_test.go b/p2p/server/server_test.go index 0290833d8d..7f3a84af78 100644 --- a/p2p/server/server_test.go +++ b/p2p/server/server_test.go @@ -75,7 +75,8 @@ func TestServer(t *testing.T) { t.Run("ReceiveError", func(t *testing.T) { n := srv1.NumAcceptedRequests() _, err := 
client.Request(ctx, mesh.Hosts()[2].ID(), request) - require.ErrorIs(t, err, &ServerError{}) + var srvErr *ServerError + require.ErrorAs(t, err, &srvErr) require.ErrorContains(t, err, "peer error") require.ErrorContains(t, err, testErr.Error()) require.Equal(t, n+1, srv1.NumAcceptedRequests())