shrink the chache sizes
kiltsonfire committed Jun 4, 2024
1 parent 22f39e9 · commit 9d7698b
Showing 9 changed files with 26 additions and 27 deletions.
5 changes: 2 additions & 3 deletions core/bodydb.go
@@ -19,9 +19,8 @@ import (
 )

 const (
-    bodyCacheLimit = 256
-    blockCacheLimit = 256
-    maxHeadsQueueLimit = 1024
+    bodyCacheLimit = 25
+    blockCacheLimit = 25
 )

 type BodyDb struct {
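Limits like bodyCacheLimit and blockCacheLimit normally size in-memory LRU caches sitting in front of the body database, so shrinking them trades cache hit rate for a smaller steady-state heap. A minimal sketch of how such constants are commonly wired up, assuming a hashicorp/golang-lru backing cache; the BodyDb fields and constructor below are illustrative, not the exact go-quai code:

```go
package core

import (
	lru "github.com/hashicorp/golang-lru"
)

const (
	bodyCacheLimit  = 25
	blockCacheLimit = 25
)

// BodyDb is a hypothetical holder for the two caches sized above.
type BodyDb struct {
	bodyCache  *lru.Cache // most recently touched block bodies
	blockCache *lru.Cache // most recently touched full blocks
}

// NewBodyDb allocates the caches; lru.New only errors on a non-positive size.
func NewBodyDb() (*BodyDb, error) {
	bodyCache, err := lru.New(bodyCacheLimit)
	if err != nil {
		return nil, err
	}
	blockCache, err := lru.New(blockCacheLimit)
	if err != nil {
		return nil, err
	}
	return &BodyDb{bodyCache: bodyCache, blockCache: blockCache}, nil
}
```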
18 changes: 9 additions & 9 deletions core/core.go
@@ -31,15 +31,15 @@ import (
 )

 const (
-    c_maxAppendQueue = 1000000 // Maximum number of future headers we can store in cache
-    c_maxFutureTime = 30 // Max time into the future (in seconds) we will accept a block
-    c_appendQueueRetryPeriod = 1 // Time (in seconds) before retrying to append from AppendQueue
-    c_appendQueueThreshold = 200 // Number of blocks to load from the disk to ram on every proc of append queue
-    c_processingCache = 10 // Number of block hashes held to prevent multi simultaneous appends on a single block hash
-    c_primeRetryThreshold = 1800 // Number of times a block is retry to be appended before eviction from append queue in Prime
-    c_regionRetryThreshold = 1200 // Number of times a block is retry to be appended before eviction from append queue in Region
-    c_zoneRetryThreshold = 600 // Number of times a block is retry to be appended before eviction from append queue in Zone
-    c_maxFutureBlocksPrime uint64 = 3 // Number of blocks ahead of the current block to be put in the hashNumberList
+    c_maxAppendQueue = 3000 // Maximum number of future headers we can store in cache
+    c_maxFutureTime = 30 // Max time into the future (in seconds) we will accept a block
+    c_appendQueueRetryPeriod = 1 // Time (in seconds) before retrying to append from AppendQueue
+    c_appendQueueThreshold = 200 // Number of blocks to load from the disk to ram on every proc of append queue
+    c_processingCache = 10 // Number of block hashes held to prevent multi simultaneous appends on a single block hash
+    c_primeRetryThreshold = 1800 // Number of times a block is retry to be appended before eviction from append queue in Prime
+    c_regionRetryThreshold = 1200 // Number of times a block is retry to be appended before eviction from append queue in Region
+    c_zoneRetryThreshold = 600 // Number of times a block is retry to be appended before eviction from append queue in Zone
+    c_maxFutureBlocksPrime uint64 = 3 // Number of blocks ahead of the current block to be put in the hashNumberList
     c_maxFutureBlocksRegion uint64 = 150
     c_maxFutureBlocksZone uint64 = 2000
     c_appendQueueRetryPriorityThreshold = 5 // If retry counter for a block is less than this number, then its put in the special list that is tried first to be appended
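The constants above gate two behaviours: how far into the future a block timestamp may run before the block is parked rather than appended, and how many append retries a queued block gets, per chain context, before it is evicted. A minimal sketch of those two checks under that reading; the function names and the errTooFarInFuture sentinel are illustrative assumptions, not the actual core.go logic:

```go
package core

import (
	"errors"
	"time"
)

const (
	c_maxFutureTime        = 30   // seconds a block timestamp may run ahead of local time
	c_primeRetryThreshold  = 1800 // append retries before eviction in Prime
	c_regionRetryThreshold = 1200 // append retries before eviction in Region
	c_zoneRetryThreshold   = 600  // append retries before eviction in Zone
)

var errTooFarInFuture = errors.New("block timestamp too far in the future")

// checkFutureTime rejects blocks whose timestamp is more than c_maxFutureTime
// seconds ahead of the local clock; such blocks wait in the append queue.
func checkFutureTime(headerTime uint64) error {
	if headerTime > uint64(time.Now().Unix())+c_maxFutureTime {
		return errTooFarInFuture
	}
	return nil
}

// retryThreshold picks the eviction threshold for the given chain context
// (0 = Prime, 1 = Region, 2 = Zone, mirroring the constant names above).
func retryThreshold(ctx int) int {
	switch ctx {
	case 0:
		return c_primeRetryThreshold
	case 1:
		return c_regionRetryThreshold
	default:
		return c_zoneRetryThreshold
	}
}

// shouldEvict reports whether a queued block has exhausted its retries.
func shouldEvict(retries, ctx int) bool {
	return retries >= retryThreshold(ctx)
}
```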
2 changes: 1 addition & 1 deletion core/headerchain.go
@@ -28,7 +28,7 @@ import (
 )

 const (
-    headerCacheLimit = 512
+    headerCacheLimit = 25
     numberCacheLimit = 2048
     c_subRollupCacheSize = 50
     primeHorizonThreshold = 20
14 changes: 7 additions & 7 deletions core/slice.go
@@ -26,21 +26,21 @@ import (
 )

 const (
-    c_maxPendingEtxBatchesPrime = 30000
-    c_maxPendingEtxBatchesRegion = 10000
+    c_maxPendingEtxBatchesPrime = 3000
+    c_maxPendingEtxBatchesRegion = 1000
     c_maxPendingEtxsRollup = 256
-    c_maxBloomFilters = 1024
+    c_maxBloomFilters = 25
     c_pendingHeaderChacheBufferFactor = 2
     pendingHeaderGCTime = 5
     c_terminusIndex = 3
     c_startingPrintLimit = 10
     c_regionRelayProc = 3
     c_primeRelayProc = 10
     c_asyncPhUpdateChanSize = 10
-    c_phCacheSize = 500
-    c_pEtxRetryThreshold = 100 // Number of pEtxNotFound return on a dom block before asking for pEtx/Rollup from sub
-    c_currentStateComputeWindow = 20 // Number of blocks around the current header the state generation is always done
-    c_inboundEtxCacheSize = 10 // Number of inboundEtxs to keep in cache so that, we don't recompute it every time dom is processed
+    c_phCacheSize = 50
+    c_pEtxRetryThreshold = 10 // Number of pEtxNotFound return on a dom block before asking for pEtx/Rollup from sub
+    c_currentStateComputeWindow = 20 // Number of blocks around the current header the state generation is always done
+    c_inboundEtxCacheSize = 10 // Number of inboundEtxs to keep in cache so that, we don't recompute it every time dom is processed

 )

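The comment on c_pEtxRetryThreshold describes a counting pattern: each time a dominant block's pending ETXs come back as not found, a per-hash counter is bumped, and only once the counter crosses the threshold does the node fall back to requesting the pEtx/Rollup from its sub. A minimal sketch of that counter under that reading; the map-based tracker and method names are assumptions for illustration:

```go
package core

const c_pEtxRetryThreshold = 10 // pEtxNotFound returns tolerated before asking the sub for the rollup

// pEtxRetryTracker counts pEtxNotFound occurrences per dom block hash.
type pEtxRetryTracker struct {
	retries map[[32]byte]int
}

func newPEtxRetryTracker() *pEtxRetryTracker {
	return &pEtxRetryTracker{retries: make(map[[32]byte]int)}
}

// noteNotFound records one miss and reports whether the caller should now
// request the pEtx/Rollup from the sub instead of waiting for another retry.
func (t *pEtxRetryTracker) noteNotFound(blockHash [32]byte) bool {
	t.retries[blockHash]++
	return t.retries[blockHash] >= c_pEtxRetryThreshold
}

// clear drops the counter once the pending ETXs have been found.
func (t *pEtxRetryTracker) clear(blockHash [32]byte) {
	delete(t.retries, blockHash)
}
```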
2 changes: 1 addition & 1 deletion core/state/database.go
@@ -31,7 +31,7 @@ import (

 const (
     // Number of codehash->size associations to keep.
-    codeSizeCacheSize = 100000
+    codeSizeCacheSize = 10000

     // Cache size granted for caching clean code.
     codeCacheSize = 64 * 1024 * 1024
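codeSizeCacheSize bounds a small codehash-to-size association so that repeated code-size lookups don't have to load full contract bytecode. A minimal sketch of that association cache, again assuming a hashicorp/golang-lru backing store; the type and method names are illustrative:

```go
package state

import (
	lru "github.com/hashicorp/golang-lru"
)

// Number of codehash->size associations to keep.
const codeSizeCacheSize = 10000

// codeSizeCache maps a 32-byte code hash to the length of the code.
type codeSizeCache struct {
	cache *lru.Cache
}

func newCodeSizeCache() (*codeSizeCache, error) {
	c, err := lru.New(codeSizeCacheSize)
	if err != nil {
		return nil, err
	}
	return &codeSizeCache{cache: c}, nil
}

// size returns the cached length for codeHash, or false if it was evicted.
func (c *codeSizeCache) size(codeHash [32]byte) (int, bool) {
	if v, ok := c.cache.Get(codeHash); ok {
		return v.(int), true
	}
	return 0, false
}

// record stores the length after the code has been loaded once.
func (c *codeSizeCache) record(codeHash [32]byte, length int) {
	c.cache.Add(codeHash, length)
}
```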
2 changes: 1 addition & 1 deletion core/tx_pool.go
@@ -188,7 +188,7 @@ var DefaultTxPoolConfig = TxPoolConfig{

     AccountSlots: 10,
     GlobalSlots: 9000 + 1024, // urgent + floating queue capacity with 4:1 ratio
-    MaxSenders: 100000, // 5 MB - at least 10 blocks worth of transactions in case of reorg or high production rate
+    MaxSenders: 10000, // 5 MB - at least 10 blocks worth of transactions in case of reorg or high production rate
     SendersChBuffer: 1024, // at 500 TPS in zone, 2s buffer
     AccountQueue: 1,
     GlobalQueue: 2048,
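Since DefaultTxPoolConfig is a plain struct value, an operator who wants the previous sender-tracking headroom back can copy it and raise individual fields before handing it to the pool. A self-contained sketch of that pattern; the struct below is a pared-down stand-in with only the fields visible in the diff, and the override values are illustrative:

```go
package main

import "fmt"

// TxPoolConfig here is a stand-in with just the fields that appear in the
// diff above; the real struct has more knobs.
type TxPoolConfig struct {
	AccountSlots    uint64
	GlobalSlots     uint64
	MaxSenders      uint64
	SendersChBuffer uint64
	AccountQueue    uint64
	GlobalQueue     uint64
}

// DefaultTxPoolConfig mirrors the values visible after this commit.
var DefaultTxPoolConfig = TxPoolConfig{
	AccountSlots:    10,
	GlobalSlots:     9000 + 1024,
	MaxSenders:      10000,
	SendersChBuffer: 1024,
	AccountQueue:    1,
	GlobalQueue:     2048,
}

func main() {
	cfg := DefaultTxPoolConfig // struct copy; the package-level default is untouched
	cfg.MaxSenders = 50000     // illustrative override for extra reorg headroom
	fmt.Printf("senders tracked: %d, global queue: %d\n", cfg.MaxSenders, cfg.GlobalQueue)
}
```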
6 changes: 3 additions & 3 deletions core/worker.go
@@ -49,7 +49,7 @@ const (
     staleThreshold = 7

     // pendingBlockBodyLimit is maximum number of pending block bodies to be kept in cache.
-    pendingBlockBodyLimit = 320
+    pendingBlockBodyLimit = 16

     // c_headerPrintsExpiryTime is how long a header hash is kept in the cache, so that currentInfo
     // is not printed on a Proc frequency
@@ -58,9 +58,9 @@ const (
     // c_chainSideChanSize is the size of the channel listening to uncle events
     chainSideChanSize = 10

-    c_uncleCacheSize = 100
+    c_uncleCacheSize = 32

-    c_workShareFilterDist = 100 // the dist from the current block for the work share inclusion in the worker
+    c_workShareFilterDist = 10 // the dist from the current block for the work share inclusion in the worker
 )

 // environment is the worker's current environment and holds all
2 changes: 1 addition & 1 deletion p2p/node/node.go
@@ -34,7 +34,7 @@ import (

 const (
     // c_defaultCacheSize is the default size for the p2p cache
-    c_defaultCacheSize = 100
+    c_defaultCacheSize = 32
 )

 // P2PNode represents a libp2p node
2 changes: 1 addition & 1 deletion params/protocol_params.go
@@ -201,7 +201,7 @@ var (
     MinQuaiConversionAmount = new(big.Int).Mul(big.NewInt(1), big.NewInt(GWei)) // 0.000000001 Quai
     MaxWorkShareCount = 16
     WorkSharesThresholdDiff = 3 // Number of bits lower than the target that the default consensus engine uses
-    WorkSharesInclusionDepth = 7 // Number of blocks upto which the work shares can be referenced and this is protocol enforced
+    WorkSharesInclusionDepth = 3 // Number of blocks upto which the work shares can be referenced and this is protocol enforced
 )

 // This is TimeFactor*TimeFactor*common.NumZonesInRegion*common.NumRegionsInPrime
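WorkSharesThresholdDiff and WorkSharesInclusionDepth together bound which work shares a block may reference: a share's hash must meet a target a few bits easier than the block target, and the share must be anchored no more than a few blocks behind the referencing block. A minimal sketch of those two checks under that reading; the big.Int target arithmetic and function names are illustrative assumptions, not the engine's actual validation code:

```go
package params

import "math/big"

var (
	MaxWorkShareCount        = 16 // maximum shares a block may include (not checked in this sketch)
	WorkSharesThresholdDiff  = 3  // bits below the block target a share is allowed to be
	WorkSharesInclusionDepth = 3  // how many blocks back a referenced share may anchor
)

// workShareTarget relaxes the block target by WorkSharesThresholdDiff bits,
// i.e. shares may be up to 2^3 = 8x "easier" than a full block.
func workShareTarget(blockTarget *big.Int) *big.Int {
	return new(big.Int).Lsh(blockTarget, uint(WorkSharesThresholdDiff))
}

// shareWithinDepth reports whether a share anchored at shareNumber may still
// be referenced by a block at blockNumber.
func shareWithinDepth(blockNumber, shareNumber uint64) bool {
	return blockNumber >= shareNumber && blockNumber-shareNumber <= uint64(WorkSharesInclusionDepth)
}

// shareMeetsTarget checks the relaxed proof-of-work condition for a share
// hash interpreted as a big-endian integer.
func shareMeetsTarget(shareHash []byte, blockTarget *big.Int) bool {
	return new(big.Int).SetBytes(shareHash).Cmp(workShareTarget(blockTarget)) <= 0
}
```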
