diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 09e849ee7e..598c8888b3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,7 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @evan-forbes @cmwaters @staheri14 @rach-id @ninabarbakadze @rootulp +* @celestiaorg/celestia-core # Overrides for tooling packages docs/celestia-architecture @liamsi @adlerjohn diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 8b6705b1fd..b36de99cf6 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -19,7 +19,7 @@ jobs: # steps: # - uses: actions/setup-go@v3 # with: - # go-version: "1.19" + # go-version: "1.22.2" # - uses: actions/checkout@v3 @@ -43,7 +43,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: "1.22.2" - uses: actions/checkout@v4 with: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 60fe9a6b84..6dc0b8e24c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -10,6 +10,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: actions/setup-go@v4 + with: + go-version: "1.22.2" - name: Create a file with all the pkgs run: go list ./... 
> pkgs.txt - name: Split pkgs into 4 files @@ -45,7 +48,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" + go-version: "1.22.2" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: @@ -67,7 +70,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" + go-version: "1.22.2" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index 5b93752694..aa0fdbe9b2 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' - uses: actions/checkout@v4 diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index dfa9b88526..fbc0d1c074 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -23,7 +23,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' - uses: actions/checkout@v4 with: diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index f265593c8b..f15ae6efbb 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -14,8 +14,8 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: '1.19' - - uses: actions/checkout@v4 + go-version: '1.22.2' + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 7e78fd0182..43a5130e58 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -11,7 +11,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' - uses: actions/checkout@v4 diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index f2993a1157..2456c89e3a 100644 --- a/.github/workflows/govulncheck.yml 
+++ b/.github/workflows/govulncheck.yml @@ -10,23 +10,21 @@ on: branches: - v[0-9]+.[0-9]+.x-celestia -# TODO: re-enable after figuring out what needs to get fixed or if this is -# handled upstream in main -# jobs: -# govulncheck: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/setup-go@v3 -# with: -# go-version: "1.19" -# - uses: actions/checkout@v3 -# - uses: technote-space/get-diff-action@v6 -# with: -# PATTERNS: | -# **/*.go -# go.mod -# go.sum -# Makefile -# - name: govulncheck -# run: make vulncheck -# if: "env.GIT_DIFF != ''" +jobs: + govulncheck: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + with: + go-version: "1.22.2" + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 + with: + PATTERNS: | + **/*.go + go.mod + go.sum + Makefile + - name: govulncheck + run: make vulncheck + if: "env.GIT_DIFF != ''" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2a69042d98..c53cd9f62a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -34,7 +34,7 @@ jobs: # Required: the version of golangci-lint is required and # must be specified without patch version: we always use the # latest patch version. - version: v1.50.1 + version: v1.56.2 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index a7db3aae6c..2a6cd29807 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' # Similar check to ./release-version.yml, but enforces this when pushing # tags. 
The ./release-version.yml check can be bypassed and is mainly diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml index c7c977e4a3..a080834af5 100644 --- a/.github/workflows/release-version.yml +++ b/.github/workflows/release-version.yml @@ -15,7 +15,7 @@ jobs: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' - name: Check version run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index da1fe53d40..32687945f3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: - uses: actions/setup-go@v4 with: - go-version: '1.19' + go-version: '1.22.2' - name: Generate release notes run: | @@ -32,4 +32,3 @@ jobs: args: release --clean --release-notes ../release_notes.md env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 38e5caa668..f4cde15b2f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -25,8 +25,8 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" - - uses: actions/checkout@v4 + go-version: "1.22.2" + - uses: actions/checkout@v3 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -57,7 +57,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" + go-version: "1.22.2" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: @@ -89,7 +89,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" + go-version: "1.22.2" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: @@ -121,7 +121,7 @@ jobs: # steps: # - uses: actions/setup-go@v3 # with: - # go-version: "1.19" + # go-version: "1.22.2" # - uses: actions/checkout@v3 # - uses: technote-space/get-diff-action@v6 # with: diff --git a/.golangci.yml b/.golangci.yml index 32d31102a2..e101841b3d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,8 +1,11 @@ +run: + skip-files: + - 
"libs/pubsub/query/query.peg.go" + linters: enable: - asciicheck - bodyclose - - depguard - dogsled - dupl - errcheck @@ -10,13 +13,13 @@ linters: - goconst - gofmt - goimports - - revive + #- revive - gosec - gosimple - govet - ineffassign - misspell - - nakedret + #- nakedret - nolintlint - prealloc - staticcheck @@ -31,6 +34,13 @@ issues: - path: _test\.go linters: - gosec + - staticcheck + - nolintlint + - path: test/fuzz/ + linters: + - gosec + - nolintlint + - staticcheck max-same-issues: 50 linters-settings: @@ -40,7 +50,74 @@ linters-settings: min-confidence: 0 maligned: suggest-new: true - -run: - skip-files: - - libs/pubsub/query/query.peg.go + goconst: + ignore-tests: true + depguard: + rules: + main: + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/tendermint + - github.com/cometbft + - github.com/cosmos + - github.com/gogo + - github.com/Workiva/go-datastructures + - github.com/ChainSafe/go-schnorrkel + - github.com/creachadair/taskgroup + - github.com/gtank/merlin + - github.com/btcsuite/btcd/btcec/v2 + - github.com/BurntSushi/toml + - github.com/go-git/go-git/v5 + - github.com/go-kit + - github.com/go-logfmt/logfmt + - github.com/gofrs/uuid + - github.com/google + - github.com/gorilla/websocket + - github.com/informalsystems/tm-load-test/pkg/loadtest + - github.com/lib/pq + - github.com/libp2p/go-buffer-pool + - github.com/Masterminds/semver/v3 + - github.com/minio/highwayhash + - github.com/oasisprotocol/curve25519-voi + - github.com/pkg/errors + - github.com/prometheus + - github.com/rcrowley/go-metrics + - github.com/rs/cors + - github.com/snikch/goodman + - github.com/spf13 + - github.com/stretchr/testify/require + - github.com/syndtr/goleveldb + # celestia-core specific + - github.com/influxdata/influxdb-client-go/v2 + - github.com/grafana/pyroscope-go + - github.com/grafana/otel-profiling-go + - github.com/celestiaorg/nmt + test: + files: + - "$test" + allow: + - $gostd + - github.com/cosmos + - github.com/tendermint + - 
github.com/cometbft + - github.com/gogo + - github.com/Workiva/go-datastructures + - github.com/ChainSafe/go-schnorrkel + - github.com/creachadair/taskgroup + - github.com/gtank/merlin + - github.com/adlio/schema + - github.com/btcsuite/btcd + - github.com/fortytw2/leaktest + - github.com/go-kit + - github.com/google/uuid + - github.com/gorilla/websocket + - github.com/lib/pq + - github.com/oasisprotocol/curve25519-voi/primitives/merlin + - github.com/ory/dockertest + - github.com/pkg/errors + - github.com/prometheus/client_golang/prometheus/promhttp + - github.com/spf13 + - github.com/stretchr/testify diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index af6361e690..c1ee8ad8e7 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,6 +1,6 @@ # Use a build arg to ensure that both stages use the same, # hopefully current, go version. -ARG GOLANG_BASE_IMAGE=golang:1.19-alpine +ARG GOLANG_BASE_IMAGE=golang:1.22.4-alpine # stage 1 Generate CometBFT Binary FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder @@ -58,4 +58,3 @@ CMD ["node"] # Expose the data directory as a volume since there's mutable state in there VOLUME [ "$CMTHOME" ] - diff --git a/Makefile b/Makefile index 276b3a327c..9350998c71 100644 --- a/Makefile +++ b/Makefile @@ -154,7 +154,7 @@ endif proto-gen: check-proto-deps @echo "Generating Protobuf files" - @go run github.com/bufbuild/buf/cmd/buf generate + @go run github.com/bufbuild/buf/cmd/buf@v1.29.0 generate @mv ./proto/tendermint/abci/types.pb.go ./abci/types/ @cp ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc .PHONY: proto-gen @@ -163,7 +163,7 @@ proto-gen: check-proto-deps # execution only. 
proto-lint: check-proto-deps @echo "Linting Protobuf files" - @go run github.com/bufbuild/buf/cmd/buf lint + @go run github.com/bufbuild/buf/cmd/buf@v1.29.0 lint .PHONY: proto-lint proto-format: check-proto-format-deps @@ -176,11 +176,11 @@ proto-check-breaking: check-proto-deps @echo "Note: This is only useful if your changes have not yet been committed." @echo " Otherwise read up on buf's \"breaking\" command usage:" @echo " https://docs.buf.build/breaking/usage" - @go run github.com/bufbuild/buf/cmd/buf breaking --against ".git" + @go run github.com/bufbuild/buf/cmd/buf@v1.29.0 breaking --against ".git" .PHONY: proto-check-breaking proto-check-breaking-ci: - @go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x-celestia + @go run github.com/bufbuild/buf/cmd/buf@v1.29.0 breaking --against $(HTTPS_GIT)#branch=v0.34.x-celestia .PHONY: proto-check-breaking-ci ############################################################################### @@ -259,7 +259,7 @@ format: lint: @echo "--> Running linter" - @go run github.com/golangci/golangci-lint/cmd/golangci-lint run + @go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.56.2 run .PHONY: lint vulncheck: diff --git a/README.md b/README.md index 2506c57e97..8bf3f2a7e8 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ This repo intends on preserving the minimal possible diff with [cometbft/cometbf - **specific to Celestia**: consider if [celestia-app](https://github.com/celestiaorg/celestia-app) is a better target - **not specific to Celestia**: consider making the contribution upstream in CometBFT -1. [Install Go](https://go.dev/doc/install) 1.19+ +1. [Install Go](https://go.dev/doc/install) 1.22.4+ 2. Fork this repo 3. Clone your fork 4. 
Find an issue to work on (see [good first issues](https://github.com/celestiaorg/celestia-core/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)) diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 1aac7257a8..c4af564c5e 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1270,11 +1270,11 @@ func (m *RequestApplySnapshotChunk) GetSender() string { } type RequestPrepareProposal struct { - // block_data is an array of transactions that will be included in a block, - // sent to the app for possible modifications. - // applications can not exceed the size of the data passed to it. + // BlockData is a slice of candidate transactions that may be included in a + // block. BlockData is sent to the application so that the application can + // filter and re-arrange the slice of candidate transactions. BlockData *types1.Data `protobuf:"bytes,1,opt,name=block_data,json=blockData,proto3" json:"block_data,omitempty"` - // If an application decides to populate block_data with extra information, they can not exceed this value. + // BlockDataSize is the maximum size (in bytes) that BlockData should be. BlockDataSize int64 `protobuf:"varint,2,opt,name=block_data_size,json=blockDataSize,proto3" json:"block_data_size,omitempty"` // chain_id is a unique identifier for the blockchain network this proposal // belongs to (e.g. mocha-1). 
diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index 58b40b2bed..0bef6485ba 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -96,15 +96,15 @@ func AddNodeFlags(cmd *cobra.Command) { "database directory") cmd.PersistentFlags().String( - trace.FlagInfluxDBURL, - config.Instrumentation.InfluxURL, - trace.FlagInfluxDBURLDescription, + trace.FlagTracePushConfig, + config.Instrumentation.TracePushConfig, + trace.FlagTracePushConfigDescription, ) cmd.PersistentFlags().String( - trace.FlagInfluxDBToken, - config.Instrumentation.InfluxToken, - trace.FlagInfluxDBTokenDescription, + trace.FlagTracePullAddress, + config.Instrumentation.TracePullAddress, + trace.FlagTracePullAddressDescription, ) cmd.PersistentFlags().String( diff --git a/config/config.go b/config/config.go index 1a202715a0..d5e0312551 100644 --- a/config/config.go +++ b/config/config.go @@ -62,11 +62,11 @@ var ( minSubscriptionBufferSize = 100 defaultSubscriptionBufferSize = 200 - // DefaultInfluxTables is a list of tables that are used for storing traces. + // DefaultTracingTables is a list of tables that are used for storing traces. // This global var is filled by an init function in the schema package. This // allows for the schema package to contain all the relevant logic while // avoiding import cycles. - DefaultInfluxTables = []string{} + DefaultTracingTables = "" ) // Config defines the top level configuration for a CometBFT node @@ -941,8 +941,13 @@ func (cfg *FastSyncConfig) ValidateBasic() error { // including timeouts and details about the WAL and the block structure. type ConsensusConfig struct { RootDir string `mapstructure:"home"` - WalPath string `mapstructure:"wal_file"` - walFile string // overrides WalPath if set + // If set to true, only internal messages will be written + // to the WAL. 
External messages like votes, proposals + // block parts, will not be written + // Default: true + OnlyInternalWal bool `mapstructure:"only_internal_wal"` + WalPath string `mapstructure:"wal_file"` + walFile string // overrides WalPath if set // How long we wait for a proposal block before prevoting nil TimeoutPropose time.Duration `mapstructure:"timeout_propose"` @@ -979,6 +984,7 @@ type ConsensusConfig struct { // DefaultConsensusConfig returns a default configuration for the consensus service func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ + OnlyInternalWal: true, WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), TimeoutPropose: 3000 * time.Millisecond, TimeoutProposeDelta: 500 * time.Millisecond, @@ -1188,24 +1194,24 @@ type InstrumentationConfig struct { // Instrumentation namespace. Namespace string `mapstructure:"namespace"` - // InfluxURL is the influxdb url. - InfluxURL string `mapstructure:"influx_url"` + // TracePushConfig is the relative path of the push config. This second + // config contains credentials for where and how often to. + TracePushConfig string `mapstructure:"trace_push_config"` - // InfluxToken is the influxdb token. - InfluxToken string `mapstructure:"influx_token"` + // TracePullAddress is the address that the trace server will listen on for + // pulling data. + TracePullAddress string `mapstructure:"trace_pull_address"` - // InfluxOrg is the influxdb organization. - InfluxOrg string `mapstructure:"influx_org"` + // TraceType is the type of tracer used. Options are "local" and "noop". + TraceType string `mapstructure:"trace_type"` - // InfluxBucket is the influxdb bucket. - InfluxBucket string `mapstructure:"influx_bucket"` + // TraceBufferSize is the number of traces to write in a single batch. + TraceBufferSize int `mapstructure:"trace_push_batch_size"` - // InfluxBatchSize is the number of points to write in a single batch. 
- InfluxBatchSize int `mapstructure:"influx_batch_size"` - - // InfluxTables is the list of tables that will be traced. See the - // pkg/trace/schema for a complete list of tables. - InfluxTables []string `mapstructure:"influx_tables"` + // TracingTables is the list of tables that will be traced. See the + // pkg/trace/schema for a complete list of tables. It is represented as a + // comma separate string. For example: "consensus_round_state,mempool_tx". + TracingTables string `mapstructure:"tracing_tables"` // PyroscopeURL is the pyroscope url used to establish a connection with a // pyroscope continuous profiling server. @@ -1229,11 +1235,11 @@ func DefaultInstrumentationConfig() *InstrumentationConfig { PrometheusListenAddr: ":26660", MaxOpenConnections: 3, Namespace: "cometbft", - InfluxURL: "", - InfluxOrg: "celestia", - InfluxBucket: "e2e", - InfluxBatchSize: 20, - InfluxTables: DefaultInfluxTables, + TracePushConfig: "", + TracePullAddress: "", + TraceType: "noop", + TraceBufferSize: 1000, + TracingTables: DefaultTracingTables, PyroscopeURL: "", PyroscopeTrace: false, PyroscopeProfileTypes: []string{ @@ -1264,21 +1270,18 @@ func (cfg *InstrumentationConfig) ValidateBasic() error { if cfg.PyroscopeTrace && cfg.PyroscopeURL == "" { return errors.New("pyroscope_trace can't be enabled if profiling is disabled") } - // if there is not InfluxURL configured, then we do not need to validate the rest + // if there is not TracePushConfig configured, then we do not need to validate the rest // of the config because we are not connecting. 
- if cfg.InfluxURL == "" { + if cfg.TracePushConfig == "" { return nil } - if cfg.InfluxToken == "" { + if cfg.TracePullAddress == "" { return fmt.Errorf("token is required") } - if cfg.InfluxOrg == "" { + if cfg.TraceType == "" { return fmt.Errorf("org is required") } - if cfg.InfluxBucket == "" { - return fmt.Errorf("bucket is required") - } - if cfg.InfluxBatchSize <= 0 { + if cfg.TraceBufferSize <= 0 { return fmt.Errorf("batch size must be greater than 0") } return nil diff --git a/config/toml.go b/config/toml.go index 34c99aba7a..11d69130d1 100644 --- a/config/toml.go +++ b/config/toml.go @@ -456,6 +456,11 @@ version = "{{ .FastSync.Version }}" ####################################################### [consensus] +# If set to "true", only internal messages will be +# written to the WAL. External messages like votes, proposal, +# block parts, will not be written. +only_internal_wal = "{{ .Consensus.OnlyInternalWal }}" + wal_file = "{{ js .Consensus.WalPath }}" # How long we wait for a proposal block before prevoting nil @@ -547,25 +552,26 @@ max_open_connections = {{ .Instrumentation.MaxOpenConnections }} # Instrumentation namespace namespace = "{{ .Instrumentation.Namespace }}" -# The URL of the influxdb instance to use for remote event -# collection. If empty, remote event collection is disabled. -influx_url = "{{ .Instrumentation.InfluxURL }}" - -# The influxdb token to use for remote event collection. -influx_token = "{{ .Instrumentation.InfluxToken }}" +# TracePushConfig is the relative path of the push config. +# This second config contains credentials for where and how often to +# push trace data to. For example, if the config is next to this config, +# it would be "push_config.json". +trace_push_config = "{{ .Instrumentation.TracePushConfig }}" -# The influxdb bucket to use for remote event collection. -influx_bucket = "{{ .Instrumentation.InfluxBucket }}" +# The tracer pull address specifies which address will be used for pull based +# event collection. 
If empty, the pull based server will not be started. +trace_pull_address = "{{ .Instrumentation.TracePullAddress }}" -# The influxdb org to use for event remote collection. -influx_org = "{{ .Instrumentation.InfluxOrg }}" +# The tracer to use for collecting trace data. +trace_type = "{{ .Instrumentation.TraceType }}" # The size of the batches that are sent to the database. -influx_batch_size = {{ .Instrumentation.InfluxBatchSize }} +trace_push_batch_size = {{ .Instrumentation.TraceBufferSize }} # The list of tables that are updated when tracing. All available tables and -# their schema can be found in the pkg/trace/schema package. -influx_tables = [{{ range .Instrumentation.InfluxTables }}{{ printf "%q, " . }}{{end}}] +# their schema can be found in the pkg/trace/schema package. It is represented as a +# comma separate string. For example: "consensus_round_state,mempool_tx". +tracing_tables = "{{ .Instrumentation.TracingTables }}" # The URL of the pyroscope instance to use for continuous profiling. # If empty, continuous profiling is disabled. 
diff --git a/consensus/reactor.go b/consensus/reactor.go index ce5ce90b7c..eec5afa8d0 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -50,7 +50,7 @@ type Reactor struct { rs *cstypes.RoundState Metrics *Metrics - traceClient *trace.Client + traceClient trace.Tracer } type ReactorOption func(*Reactor) @@ -63,7 +63,7 @@ func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) waitSync: waitSync, rs: consensusState.GetRoundState(), Metrics: NopMetrics(), - traceClient: &trace.Client{}, + traceClient: trace.NoOpTracer(), } conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) @@ -272,6 +272,15 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { conR.conS.mtx.Lock() initialHeight := conR.conS.state.InitialHeight conR.conS.mtx.Unlock() + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusNewRoundStep, + schema.Download, + fmt.Sprintf("%d", msg.Step), + ) if err = msg.ValidateHeight(initialHeight); err != nil { conR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", msg, "err", err) conR.Switch.StopPeerForError(e.Src, err) @@ -279,14 +288,39 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { } ps.ApplyNewRoundStepMessage(msg) case *NewValidBlockMessage: + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusNewValidBlock, + schema.Download, + ) ps.ApplyNewValidBlockMessage(msg) case *HasVoteMessage: ps.ApplyHasVoteMessage(msg) + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusHasVote, + schema.Download, + msg.Type.String(), + ) case *VoteSetMaj23Message: cs := conR.conS cs.mtx.Lock() height, votes := cs.Height, cs.Votes cs.mtx.Unlock() + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusVoteSet23Precommit, + schema.Download, + ) if height != msg.Height 
{ return } @@ -316,10 +350,20 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { if votes := ourVotes.ToProto(); votes != nil { eMsg.Votes = *votes } - p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck + if p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck ChannelID: VoteSetBitsChannel, Message: eMsg, - }, conR.Logger) + }, conR.Logger) { + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.Round, + string(e.Src.ID()), + schema.ConsensusVoteSetBits, + schema.Upload, + msg.Type.String(), + ) + } default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -333,12 +377,27 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { case *ProposalMessage: ps.SetHasProposal(msg.Proposal) conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} + schema.WriteProposal( + conR.traceClient, + msg.Proposal.Height, + msg.Proposal.Round, + string(e.Src.ID()), + schema.Download, + ) case *ProposalPOLMessage: ps.ApplyProposalPOLMessage(msg) + schema.WriteConsensusState( + conR.traceClient, + msg.Height, + msg.ProposalPOLRound, + string(e.Src.ID()), + schema.ConsensusPOL, + schema.Download, + ) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, int(msg.Part.Index)) conR.Metrics.BlockParts.With("peer_id", string(e.Src.ID())).Add(1) - schema.WriteBlockPart(conR.traceClient, msg.Height, msg.Round, e.Src.ID(), msg.Part.Index, schema.TransferTypeDownload) + schema.WriteBlockPart(conR.traceClient, msg.Height, msg.Round, msg.Part.Index, false, string(e.Src.ID()), schema.Download) conR.conS.peerMsgQueue <- msgInfo{msg, e.Src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) @@ -357,7 +416,7 @@ func (conR *Reactor) ReceiveEnvelope(e p2p.Envelope) { cs.Validators.Size(), cs.LastCommit.Size() cs.mtx.RUnlock() - schema.WriteVote(conR.traceClient, height, round, msg.Vote, e.Src.ID(), schema.TransferTypeDownload) + 
schema.WriteVote(conR.traceClient, height, round, msg.Vote, string(e.Src.ID()), schema.Download) ps.EnsureVoteBitArrays(height, valSize) ps.EnsureVoteBitArrays(height-1, lastCommitSize) @@ -477,6 +536,15 @@ func (conR *Reactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { ChannelID: StateChannel, Message: nrsMsg, }) + schema.WriteConsensusState( + conR.traceClient, + nrsMsg.Height, + nrsMsg.Round, + schema.Broadcast, + schema.ConsensusNewRoundStep, + schema.Upload, + fmt.Sprintf("%d", nrsMsg.Step), + ) } func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { @@ -492,6 +560,14 @@ func (conR *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) { ChannelID: StateChannel, Message: csMsg, }) + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + schema.Broadcast, + schema.ConsensusNewValidBlock, + schema.Upload, + ) } // Broadcasts HasVoteMessage to peers that care. @@ -506,6 +582,15 @@ func (conR *Reactor) broadcastHasVoteMessage(vote *types.Vote) { ChannelID: StateChannel, Message: msg, }) + schema.WriteConsensusState( + conR.traceClient, + vote.Height, + vote.Round, + schema.Broadcast, + schema.ConsensusHasVote, + schema.Upload, + vote.Type.String(), + ) /* // TODO: Make this broadcast more selective. 
for _, peer := range conR.Switch.Peers().List() { @@ -544,10 +629,20 @@ func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *cmtcons.NewRoundStep) func (conR *Reactor) sendNewRoundStepMessage(peer p2p.Peer) { rs := conR.getRoundState() nrsMsg := makeRoundStepMessage(rs) - p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ChannelID: StateChannel, Message: nrsMsg, - }, conR.Logger) + }, conR.Logger) { + schema.WriteConsensusState( + conR.traceClient, + nrsMsg.Height, + nrsMsg.Round, + string(peer.ID()), + schema.ConsensusNewRoundStep, + schema.Upload, + fmt.Sprintf("%d", nrsMsg.Step), + ) + } } func (conR *Reactor) updateRoundStateRoutine() { @@ -599,7 +694,7 @@ OUTER_LOOP: Part: *parts, }, }, logger) { - schema.WriteBlockPart(conR.traceClient, rs.Height, rs.Round, peer.ID(), part.Index, schema.TransferTypeUpload) + schema.WriteBlockPart(conR.traceClient, rs.Height, rs.Round, part.Index, false, string(peer.ID()), schema.Upload) ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) } continue OUTER_LOOP @@ -652,6 +747,13 @@ OUTER_LOOP: }, logger) { // NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected! ps.SetHasProposal(rs.Proposal) + schema.WriteProposal( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.Upload, + ) } } // ProposalPOL: lets peer know which POL votes we have so far. @@ -660,14 +762,23 @@ OUTER_LOOP: // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). 
if 0 <= rs.Proposal.POLRound { logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + if p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ChannelID: DataChannel, Message: &cmtcons.ProposalPOL{ Height: rs.Height, ProposalPolRound: rs.Proposal.POLRound, ProposalPol: *rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray().ToProto(), }, - }, logger) + }, logger) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusPOL, + schema.Upload, + ) + } } continue OUTER_LOOP } @@ -719,6 +830,15 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt }, }, logger) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + schema.WriteBlockPart( + conR.traceClient, + prs.Height, + prs.Round, + uint32(index), + true, + string(peer.ID()), + schema.Upload, + ) } else { logger.Debug("Sending block part for catchup failed") // sleep to avoid retrying too fast @@ -783,7 +903,7 @@ OUTER_LOOP: if vote != nil { logger.Debug("Picked Catchup commit to send", "height", prs.Height) schema.WriteVote(conR.traceClient, rs.Height, rs.Round, vote, - ps.peer.ID(), schema.TransferTypeUpload) + string(ps.peer.ID()), schema.Upload) continue OUTER_LOOP } } @@ -812,7 +932,7 @@ func (conR *Reactor) pickSendVoteAndTrace(votes types.VoteSetReader, rs *cstypes vote := ps.PickSendVote(votes) if vote != nil { // if a vote is sent, trace it schema.WriteVote(conR.traceClient, rs.Height, rs.Round, vote, - ps.peer.ID(), schema.TransferTypeUpload) + string(ps.peer.ID()), schema.Upload) return true } return false @@ -894,7 +1014,7 @@ OUTER_LOOP: if rs.Height == prs.Height { if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ChannelID: StateChannel, Message: 
&cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -902,7 +1022,16 @@ OUTER_LOOP: Type: cmtproto.PrevoteType, BlockID: maj23.ToProto(), }, - }, ps.logger) + }, ps.logger) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusVoteSet23Prevote, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -914,7 +1043,7 @@ OUTER_LOOP: prs := ps.GetRoundState() if rs.Height == prs.Height { if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -922,7 +1051,16 @@ OUTER_LOOP: Type: cmtproto.PrecommitType, BlockID: maj23.ToProto(), }, - }, ps.logger) + }, ps.logger) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusVoteSet23Precommit, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -935,7 +1073,7 @@ OUTER_LOOP: if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -943,7 +1081,16 @@ OUTER_LOOP: Type: cmtproto.PrevoteType, BlockID: maj23.ToProto(), }, - }, ps.logger) + }, ps.logger) { + schema.WriteConsensusState( + conR.traceClient, + rs.Height, + rs.Round, + string(peer.ID()), + schema.ConsensusPOL, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -958,7 +1105,7 @@ OUTER_LOOP: if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && prs.Height >= 
conR.conS.blockStore.Base() { if commit := conR.conS.LoadCommit(prs.Height); commit != nil { - p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck + if p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck ChannelID: StateChannel, Message: &cmtcons.VoteSetMaj23{ Height: prs.Height, @@ -966,7 +1113,16 @@ OUTER_LOOP: Type: cmtproto.PrecommitType, BlockID: commit.BlockID.ToProto(), }, - }, ps.logger) + }, ps.logger) { + schema.WriteConsensusState( + conR.traceClient, + prs.Height, + prs.Round, + string(peer.ID()), + schema.ConsensusVoteSet23Precommit, + schema.Upload, + ) + } time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } @@ -1046,7 +1202,7 @@ func ReactorMetrics(metrics *Metrics) ReactorOption { return func(conR *Reactor) { conR.Metrics = metrics } } -func ReactorTracing(traceClient *trace.Client) ReactorOption { +func ReactorTracing(traceClient trace.Tracer) ReactorOption { return func(conR *Reactor) { conR.traceClient = traceClient } } diff --git a/consensus/replay.go b/consensus/replay.go index 586ddebf80..7df9142f32 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -258,15 +258,10 @@ func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.Ap } appHash := res.LastBlockAppHash - h.logger.Info("ABCI Handshake App Info", - "height", blockHeight, - "hash", appHash, - "software-version", res.Version, - "protocol-version", res.AppVersion, - ) - - // Only set the version if there is no existing state. 
- if h.initialState.LastBlockHeight == 0 { + appVersion := h.initialState.Version.Consensus.App + // set app version if it's not set via genesis + if h.initialState.LastBlockHeight == 0 && appVersion == 0 && res.AppVersion != 0 { + appVersion = res.AppVersion h.initialState.Version.Consensus.App = res.AppVersion } @@ -277,7 +272,10 @@ func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.Ap } h.logger.Info("Completed ABCI Handshake - CometBFT and App are synced", - "appHeight", blockHeight, "appHash", appHash) + "appHeight", blockHeight, + "appHash", appHash, + "appVersion", appVersion, + ) // TODO: (on restart) replay mempool diff --git a/consensus/state.go b/consensus/state.go index 7a37a1a35c..35fa46d44d 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -143,7 +143,7 @@ type State struct { // for reporting metrics metrics *Metrics - traceClient *trace.Client + traceClient trace.Tracer } // StateOption sets an optional parameter on the State. @@ -174,7 +174,7 @@ func NewState( evpool: evpool, evsw: cmtevents.NewEventSwitch(), metrics: NopMetrics(), - traceClient: &trace.Client{}, + traceClient: trace.NoOpTracer(), } // set function defaults (may be overwritten before calling Start) @@ -217,7 +217,7 @@ func StateMetrics(metrics *Metrics) StateOption { } // SetTraceClient sets the remote event collector. -func SetTraceClient(ec *trace.Client) StateOption { +func SetTraceClient(ec trace.Tracer) StateOption { return func(cs *State) { cs.traceClient = ec } } @@ -706,7 +706,9 @@ func (cs *State) newStep() { cs.nSteps++ - schema.WriteRoundState(cs.traceClient, cs.Height, cs.Round, cs.Step) + step := uint8(cs.RoundState.Step) + + schema.WriteRoundState(cs.traceClient, cs.Height, cs.Round, step) // newStep is called by updateToState in NewState before the eventBus is set! 
if cs.eventBus != nil { @@ -773,8 +775,10 @@ func (cs *State) receiveRoutine(maxSteps int) { cs.handleTxsAvailable() case mi = <-cs.peerMsgQueue: - if err := cs.wal.Write(mi); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + if !cs.config.OnlyInternalWal { + if err := cs.wal.Write(mi); err != nil { + cs.Logger.Error("failed writing to WAL", "err", err) + } } // handles proposals, block parts, votes @@ -1158,7 +1162,9 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { block, blockParts = cs.TwoThirdPrevoteBlock, cs.TwoThirdPrevoteBlockParts } else { // Create a new proposal block from state/txs from the mempool. + schema.WriteABCI(cs.traceClient, schema.PrepareProposalStart, height, round) block, blockParts = cs.createProposalBlock() + schema.WriteABCI(cs.traceClient, schema.PrepareProposalEnd, height, round) if block == nil { return } @@ -1303,12 +1309,16 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { return } + schema.WriteABCI(cs.traceClient, schema.ProcessProposalStart, height, round) + stateMachineValidBlock, err := cs.blockExec.ProcessProposal(cs.ProposalBlock) if err != nil { cs.Logger.Error("state machine returned an error when trying to process proposal block", "err", err) return } + schema.WriteABCI(cs.traceClient, schema.ProcessProposalEnd, height, round) + // Vote nil if application invalidated the block if !stateMachineValidBlock { // The app says we must vote nil @@ -1695,6 +1705,8 @@ func (cs *State) finalizeCommit(height int64) { retainHeight int64 ) + schema.WriteABCI(cs.traceClient, schema.CommitStart, height, 0) + stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( stateCopy, types.BlockID{ @@ -1708,6 +1720,8 @@ func (cs *State) finalizeCommit(height int64) { panic(fmt.Sprintf("failed to apply block; error %v", err)) } + schema.WriteABCI(cs.traceClient, schema.CommitEnd, height, 0) + fail.Fail() // XXX // Prune old heights, if requested by ABCI app. 
@@ -1845,7 +1859,7 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { blockSize := block.Size() // trace some metadata about the block - schema.WriteBlock(cs.traceClient, block, blockSize) + schema.WriteBlockSummary(cs.traceClient, block, blockSize) cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) cs.metrics.TotalTxs.Add(float64(len(block.Data.Txs))) diff --git a/go.mod b/go.mod index 4daf6bee75..a9f2528f00 100644 --- a/go.mod +++ b/go.mod @@ -1,24 +1,36 @@ module github.com/tendermint/tendermint -go 1.21 - -toolchain go1.21.1 +go 1.22.4 require ( github.com/BurntSushi/toml v1.2.1 github.com/ChainSafe/go-schnorrkel v1.0.0 + github.com/Masterminds/semver/v3 v3.2.0 github.com/Workiva/go-datastructures v1.0.53 github.com/adlio/schema v1.3.3 + github.com/aws/aws-sdk-go v1.40.45 + github.com/btcsuite/btcd/btcec/v2 v2.3.2 + github.com/btcsuite/btcd/btcutil v1.1.3 + github.com/bufbuild/buf v1.15.1 + github.com/celestiaorg/nmt v0.21.0 + github.com/cometbft/cometbft-db v0.7.0 + github.com/creachadair/taskgroup v0.3.2 github.com/fortytw2/leaktest v1.3.0 + github.com/go-git/go-git/v5 v5.11.0 github.com/go-kit/kit v0.12.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 github.com/gofrs/uuid v4.4.0+incompatible + github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.3 github.com/golangci/golangci-lint v1.52.0 github.com/google/orderedcode v0.0.1 + github.com/google/uuid v1.4.0 github.com/gorilla/websocket v1.5.0 + github.com/grafana/otel-profiling-go v0.5.1 + github.com/grafana/pyroscope-go v1.1.1 github.com/gtank/merlin v0.1.1 + github.com/informalsystems/tm-load-test v1.3.0 github.com/lib/pq v1.10.7 github.com/libp2p/go-buffer-pool v0.1.0 github.com/minio/highwayhash v1.0.2 @@ -30,51 +42,20 @@ require ( github.com/sasha-s/go-deadlock v0.3.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.18.1 - github.com/stretchr/testify v1.8.4 -) - -require ( - 
github.com/google/uuid v1.4.0 - golang.org/x/crypto v0.21.0 - golang.org/x/net v0.23.0 - google.golang.org/grpc v1.60.0 -) - -require ( - github.com/gogo/protobuf v1.3.2 - github.com/informalsystems/tm-load-test v1.3.0 -) - -require ( - github.com/bufbuild/buf v1.15.1 - github.com/creachadair/taskgroup v0.3.2 + github.com/spf13/viper v1.15.0 + github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 -) - -require ( - github.com/Masterminds/semver/v3 v3.2.0 - github.com/btcsuite/btcd/btcec/v2 v2.3.2 - github.com/btcsuite/btcd/btcutil v1.1.3 - github.com/celestiaorg/nmt v0.20.0 - github.com/cometbft/cometbft-db v0.7.0 - github.com/go-git/go-git/v5 v5.11.0 github.com/vektra/mockery/v2 v2.23.1 + go.opentelemetry.io/otel v1.24.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.18.0 + go.opentelemetry.io/otel/sdk v1.21.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.23.0 gonum.org/v1/gonum v0.12.0 + google.golang.org/grpc v1.60.0 google.golang.org/protobuf v1.31.0 ) -require ( - github.com/cometbft/cometbft v0.38.8 - github.com/golang/mock v1.4.4 - github.com/influxdata/influxdb-client-go/v2 v2.12.2 - github.com/pyroscope-io/client v0.7.2 - github.com/pyroscope-io/otel-profiling-go v0.4.0 - go.opentelemetry.io/otel v1.15.1 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.15.1 - go.opentelemetry.io/otel/sdk v1.15.1 -) - require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect @@ -112,14 +93,12 @@ require ( github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d // indirect - github.com/cosmos/gogoproto v1.4.11 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/daixiang0/gci v0.10.1 // indirect 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect @@ -144,7 +123,7 @@ require ( github.com/go-critic/go-critic v0.7.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect @@ -178,6 +157,8 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/gtank/ristretto255 v0.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -186,13 +167,13 @@ require ( github.com/hexops/gotextdiff v1.0.3 // indirect github.com/iancoleman/strcase v0.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 // indirect github.com/jgautheron/goconst v1.5.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jinzhu/copier v0.3.5 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect 
github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect github.com/junk1tm/musttag v0.5.0 // indirect @@ -245,7 +226,6 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/pyroscope-io/godeltaprof v0.1.2 // indirect github.com/quasilyte/go-ruleguard v0.4.0 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect @@ -273,11 +253,12 @@ require ( github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.4.2 // indirect github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tdakkota/asciicheck v0.2.0 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect @@ -294,7 +275,8 @@ require ( github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.opentelemetry.io/otel/trace v1.15.1 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.10.0 // indirect go.uber.org/zap v1.24.0 // indirect diff --git a/go.sum b/go.sum index f9aa1098de..7bb7979244 100644 --- a/go.sum +++ b/go.sum @@ -97,6 +97,8 @@ github.com/ashanbrown/forbidigo v1.5.1 h1:WXhzLjOlnuDYPYQo/eFlcFMi8X/kLfvWLYu6CS github.com/ashanbrown/forbidigo v1.5.1/go.mod 
h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/aws/aws-sdk-go v1.40.45 h1:QN1nsY27ssD/JmW4s83qmSb+uL6DG4GmCDzjmJB4xUI= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -146,8 +148,6 @@ github.com/bufbuild/protocompile v0.5.1/go.mod h1:G5iLmavmF4NsYtpZFvE3B/zFch2GIY github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas= -github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= @@ -175,8 +175,6 @@ github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUK github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cometbft/cometbft v0.38.8 h1:XyJ9Cu3xqap6xtNxiemrO8roXZ+KS2Zlu7qQ0w1trvU= 
-github.com/cometbft/cometbft v0.38.8/go.mod h1:xOoGZrtUT+A5izWfHSJgl0gYZUE7lu7Z2XIS1vWG/QQ= github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= @@ -203,7 +201,6 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -219,8 +216,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= -github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/denisenkom/go-mssqldb v0.12.0 
h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= @@ -230,7 +225,6 @@ github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -287,11 +281,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-critic/go-critic v0.7.0 h1:tqbKzB8pqi0NsRZ+1pyU4aweAF7A7QN0Pi4Q02+rYnQ= @@ -320,12 +311,11 @@ github.com/go-logfmt/logfmt 
v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -413,7 +403,6 @@ github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzr github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= github.com/golangci/golangci-lint v1.52.0 h1:T7w3tuF1goz64qGV+ML4MgysSl/yUfA3UZJK92oE48A= github.com/golangci/golangci-lint v1.52.0/go.mod h1:wlTh+d/oVlgZC2yCe6nlxrxNAnuhEQC0Zdygoh72Uak= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= 
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -439,7 +428,6 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -471,7 +459,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 h1:9alfqbrhuD+9fLZ4iaAVwhlp5PEhmnBt7yvK2Oy5C1U= github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= @@ -488,6 +475,12 @@ github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoIS github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gotestyourself/gotestyourself v2.2.0+incompatible 
h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= +github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= +github.com/grafana/pyroscope-go v1.1.1 h1:PQoUU9oWtO3ve/fgIiklYuGilvsm8qaGhlY4Vw6MAcQ= +github.com/grafana/pyroscope-go v1.1.1/go.mod h1:Mw26jU7jsL/KStNSGGuuVYdUq7Qghem5P8aXYXSXG88= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= @@ -517,10 +510,6 @@ github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/influxdb-client-go/v2 v2.12.2 h1:uYABKdrEKlYm+++qfKdbgaHKBPmoWR5wpbmj6MBB/2g= -github.com/influxdata/influxdb-client-go/v2 v2.12.2/go.mod h1:YteV91FiQxRdccyJ2cHvj2f/5sq4y4Njqu1fQzsQCOU= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/informalsystems/tm-load-test v1.3.0 h1:FGjKy7vBw6mXNakt+wmNWKggQZRsKkEYpaFk/zR64VA= 
github.com/informalsystems/tm-load-test v1.3.0/go.mod h1:OQ5AQ9TbT5hKWBNIwsMjn6Bf4O0U4b1kRc+0qZlQJKw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -539,6 +528,10 @@ github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -566,8 +559,6 @@ github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= 
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -588,8 +579,6 @@ github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoa github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.4.0 h1:sylp7d9kh6AdXN2DpVGHBRb5guTVAgOxqNGhbqc4b1c= @@ -605,8 +594,6 @@ github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xq github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= @@ -615,16 +602,9 @@ github.com/matoous/godox 
v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2 github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= @@ -761,12 +741,6 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/pyroscope-io/client v0.7.2 h1:OX2qdUQsS8RSkn/3C8isD7f/P0YiZQlRbAlecAaj/R8= 
-github.com/pyroscope-io/client v0.7.2/go.mod h1:FEocnjn+Ngzxy6EtU9ZxXWRvQ0+pffkrBxHLnPpxwi8= -github.com/pyroscope-io/godeltaprof v0.1.2 h1:MdlEmYELd5w+lvIzmZvXGNMVzW2Qc9jDMuJaPOR75g4= -github.com/pyroscope-io/godeltaprof v0.1.2/go.mod h1:psMITXp90+8pFenXkKIpNhrfmI9saQnPbba27VIaiQE= -github.com/pyroscope-io/otel-profiling-go v0.4.0 h1:Hk/rbUqOWoByoWy1tt4r5BX5xoKAvs5drr0511Ki8ic= -github.com/pyroscope-io/otel-profiling-go v0.4.0/go.mod h1:MXaofiWU7PgLP7eISUZJYVO4Z8WYMqpkYgeP4XrPLyg= github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo= github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -867,8 +841,9 @@ github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8L github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -879,10 +854,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -898,8 +869,6 @@ github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpR github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= -github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= @@ -922,9 +891,6 @@ github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= github.com/uudashr/gocognit v1.0.6/go.mod 
h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vektra/mockery/v2 v2.23.1 h1:N59FENM2d/gWE6Ns5JPuf9a7jqQWeheGefZqvuvb1dM= @@ -988,8 +954,6 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= @@ -1077,10 +1041,10 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net 
v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1120,7 +1084,6 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1131,7 +1094,6 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1142,7 +1104,6 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1159,7 +1120,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1193,9 +1153,6 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1211,7 +1168,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= @@ -1224,8 +1180,6 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1404,6 +1358,7 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/light/rpc/client.go b/light/rpc/client.go index bca4c8f13b..7f7fc64b14 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -586,6 +586,7 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul // ProveShares calls rpcclient#ProveShares method and returns an NMT proof for a set // of shares, defined by `startShare` and `endShare`, to the corresponding rows. // Then, a binary merkle inclusion proof from the latter rows to the data root. +// Deprecated: Use ProveSharesV2 instead. 
func (c *Client) ProveShares( ctx context.Context, height uint64, @@ -596,6 +597,20 @@ func (c *Client) ProveShares( return res, err } +// ProveSharesV2 returns a proof of inclusion for a share range to the data root +// of the given height. +// The range is end-exclusive and defined by startShare and endShare. +// Note: this proof is composed of multiple proofs. +func (c *Client) ProveSharesV2( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + res, err := c.next.ProveSharesV2(ctx, height, startShare, endShare) + return res, err +} + func (c *Client) TxSearch( ctx context.Context, query string, diff --git a/mempool/cat/pool_test.go b/mempool/cat/pool_test.go index 3f84d00dca..978c908028 100644 --- a/mempool/cat/pool_test.go +++ b/mempool/cat/pool_test.go @@ -758,6 +758,12 @@ func TestTxPool_BroadcastQueue(t *testing.T) { wg := sync.WaitGroup{} wg.Add(1) + + for i := 0; i < txs; i++ { + tx := newDefaultTx(fmt.Sprintf("%d", i)) + require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})) + } + go func() { defer wg.Done() for i := 0; i < txs; i++ { @@ -767,14 +773,8 @@ func TestTxPool_BroadcastQueue(t *testing.T) { case wtx := <-txmp.next(): require.Equal(t, wtx.tx, newDefaultTx(fmt.Sprintf("%d", i))) } - time.Sleep(10 * time.Millisecond) } }() - for i := 0; i < txs; i++ { - tx := newDefaultTx(fmt.Sprintf("%d", i)) - require.NoError(t, txmp.CheckTx(tx, nil, mempool.TxInfo{SenderID: 0})) - } - wg.Wait() } diff --git a/mempool/cat/reactor.go b/mempool/cat/reactor.go index 1c8a374129..d34f631286 100644 --- a/mempool/cat/reactor.go +++ b/mempool/cat/reactor.go @@ -41,7 +41,7 @@ type Reactor struct { mempool *TxPool ids *mempoolIDs requests *requestScheduler - traceClient *trace.Client + traceClient trace.Tracer } type ReactorOptions struct { @@ -57,7 +57,7 @@ type ReactorOptions struct { MaxGossipDelay time.Duration // TraceClient is the trace client for collecting trace level events - 
TraceClient *trace.Client + TraceClient trace.Tracer } func (opts *ReactorOptions) VerifyAndComplete() error { @@ -91,7 +91,7 @@ func NewReactor(mempool *TxPool, opts *ReactorOptions) (*Reactor, error) { mempool: mempool, ids: newMempoolIDs(), requests: newRequestScheduler(opts.MaxGossipDelay, defaultGlobalRequestTimeout), - traceClient: &trace.Client{}, + traceClient: trace.NoOpTracer(), } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) return memR, nil @@ -228,9 +228,6 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // NOTE: This setup also means that we can support older mempool implementations that simply // flooded the network with transactions. case *protomem.Txs: - for _, tx := range msg.Txs { - schema.WriteMempoolTx(memR.traceClient, e.Src.ID(), tx, schema.TransferTypeDownload, schema.CatVersionFieldValue) - } protoTxs := msg.GetTxs() if len(protoTxs) == 0 { memR.Logger.Error("received empty txs from peer", "src", e.Src) @@ -244,6 +241,7 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { for _, tx := range protoTxs { ntx := types.Tx(tx) key := ntx.Key() + schema.WriteMempoolTx(memR.traceClient, string(e.Src.ID()), key[:], schema.Download) // If we requested the transaction we mark it as received. if memR.requests.Has(peerID, key) { memR.requests.MarkReceived(peerID, key) @@ -273,19 +271,19 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // 3. If we recently evicted the tx and still don't have space for it, we do nothing. // 4. Else, we request the transaction from that peer. 
case *protomem.SeenTx: - schema.WriteMempoolPeerState( - memR.traceClient, - e.Src.ID(), - schema.SeenTxStateUpdateFieldValue, - schema.TransferTypeDownload, - schema.CatVersionFieldValue, - ) txKey, err := types.TxKeyFromBytes(msg.TxKey) if err != nil { memR.Logger.Error("peer sent SeenTx with incorrect tx key", "err", err) memR.Switch.StopPeerForError(e.Src, err) return } + schema.WriteMempoolPeerState( + memR.traceClient, + string(e.Src.ID()), + schema.SeenTx, + txKey[:], + schema.Download, + ) peerID := memR.ids.GetIDForPeer(e.Src.ID()) memR.mempool.PeerHasTx(peerID, txKey) // Check if we don't already have the transaction and that it was recently rejected @@ -307,13 +305,6 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // A peer is requesting a transaction that we have claimed to have. Find the specified // transaction and broadcast it to the peer. We may no longer have the transaction case *protomem.WantTx: - schema.WriteMempoolPeerState( - memR.traceClient, - e.Src.ID(), - schema.WantTxStateUpdateFieldValue, - schema.TransferTypeDownload, - schema.CatVersionFieldValue, - ) txKey, err := types.TxKeyFromBytes(msg.TxKey) if err != nil { memR.Logger.Error("peer sent WantTx with incorrect tx key", "err", err) @@ -330,19 +321,18 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { tx, has := memR.mempool.GetTxByKey(txKey) if has && !memR.opts.ListenOnly { peerID := memR.ids.GetIDForPeer(e.Src.ID()) - schema.WriteMempoolTx( - memR.traceClient, - e.Src.ID(), - msg.TxKey, - schema.TransferTypeUpload, - schema.CatVersionFieldValue, - ) memR.Logger.Debug("sending a tx in response to a want msg", "peer", peerID) if p2p.SendEnvelopeShim(e.Src, p2p.Envelope{ //nolint:staticcheck ChannelID: mempool.MempoolChannel, Message: &protomem.Txs{Txs: [][]byte{tx}}, }, memR.Logger) { memR.mempool.PeerHasTx(peerID, txKey) + schema.WriteMempoolTx( + memR.traceClient, + string(e.Src.ID()), + txKey[:], + schema.Upload, + ) } } diff --git a/mempool/cat/store.go 
b/mempool/cat/store.go index 4614722f10..29e4914110 100644 --- a/mempool/cat/store.go +++ b/mempool/cat/store.go @@ -9,15 +9,17 @@ import ( // simple, thread-safe in memory store for transactions type store struct { - mtx sync.RWMutex - bytes int64 - txs map[types.TxKey]*wrappedTx + mtx sync.RWMutex + bytes int64 + txs map[types.TxKey]*wrappedTx + reservedTxs map[types.TxKey]struct{} } func newStore() *store { return &store{ - bytes: 0, - txs: make(map[types.TxKey]*wrappedTx), + bytes: 0, + txs: make(map[types.TxKey]*wrappedTx), + reservedTxs: make(map[types.TxKey]struct{}), } } @@ -27,7 +29,7 @@ func (s *store) set(wtx *wrappedTx) bool { } s.mtx.Lock() defer s.mtx.Unlock() - if tx, exists := s.txs[wtx.key]; !exists || tx.height == -1 { + if _, exists := s.txs[wtx.key]; !exists { s.txs[wtx.key] = wtx s.bytes += wtx.size() return true @@ -65,23 +67,27 @@ func (s *store) remove(txKey types.TxKey) bool { func (s *store) reserve(txKey types.TxKey) bool { s.mtx.Lock() defer s.mtx.Unlock() - _, has := s.txs[txKey] - if !has { - s.txs[txKey] = &wrappedTx{height: -1} + _, isReserved := s.reservedTxs[txKey] + if !isReserved { + s.reservedTxs[txKey] = struct{}{} return true } return false } -// release is called when a pending transaction failed -// to enter the mempool. The empty element and key is removed. +func (s *store) isReserved(txKey types.TxKey) bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + _, isReserved := s.reservedTxs[txKey] + return isReserved +} + +// release is called at the end of the process of adding a transaction. +// Regardless if it is added or not, the reserveTxs lookup map element is deleted. 
func (s *store) release(txKey types.TxKey) { s.mtx.Lock() defer s.mtx.Unlock() - value, ok := s.txs[txKey] - if ok && value.height == -1 { - delete(s.txs, txKey) - } + delete(s.reservedTxs, txKey) } func (s *store) size() int { diff --git a/mempool/cat/store_test.go b/mempool/cat/store_test.go index 4a29106ee7..4397f239ea 100644 --- a/mempool/cat/store_test.go +++ b/mempool/cat/store_test.go @@ -54,7 +54,7 @@ func TestStoreReservingTxs(t *testing.T) { // reserve a tx store.reserve(key) - require.True(t, store.has(key)) + require.True(t, store.isReserved(key)) // should not update the total bytes require.Zero(t, store.totalBytes()) @@ -73,13 +73,24 @@ func TestStoreReservingTxs(t *testing.T) { // reserve the tx again store.reserve(key) - require.True(t, store.has(key)) + require.True(t, store.isReserved(key)) // release should remove the tx store.release(key) require.False(t, store.has(key)) } +func TestReadReserved(t *testing.T) { + store := newStore() + tx := types.Tx("tx1") + store.reserve(tx.Key()) + + require.Nil(t, store.get(tx.Key())) + require.False(t, store.has(tx.Key())) + require.Len(t, store.getAllKeys(), 0) + require.Len(t, store.getAllTxs(), 0) +} + func TestStoreConcurrentAccess(t *testing.T) { store := newStore() diff --git a/mempool/v1/mempool.go b/mempool/v1/mempool.go index 57ea56559d..fa41cef889 100644 --- a/mempool/v1/mempool.go +++ b/mempool/v1/mempool.go @@ -16,7 +16,6 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/pkg/trace" - "github.com/tendermint/tendermint/pkg/trace/schema" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -60,7 +59,7 @@ type TxMempool struct { txBySender map[string]*clist.CElement // for sender != "" evictedTxs mempool.TxCache // for tracking evicted transactions - traceClient *trace.Client + traceClient trace.Tracer } // NewTxMempool constructs a new, empty priority mempool at the specified @@ -84,7 
+83,7 @@ func NewTxMempool( height: height, txByKey: make(map[types.TxKey]*clist.CElement), txBySender: make(map[string]*clist.CElement), - traceClient: &trace.Client{}, + traceClient: trace.NoOpTracer(), } if cfg.CacheSize > 0 { txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) @@ -117,7 +116,7 @@ func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { return func(txmp *TxMempool) { txmp.metrics = metrics } } -func WithTraceClient(tc *trace.Client) TxMempoolOption { +func WithTraceClient(tc trace.Tracer) TxMempoolOption { return func(txmp *TxMempool) { txmp.traceClient = tc } @@ -206,7 +205,6 @@ func (txmp *TxMempool) CheckTx(tx types.Tx, cb func(*abci.Response), txInfo memp if txmp.preCheck != nil { if err := txmp.preCheck(tx); err != nil { txmp.metrics.FailedTxs.With(mempool.TypeLabel, mempool.FailedPrecheck).Add(1) - schema.WriteMempoolRejected(txmp.traceClient, err.Error()) return 0, mempool.ErrPreCheck{Reason: err} } } @@ -503,15 +501,6 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon ) txmp.metrics.FailedTxs.With(mempool.TypeLabel, mempool.FailedAdding).Add(1) - reason := fmt.Sprintf( - "code: %d codespace: %s logs: %s local: %v postCheck error: %v", - checkTxRes.Code, - checkTxRes.Codespace, - checkTxRes.Log, - wtx.HasPeer(0), // this checks if the peer id is local - err, - ) - schema.WriteMempoolRejected(txmp.traceClient, reason) // Remove the invalid transaction from the cache, unless the operator has // instructed us to keep invalid transactions. 
@@ -695,9 +684,6 @@ func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.Respons txmp.metrics.FailedTxs.With(mempool.TypeLabel, mempool.FailedRecheck).Add(1) if !txmp.config.KeepInvalidTxsInCache { txmp.cache.Remove(wtx.tx) - if err != nil { - schema.WriteMempoolRejected(txmp.traceClient, err.Error()) - } } txmp.metrics.Size.Set(float64(txmp.Size())) txmp.metrics.SizeBytes.Set(float64(txmp.SizeBytes())) diff --git a/mempool/v1/reactor.go b/mempool/v1/reactor.go index 1bbe541b2d..2ecfc51a5f 100644 --- a/mempool/v1/reactor.go +++ b/mempool/v1/reactor.go @@ -27,7 +27,7 @@ type Reactor struct { config *cfg.MempoolConfig mempool *TxMempool ids *mempoolIDs - traceClient *trace.Client + traceClient trace.Tracer } type mempoolIDs struct { @@ -94,7 +94,7 @@ func newMempoolIDs() *mempoolIDs { } // NewReactor returns a new Reactor with the given config and mempool. -func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool, traceClient *trace.Client) *Reactor { +func NewReactor(config *cfg.MempoolConfig, mempool *TxMempool, traceClient trace.Tracer) *Reactor { memR := &Reactor{ config: config, mempool: mempool, @@ -180,15 +180,6 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) switch msg := e.Message.(type) { case *protomem.Txs: - for _, tx := range msg.Txs { - schema.WriteMempoolTx( - memR.traceClient, - e.Src.ID(), - tx, - schema.TransferTypeDownload, - schema.V1VersionFieldValue, - ) - } protoTxs := msg.GetTxs() if len(protoTxs) == 0 { memR.Logger.Error("received tmpty txs from peer", "src", e.Src) @@ -202,6 +193,12 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { var err error for _, tx := range protoTxs { ntx := types.Tx(tx) + schema.WriteMempoolTx( + memR.traceClient, + string(e.Src.ID()), + ntx.Hash(), + schema.Download, + ) err = memR.mempool.CheckTx(ntx, nil, txInfo) if errors.Is(err, mempool.ErrTxInCache) { memR.Logger.Debug("Tx already exists 
in cache", "tx", ntx.String()) @@ -302,14 +299,13 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // record that we have sent the peer the transaction // to avoid doing it a second time memTx.SetPeer(peerID) + schema.WriteMempoolTx( + memR.traceClient, + string(peer.ID()), + memTx.tx.Hash(), + schema.Upload, + ) } - schema.WriteMempoolTx( - memR.traceClient, - peer.ID(), - memTx.tx, - schema.TransferTypeUpload, - schema.V1VersionFieldValue, - ) } select { diff --git a/mempool/v1/reactor_test.go b/mempool/v1/reactor_test.go index b337745a8f..31cb9672e9 100644 --- a/mempool/v1/reactor_test.go +++ b/mempool/v1/reactor_test.go @@ -164,7 +164,7 @@ func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { mempool, cleanup := newMempoolWithAppAndConfig(cc, config) defer cleanup() - reactors[i] = NewReactor(config.Mempool, mempool, &trace.Client{}) // so we dont start the consensus states + reactors[i] = NewReactor(config.Mempool, mempool, trace.NoOpTracer()) // so we dont start the consensus states reactors[i].SetLogger(logger.With("validator", i)) } diff --git a/node/node.go b/node/node.go index dcb0cdc4d8..e2c749fb1f 100644 --- a/node/node.go +++ b/node/node.go @@ -11,9 +11,9 @@ import ( "time" dbm "github.com/cometbft/cometbft-db" + "github.com/grafana/pyroscope-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/pyroscope-io/client/pyroscope" "github.com/rs/cors" sdktrace "go.opentelemetry.io/otel/sdk/trace" @@ -234,7 +234,7 @@ type Node struct { blockIndexer indexer.BlockIndexer indexerService *txindex.IndexerService prometheusSrv *http.Server - influxDBClient *trace.Client + tracer trace.Tracer pyroscopeProfiler *pyroscope.Profiler pyroscopeTracer *sdktrace.TracerProvider } @@ -378,7 +378,7 @@ func createMempoolAndMempoolReactor( state sm.State, memplMetrics *mempl.Metrics, logger log.Logger, - traceClient *trace.Client, + traceClient trace.Tracer, ) (mempl.Mempool, 
p2p.Reactor) { switch config.Mempool.Version { case cfg.MempoolV2: @@ -515,7 +515,7 @@ func createConsensusReactor(config *cfg.Config, waitSync bool, eventBus *types.EventBus, consensusLogger log.Logger, - traceClient *trace.Client, + traceClient trace.Tracer, ) (*cs.Reactor, *cs.State) { consensusState := cs.NewState( config.Consensus, @@ -549,13 +549,14 @@ func createTransport( nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns, + tracer trace.Tracer, ) ( *p2p.MultiplexTransport, []p2p.PeerFilterFunc, ) { var ( mConnConfig = p2p.MConnConfig(config.P2P) - transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) + transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig, tracer) connFilters = []p2p.ConnFilterFunc{} peerFilters = []p2p.PeerFilterFunc{} ) @@ -625,12 +626,14 @@ func createSwitch(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, p2pLogger log.Logger, + tracer trace.Tracer, ) *p2p.Switch { sw := p2p.NewSwitch( config.P2P, transport, p2p.WithMetrics(p2pMetrics), p2p.SwitchPeerFilters(peerFilters...), + p2p.WithTracer(tracer), ) sw.SetLogger(p2pLogger) sw.AddReactor("MEMPOOL", mempoolReactor) @@ -856,11 +859,9 @@ func NewNode(config *cfg.Config, csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID, softwareVersion) - // create an optional influxdb client to send arbitrary data to a remote - // influxdb server. This is used to collect trace data from many different nodes - // in a network. - influxdbClient, err := trace.NewClient( - config.Instrumentation, + // create an optional tracer client to collect trace data. 
+ tracer, err := trace.NewTracer( + config, logger, genDoc.ChainID, string(nodeKey.ID()), @@ -870,7 +871,7 @@ func NewNode(config *cfg.Config, } // Make MempoolReactor - mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, influxdbClient) + mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger, tracer) // Make Evidence Reactor evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) @@ -904,7 +905,7 @@ func NewNode(config *cfg.Config, } consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, - privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, influxdbClient, + privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, tracer, ) // Set up state sync reactor, and schedule a sync if requested. @@ -925,13 +926,13 @@ func NewNode(config *cfg.Config, } // Setup Transport. - transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) + transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp, tracer) // Setup Switch. 
p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, - stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, tracer, ) err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) @@ -1002,7 +1003,7 @@ func NewNode(config *cfg.Config, indexerService: indexerService, blockIndexer: blockIndexer, eventBus: eventBus, - influxDBClient: influxdbClient, + tracer: tracer, } node.BaseService = *service.NewBaseService(logger, "Node", node) @@ -1151,8 +1152,8 @@ func (n *Node) OnStop() { } } - if n.influxDBClient != nil { - n.influxDBClient.Stop() + if n.tracer != nil { + n.tracer.Stop() } if n.pyroscopeProfiler != nil { diff --git a/node/tracing.go b/node/tracing.go index 4e2e00f76e..f49fd89f86 100644 --- a/node/tracing.go +++ b/node/tracing.go @@ -1,10 +1,10 @@ package node import ( - "github.com/pyroscope-io/client/pyroscope" + "github.com/grafana/pyroscope-go" "github.com/tendermint/tendermint/config" - otelpyroscope "github.com/pyroscope-io/otel-profiling-go" + otelpyroscope "github.com/grafana/otel-profiling-go" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/propagation" diff --git a/p2p/peer.go b/p2p/peer.go index cb36413182..f43cff9d51 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -11,6 +11,8 @@ import ( "github.com/tendermint/tendermint/libs/cmap" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/pkg/trace/schema" cmtconn "github.com/tendermint/tendermint/p2p/conn" ) @@ -175,6 +177,7 @@ type peer struct { Data *cmap.CMap metrics *Metrics + traceClient trace.Tracer metricsTicker *time.Ticker mlc *metricsLabelCache @@ -184,6 +187,12 @@ type peer struct { type PeerOption 
func(*peer) +func WithPeerTracer(t trace.Tracer) PeerOption { + return func(p *peer) { + p.traceClient = t + } +} + func newPeer( pc peerConn, mConfig cmtconn.MConnConfig, @@ -203,6 +212,7 @@ func newPeer( metricsTicker: time.NewTicker(metricsTickerDuration), metrics: NopMetrics(), mlc: mlc, + traceClient: trace.NoOpTracer(), } p.mconn = createMConnection( @@ -494,11 +504,14 @@ func (p *peer) metricsReporter() { case <-p.metricsTicker.C: status := p.mconn.Status() var sendQueueSize float64 + queues := make(map[byte]int, len(status.Channels)) for _, chStatus := range status.Channels { sendQueueSize += float64(chStatus.SendQueueSize) + queues[chStatus.ID] = chStatus.SendQueueSize } p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) + schema.WritePendingBytes(p.traceClient, string(p.ID()), queues) case <-p.Quit(): return } @@ -546,6 +559,7 @@ func createMConnection( p.metrics.PeerReceiveBytesTotal.With(labels...).Add(float64(len(msgBytes))) p.metrics.MessageReceiveBytesTotal.With(append(labels, "message_type", p.mlc.ValueToMetricLabel(msg))...).Add(float64(len(msgBytes))) + schema.WriteReceivedBytes(p.traceClient, string(p.ID()), chID, len(msgBytes)) if nr, ok := reactor.(EnvelopeReceiver); ok { nr.ReceiveEnvelope(Envelope{ ChannelID: chID, diff --git a/p2p/switch.go b/p2p/switch.go index 60d26729b0..af0607e037 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -12,6 +12,8 @@ import ( "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/pkg/trace" + "github.com/tendermint/tendermint/pkg/trace/schema" ) const ( @@ -91,8 +93,9 @@ type Switch struct { rng *rand.Rand // seed for randomizing dial times and orders - metrics *Metrics - mlc *metricsLabelCache + metrics *Metrics + mlc *metricsLabelCache + traceClient trace.Tracer } // NetAddress returns the address the switch is listening on. 
@@ -126,6 +129,7 @@ func NewSwitch( persistentPeersAddrs: make([]*NetAddress, 0), unconditionalPeerIDs: make(map[ID]struct{}), mlc: newMetricsLabelCache(), + traceClient: trace.NoOpTracer(), } // Ensure we have a completely undeterministic PRNG. @@ -155,6 +159,10 @@ func WithMetrics(metrics *Metrics) SwitchOption { return func(sw *Switch) { sw.metrics = metrics } } +func WithTracer(tracer trace.Tracer) SwitchOption { + return func(sw *Switch) { sw.traceClient = tracer } +} + //--------------------------------------------------------------------- // Switch setup @@ -398,6 +406,7 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { sw.transport.Cleanup(peer) + schema.WritePeerUpdate(sw.traceClient, string(peer.ID()), schema.PeerDisconnect, fmt.Sprintf("%v", reason)) if err := peer.Stop(); err != nil { sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly } @@ -883,6 +892,7 @@ func (sw *Switch) addPeer(p Peer) error { return err } sw.metrics.Peers.Add(float64(1)) + schema.WritePeerUpdate(sw.traceClient, string(p.ID()), schema.PeerJoin, "") // Start all the reactor protocols on the peer. 
for _, reactor := range sw.reactors { diff --git a/p2p/test_util.go b/p2p/test_util.go index 32166043a1..c527863054 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -10,6 +10,7 @@ import ( "github.com/tendermint/tendermint/libs/log" cmtnet "github.com/tendermint/tendermint/libs/net" cmtrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/trace" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" @@ -199,7 +200,7 @@ func MakeSwitch( panic(err) } - t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg)) + t := NewMultiplexTransport(nodeInfo, nodeKey, MConnConfig(cfg), trace.NoOpTracer()) if err := t.Listen(*addr); err != nil { panic(err) diff --git a/p2p/transport.go b/p2p/transport.go index 416c946942..5c82f1f757 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/pkg/trace" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -158,6 +159,9 @@ type MultiplexTransport struct { // peer currently. All relevant configuration should be refactored into options // with sane defaults. mConfig conn.MConnConfig + + // the tracer is passed to peers for collecting trace data + tracer trace.Tracer } // Test multiplexTransport for interface completeness. 
@@ -169,6 +173,7 @@ func NewMultiplexTransport( nodeInfo NodeInfo, nodeKey NodeKey, mConfig conn.MConnConfig, + tracer trace.Tracer, ) *MultiplexTransport { return &MultiplexTransport{ acceptc: make(chan accept), @@ -181,6 +186,7 @@ func NewMultiplexTransport( nodeKey: nodeKey, conns: NewConnSet(), resolver: net.DefaultResolver, + tracer: tracer, } } @@ -527,6 +533,7 @@ func (mt *MultiplexTransport) wrapPeer( cfg.onPeerError, cfg.mlc, PeerMetrics(cfg.metrics), + WithPeerTracer(mt.tracer), ) return p diff --git a/p2p/transport_test.go b/p2p/transport_test.go index adaab39955..151ac7edf7 100644 --- a/p2p/transport_test.go +++ b/p2p/transport_test.go @@ -13,6 +13,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/protoio" "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/pkg/trace" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -30,7 +31,7 @@ func newMultiplexTransport( nodeKey NodeKey, ) *MultiplexTransport { return NewMultiplexTransport( - nodeInfo, nodeKey, conn.DefaultMConnConfig(), + nodeInfo, nodeKey, conn.DefaultMConnConfig(), trace.NoOpTracer(), ) } diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index 0df5a24cfd..cf7f9a4aa8 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -299,6 +299,7 @@ type statusInfo struct { } func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { + //nolint:goconst message := "\r\n" + "" diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go index 7797aa044b..1c309b6fdf 100644 --- a/pkg/consts/consts.go +++ b/pkg/consts/consts.go @@ -41,5 +41,7 @@ var ( NewBaseHashFunc = sha256.New // DataCommitmentBlocksLimit is the limit to the number of blocks we can generate a data commitment for. + // Deprecated: this is no longer used as we're moving towards Blobstream X. However, we're leaving it + // here for backwards compatibility purpose until it's removed in the next breaking release. 
DataCommitmentBlocksLimit = 1000 ) diff --git a/pkg/trace/README.md b/pkg/trace/README.md index d883e1b413..dfccaa0544 100644 --- a/pkg/trace/README.md +++ b/pkg/trace/README.md @@ -1,136 +1,102 @@ -# trace: push arbitrary trace level data to an influxdb instance +# trace package -This package has code to create a client that can be used to push events to an -influxdb instance. It is used to collect trace data from many different nodes in -a network. If there is no URL in the config.toml, then the underlying client is -nil and no points will be written. The provided chainID and nodeID are used to -tag all points. The underlying client is exposed to allow for custom writes, but -the WritePoint method should be used for most cases, as it enforces the schema. +The `trace` package provides a decently fast way to store traces locally. -## Usage and Schema +## Usage -To use this package, first create a new client using the `NewClient` function, -then pass that client to the relevant components that need to push events. After -that, you can use the `WritePoint` method to push events to influxdb. In the below -example, we're pushing a point in the consensus reactor to measure exactly when -each step of consensus is reached for each node. +To enable the local tracer, add the following to the config.toml file: -```go -client.WritePoint(RoundStateTable, map[string]interface{}{ - HeightFieldKey: height, - RoundFieldKey: round, - StepFieldKey: step.String(), -}) -``` - -Using this method enforces the typical schema, where we are tagging (aka -indexing) each point by the chain-id and the node-id, then adding the local time -of the creation of the event. If you need to push a custom point, you can use -the underlying client directly. See `influxdb2.WriteAPI` for more details. - -### Schema - -All points in influxdb are divided into a key value pair per field. These kvs -are indexed first by a "measurement", which is used as a "table" in other dbs. 
-Additional indexes can also be added, we're using the chain-id and node-id here. -This allows for us to quickly query for trace data for a specific chain and/or -node. - -```flux -from(bucket: "e2e") - |> range(start: -1h) - |> filter( - fn: (r) => r["_measurement"] == "consensus_round_state" - and r.chain_id == "ci-YREG8X" - and r.node_id == "0b529c309608172a29c49979394734260b42acfb" - ) -``` +```toml +# The tracer to use for collecting trace data. +trace_type = "local" -We can easily retrieve all fields in a relatively standard table format by using -the pivot `fluxQL` command. +# The size of the batches that are sent to the database. +trace_push_batch_size = 1000 -```flux -from(bucket: "mocha") - |> range(start: -1h) - |> filter(fn: (r) => r._measurement == "consensus_round_state") - |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") +# The list of tables that are updated when tracing. All available tables and +# their schema can be found in the pkg/trace/schema package. It is represented as a +# comma separated string. For example: "consensus_round_state,mempool_tx". +tracing_tables = "consensus_round_state,mempool_tx" ``` -### Querying Data Using Python - -Python can be used to quickly search for and isolate specific patterns. - -```python -from influxdb_client import InfluxDBClient -from influxdb_client.client.write_api import SYNCHRONOUS - -client = InfluxDBClient(url="http://your-influx-url:8086/", token="your-influx-token", org="celestia") +Trace data will now be stored to the `.celestia-app/data/traces` directory, and +each table is saved to that directory in the `table_name.jsonl` format. -query_api = client.query_api() +To read the contents of the file, open it and pass it to the DecodeFile function. This +returns all of the events in that file as a slice.
-def create_flux_table_query(start, bucket, measurement, filter_clause): - flux_table_query = f''' - from(bucket: "{bucket}") - |> range(start: {start}) - |> filter(fn: (r) => r._measurement == "{measurement}") - {filter_clause} - |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") - ''' - return flux_table_query - -query = create_flux_table_query("-1h", "mocha", "consenus_round_state", "") -result = query_api.query(query=query) +```go +events, err := DecodeFile[schema.MempoolTx](file) +if err != nil { + return err +} ``` -### Running a node with remote tracing on +### Pull Based Event Collection -Tracing will only occur if an influxdb URL in specified either directly in the -`config.toml` or as flags provided to the start sub command. +Pull based event collection is where external servers connect to and pull trace +data from the consensus node. -#### Configure in the `config.toml` +To use this, change the config.toml to store traces in the +.celestia-app/data/traces directory. ```toml -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] +# The tracer pull address specifies which address will be used for pull based +# event collection. If empty, the pull based server will not be started. +trace_pull_address = ":26661" +``` -... +To retrieve a table remotely using the pull based server, call the following +function: -# The URL of the influxdb instance to use for remote event -# collection. If empty, remote event collection is disabled. -influx_url = "http://your-influx-ip:8086/" +```go +err := GetTable("http://1.2.3.4:26661", "mempool_tx", "directory to store the file") +if err != nil { + return err +} +``` -# The influxdb token to use for remote event collection. -influx_token = "your-token" +This stores the data locally in the specified directory. -# The influxdb bucket to use for remote event collection. 
-influx_bucket = "e2e" -# The influxdb org to use for event remote collection. -influx_org = "celestia" +### Push Based Event Collection -# The size of the batches that are sent to the database. -influx_batch_size = 20 +Push based event collection is where the consensus node pushes trace data to an +external server. At the moment, this is just an S3 bucket. To use this, two options are available: +#### Using push config file -# The list of tables that are updated when tracing. All available tables and -# their schema can be found in the pkg/trace/schema package. -influx_tables = ["consensus_round_state", "mempool_tx", ] +Add the following to the config.toml file: +```toml +# TracePushConfig is the relative path of the push config. +# This second config contains credentials for where and how often to +# push trace data to. For example, if the config is next to this config, +# it would be "push_config.json". +trace_push_config = "{{ .Instrumentation.TracePushConfig }}" ``` -or +The push config file is a JSON file that should look like this: -```sh -celestia-appd start --influxdb-url=http://your-influx-ip:8086/ --influxdb-token="your-token" +```json +{ + "bucket": "bucket-name", + "region": "region", + "access_key": "", + "secret_key": "", + "push_delay": 60 // number of seconds to wait between intervals of pushing all files +} ``` -### e2e tests +#### Using environment variables for s3 bucket -To push events from e2e tests, we only need to specify the URL and the token via -the cli. 
+Alternatively, you can set the following environment variables: ```bash -cd test/e2e -make && ./build/runner -f ./networks/ci.toml --influxdb-url=http://your-influx-ip:8086/ --influxdb-token="your-token" +export TRACE_PUSH_BUCKET_NAME=bucket-name +export TRACE_PUSH_REGION=region +export TRACE_PUSH_ACCESS_KEY=access-key +export TRACE_PUSH_SECRET_KEY=secret-key +export TRACE_PUSH_DELAY=push-delay ``` + +`bucket_name`, `region`, `access_key`, `secret_key` and `push_delay` are the s3 bucket name, region, access key, secret key and the delay between pushes respectively. diff --git a/pkg/trace/buffered_file.go b/pkg/trace/buffered_file.go new file mode 100644 index 0000000000..9b228e3f9e --- /dev/null +++ b/pkg/trace/buffered_file.go @@ -0,0 +1,101 @@ +package trace + +import ( + "bufio" + "errors" + "io" + "os" + "sync" + "sync/atomic" +) + +// bufferedFile is a file that is being written to and read from. It is thread +// safe, however, when reading from the file, writes will be ignored. +type bufferedFile struct { + // reading protects the file from being written to while it is being read + // from. This is needed in addition to the mutex so that writes can + // be ignored while reading. + reading atomic.Bool + + // mut protects the buffered writer. + mut *sync.Mutex + + // file is the file that is being written to. + file *os.File + + // writer is the buffered writer that is writing to the file. + wr *bufio.Writer +} + +// newbufferedFile creates a new buffered file that writes to the given file. +func newbufferedFile(file *os.File) *bufferedFile { + return &bufferedFile{ + file: file, + wr: bufio.NewWriter(file), + reading: atomic.Bool{}, + mut: &sync.Mutex{}, + } +} + +// Write writes the given bytes to the file. If the file is currently being read +// from, the write will be lost.
+func (f *bufferedFile) Write(b []byte) (int, error) { + if f.reading.Load() { + return 0, nil + } + f.mut.Lock() + defer f.mut.Unlock() + return f.wr.Write(b) +} + +func (f *bufferedFile) startReading() error { + f.reading.Store(true) + f.mut.Lock() + defer f.mut.Unlock() + + err := f.wr.Flush() + if err != nil { + f.reading.Store(false) + return err + } + + _, err = f.file.Seek(0, io.SeekStart) + if err != nil { + f.reading.Store(false) + return err + } + + return nil +} + +func (f *bufferedFile) stopReading() error { + f.mut.Lock() + defer f.mut.Unlock() + _, err := f.file.Seek(0, io.SeekEnd) + f.reading.Store(false) + return err +} + +// File returns the underlying file with the seek point reset. The caller should +// not close the file. The caller must call the returned function when they are +// done reading from the file. This function resets the seek point to where it +// was being written to. +func (f *bufferedFile) File() (*os.File, func() error, error) { + if f.reading.Load() { + return nil, func() error { return nil }, errors.New("file is currently being read from") + } + err := f.startReading() + if err != nil { + return nil, func() error { return nil }, err + } + return f.file, f.stopReading, nil +} + +// Close closes the file. +func (f *bufferedFile) Close() error { + // set reading to true to prevent writes while closing the file. 
+ f.mut.Lock() + defer f.mut.Unlock() + f.reading.Store(true) + return f.file.Close() +} diff --git a/pkg/trace/client.go b/pkg/trace/client.go deleted file mode 100644 index 0ee70aebed..0000000000 --- a/pkg/trace/client.go +++ /dev/null @@ -1,155 +0,0 @@ -package trace - -import ( - "context" - "fmt" - "time" - - influxdb2 "github.com/influxdata/influxdb-client-go/v2" - "github.com/influxdata/influxdb-client-go/v2/api/write" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/log" -) - -const ( - NodeIDTag = "node_id" - ChainIDTag = "chain_id" -) - -// ClientConfigConfig is the influxdb client configuration used for -// collecting events. -type ClientConfigConfig struct { - // URL is the influxdb url. - URL string `mapstructure:"influx_url"` - // Token is the influxdb token. - Token string `mapstructure:"influx_token"` - // Org is the influxdb organization. - Org string `mapstructure:"influx_org"` - // Bucket is the influxdb bucket. - Bucket string `mapstructure:"influx_bucket"` - // BatchSize is the number of points to write in a single batch. - BatchSize int `mapstructure:"influx_batch_size"` -} - -// Client is an influxdb client that can be used to push events to influxdb. It -// is used to collect trace data from many different nodes in a network. If -// there is no URL in the config.toml, then the underlying client is nil and no -// points will be written. The provided chainID and nodeID are used to tag all -// points. The underlying client is exposed to allow for custom writes, but the -// WritePoint method should be used for most cases, as it enforces the schema. -type Client struct { - ctx context.Context - cancel context.CancelFunc - cfg *config.InstrumentationConfig - - // chainID is added as a tag all points - chainID string - - // nodeID is added as a tag all points - nodeID string - - // tables is a map from table name to the schema of that table that are - // configured to be collected. 
- tables map[string]struct{} - - // Client is the influxdb client. This field is nil if no connection is - // established. - Client influxdb2.Client -} - -// Stop closes the influxdb client. -func (c *Client) Stop() { - c.cancel() - if c.Client == nil { - return - } - writeAPI := c.Client.WriteAPI(c.cfg.InfluxOrg, c.cfg.InfluxBucket) - writeAPI.Flush() - c.Client.Close() -} - -// NewClient creates a new influxdb client using the provided config. If there -// is no URL configured, then the underlying client will be nil, and each -// attempt to write a point will do nothing. The provided chainID and nodeID are -// used to tag all points. -func NewClient(cfg *config.InstrumentationConfig, logger log.Logger, chainID, nodeID string) (*Client, error) { - ctx, cancel := context.WithCancel(context.Background()) - cli := &Client{ - cfg: cfg, - Client: nil, - ctx: ctx, - cancel: cancel, - chainID: chainID, - nodeID: nodeID, - tables: sliceToMap(cfg.InfluxTables), - } - if cfg.InfluxURL == "" { - return cli, nil - } - cli.Client = influxdb2.NewClientWithOptions( - cfg.InfluxURL, - cfg.InfluxToken, - influxdb2.DefaultOptions(). - SetBatchSize(uint(cfg.InfluxBatchSize)), - ) - ctx, cancel = context.WithTimeout(ctx, 3*time.Second) - defer cancel() - alive, err := cli.Client.Ping(ctx) - if err != nil { - return nil, err - } - if !alive { - return nil, fmt.Errorf("failure to ping configured influxdb: %s", cfg.InfluxURL) - } - logger.Info("connected to influxdb", "url", cfg.InfluxURL) - go cli.logErrors(logger) - return cli, nil -} - -// logErrors empties the writeAPI error channel and logs any errors. -func (c *Client) logErrors(logger log.Logger) { - writeAPI := c.Client.WriteAPI(c.cfg.InfluxOrg, c.cfg.InfluxBucket) - for { - select { - case err := <-writeAPI.Errors(): - logger.Error("event collector: influxdb write error", "err", err) - case <-c.ctx.Done(): - return - } - } -} - -// IsCollecting returns true if the client is collecting events. 
-func (c *Client) IsCollecting(table string) bool { - if c.Client == nil { - return false - } - _, has := c.tables[table] - return has -} - -// WritePoint async writes a point to influxdb. To enforce the schema, it -// automatically adds the chain_id and node_id tags, along with setting the -// timestamp to the current time. If the underlying client is nil, it does -// nothing. The "table" arg is used as the influxdb "measurement" for the point. -// If other tags are needed, use WriteCustomPoint. -func (c *Client) WritePoint(table string, fields map[string]interface{}) { - if !c.IsCollecting(table) { - return - } - writeAPI := c.Client.WriteAPI(c.cfg.InfluxOrg, c.cfg.InfluxBucket) - tags := map[string]string{ - NodeIDTag: c.nodeID, - ChainIDTag: c.chainID, - } - p := write.NewPoint(table, tags, fields, time.Now()) - writeAPI.WritePoint(p) -} - -func sliceToMap(tables []string) map[string]struct{} { - m := make(map[string]struct{}) - for _, s := range tables { - m[s] = struct{}{} - } - return m -} diff --git a/pkg/trace/decoder.go b/pkg/trace/decoder.go new file mode 100644 index 0000000000..abf24f4006 --- /dev/null +++ b/pkg/trace/decoder.go @@ -0,0 +1,34 @@ +package trace + +import ( + "bufio" + "encoding/json" + "io" + "os" +) + +// DecodeFile reads a file and decodes it into a slice of events via +// scanning. The table parameter is used to determine the type of the events. +// The file should be a jsonl file. The generic here are passed to the event +// type. 
+func DecodeFile[T any](f *os.File) ([]Event[T], error) { + var out []Event[T] + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + var e Event[T] + if err := json.Unmarshal([]byte(line), &e); err != nil { + return nil, err + } + + out = append(out, e) + } + + return out, nil +} diff --git a/pkg/trace/doc.go b/pkg/trace/doc.go index 3d6521464e..27cf777c20 100644 --- a/pkg/trace/doc.go +++ b/pkg/trace/doc.go @@ -1,101 +1,2 @@ -/* -# trace: push arbitrary trace level data to an influxdb instance - -This package has code to create a client that can be used to push events to an -influxdb instance. It is used to collect trace data from many different nodes in -a network. If there is no URL in the config.toml, then the underlying client is -nil and no points will be written. The provided chainID and nodeID are used to -tag all points. The underlying client is exposed to allow for custom writes, but -the WritePoint method should be used for most cases, as it enforces the schema. - -## Usage and Schema - -To use this package, first create a new client using the `NewClient` function, -then pass that client to the relevant components that need to push events. After -that, you can use the `WritePoint` method to push events to influxdb. In the below -example, we're pushing a point in the consensus reactor to measure exactly when -each step of consensus is reached for each node. - -```go - - if cs.traceClient.IsCollecting() { - cs.traceClient.WritePoint("consensus", map[string]interface{}{ - "roundData": []interface{}{rs.Height, rs.Round, rs.Step}, - }) - } - -``` - -Using this method enforces the typical schema, where we are tagging (aka -indexing) each point by the chain-id and the node-id, then adding the local time -of the creation of the event. If you need to push a custom point, you can use -the underlying client directly. See influxdb2.WriteAPI for more details. 
- -### Schema - -All points in influxdb are divided into a key value pair per field. These kvs -are indexed first by a "measurement", which is used as a "table" in other dbs. -Additional indexes can also be added, we're using the chain-id and node-id here. -This allows for us to quickly query for trace data for a specific chain and/or -node. - -```flux -from(bucket: "e2e") - - |> range(start: -1h) - |> filter( - fn: (r) => r["_measurement"] == "consensus" - and r.chain_id == "ci-YREG8X" - and r.node_id == "0b529c309608172a29c49979394734260b42acfb" - ) - -``` - -### Running a node with remote tracing on - -Tracing will only occur if an influxdb URL in specified either directly in the -`config.toml` or as flags provided to the start sub command. - -configure in the config.toml - -```toml -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -... - -# The URL of the influxdb instance to use for remote event -# collection. If empty, remote event collection is disabled. -influx_url = "http://your-influx-ip:8086/" - -# The influxdb token to use for remote event collection. -influx_token = "your-token" - -# The influxdb bucket to use for remote event collection. -influx_bucket = "e2e" - -# The influxdb org to use for event remote collection. -influx_org = "celestia" - -# The size of the batches that are sent to the database. -influx_batch_size = 20 -``` - -or -```sh -celestia-appd start --influxdb-url=http://your-influx-ip:8086/ --influxdb-token="your-token" -``` - -### e2e tests - -To push events from e2e tests, we only need to specify the URL and the token via -the cli. 
- -```bash -cd test/e2e -make && ./build/runner -f ./networks/ci.toml --influxdb-url=http://your-influx-ip:8086/ --influxdb-token="your-token" -``` -*/ +/**/ package trace diff --git a/pkg/trace/fileserver.go b/pkg/trace/fileserver.go new file mode 100644 index 0000000000..21747d5902 --- /dev/null +++ b/pkg/trace/fileserver.go @@ -0,0 +1,331 @@ +package trace + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "mime" + "mime/multipart" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +func (lt *LocalTracer) getTableHandler() http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Parse the request to get the data + if err := r.ParseForm(); err != nil { + http.Error(w, "Failed to parse form", http.StatusBadRequest) + return + } + + inputString := r.FormValue("table") + if inputString == "" { + http.Error(w, "No data provided", http.StatusBadRequest) + return + } + + f, done, err := lt.readTable(inputString) + if err != nil { + http.Error(w, fmt.Sprintf("failed to read table: %v", err), http.StatusInternalServerError) + return + } + defer done() //nolint:errcheck + + // Use the pump function to continuously read from the file and write to + // the response writer + reader, writer := pump(inputString, bufio.NewReader(f)) + defer reader.Close() + + // Set the content type to the writer's form data content type + w.Header().Set("Content-Type", writer.FormDataContentType()) + + // Copy the data from the reader to the response writer + if _, err := io.Copy(w, reader); err != nil { + http.Error(w, "Failed to send data", http.StatusInternalServerError) + return + } + } +} + +// pump continuously reads from a bufio.Reader and writes to a multipart.Writer. 
+// It returns the reader end of the pipe and the writer for consumption by the +// server. +func pump(table string, br *bufio.Reader) (*io.PipeReader, *multipart.Writer) { + r, w := io.Pipe() + m := multipart.NewWriter(w) + + go func( + table string, + m *multipart.Writer, + w *io.PipeWriter, + br *bufio.Reader, + ) { + defer w.Close() + defer m.Close() + + part, err := m.CreateFormFile("filename", table+".jsonl") + if err != nil { + return + } + + if _, err = io.Copy(part, br); err != nil { + return + } + + }(table, m, w, br) + + return r, m +} + +func (lt *LocalTracer) servePullData() { + mux := http.NewServeMux() + mux.HandleFunc("/get_table", lt.getTableHandler()) + err := http.ListenAndServe(lt.cfg.Instrumentation.TracePullAddress, mux) //nolint:gosec + if err != nil { + lt.logger.Error("trace pull server failure", "err", err) + } + lt.logger.Info("trace pull server started", "address", lt.cfg.Instrumentation.TracePullAddress) +} + +// GetTable downloads a table from the server and saves it to the given directory. It uses a multipart +// response to download the file. 
+func GetTable(serverURL, table, dirPath string) error { + data := url.Values{} + data.Set("table", table) + + serverURL = serverURL + "/get_table" + + resp, err := http.PostForm(serverURL, data) //nolint:gosec + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + _, params, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return err + } + + boundary, ok := params["boundary"] + if !ok { + panic("Not a multipart response") + } + + err = os.MkdirAll(dirPath, 0755) + if err != nil { + return err + } + + outputFile, err := os.Create(path.Join(dirPath, table+".jsonl")) + if err != nil { + return err + } + defer outputFile.Close() + + reader := multipart.NewReader(resp.Body, boundary) + + for { + part, err := reader.NextPart() + if err == io.EOF { + break // End of multipart + } + if err != nil { + return err + } + + contentDisposition, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition")) + if err != nil { + return err + } + + if contentDisposition == "form-data" && params["filename"] != "" { + _, err = io.Copy(outputFile, part) + if err != nil { + return err + } + } + + part.Close() + } + + return nil +} + +// S3Config is a struct that holds the configuration for an S3 bucket. +type S3Config struct { + BucketName string `json:"bucket_name"` + Region string `json:"region"` + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + // PushDelay is the time in seconds to wait before pushing the file to S3. + // If this is 0, it defaults is used. + PushDelay int64 `json:"push_delay"` +} + +// readS3Config reads an S3Config from a file in the given directory. 
+func readS3Config(dir string) (S3Config, error) { + cfg := S3Config{} + f, err := os.Open(filepath.Join(dir, "s3.json")) + if errors.Is(err, os.ErrNotExist) { + return cfg, nil + } + if err != nil { + return cfg, err + } + defer f.Close() + err = json.NewDecoder(f).Decode(&cfg) + if cfg.PushDelay == 0 { + cfg.PushDelay = 60 + } + return cfg, err +} + +// PushS3 pushes a file to an S3 bucket using the given S3Config. It uses the +// chainID and the nodeID to organize the files in the bucket. The directory +// structure is chainID/nodeID/table.jsonl . +func PushS3(chainID, nodeID string, s3cfg S3Config, f *os.File) error { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(s3cfg.Region), + Credentials: credentials.NewStaticCredentials( + s3cfg.AccessKey, + s3cfg.SecretKey, + "", + ), + HTTPClient: &http.Client{ + Timeout: time.Duration(15) * time.Second, + }, + }, + ) + if err != nil { + return err + } + + s3Svc := s3.New(sess) + + key := fmt.Sprintf("%s/%s/%s", chainID, nodeID, filepath.Base(f.Name())) + + _, err = s3Svc.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(s3cfg.BucketName), + Key: aws.String(key), + Body: f, + }) + + return err +} + +func (lt *LocalTracer) pushLoop() { + for { + time.Sleep(time.Second * time.Duration(lt.s3Config.PushDelay)) + err := lt.PushAll() + if err != nil { + lt.logger.Error("failed to push tables", "error", err) + } + } +} + +func (lt *LocalTracer) PushAll() error { + for table := range lt.fileMap { + f, done, err := lt.readTable(table) + if err != nil { + return err + } + for i := 0; i < 3; i++ { + err = PushS3(lt.chainID, lt.nodeID, lt.s3Config, f) + if err == nil { + break + } + lt.logger.Error("failed to push table", "table", table, "error", err) + time.Sleep(time.Second * time.Duration(rand.Intn(3))) //nolint:gosec + } + err = done() + if err != nil { + return err + } + } + return nil +} + +// S3Download downloads files that match some prefix from an S3 bucket to a +// local directory dst. 
+func S3Download(dst, prefix string, cfg S3Config) error { + // Ensure local directory structure exists + err := os.MkdirAll(dst, os.ModePerm) + if err != nil { + return err + } + + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(cfg.Region), + Credentials: credentials.NewStaticCredentials( + cfg.AccessKey, + cfg.SecretKey, + "", + ), + }, + ) + if err != nil { + return err + } + + s3Svc := s3.New(sess) + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(cfg.BucketName), + Prefix: aws.String(prefix), + Delimiter: aws.String(""), + } + + err = s3Svc.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool { + for _, content := range page.Contents { + localFilePath := filepath.Join(dst, prefix, strings.TrimPrefix(*content.Key, prefix)) + fmt.Printf("Downloading %s to %s\n", *content.Key, localFilePath) + + // Create the directories in the path + if err := os.MkdirAll(filepath.Dir(localFilePath), os.ModePerm); err != nil { + return false + } + + // Create a file to write the S3 Object contents to. + f, err := os.Create(localFilePath) + if err != nil { + return false + } + + resp, err := s3Svc.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(cfg.BucketName), + Key: aws.String(*content.Key), + }) + if err != nil { + f.Close() + continue + } + defer resp.Body.Close() + + // Copy the contents of the S3 object to the local file + if _, err := io.Copy(f, resp.Body); err != nil { + return false + } + + fmt.Printf("Successfully downloaded %s to %s\n", *content.Key, localFilePath) + f.Close() + } + return !lastPage // continue paging + }) + return err +} diff --git a/pkg/trace/flags.go b/pkg/trace/flags.go index 5d8b2a44ad..6f17eebd27 100644 --- a/pkg/trace/flags.go +++ b/pkg/trace/flags.go @@ -1,10 +1,10 @@ package trace const ( - FlagInfluxDBURL = "influxdb-url" - FlagInfluxDBToken = "influxdb-token" - FlagInfluxDBURLDescription = "URL of the InfluxDB instance to use for arbitrary data collection. 
If not specified, data will not be collected" - FlagInfluxDBTokenDescription = "Token to use when writing to the InfluxDB instance. Must be specified if 'influxdb-url' is specified" //nolint:gosec + FlagTracePushConfig = "trace-push-url" + FlagTracePullAddress = "trace-pull-address" + FlagTracePushConfigDescription = "URL of the trace push server" + FlagTracePullAddressDescription = "address to listen on for pulling trace data" FlagPyroscopeURL = "pyroscope-url" FlagPyroscopeURLDescription = "URL of the Pyroscope instance to use for continuous profiling. If not specified, profiling will not be enabled" diff --git a/pkg/trace/local_tracer.go b/pkg/trace/local_tracer.go new file mode 100644 index 0000000000..0d48515eda --- /dev/null +++ b/pkg/trace/local_tracer.go @@ -0,0 +1,241 @@ +package trace + +import ( + "encoding/json" + "fmt" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + PushBucketName = "TRACE_PUSH_BUCKET_NAME" + PushRegion = "TRACE_PUSH_REGION" + PushAccessKey = "TRACE_PUSH_ACCESS_KEY" + PushKey = "TRACE_PUSH_SECRET_KEY" + PushDelay = "TRACE_PUSH_DELAY" +) + +// Event wraps some trace data with metadata that dictates the table and things +// like the chainID and nodeID. +type Event[T any] struct { + ChainID string `json:"chain_id"` + NodeID string `json:"node_id"` + Table string `json:"table"` + Timestamp time.Time `json:"timestamp"` + Msg T `json:"msg"` +} + +// NewEvent creates a new Event with the given chainID, nodeID, table, and msg. +// It adds the current time as the timestamp. +func NewEvent[T any](chainID, nodeID, table string, msg T) Event[T] { + return Event[T]{ + ChainID: chainID, + NodeID: nodeID, + Table: table, + Msg: msg, + Timestamp: time.Now(), + } +} + +// LocalTracer saves all of the events passed to the retuen channel to files +// based on their "type" (a string field in the event). Each type gets its own +// file. 
The internals are purposefully not *explicitly* thread safe to avoid the +// overhead of locking with each event save. Only pass events to the returned +// channel. Call CloseAll to close all open files. +type LocalTracer struct { + chainID, nodeID string + logger log.Logger + cfg *config.Config + s3Config S3Config + + // fileMap maps tables to their open files files are threadsafe, but the map + // is not. Therefore don't create new files after initialization to remain + // threadsafe. + fileMap map[string]*bufferedFile + // canal is a channel for all events that are being written. It acts as an + // extra buffer to avoid blocking the caller when writing to files. + canal chan Event[Entry] +} + +// NewLocalTracer creates a struct that will save all of the events passed to +// the retuen channel to files based on their "table" (a string field in the +// event). Each type gets its own file. The internal are purposefully not thread +// safe to avoid the overhead of locking with each event save. Only pass events +// to the returned channel. Call CloseAll to close all open files. Goroutine to +// save events is started in this function. 
+func NewLocalTracer(cfg *config.Config, logger log.Logger, chainID, nodeID string) (*LocalTracer, error) { + fm := make(map[string]*bufferedFile) + p := path.Join(cfg.RootDir, "data", "traces") + for _, table := range splitAndTrimEmpty(cfg.Instrumentation.TracingTables, ",", " ") { + fileName := fmt.Sprintf("%s/%s.jsonl", p, table) + err := os.MkdirAll(p, 0700) + if err != nil { + return nil, fmt.Errorf("failed to create directory %s: %w", p, err) + } + file, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, fmt.Errorf("failed to open or create file %s: %w", fileName, err) + } + fm[table] = newbufferedFile(file) + } + + lt := &LocalTracer{ + fileMap: fm, + cfg: cfg, + canal: make(chan Event[Entry], cfg.Instrumentation.TraceBufferSize), + chainID: chainID, + nodeID: nodeID, + logger: logger, + } + + go lt.drainCanal() + if cfg.Instrumentation.TracePullAddress != "" { + logger.Info("starting pull server", "address", cfg.Instrumentation.TracePullAddress) + go lt.servePullData() + } + + if cfg.Instrumentation.TracePushConfig != "" { + s3Config, err := readS3Config(path.Join(cfg.RootDir, "config", cfg.Instrumentation.TracePushConfig)) + if err != nil { + return nil, fmt.Errorf("failed to read s3 config: %w", err) + } + lt.s3Config = s3Config + go lt.pushLoop() + } else if s3Config, err := GetPushConfigFromEnv(); err == nil { + lt.s3Config = s3Config + go lt.pushLoop() + } + + return lt, nil +} + +// GetPushConfigFromEnv reads the required environment variables to push trace +func GetPushConfigFromEnv() (S3Config, error) { + bucketName := os.Getenv(PushBucketName) + region := os.Getenv(PushRegion) + accessKey := os.Getenv(PushAccessKey) + secretKey := os.Getenv(PushKey) + pushDelay, err := strconv.ParseInt(os.Getenv(PushDelay), 10, 64) + if err != nil { + return S3Config{}, err + } + if bucketName == "" || region == "" || accessKey == "" || secretKey == "" { + return S3Config{}, fmt.Errorf("missing required environment 
variables") + } + var s3Config = S3Config{ + BucketName: bucketName, + Region: region, + AccessKey: accessKey, + SecretKey: secretKey, + PushDelay: pushDelay, + } + return s3Config, nil +} + +func (lt *LocalTracer) Write(e Entry) { + if !lt.IsCollecting(e.Table()) { + return + } + lt.canal <- NewEvent(lt.chainID, lt.nodeID, e.Table(), e) +} + +// ReadTable returns a file for the given table. If the table is not being +// collected, an error is returned. The caller should not close the file. +func (lt *LocalTracer) readTable(table string) (*os.File, func() error, error) { + bf, has := lt.getFile(table) + if !has { + return nil, func() error { return nil }, fmt.Errorf("table %s not found", table) + } + + return bf.File() +} + +func (lt *LocalTracer) IsCollecting(table string) bool { + _, has := lt.getFile(table) + return has +} + +// getFile gets a file for the given type. This method is purposely +// not thread-safe to avoid the overhead of locking with each event save. +func (lt *LocalTracer) getFile(table string) (*bufferedFile, bool) { + f, has := lt.fileMap[table] + return f, has +} + +// saveEventToFile marshals an Event into JSON and appends it to a file named after the event's Type. +func (lt *LocalTracer) saveEventToFile(event Event[Entry]) error { + file, has := lt.getFile(event.Table) + if !has { + return fmt.Errorf("table %s not found", event.Table) + } + + eventJSON, err := json.Marshal(event) + if err != nil { + return fmt.Errorf("failed to marshal event: %v", err) + } + + if _, err := file.Write(append(eventJSON, '\n')); err != nil { + return fmt.Errorf("failed to write event to file: %v", err) + } + + return nil +} + +// draincanal takes a variadic number of channels of Event pointers and drains them into files. +func (lt *LocalTracer) drainCanal() { + // purposefully do not lock, and rely on the channel to provide sync + // actions, to avoid overhead of locking with each event save. 
+ for ev := range lt.canal { + if err := lt.saveEventToFile(ev); err != nil { + lt.logger.Error("failed to save event to file", "error", err) + } + } +} + +// Stop optionally uploads and closes all open files. +func (lt *LocalTracer) Stop() { + if lt.s3Config.SecretKey != "" { + lt.logger.Info("pushing all tables before stopping") + err := lt.PushAll() + if err != nil { + lt.logger.Error("failed to push tables", "error", err) + } + } + + for _, file := range lt.fileMap { + err := file.Close() + if err != nil { + lt.logger.Error("failed to close file", "error", err) + } + } +} + +// splitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +// +// NOTE: this is copy pasted from the config package to avoid a circular +// dependency. See the function of the same name for tests. +func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/pkg/trace/local_tracer_test.go b/pkg/trace/local_tracer_test.go new file mode 100644 index 0000000000..68841a34b7 --- /dev/null +++ b/pkg/trace/local_tracer_test.go @@ -0,0 +1,183 @@ +package trace + +import ( + "fmt" + "io" + "net" + "os" + "path" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" +) + +const ( + // testEventTable is the table name for the testEvent struct. 
+ testEventTable = "testEvent" +) + +type testEvent struct { + City string `json:"city"` + Length int `json:"length"` +} + +func (c testEvent) Table() string { + return testEventTable +} + +// TestLocalTracerReadWrite tests the local client by writing some events, +// reading them back and comparing them, writing at the same time as reading. +func TestLocalTracerReadWrite(t *testing.T) { + port, err := getFreePort() + require.NoError(t, err) + client := setupLocalTracer(t, port) + + annecy := testEvent{"Annecy", 420} + paris := testEvent{"Paris", 420} + client.Write(annecy) + client.Write(paris) + + time.Sleep(100 * time.Millisecond) + + f, done, err := client.readTable(testEventTable) + require.NoError(t, err) + + // write at the same time as reading to test thread safety this test will be + // flakey if this is not being handled correctly. Since we're reading from + // the file, we expect these write to be ignored. + migenees := testEvent{"Migennes", 620} + pontivy := testEvent{"Pontivy", 720} + client.Write(migenees) + client.Write(pontivy) + + // wait to ensure that the write have been processed (and ignored in this case) + time.Sleep(100 * time.Millisecond) + + events, err := DecodeFile[testEvent](f) + require.NoError(t, err) + err = done() + require.NoError(t, err) + + // even though we've written twice, we expect only the first two events to be + // be written to the file. When reading the file, all writes are ignored. + require.GreaterOrEqual(t, len(events), 2) + require.Equal(t, annecy, events[0].Msg) + require.Equal(t, paris, events[1].Msg) + + // write again to the file and read it back this time, we expect the writes + // to be written since we've called the done() function. 
+ client.Write(migenees) + client.Write(pontivy) + + time.Sleep(100 * time.Millisecond) + + f, done, err = client.readTable(testEventTable) + require.NoError(t, err) + events, err = DecodeFile[testEvent](f) + require.NoError(t, err) + err = done() + require.NoError(t, err) + require.Len(t, events, 4) + require.Equal(t, migenees, events[2].Msg) + require.Equal(t, pontivy, events[3].Msg) +} + +// TestLocalTracerServerPull tests the pull portion of the server. +func TestLocalTracerServerPull(t *testing.T) { + port, err := getFreePort() + require.NoError(t, err) + client := setupLocalTracer(t, port) + + for i := 0; i < 5; i++ { + client.Write(testEvent{"Annecy", i}) + } + + // Wait for the server to start + time.Sleep(100 * time.Millisecond) + + // Test the server + newDir := t.TempDir() + + url := fmt.Sprintf("http://localhost:%d", port) + + // try to read a table that is not being collected. error expected. + err = GetTable(url, "canal", newDir) + require.Error(t, err) + + err = GetTable(url, testEventTable, newDir) + require.NoError(t, err) + + originalFile, done, err := client.readTable(testEventTable) + require.NoError(t, err) + originalBz, err := io.ReadAll(originalFile) + require.NoError(t, err) + err = done() + require.NoError(t, err) + + path := path.Join(newDir, testEventTable+".jsonl") + downloadedFile, err := os.Open(path) + require.NoError(t, err) + defer downloadedFile.Close() + + downloadedBz, err := io.ReadAll(downloadedFile) + require.NoError(t, err) + require.Equal(t, originalBz, downloadedBz) + + _, err = downloadedFile.Seek(0, 0) // reset the seek on the file to read it again + require.NoError(t, err) + events, err := DecodeFile[testEvent](downloadedFile) + require.NoError(t, err) + require.Len(t, events, 5) + for i := 0; i < 5; i++ { + require.Equal(t, i, events[i].Msg.Length) + } +} + +// TestReadPushConfigFromConfigFile tests reading the push config from the environment variables. 
+func TestReadPushConfigFromEnvVars(t *testing.T) { + os.Setenv(PushBucketName, "bucket") + os.Setenv(PushRegion, "region") + os.Setenv(PushAccessKey, "access") + os.Setenv(PushKey, "secret") + os.Setenv(PushDelay, "10") + + lt := setupLocalTracer(t, 0) + require.Equal(t, "bucket", lt.s3Config.BucketName) + require.Equal(t, "region", lt.s3Config.Region) + require.Equal(t, "access", lt.s3Config.AccessKey) + require.Equal(t, "secret", lt.s3Config.SecretKey) + require.Equal(t, int64(10), lt.s3Config.PushDelay) +} +func setupLocalTracer(t *testing.T, port int) *LocalTracer { + logger := log.NewNopLogger() + cfg := config.DefaultConfig() + cfg.SetRoot(t.TempDir()) + cfg.Instrumentation.TraceBufferSize = 100 + cfg.Instrumentation.TracingTables = testEventTable + cfg.Instrumentation.TracePullAddress = fmt.Sprintf(":%d", port) + + client, err := NewLocalTracer(cfg, logger, "test_chain", "test_node") + if err != nil { + t.Fatalf("failed to create local client: %v", err) + } + + return client +} + +// getFreePort returns a free port and optionally an error. +func getFreePort() (int, error) { + a, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", a) + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} diff --git a/pkg/trace/schema/consensus.go b/pkg/trace/schema/consensus.go index e22b658c32..6584429c2d 100644 --- a/pkg/trace/schema/consensus.go +++ b/pkg/trace/schema/consensus.go @@ -1,8 +1,6 @@ package schema import ( - cstypes "github.com/tendermint/tendermint/consensus/types" - "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/pkg/trace" "github.com/tendermint/tendermint/types" ) @@ -15,168 +13,237 @@ func ConsensusTables() []string { BlockPartsTable, BlockTable, VoteTable, + ConsensusStateTable, + ProposalTable, } } // Schema constants for the consensus round state tracing database. 
const ( // RoundStateTable is the name of the table that stores the consensus - // state traces. Follows this schema: - // - // | time | height | round | step | + // state traces. RoundStateTable = "consensus_round_state" - - // StepFieldKey is the name of the field that stores the consensus step. The - // value is a string. - StepFieldKey = "step" ) +// RoundState describes schema for the "consensus_round_state" table. +type RoundState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Step uint8 `json:"step"` +} + +// Table returns the table name for the RoundState struct. +func (r RoundState) Table() string { + return RoundStateTable +} + // WriteRoundState writes a tracing point for a tx using the predetermined -// schema for consensus state tracing. This is used to create a table in the following -// schema: -// -// | time | height | round | step | -func WriteRoundState(client *trace.Client, height int64, round int32, step cstypes.RoundStepType) { - client.WritePoint(RoundStateTable, map[string]interface{}{ - HeightFieldKey: height, - RoundFieldKey: round, - StepFieldKey: step.String(), - }) +// schema for consensus state tracing. +func WriteRoundState(client trace.Tracer, height int64, round int32, step uint8) { + client.Write(RoundState{Height: height, Round: round, Step: step}) } // Schema constants for the "consensus_block_parts" table. const ( // BlockPartsTable is the name of the table that stores the consensus block // parts. - // following schema: - // - // | time | height | round | index | peer | transfer type | BlockPartsTable = "consensus_block_parts" - - // BlockPartIndexFieldKey is the name of the field that stores the block - // part - BlockPartIndexFieldKey = "index" ) +// BlockPart describes schema for the "consensus_block_parts" table. 
+type BlockPart struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + Index int32 `json:"index"` + Catchup bool `json:"catchup"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` +} + +// Table returns the table name for the BlockPart struct. +func (b BlockPart) Table() string { + return BlockPartsTable +} + // WriteBlockPart writes a tracing point for a BlockPart using the predetermined -// schema for consensus state tracing. This is used to create a table in the -// following schema: -// -// | time | height | round | index | peer | transfer type | +// schema for consensus state tracing. func WriteBlockPart( - client *trace.Client, + client trace.Tracer, height int64, round int32, - peer p2p.ID, index uint32, - transferType string, + catchup bool, + peer string, + transferType TransferType, ) { - // this check is redundant to what is checked during WritePoint, although it + // this check is redundant to what is checked during client.Write, although it // is an optimization to avoid allocations from the map of fields. if !client.IsCollecting(BlockPartsTable) { return } - client.WritePoint(BlockPartsTable, map[string]interface{}{ - HeightFieldKey: height, - RoundFieldKey: round, - BlockPartIndexFieldKey: index, - PeerFieldKey: peer, - TransferTypeFieldKey: transferType, + client.Write(BlockPart{ + Height: height, + Round: round, + Index: int32(index), + Catchup: catchup, + Peer: peer, + TransferType: transferType, + }) +} + +// Schema constants for the consensus votes tracing database. +const ( + // VoteTable is the name of the table that stores the consensus + // voting traces. + VoteTable = "consensus_vote" +) + +// Vote describes schema for the "consensus_vote" table. 
+type Vote struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + VoteType string `json:"vote_type"` + VoteHeight int64 `json:"vote_height"` + VoteRound int32 `json:"vote_round"` + VoteMillisecondTimestamp int64 `json:"vote_unix_millisecond_timestamp"` + ValidatorAddress string `json:"vote_validator_address"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` +} + +func (v Vote) Table() string { + return VoteTable +} + +// WriteVote writes a tracing point for a vote using the predetermined +// schema for consensus vote tracing. +func WriteVote(client trace.Tracer, + height int64, // height of the current peer when it received/sent the vote + round int32, // round of the current peer when it received/sent the vote + vote *types.Vote, // vote received by the current peer + peer string, // the peer from which it received the vote or the peer to which it sent the vote + transferType TransferType, // download (received) or upload(sent) +) { + client.Write(Vote{ + Height: height, + Round: round, + VoteType: vote.Type.String(), + VoteHeight: vote.Height, + VoteRound: vote.Round, + VoteMillisecondTimestamp: vote.Timestamp.UnixMilli(), + ValidatorAddress: vote.ValidatorAddress.String(), + Peer: peer, + TransferType: transferType, }) } const ( // BlockTable is the name of the table that stores metadata about consensus blocks. - // following schema: - // - // | time | height | timestamp | BlockTable = "consensus_block" +) - // UnixMillisecondTimestampFieldKey is the name of the field that stores the timestamp in - // the last commit in unix milliseconds. - UnixMillisecondTimestampFieldKey = "unix_millisecond_timestamp" +// BlockSummary describes schema for the "consensus_block" table. 
+type BlockSummary struct { + Height int64 `json:"height"` + UnixMillisecondTimestamp int64 `json:"unix_millisecond_timestamp"` + TxCount int `json:"tx_count"` + SquareSize uint64 `json:"square_size"` + BlockSize int `json:"block_size"` + Proposer string `json:"proposer"` + LastCommitRound int32 `json:"last_commit_round"` +} - // TxCountFieldKey is the name of the field that stores the number of - // transactions in the block. - TxCountFieldKey = "tx_count" +func (b BlockSummary) Table() string { + return BlockTable +} - // SquareSizeFieldKey is the name of the field that stores the square size - // of the block. SquareSize is the number of shares in a single row or - // column of the origianl data square. - SquareSizeFieldKey = "square_size" +// WriteBlockSummary writes a tracing point for a block using the predetermined +func WriteBlockSummary(client trace.Tracer, block *types.Block, size int) { + client.Write(BlockSummary{ + Height: block.Height, + UnixMillisecondTimestamp: block.Time.UnixMilli(), + TxCount: len(block.Data.Txs), + SquareSize: block.SquareSize, + BlockSize: size, + Proposer: block.ProposerAddress.String(), + LastCommitRound: block.LastCommit.Round, + }) +} - // BlockSizeFieldKey is the name of the field that stores the size of - // the block data in bytes. - BlockSizeFieldKey = "block_size" +const ( + ConsensusStateTable = "consensus_state" +) - // ProposerFieldKey is the name of the field that stores the proposer of - // the block. - ProposerFieldKey = "proposer" +type ConsensusStateUpdateType string - // LastCommitRoundFieldKey is the name of the field that stores the round - // of the last commit. 
- LastCommitRoundFieldKey = "last_commit_round" +const ( + ConsensusNewValidBlock ConsensusStateUpdateType = "new_valid_block" + ConsensusNewRoundStep ConsensusStateUpdateType = "new_round_step" + ConsensusVoteSetBits ConsensusStateUpdateType = "vote_set_bits" + ConsensusVoteSet23Prevote ConsensusStateUpdateType = "vote_set_23_prevote" + ConsensusVoteSet23Precommit ConsensusStateUpdateType = "vote_set_23_precommit" + ConsensusHasVote ConsensusStateUpdateType = "has_vote" + ConsensusPOL ConsensusStateUpdateType = "pol" ) -func WriteBlock(client *trace.Client, block *types.Block, size int) { - client.WritePoint(BlockTable, map[string]interface{}{ - HeightFieldKey: block.Height, - UnixMillisecondTimestampFieldKey: block.Time.UnixMilli(), - TxCountFieldKey: len(block.Data.Txs), - SquareSizeFieldKey: block.SquareSize, - BlockSizeFieldKey: size, - ProposerFieldKey: block.ProposerAddress.String(), - LastCommitRoundFieldKey: block.LastCommit.Round, +type ConsensusState struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + UpdateType string `json:"update_type"` + Peer string `json:"peer"` + TransferType TransferType `json:"transfer_type"` + Data []string `json:"data,omitempty"` +} + +func (c ConsensusState) Table() string { + return ConsensusStateTable +} + +func WriteConsensusState( + client trace.Tracer, + height int64, + round int32, + peer string, + updateType ConsensusStateUpdateType, + transferType TransferType, + data ...string, +) { + client.Write(ConsensusState{ + Height: height, + Round: round, + Peer: peer, + UpdateType: string(updateType), + TransferType: transferType, + Data: data, }) } -// Schema constants for the consensus votes tracing database. const ( - // VoteTable is the name of the table that stores the consensus - // voting traces. 
Follows this schema: - // - // | time | height | round | vote_type | vote_height | vote_round - // | vote_block_id| vote_unix_millisecond_timestamp - // | vote_validator_address | vote_validator_index | peer - // | transfer_type | - VoteTable = "consensus_vote" - - VoteTypeFieldKey = "vote_type" - VoteHeightFieldKey = "vote_height" - VoteRoundFieldKey = "vote_round" - VoteBlockIDFieldKey = "vote_block_id" - VoteTimestampFieldKey = "vote_unix_millisecond_timestamp" - ValidatorAddressFieldKey = "vote_validator_address" - ValidatorIndexFieldKey = "vote_validator_index" + ProposalTable = "consensus_proposal" ) -// WriteVote writes a tracing point for a vote using the predetermined -// schema for consensus vote tracing. -// This is used to create a table in the following -// schema: -// -// | time | height | round | vote_type | vote_height | vote_round -// | vote_block_id| vote_unix_millisecond_timestamp -// | vote_validator_address | vote_validator_index | peer -// | transfer_type | -func WriteVote(client *trace.Client, - height int64, // height of the current peer when it received/sent the vote - round int32, // round of the current peer when it received/sent the vote - vote *types.Vote, // vote received by the current peer - peer p2p.ID, // the peer from which it received the vote or the peer to which it sent the vote - transferType string, // download (received) or upload(sent) +type Proposal struct { + Height int64 `json:"height"` + Round int32 `json:"round"` + PeerID string `json:"peer_id"` + TransferType TransferType `json:"transfer_type"` +} + +func (p Proposal) Table() string { + return ProposalTable +} + +func WriteProposal( + client trace.Tracer, + height int64, + round int32, + peerID string, + transferType TransferType, ) { - client.WritePoint(VoteTable, map[string]interface{}{ - HeightFieldKey: height, - RoundFieldKey: round, - VoteTypeFieldKey: vote.Type.String(), - VoteHeightFieldKey: vote.Height, - VoteRoundFieldKey: vote.Round, - VoteBlockIDFieldKey: 
vote.BlockID.Hash.String(), - VoteTimestampFieldKey: vote.Timestamp.UnixMilli(), - ValidatorAddressFieldKey: vote.ValidatorAddress.String(), - ValidatorIndexFieldKey: vote.ValidatorIndex, - PeerFieldKey: peer, - TransferTypeFieldKey: transferType, + client.Write(Proposal{ + Height: height, + Round: round, + PeerID: peerID, + TransferType: transferType, }) } diff --git a/pkg/trace/schema/mempool.go b/pkg/trace/schema/mempool.go index 4b570f6e2f..f2198fdf31 100644 --- a/pkg/trace/schema/mempool.go +++ b/pkg/trace/schema/mempool.go @@ -2,9 +2,7 @@ package schema import ( "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/pkg/trace" - "github.com/tendermint/tendermint/types" ) // MempoolTables returns the list of tables for mempool tracing. @@ -12,7 +10,6 @@ func MempoolTables() []string { return []string{ MempoolTxTable, MempoolPeerStateTable, - MempoolRejectedTable, } } @@ -20,49 +17,35 @@ func MempoolTables() []string { const ( // MempoolTxTable is the tracing "measurement" (aka table) for the mempool // that stores tracing data related to gossiping transactions. - // - // The schema for this table is: - // | time | peerID | tx size | tx hash | transfer type | mempool version | MempoolTxTable = "mempool_tx" +) - // TxFieldKey is the tracing field key for receiving for sending a - // tx. This should take the form of a tx hash as the value. - TxFieldKey = "tx" - - // SizeFieldKey is the tracing field key for the size of a tx. This - // should take the form of the size of the tx as the value. - SizeFieldKey = "size" - - // VersionFieldKey is the tracing field key for the version of the mempool. - // This is used to distinguish between versions of the mempool. - VersionFieldKey = "version" - - // V1VersionFieldValue is a tracing field value for the version of - // the mempool. This value is used by the "version" field key. 
- V1VersionFieldValue = "v1" +// MemPoolTx describes the schema for the "mempool_tx" table. +type MempoolTx struct { + TxHash string `json:"tx_hash"` + Peer string `json:"peer"` + Size int `json:"size"` + TransferType TransferType `json:"transfer_type"` +} - // CatVersionFieldValue is a tracing field value for the version of - // the mempool. This value is used by the "version" field key. - CatVersionFieldValue = "cat" -) +// Table returns the table name for the MempoolTx struct. +func (m MempoolTx) Table() string { + return MempoolTxTable +} // WriteMempoolTx writes a tracing point for a tx using the predetermined -// schema for mempool tracing. This is used to create a table in the following -// schema: -// -// | time | peerID | tx size | tx hash | transfer type | mempool version | -func WriteMempoolTx(client *trace.Client, peer p2p.ID, tx []byte, transferType, version string) { - // this check is redundant to what is checked during WritePoint, although it +// schema for mempool tracing. +func WriteMempoolTx(client trace.Tracer, peer string, txHash []byte, transferType TransferType) { + // this check is redundant to what is checked during client.Write, although it // is an optimization to avoid allocations from the map of fields. if !client.IsCollecting(MempoolTxTable) { return } - client.WritePoint(MempoolTxTable, map[string]interface{}{ - TxFieldKey: bytes.HexBytes(types.Tx(tx).Hash()).String(), - PeerFieldKey: peer, - SizeFieldKey: len(tx), - TransferTypeFieldKey: transferType, - VersionFieldKey: version, + client.Write(MempoolTx{ + TxHash: bytes.HexBytes(txHash).String(), + Peer: peer, + Size: len(txHash), + TransferType: transferType, }) } @@ -70,64 +53,48 @@ const ( // MempoolPeerState is the tracing "measurement" (aka table) for the mempool // that stores tracing data related to mempool state, specifically // the gossipping of "SeenTx" and "WantTx". 
- // - // The schema for this table is: - // | time | peerID | update type | mempool version | MempoolPeerStateTable = "mempool_peer_state" +) - // StateUpdateFieldKey is the tracing field key for state updates of the mempool. - StateUpdateFieldKey = "update" - - // SeenTxStateUpdateFieldValue is a tracing field value for the state - // update of the mempool. This value is used by the "update" field key. - SeenTxStateUpdateFieldValue = "seen_tx" - - // WantTxStateUpdateFieldValue is a tracing field value for the state - // update of the mempool. This value is used by the "update" field key. - WantTxStateUpdateFieldValue = "want_tx" - - // RemovedTxStateUpdateFieldValue is a tracing field value for the local - // state update of the mempool. This value is used by the "update" field - // key. - RemovedTxStateUpdateFieldValue = "removed_tx" +type MempoolStateUpdateType string - // AddedTxStateUpdateFieldValue is a tracing field value for the local state - // update of the mempool. This value is used by the "update" field key. - AddedTxStateUpdateFieldValue = "added_tx" +const ( + SeenTx MempoolStateUpdateType = "SeenTx" + WantTx MempoolStateUpdateType = "WantTx" + Unknown MempoolStateUpdateType = "Unknown" ) -// WriteMempoolPeerState writes a tracing point for the mempool state using -// the predetermined schema for mempool tracing. This is used to create a table -// in the following schema: -// -// | time | peerID | transfer type | state update | mempool version | -func WriteMempoolPeerState(client *trace.Client, peer p2p.ID, stateUpdate, transferType, version string) { - // this check is redundant to what is checked during WritePoint, although it - // is an optimization to avoid allocations from creating the map of fields. 
- if !client.IsCollecting(MempoolPeerStateTable) { - return - } - client.WritePoint(MempoolPeerStateTable, map[string]interface{}{ - PeerFieldKey: peer, - TransferTypeFieldKey: transferType, - StateUpdateFieldKey: stateUpdate, - VersionFieldKey: version, - }) +// MempoolPeerState describes the schema for the "mempool_peer_state" table. +type MempoolPeerState struct { + Peer string `json:"peer"` + StateUpdate MempoolStateUpdateType `json:"state_update"` + TxHash string `json:"tx_hash"` + TransferType TransferType `json:"transfer_type"` } -const ( - MempoolRejectedTable = "mempool_rejected" - ReasonFieldKey = "reason" -) +// Table returns the table name for the MempoolPeerState struct. +func (m MempoolPeerState) Table() string { + return MempoolPeerStateTable +} -// WriteMempoolRejected records why a transaction was rejected. -func WriteMempoolRejected(client *trace.Client, reason string) { - // this check is redundant to what is checked during WritePoint, although it +// WriteMempoolPeerState writes a tracing point for the mempool state using +// the predetermined schema for mempool tracing. +func WriteMempoolPeerState( + client trace.Tracer, + peer string, + stateUpdate MempoolStateUpdateType, + txHash []byte, + transferType TransferType, +) { + // this check is redundant to what is checked during client.Write, although it // is an optimization to avoid allocations from creating the map of fields. 
- if !client.IsCollecting(MempoolRejectedTable) { + if !client.IsCollecting(MempoolPeerStateTable) { return } - client.WritePoint(MempoolRejectedTable, map[string]interface{}{ - ReasonFieldKey: reason, + client.Write(MempoolPeerState{ + Peer: peer, + StateUpdate: stateUpdate, + TransferType: transferType, + TxHash: bytes.HexBytes(txHash).String(), }) } diff --git a/pkg/trace/schema/misc.go b/pkg/trace/schema/misc.go new file mode 100644 index 0000000000..638add7aae --- /dev/null +++ b/pkg/trace/schema/misc.go @@ -0,0 +1,42 @@ +package schema + +import "github.com/tendermint/tendermint/pkg/trace" + +const ( + ABCITable = "abci" +) + +// ABCIUpdate is an enum that represents the different types of ABCI +// trace data. +type ABCIUpdate string + +const ( + PrepareProposalStart ABCIUpdate = "prepare_proposal_start" + PrepareProposalEnd ABCIUpdate = "prepare_proposal_end" + ProcessProposalStart ABCIUpdate = "process_proposal_start" + ProcessProposalEnd ABCIUpdate = "process_proposal_end" + CommitStart ABCIUpdate = "commit_start" + CommitEnd ABCIUpdate = "commit_end" +) + +// ABCI describes schema for the "abci" table. +type ABCI struct { + TraceType string `json:"trace"` + Height int64 `json:"height"` + Round int32 `json:"round"` +} + +// Table returns the table name for the ABCI struct and fullfills the +// trace.Entry interface. +func (m ABCI) Table() string { + return ABCITable +} + +// WriteABCI writes a trace for an ABCI method. +func WriteABCI(client trace.Tracer, traceType ABCIUpdate, height int64, round int32) { + client.Write(ABCI{ + TraceType: string(traceType), + Height: height, + Round: round, + }) +} diff --git a/pkg/trace/schema/p2p.go b/pkg/trace/schema/p2p.go new file mode 100644 index 0000000000..e36da0d833 --- /dev/null +++ b/pkg/trace/schema/p2p.go @@ -0,0 +1,82 @@ +package schema + +import "github.com/tendermint/tendermint/pkg/trace" + +// P2PTables returns the list of tables that are used for p2p tracing. 
+func P2PTables() []string { + return []string{ + PeersTable, + PendingBytesTable, + ReceivedBytesTable, + } +} + +const ( + // PeerUpdateTable is the name of the table that stores the p2p peer + // updates. + PeersTable = "peers" +) + +// P2PPeerUpdate is an enum that represents the different types of p2p +// trace data. +type P2PPeerUpdate string + +const ( + // PeerJoin is the action for when a peer is connected. + PeerJoin P2PPeerUpdate = "connect" + // PeerDisconnect is the action for when a peer is disconnected. + PeerDisconnect P2PPeerUpdate = "disconnect" +) + +// PeerUpdate describes schema for the "peer_update" table. +type PeerUpdate struct { + PeerID string `json:"peer_id"` + Action string `json:"action"` + Reason string `json:"reason"` +} + +// Table returns the table name for the PeerUpdate struct. +func (p PeerUpdate) Table() string { + return PeersTable +} + +// WritePeerUpdate writes a tracing point for a peer update using the predetermined +// schema for p2p tracing. +func WritePeerUpdate(client trace.Tracer, peerID string, action P2PPeerUpdate, reason string) { + client.Write(PeerUpdate{PeerID: peerID, Action: string(action), Reason: reason}) +} + +const ( + PendingBytesTable = "pending_bytes" +) + +type PendingBytes struct { + PeerID string `json:"peer_id"` + Bytes map[byte]int `json:"bytes"` +} + +func (s PendingBytes) Table() string { + return PendingBytesTable +} + +func WritePendingBytes(client trace.Tracer, peerID string, bytes map[byte]int) { + client.Write(PendingBytes{PeerID: peerID, Bytes: bytes}) +} + +const ( + ReceivedBytesTable = "received_bytes" +) + +type ReceivedBytes struct { + PeerID string `json:"peer_id"` + Channel byte `json:"channel"` + Bytes int `json:"bytes"` +} + +func (s ReceivedBytes) Table() string { + return ReceivedBytesTable +} + +func WriteReceivedBytes(client trace.Tracer, peerID string, channel byte, bytes int) { + client.Write(ReceivedBytes{PeerID: peerID, Channel: channel, Bytes: bytes}) +} diff --git 
a/pkg/trace/schema/schema.go b/pkg/trace/schema/schema.go new file mode 100644 index 0000000000..c0f2316787 --- /dev/null +++ b/pkg/trace/schema/schema.go @@ -0,0 +1,42 @@ +package schema + +import ( + "strings" + + "github.com/tendermint/tendermint/config" +) + +func init() { + config.DefaultTracingTables = strings.Join(AllTables(), ",") +} + +func AllTables() []string { + tables := []string{} + tables = append(tables, MempoolTables()...) + tables = append(tables, ConsensusTables()...) + tables = append(tables, P2PTables()...) + tables = append(tables, ABCITable) + return tables +} + +const ( + Broadcast = "broadcast" +) + +type TransferType int + +const ( + Download TransferType = iota + Upload +) + +func (t TransferType) String() string { + switch t { + case Download: + return "download" + case Upload: + return "upload" + default: + return "unknown" + } +} diff --git a/pkg/trace/schema/schema_test.go b/pkg/trace/schema/schema_test.go new file mode 100644 index 0000000000..e93260d74d --- /dev/null +++ b/pkg/trace/schema/schema_test.go @@ -0,0 +1,17 @@ +package schema + +// Define a test struct with various field types and json tags +type TestStruct struct { + Name string `json:"name"` + Age int `json:"age"` + Email string `json:"email"` +} + +// Mock for a custom type with String method +type CustomType int + +// TestStructWithCustomType includes a field with a custom type having a String method +type TestStructWithCustomType struct { + ID int `json:"id"` + Type CustomType `json:"type"` +} diff --git a/pkg/trace/schema/tables.go b/pkg/trace/schema/tables.go deleted file mode 100644 index 11b106e02c..0000000000 --- a/pkg/trace/schema/tables.go +++ /dev/null @@ -1,42 +0,0 @@ -package schema - -import "github.com/tendermint/tendermint/config" - -func init() { - config.DefaultInfluxTables = AllTables() -} - -func AllTables() []string { - tables := []string{} - tables = append(tables, MempoolTables()...) - tables = append(tables, ConsensusTables()...) 
- return tables -} - -// General purpose schema constants used across multiple tables -const ( - // PeerFieldKey is the tracing field key for the peer that sent or - // received a tx. This should take the form of the peer's address as the - // value. - PeerFieldKey = "peer" - - // TransferTypeFieldKey is the tracing field key for the class of a tx - // and votes. - TransferTypeFieldKey = "transfer_type" - - // TransferTypeDownload is a tracing field value for receiving some - // data from a peer. This value is used by the "TransferType" field key. - TransferTypeDownload = "download" - - // TransferTypeUpload is a tracing field value for sending some data - // to a peer. This value is used by the "TransferType" field key. - TransferTypeUpload = "upload" - - // RoundFieldKey is the name of the field that stores the consensus round. - // The value is an int32. - RoundFieldKey = "round" - - // HeightFieldKey is the name of the field that stores the consensus height. - // The value is an int64. - HeightFieldKey = "height" -) diff --git a/pkg/trace/tracer.go b/pkg/trace/tracer.go new file mode 100644 index 0000000000..6c4be62c4c --- /dev/null +++ b/pkg/trace/tracer.go @@ -0,0 +1,48 @@ +package trace + +import ( + "errors" + "os" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" +) + +// Entry is an interface for all structs that are used to define the schema for +// traces. +type Entry interface { + // Table defines which table the struct belongs to. + Table() string +} + +// Tracer defines the methods for a client that can write and read trace data. 
+type Tracer interface { + Write(Entry) + IsCollecting(table string) bool + Stop() +} + +func NewTracer(cfg *config.Config, logger log.Logger, chainID, nodeID string) (Tracer, error) { + switch cfg.Instrumentation.TraceType { + case "local": + return NewLocalTracer(cfg, logger, chainID, nodeID) + case "noop": + return NoOpTracer(), nil + default: + logger.Error("unknown tracer type, using noop", "type", cfg.Instrumentation.TraceType) + return NoOpTracer(), nil + } +} + +func NoOpTracer() Tracer { + return &noOpTracer{} +} + +type noOpTracer struct{} + +func (n *noOpTracer) Write(_ Entry) {} +func (n *noOpTracer) ReadTable(_ string) (*os.File, error) { + return nil, errors.New("no-op tracer does not support reading") +} +func (n *noOpTracer) IsCollecting(_ string) bool { return false } +func (n *noOpTracer) Stop() {} diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 12fd717e8d..deb439745d 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -127,11 +127,11 @@ message RequestApplySnapshotChunk { } message RequestPrepareProposal { - // block_data is an array of transactions that will be included in a block, - // sent to the app for possible modifications. - // applications can not exceed the size of the data passed to it. + // BlockData is a slice of candidate transactions that may be included in a + // block. BlockData is sent to the application so that the application can + // filter and re-arrange the slice of candidate transactions. tendermint.types.Data block_data = 1; - // If an application decides to populate block_data with extra information, they can not exceed this value. + // BlockDataSize is the maximum size (in bytes) that BlockData should be. int64 block_data_size = 2; // chain_id is a unique identifier for the blockchain network this proposal // belongs to (e.g. mocha-1). 
diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 4a486049b8..ff0ab91fcf 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -568,6 +568,8 @@ func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctype return result, nil } +// ProveShares +// Deprecated: Use ProveSharesV2 instead. func (c *baseRPCClient) ProveShares( ctx context.Context, height uint64, @@ -587,6 +589,25 @@ func (c *baseRPCClient) ProveShares( return *result, nil } +func (c *baseRPCClient) ProveSharesV2( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + result := new(ctypes.ResultShareProof) + params := map[string]interface{}{ + "height": height, + "startShare": startShare, + "endShare": endShare, + } + _, err := c.caller.Call(ctx, "prove_shares_v2", params, result) + if err != nil { + return nil, err + } + return result, nil +} + func (c *baseRPCClient) TxSearch( ctx context.Context, query string, diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 3ddc2a4227..ceef3b223f 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -83,7 +83,10 @@ type SignClient interface { Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) + // ProveShares + // Deprecated: Use ProveSharesV2 instead. ProveShares(_ context.Context, height uint64, startShare uint64, endShare uint64) (types.ShareProof, error) + ProveSharesV2(_ context.Context, height uint64, startShare uint64, endShare uint64) (*ctypes.ResultShareProof, error) // TxSearch defines a method to search for a paginated set of transactions by // DeliverTx event search criteria. 
diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 91df5c684e..5c6ec079e3 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -210,6 +210,8 @@ func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Result return core.Tx(c.ctx, hash, prove) } +// ProveShares +// Deprecated: Use ProveSharesV2 instead. func (c *Local) ProveShares( ctx context.Context, height uint64, @@ -219,6 +221,15 @@ func (c *Local) ProveShares( return core.ProveShares(c.ctx, int64(height), startShare, endShare) } +func (c *Local) ProveSharesV2( + ctx context.Context, + height uint64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + return core.ProveSharesV2(c.ctx, int64(height), startShare, endShare) +} + func (c *Local) TxSearch( _ context.Context, query string, diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 09ebe391c2..059e479e3a 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -11,7 +11,6 @@ import ( "github.com/tendermint/tendermint/libs/bytes" cmtmath "github.com/tendermint/tendermint/libs/math" cmtquery "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/pkg/consts" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null" @@ -333,6 +332,10 @@ func EncodeDataRootTuple(height uint64, dataRoot [32]byte) ([]byte, error) { return append(paddedHeight, dataRoot[:]...), nil } +// dataCommitmentBlocksLimit The maximum number of blocks to be used to create a data commitment. +// It's a local parameter to protect the API from creating unnecessarily large commitments. +const dataCommitmentBlocksLimit = 10_000 // ~33 hours of blocks assuming 12-second blocks. 
+ // validateDataCommitmentRange runs basic checks on the asc sorted list of // heights that will be used subsequently in generating data commitments over // the defined set of heights. @@ -342,8 +345,8 @@ func validateDataCommitmentRange(start uint64, end uint64) error { } env := GetEnvironment() heightsRange := end - start - if heightsRange > uint64(consts.DataCommitmentBlocksLimit) { - return fmt.Errorf("the query exceeds the limit of allowed blocks %d", consts.DataCommitmentBlocksLimit) + if heightsRange > uint64(dataCommitmentBlocksLimit) { + return fmt.Errorf("the query exceeds the limit of allowed blocks %d", dataCommitmentBlocksLimit) } if heightsRange == 0 { return fmt.Errorf("cannot create the data commitments for an empty set of blocks") diff --git a/rpc/core/events.go b/rpc/core/events.go index ec11d68ae2..2e5f02b573 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -46,9 +46,6 @@ func Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, er if err != nil { return nil, err } - if sub == nil { - return nil, fmt.Errorf("env.EventBus.Subscribe() returned nil") - } closeIfSlow := env.Config.CloseOnSlowClient diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 7077275aaa..b2c8d69ee1 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -31,6 +31,7 @@ var Routes = map[string]*rpc.RPCFunc{ "check_tx": rpc.NewRPCFunc(CheckTx, "tx"), "tx": rpc.NewRPCFunc(Tx, "hash,prove", rpc.Cacheable()), "prove_shares": rpc.NewRPCFunc(ProveShares, "height,startShare,endShare"), + "prove_shares_v2": rpc.NewRPCFunc(ProveSharesV2, "height,startShare,endShare"), "data_root_inclusion_proof": rpc.NewRPCFunc(DataRootInclusionProof, "height,start,end"), "tx_search": rpc.NewRPCFunc(TxSearchMatchEvents, "query,prove,page,per_page,order_by,match_events"), "block_search": rpc.NewRPCFunc(BlockSearchMatchEvents, "query,page,per_page,order_by,match_events"), diff --git a/rpc/core/status.go b/rpc/core/status.go index c82dffa6d6..5c9c9e3408 100644 --- 
a/rpc/core/status.go +++ b/rpc/core/status.go @@ -53,7 +53,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { } result := &ctypes.ResultStatus{ - NodeInfo: env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo), + NodeInfo: GetNodeInfo(env, latestHeight), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, @@ -85,3 +85,27 @@ func validatorAtHeight(h int64) *types.Validator { _, val := vals.GetByAddress(privValAddress) return val } + +// GetNodeInfo returns the node info with the app version set to the latest app +// version from the state store. +// +// This function is necessary because upstream CometBFT does not support +// upgrading app versions for a running binary. Therefore the +// env.P2PTransport.NodeInfo.ProtocolVersion.App is expected to be set on node +// start-up and never updated. Celestia supports upgrading the app version for a +// running binary so the env.P2PTransport.NodeInfo.ProtocolVersion.App will be +// incorrect if a node upgraded app versions without restarting. This function +// corrects that issue by fetching the latest app version from the state store. +func GetNodeInfo(env *Environment, latestHeight int64) p2p.DefaultNodeInfo { + nodeInfo := env.P2PTransport.NodeInfo().(p2p.DefaultNodeInfo) + + consensusParams, err := env.StateStore.LoadConsensusParams(latestHeight) + if err != nil { + // use the default app version if we can't load the consensus params (i.e. 
height 0) + return nodeInfo + } + + // override the default app version with the latest app version + nodeInfo.ProtocolVersion.App = consensusParams.Version.AppVersion + return nodeInfo +} diff --git a/rpc/core/status_test.go b/rpc/core/status_test.go new file mode 100644 index 0000000000..2fed1a2d91 --- /dev/null +++ b/rpc/core/status_test.go @@ -0,0 +1,76 @@ +package core_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/rpc/core" + "github.com/tendermint/tendermint/state/mocks" +) + +func TestGetNodeInfo(t *testing.T) { + p2pTransport := mockTransport{} + stateStore := &mocks.Store{} + stateStore.On("LoadConsensusParams", int64(1)).Return(types.ConsensusParams{Version: types.VersionParams{AppVersion: 1}}, nil) + stateStore.On("LoadConsensusParams", int64(2)).Return(types.ConsensusParams{Version: types.VersionParams{AppVersion: 2}}, nil) + + type testCase struct { + name string + env *core.Environment + latestHeight int64 + want uint64 + } + testCases := []testCase{ + { + name: "want 1 when consensus params app version is 1", + env: &core.Environment{P2PTransport: p2pTransport, StateStore: stateStore}, + latestHeight: 1, + want: 1, + }, + { + name: "want 2 if consensus params app version is 2", + env: &core.Environment{P2PTransport: p2pTransport, StateStore: stateStore}, + latestHeight: 2, + want: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + nodeInfo := core.GetNodeInfo(tc.env, tc.latestHeight) + assert.Equal(t, tc.want, nodeInfo.ProtocolVersion.App) + }) + } +} + +// transport is copy + pasted from the core package because it isn't exported. 
+// https://github.com/celestiaorg/celestia-core/blob/640d115aec834609022c842b2497fc568df53692/rpc/core/env.go#L69-L73 +type transport interface { + Listeners() []string + IsListening() bool + NodeInfo() p2p.NodeInfo +} + +// mockTransport implements the transport interface. +var _ transport = (*mockTransport)(nil) + +type mockTransport struct{} + +func (m mockTransport) Listeners() []string { + return []string{} +} +func (m mockTransport) IsListening() bool { + return false +} + +func (m mockTransport) NodeInfo() p2p.NodeInfo { + return p2p.DefaultNodeInfo{ + ProtocolVersion: p2p.ProtocolVersion{ + P2P: 0, + Block: 0, + App: 0, + }, + } +} diff --git a/rpc/core/tx.go b/rpc/core/tx.go index c60beb17eb..cfe9ffa872 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -181,6 +181,7 @@ func proveTx(height int64, index uint32) (types.ShareProof, error) { // ProveShares creates an NMT proof for a set of shares to a set of rows. It is // end exclusive. +// Deprecated: Use ProveSharesV2 instead. func ProveShares( _ *rpctypes.Context, height int64, @@ -250,7 +251,22 @@ func TxStatus(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultTxStatus, error } // If the tx is not in the mempool, evicted, or committed, return unknown - return &ctypes.ResultTxStatus{Status: txStatusUnknown}, nil + return &ctypes.ResultTxStatus{Status: txStatusUnknown}, nil +} + +// ProveSharesV2 creates a proof for a set of shares to the data root. +// The range is end exclusive. 
+func ProveSharesV2( + ctx *rpctypes.Context, + height int64, + startShare uint64, + endShare uint64, +) (*ctypes.ResultShareProof, error) { + shareProof, err := ProveShares(ctx, height, startShare, endShare) + if err != nil { + return nil, err + } + return &ctypes.ResultShareProof{ShareProof: shareProof}, nil } func loadRawBlock(bs state.BlockStore, height int64) ([]byte, error) { diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 72ac6f347e..a15d528a64 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -281,3 +281,8 @@ type ResultEvent struct { Data types.TMEventData `json:"data"` Events map[string][]string `json:"events"` } + +// ResultShareProof is an API response that contains a ShareProof. +type ResultShareProof struct { + ShareProof types.ShareProof `json:"share_proof"` +} diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index fc493e4d91..7e80472601 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -1002,6 +1002,92 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /prove_shares: + get: + summary: Prove shares for a given share range. + description: | + Generates a proof of inclusion for a range of shares to the data root. + Note: shares are referenced by their range: startShare to endShare. + The share range is end exclusive. + Deprecated: Use '/prove_shares_v2' instead. 
+ operationId: prove_shares + tags: + - Info + parameters: + - in: query + name: height + description: The block height + schema: + type: integer + default: 1 + example: 1 + - in: query + name: startShare + description: The starting share index + schema: + type: integer + default: 0 + example: 0 + - in: query + name: endShare + description: The end exclusive ending share index + schema: + type: integer + default: 1 + example: 1 + responses: + '200': + description: Successfully retrieved the share proof + content: + application/json: + schema: + $ref: '#/components/schemas/ShareProof' + '500': + description: Internal server error + + /prove_shares_v2: + get: + summary: Prove shares for a given share range. + description: | + Generates a proof of inclusion for a range of shares to the data root. + Note: shares are referenced by their range: startShare to endShare. + The share range is end exclusive. + Replaces '/prove_shares' + operationId: prove_shares_v2 + tags: + - Info + parameters: + - in: query + name: height + description: The block height + schema: + type: integer + default: 1 + example: 1 + - in: query + name: startShare + description: The starting share index + schema: + type: integer + default: 0 + example: 0 + - in: query + name: endShare + description: The end exclusive ending share index + schema: + type: integer + default: 1 + example: 1 + responses: + '200': + description: Successfully retrieved the share proof + content: + application/json: + schema: + $ref: '#/components/schemas/ResultShareProof' + '500': + description: Internal server error + /data_commitment: get: summary: Generates a data commitment for a range of blocks @@ -2581,8 +2667,98 @@ components: tx: type: string example: 
"5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU=" + proof: + type: object + $ref: '#/components/schemas/ShareProof' + nullable: true + description: Optional proof of the transaction, provided only when requested. type: object + ResultShareProof: + type: object + properties: + share_proof: + $ref: '#/components/schemas/ShareProof' + description: API proof response of a set of shares. + ShareProof: + type: object + properties: + data: + type: array + items: + type: string + format: byte + description: The raw shares that are being proven. + shareProofs: + type: array + items: + $ref: '#/components/schemas/NMTProof' + description: NMT proofs that the shares in Data exist in a set of rows. + namespaceID: + type: string + format: byte + description: The namespace id of the shares being proven. + rowProof: + $ref: '#/components/schemas/RowProof' + namespaceVersion: + type: integer + format: uint32 + description: The version of the namespace used for verification. + NMTProof: + type: object + properties: + start: + type: integer + format: int32 + end: + type: integer + format: int32 + nodes: + type: array + items: + type: string + format: byte + description: Nodes used to verify the proof. + leaf_hash: + type: string + format: byte + description: Leaf hash necessary for proof of absence, if applicable. 
+ RowProof: + type: object + properties: + rowRoots: + type: array + items: + type: string + format: byte + proofs: + type: array + items: + $ref: '#/components/schemas/Proof' + startRow: + type: integer + format: uint32 + endRow: + type: integer + format: uint32 + Proof: + type: object + description: Binary merkle proof + properties: + total: + type: integer + format: int64 + index: + type: integer + format: int64 + leafHash: + type: string + format: byte + aunts: + type: array + items: + type: string + format: byte ABCIInfoResponse: type: object required: diff --git a/scripts/proto-gen.sh b/scripts/proto-gen.sh index cb8261fdfd..4cf0656326 100755 --- a/scripts/proto-gen.sh +++ b/scripts/proto-gen.sh @@ -10,7 +10,7 @@ cd "$(git rev-parse --show-toplevel)" # Run inside Docker to install the correct versions of the required tools # without polluting the local system. -docker run --rm -i -v "$PWD":/w --workdir=/w golang:1.19-alpine sh <<"EOF" +docker run --rm -i -v "$PWD":/w --workdir=/w golang:1.22.4-alpine sh <<"EOF" apk add git make go install github.com/bufbuild/buf/cmd/buf diff --git a/state/execution.go b/state/execution.go index e658549d32..1ea2b999f6 100644 --- a/state/execution.go +++ b/state/execution.go @@ -114,15 +114,8 @@ func (blockExec *BlockExecutor) CreateProposalBlock( evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) - // Fetch a limited amount of valid txs maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) - // TODO(ismail): reaping the mempool has to happen in relation to a max - // allowed square size instead of (only) Gas / bytes - // maybe the mempool actually should track things separately - // meaning that CheckTx should already do the mapping: - // Tx -> Txs, Message - // https://github.com/tendermint/tendermint/issues/77 txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) var timestamp time.Time diff --git a/state/indexer/sink/psql/psql.go 
b/state/indexer/sink/psql/psql.go index 513aa2cd1e..638a37ecc1 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -90,6 +90,18 @@ func insertEvents(dbtx *sql.Tx, blockID, txID uint32, evts []abci.Event) error { txIDArg = txID } + const ( + insertEventQuery = ` + INSERT INTO ` + tableEvents + ` (block_id, tx_id, type) + VALUES ($1, $2, $3) + RETURNING rowid; + ` + insertAttributeQuery = ` + INSERT INTO ` + tableAttributes + ` (event_id, key, composite_key, value) + VALUES ($1, $2, $3, $4); + ` + ) + // Add each event to the events table, and retrieve its row ID to use when // adding any attributes the event provides. for _, evt := range evts { @@ -98,10 +110,7 @@ func insertEvents(dbtx *sql.Tx, blockID, txID uint32, evts []abci.Event) error { continue } - eid, err := queryWithID(dbtx, ` -INSERT INTO `+tableEvents+` (block_id, tx_id, type) VALUES ($1, $2, $3) - RETURNING rowid; -`, blockID, txIDArg, evt.Type) + eid, err := queryWithID(dbtx, insertEventQuery, blockID, txIDArg, evt.Type) if err != nil { return err } @@ -112,10 +121,7 @@ INSERT INTO `+tableEvents+` (block_id, tx_id, type) VALUES ($1, $2, $3) continue } compositeKey := evt.Type + "." + string(attr.Key) - if _, err := dbtx.Exec(` -INSERT INTO `+tableAttributes+` (event_id, key, composite_key, value) - VALUES ($1, $2, $3, $4); -`, eid, attr.Key, compositeKey, attr.Value); err != nil { + if _, err := dbtx.Exec(insertAttributeQuery, eid, attr.Key, compositeKey, attr.Value); err != nil { return err } } diff --git a/state/state.go b/state/state.go index 53ff02bb47..516af24119 100644 --- a/state/state.go +++ b/state/state.go @@ -24,16 +24,15 @@ var ( //----------------------------------------------------------------------------- -// InitStateVersion sets the Consensus.Block and Software versions, -// but leaves the Consensus.App version blank. -// The Consensus.App version will be set during the Handshake, once -// we hear from the app what protocol version it is running. 
-var InitStateVersion = cmtstate.Version{ - Consensus: cmtversion.Consensus{ - Block: version.BlockProtocol, - App: 0, - }, - Software: version.TMCoreSemVer, +// InitStateVersion sets the Consensus.Block, Consensus.App and Software versions +func InitStateVersion(appVersion uint64) cmtstate.Version { + return cmtstate.Version{ + Consensus: cmtversion.Consensus{ + Block: version.BlockProtocol, + App: appVersion, + }, + Software: version.TMCoreSemVer, + } } //----------------------------------------------------------------------------- @@ -332,8 +331,13 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementProposerPriority(1) } + appVersion := uint64(0) + if genDoc.ConsensusParams != nil { + appVersion = genDoc.ConsensusParams.Version.AppVersion + } + return State{ - Version: InitStateVersion, + Version: InitStateVersion(appVersion), ChainID: genDoc.ChainID, InitialHeight: genDoc.InitialHeight, diff --git a/state/state_test.go b/state/state_test.go index 4ce87ddce7..fb3eb4fbf6 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -22,6 +22,7 @@ import ( cmtproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" ) // setupTestCase does setup common to all test cases. 
@@ -74,6 +75,21 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { require.Equal(t, 0, len(state.NextValidators.Validators)) } +func TestMakeGenesisStateSetsAppVersion(t *testing.T) { + cp := types.DefaultConsensusParams() + appVersion := uint64(5) + cp.Version.AppVersion = appVersion + doc := types.GenesisDoc{ + ChainID: "dummy", + ConsensusParams: cp, + } + require.Nil(t, doc.ValidateAndComplete()) + state, err := sm.MakeGenesisState(&doc) + require.Nil(t, err) + require.Equal(t, appVersion, state.Version.Consensus.App) + require.Equal(t, version.BlockProtocol, state.Version.Consensus.Block) +} + // TestStateSaveLoad tests saving and loading State from a db. func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index b799b3acb4..364589845a 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19 +FROM golang:1.22.4 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index c81db55fc8..41d0fc4937 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. 
-FROM golang:1.20-bullseye +FROM golang:1.22.4-bullseye RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 1105cb7955..8a02ed7338 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -130,6 +130,8 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } + cmtcfg.Instrumentation.TraceType = "local" + n, err := node.NewNode(cmtcfg, privval.LoadOrGenFilePV(cmtcfg.PrivValidatorKeyFile(), cmtcfg.PrivValidatorStateFile()), nodeKey, diff --git a/test/e2e/pkg/infrastructure.go b/test/e2e/pkg/infrastructure.go index a1bec6fbf2..d829c2d96b 100644 --- a/test/e2e/pkg/infrastructure.go +++ b/test/e2e/pkg/infrastructure.go @@ -33,13 +33,11 @@ type InfrastructureData struct { // IP addresses are expected to be within. Network string `json:"network"` - // InfluxDBURL is the URL of the InfluxDB instance to use for arbitrary data - // collection. If not specified, data will not be collected. - InfluxDBURL string `json:"influxdb_url,omitempty"` + // TracePushConfig is the URL of the server to push trace data to. + TracePushConfig string `json:"trace_push_config,omitempty"` - // InfluxDBToken is the token to use when writing to the InfluxDB instance. - // Must be specified if 'influxdb-url' is specified. - InfluxDBToken string `json:"influxdb_token,omitempty"` + // TracePullAddress is the address to listen on for pulling trace data. + TracePullAddress string `json:"trace_pull_address,omitempty"` // PyroscopeURL is the URL of the pyroscope instance to use for continuous // profiling. If not specified, data will not be collected. 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 2cb9a58f4e..6819f88c17 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -106,8 +106,8 @@ type Node struct { SendNoLoad bool Prometheus bool PrometheusProxyPort uint32 - InfluxDBURL string - InfluxDBToken string + TracePushConfig string + TracePullAddress string PyroscopeURL string PyroscopeTrace bool PyroscopeProfileTypes []string @@ -209,8 +209,8 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test Perturbations: []Perturbation{}, Misbehaviors: make(map[int64]string), SendNoLoad: nodeManifest.SendNoLoad, - InfluxDBURL: ifd.InfluxDBURL, - InfluxDBToken: ifd.InfluxDBToken, + TracePushConfig: ifd.TracePushConfig, + TracePullAddress: ifd.TracePullAddress, PyroscopeURL: ifd.PyroscopeURL, PyroscopeTrace: ifd.PyroscopeTrace, PyroscopeProfileTypes: ifd.PyroscopeProfileTypes, diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 425c2fa4a0..30d0d3232f 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -78,17 +78,17 @@ func NewCLI() *CLI { return fmt.Errorf("unknown infrastructure type '%s'", inft) } - iurl, err := cmd.Flags().GetString(trace.FlagInfluxDBURL) + iurl, err := cmd.Flags().GetString(trace.FlagTracePushConfig) if err != nil { return err } - itoken, err := cmd.Flags().GetString(trace.FlagInfluxDBToken) + itoken, err := cmd.Flags().GetString(trace.FlagTracePullAddress) if err != nil { return err } - if ifd.InfluxDBURL == "" { - ifd.InfluxDBURL = iurl - ifd.InfluxDBToken = itoken + if ifd.TracePushConfig == "" { + ifd.TracePushConfig = iurl + ifd.TracePullAddress = itoken } purl, err := cmd.Flags().GetString(trace.FlagPyroscopeURL) @@ -186,9 +186,9 @@ func NewCLI() *CLI { cli.root.PersistentFlags().StringP("infrastructure-data", "", "", "path to the json file containing the infrastructure data. 
Only used if the 'infrastructure-type' is set to a value other than 'docker'") - cli.root.PersistentFlags().String(trace.FlagInfluxDBURL, "", trace.FlagInfluxDBURLDescription) + cli.root.PersistentFlags().String(trace.FlagTracePushConfig, "", trace.FlagTracePushConfigDescription) - cli.root.PersistentFlags().String(trace.FlagInfluxDBToken, "", trace.FlagInfluxDBTokenDescription) + cli.root.PersistentFlags().String(trace.FlagTracePullAddress, "", trace.FlagTracePullAddressDescription) cli.root.PersistentFlags().String(trace.FlagPyroscopeURL, "", trace.FlagPyroscopeURLDescription) diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 8fdecf6a77..f2cec6b227 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -166,10 +166,9 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second - cfg.Instrumentation.InfluxOrg = "celestia" - cfg.Instrumentation.InfluxBucket = "e2e" - cfg.Instrumentation.InfluxURL = node.InfluxDBURL - cfg.Instrumentation.InfluxToken = node.InfluxDBToken + cfg.Instrumentation.TraceType = "celestia" + cfg.Instrumentation.TracePushConfig = node.TracePushConfig + cfg.Instrumentation.TracePullAddress = node.TracePullAddress cfg.Instrumentation.PyroscopeTrace = node.PyroscopeTrace cfg.Instrumentation.PyroscopeURL = node.PyroscopeURL cfg.Instrumentation.PyroscopeProfileTypes = node.PyroscopeProfileTypes diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go index 27aa71b3b2..bcc198c668 100644 --- a/test/maverick/node/node.go +++ b/test/maverick/node/node.go @@ -427,7 +427,7 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, reactor := mempoolv1.NewReactor( config.Mempool, mp, - &trace.Client{}, + trace.NoOpTracer(), ) if config.Consensus.WaitForTxs() { mp.EnableTxsAvailable() @@ -551,7 +551,7 @@ func createTransport( ) { var ( mConnConfig = p2p.MConnConfig(config.P2P) - transport = 
p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) + transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig, trace.NoOpTracer()) connFilters = []p2p.ConnFilterFunc{} peerFilters = []p2p.PeerFilterFunc{} ) diff --git a/types/row_proof.go b/types/row_proof.go index 3127044188..2498f32f7d 100644 --- a/types/row_proof.go +++ b/types/row_proof.go @@ -25,7 +25,9 @@ type RowProof struct { // the proof fails validation. If the proof passes validation, this function // attempts to verify the proof. It returns nil if the proof is valid. func (rp RowProof) Validate(root []byte) error { - // HACKHACK performing subtraction with unsigned integers is unsafe. + if rp.EndRow < rp.StartRow { + return fmt.Errorf("end row %d cannot be less than start row %d", rp.EndRow, rp.StartRow) + } if int(rp.EndRow-rp.StartRow+1) != len(rp.RowRoots) { return fmt.Errorf("the number of rows %d must equal the number of row roots %d", int(rp.EndRow-rp.StartRow+1), len(rp.RowRoots)) } diff --git a/types/row_proof_test.go b/types/row_proof_test.go index e77ac48e0d..5026c2f19c 100644 --- a/types/row_proof_test.go +++ b/types/row_proof_test.go @@ -53,6 +53,12 @@ func TestRowProofValidate(t *testing.T) { root: incorrectRoot, wantErr: true, }, + { + name: "start row greater than end row", + rp: RowProof{StartRow: 10, EndRow: 5}, + root: root, + wantErr: true, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) {