Skip to content

Commit

Permalink
Merge branch 'develop' into fix_pos_migration
Browse files Browse the repository at this point in the history
  • Loading branch information
ze97286 authored Jan 25, 2024
2 parents 8042067 + 43228b1 commit 7fc28f0
Show file tree
Hide file tree
Showing 31 changed files with 2,998 additions and 2,199 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@
- [10459](https://github.com/vegaprotocol/vega/issues/10459) - Update `pMedian` to consider staleness of the inputs.
- [10429](https://github.com/vegaprotocol/vega/issues/10429) - Account for isolated margin mode in `EstimatePosition` endpoint.
- [10441](https://github.com/vegaprotocol/vega/issues/10441) - Remove active restore check in collateral snapshot loading, snapshot order change removes the need for it.
- [10376](https://github.com/vegaprotocol/vega/issues/10376) - Add spam protection for update profile.

### 🐛 Fixes

Expand Down Expand Up @@ -144,14 +145,18 @@
- [10407](https://github.com/vegaprotocol/vega/issues/10407) - Workaround to allow running feature test with invalid 0 mark price frequency.
- [10378](https://github.com/vegaprotocol/vega/issues/10378) - Ensure network position has price set at all times.
- [10409](https://github.com/vegaprotocol/vega/issues/10409) - Block explorer `API` failing in release `0.74.0`.
- [10417](https://github.com/vegaprotocol/vega/issues/10417) - Party margin modes `API` always errors.
- [10431](https://github.com/vegaprotocol/vega/issues/10431) - Fix source staleness validation.
- [10436](https://github.com/vegaprotocol/vega/issues/10436) - Fix source staleness validation when oracles are not defined.
- [10434](https://github.com/vegaprotocol/vega/issues/10434) - Unsubscribe oracles when market is closed.
- [10454](https://github.com/vegaprotocol/vega/issues/10454) - Fix account resolver validation to include order margin account.
- [10451](https://github.com/vegaprotocol/vega/issues/10451) - Fix get update asset bundle.
- [10480](https://github.com/vegaprotocol/vega/issues/10480) - Fix migration of position average entry price.
- [10419](https://github.com/vegaprotocol/vega/issues/10419) - Block explorer database migration is slow.
- [10470](https://github.com/vegaprotocol/vega/issues/10470) - Mark non-optional parameters as required and update documentation strings.
- [10456](https://github.com/vegaprotocol/vega/issues/10456) - Expose proper enum for `GraphQL` dispatch metric.

## 0.73.0

Expand Down
2 changes: 2 additions & 0 deletions blockexplorer/blockexplorer.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,8 @@ func (a *BlockExplorer) Run(ctx context.Context) error {
return a.portal.Serve()
})

g.Go(func() error { return a.store.Migrate(ctx) })

// Now we can do all the http 'handlers' that talk to the gateway
if err := a.grpcUI.Start(ctx); err != nil {
return fmt.Errorf("could not start grpc-ui: %w", err)
Expand Down
10 changes: 9 additions & 1 deletion blockexplorer/store/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,18 @@
package store

import (
"time"

"code.vegaprotocol.io/vega/libs/config"
)

var namedLogger = "postgres.store"

// Config holds the block explorer store settings: the Postgres connection
// plus the parameters that control the background migration of historical
// tx_results data (see store.Migrate).
type Config struct {
	Postgres config.PostgresConnection `group:"database" namespace:"postgres"`
	// MigrateData enables the chunked backfill of block_height on old rows.
	MigrateData bool `default:"true" description:"Migrate data from the old database" group:"database" namespace:"postgres"`
	// MigrateBlockDuration is the span of data migrated per batch.
	MigrateBlockDuration time.Duration `default:"1h" description:"Amount of data to migrate at a time, in duration, i.e. 1h, 4h etc." group:"database" namespace:"postgres"`
	// MigratePauseInterval is the pause between batches so the block explorer
	// is not starved while the migration runs.
	MigratePauseInterval time.Duration `default:"1m" description:"Pause migrations between dates to prevent block explorer from being blocked" group:"database" namespace:"postgres"`
}

func NewDefaultConfig() Config {
Expand All @@ -35,5 +40,8 @@ func NewDefaultConfig() Config {
Password: "vega",
ApplicationName: "vega block explorer",
},
MigrateData: true,
MigrateBlockDuration: time.Hour,
MigratePauseInterval: time.Minute,
}
}
150 changes: 130 additions & 20 deletions blockexplorer/store/migrations/0005_add_block_height_to_tx_result.sql
Original file line number Diff line number Diff line change
Expand Up @@ -5,38 +5,148 @@
-- Add block_height to tx_results. Historical rows are backfilled by the
-- application-side chunked migration (store.Migrate) rather than a single
-- huge UPDATE here, so the block explorer is not blocked while migrating.
ALTER TABLE tx_results
ADD COLUMN IF NOT EXISTS block_height BIGINT DEFAULT 0;

-- First drop any foreign key constraints that depend on the tx_results table
-- This will be restored after all the data has been migrated to the new tx_results table
ALTER TABLE events DROP constraint events_tx_id_fkey;

-- Rename the tx_results table (and its indexes) to tx_results_old so a fresh
-- tx_results table can be created below and filled incrementally.
ALTER TABLE IF EXISTS tx_results RENAME TO tx_results_old;
ALTER INDEX IF EXISTS tx_results_tx_hash_index RENAME TO tx_results_old_tx_hash_index;
ALTER INDEX IF EXISTS tx_results_submitter_block_id_index_idx RENAME TO tx_results_old_submitter_block_id_index_idx;
ALTER INDEX IF EXISTS tx_results_cmd_type_block_id_index RENAME TO tx_results_old_cmd_type_block_id_index;
ALTER INDEX IF EXISTS tx_results_cmd_type_index RENAME TO tx_results_old_cmd_type_index;

-- We need to make sure the next value in the rowid serial for the new tx_results table
-- continues where the old one leaves off otherwise we will break foreign key constraints
-- in the events table which we have had to drop temporarily and will restore once all the
-- data has been migrated.
-- +goose StatementBegin
do $$
declare
tx_results_seq_name text;
tx_results_seq_next bigint;
begin
-- get the next value of the sequence for tx_results_old
-- we will use this to reset the sequence value for the new tx_results table
-- NOTE: nextval() consumes one value from the old sequence; that is fine
-- because the old table receives no further inserts after the rename.
select nextval(pg_get_serial_sequence('tx_results_old', 'rowid'))
into tx_results_seq_next;

-- Create a new tx_results table with all the necessary fields
CREATE TABLE tx_results (
rowid BIGSERIAL PRIMARY KEY,
-- The block to which this transaction belongs.
block_id BIGINT NOT NULL REFERENCES blocks(rowid),
-- The sequential index of the transaction within the block.
index INTEGER NOT NULL,
-- When this result record was logged into the sink, in UTC.
created_at TIMESTAMPTZ NOT NULL,
-- The hex-encoded hash of the transaction.
tx_hash VARCHAR NOT NULL,
-- The protobuf wire encoding of the TxResult message.
tx_result BYTEA NOT NULL,
-- submitter and cmd_type are populated by the AFTER INSERT triggers on the
-- attributes table, recreated below.
submitter TEXT,
cmd_type TEXT,
-- block_height is kept in sync by the add_block_height_to_tx_results trigger.
block_height BIGINT DEFAULT 0,
UNIQUE (block_id, index)
);

-- Recreate the indexes the old table had, plus new block_height-keyed
-- variants so queries can filter by height without joining blocks.
CREATE INDEX tx_results_tx_hash_index ON tx_results(tx_hash);
CREATE INDEX tx_results_submitter_block_id_index_idx ON tx_results(submitter, block_id, index);
CREATE INDEX tx_results_cmd_type_block_id_index ON tx_results
USING btree (cmd_type, block_id, index);
CREATE INDEX tx_results_submitter_block_height_index_idx ON tx_results(submitter, block_height, index);
CREATE INDEX tx_results_cmd_type_block_height_index ON tx_results
USING btree (cmd_type, block_height, index);
CREATE INDEX tx_results_cmd_type_index ON tx_results(cmd_type, submitter);
CREATE INDEX tx_results_block_height_index_idx ON tx_results(block_height, index);

-- get the sequence name for the new tx_results serial
select pg_get_serial_sequence('tx_results', 'rowid')
into tx_results_seq_name;

-- restart the sequence with the current value of the sequence for tx_results_old
-- when nextval is called, we should get the restart value, which is the next value
-- in the sequence for tx_results_old
execute format('alter sequence %s restart with %s', tx_results_seq_name, tx_results_seq_next);
end;
$$;
-- +goose StatementEnd

-- Recreate views, functions and triggers associated with the original tx_results table
-- tx_events exposes each transaction's event attributes together with the
-- height of the block the transaction belongs to.
CREATE OR REPLACE VIEW tx_events AS
SELECT height, index, chain_id, type, key, composite_key, value, tx_results.created_at
FROM blocks JOIN tx_results ON (blocks.rowid = tx_results.block_id)
JOIN event_attributes ON (tx_results.rowid = event_attributes.tx_id)
WHERE event_attributes.tx_id IS NOT NULL;

-- Copies a newly-inserted 'tx.submitter' attribute value onto the
-- corresponding tx_results row (located via the owning event).
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION update_txresult_submitter()
RETURNS TRIGGER
LANGUAGE PLPGSQL AS
$$
BEGIN
UPDATE tx_results SET submitter=NEW.value
FROM events e
WHERE e.rowid = NEW.event_id
AND tx_results.rowid = e.tx_id;
RETURN NEW;
END;
$$;
-- +goose StatementEnd

DROP TRIGGER IF EXISTS update_txresult_submitter ON attributes;

-- Fires only for attributes whose composite key marks the tx submitter.
CREATE TRIGGER update_txresult_submitter AFTER INSERT ON attributes
FOR EACH ROW
WHEN (NEW.composite_key='tx.submitter')
EXECUTE function update_txresult_submitter();

-- Copies a newly-inserted 'command.type' attribute value onto the
-- corresponding tx_results row (located via the owning event).
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION update_txresult_cmd_type()
RETURNS TRIGGER
LANGUAGE PLPGSQL AS
$$
BEGIN
UPDATE tx_results SET cmd_type=NEW.value
FROM events e
WHERE e.rowid = NEW.event_id
AND tx_results.rowid = e.tx_id;

RETURN NEW;
END;
$$;
-- +goose StatementEnd

DROP TRIGGER IF EXISTS update_txresult_cmd_type ON attributes;

-- Fires only for attributes whose composite key marks the command type.
CREATE TRIGGER update_txresult_cmd_type AFTER INSERT ON attributes
FOR EACH ROW
WHEN (NEW.composite_key='command.type')
EXECUTE function update_txresult_cmd_type();

-- Keeps block_height in sync for rows inserted after this migration: when a
-- tx_results row is inserted, copy the owning block's height onto it.
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION add_block_height_to_tx_results()
RETURNS TRIGGER
LANGUAGE plpgsql AS
$$
BEGIN
UPDATE tx_results
SET block_height=b.height
FROM blocks b
WHERE b.rowid = NEW.block_id
-- restrict the update to the row that fired the trigger
AND tx_results.rowid = NEW.rowid;

RETURN NEW;
END;
$$;
-- +goose StatementEnd

CREATE TRIGGER add_block_height_to_tx_results
AFTER INSERT
ON tx_results
FOR EACH ROW
EXECUTE PROCEDURE add_block_height_to_tx_results();

-- +goose Down

-- We intentionally don't undo anything and leave things as they are for this
-- migration: the table rebuild and application-side backfill cannot be
-- reversed by simply dropping the column.
Loading

0 comments on commit 7fc28f0

Please sign in to comment.